From c0cb54856f5bfcd7aa53d4872c2fa8b49a5cd393 Mon Sep 17 00:00:00 2001
From: yangge <yangge1116@126.com>
Date: Wed, 3 Jul 2024 20:02:33 +0800
Subject: [PATCH] mm/gup: clear the LRU flag of a page before adding to LRU batch

ANBZ: #27845

commit 33dfe9204f29b415bbc0abb1a50642d1ba94f5e9 upstream.

If a large amount of CMA memory is configured in the system (for
example, CMA memory accounts for 50% of system memory), starting a
virtual machine with device passthrough will call
pin_user_pages_remote(..., FOLL_LONGTERM, ...) to pin memory. Normally,
if a page is present and in the CMA area, pin_user_pages_remote() will
migrate the page from the CMA area to a non-CMA area because of the
FOLL_LONGTERM flag. But the current code causes the migration to fail
due to unexpected page refcounts, and eventually causes the virtual
machine to fail to start.

If a page is added to an LRU batch, its refcount increases by one;
removing the page from the LRU batch decreases it by one. Page
migration requires that the page not be referenced by anything other
than its page mapping. Before migrating a page, we should try to drain
it from the LRU batch in case it is in one; however, folio_test_lru()
is not sufficient to tell whether the page is in an LRU batch or not,
so if the page is in an LRU batch, the migration will fail.

To solve the problem above, we modify the logic of adding a page to an
LRU batch. Before adding a page to an LRU batch, we clear its LRU flag
so that we can check whether the page is in an LRU batch with
folio_test_lru(page). This is quite valuable, because we likely do not
want to blindly drain the LRU batches simply because there is some
unexpected reference on a page, as described above.
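
Concretely, each of the pagevec helpers changed below now takes its
page reference and then clears the LRU flag before queueing the page,
bailing out if the flag was already clear. A rough sketch of the common
pattern (the per-helper local_lock ordering and the unlock on the error
path differ slightly, as the diff shows; move_fn here stands for the
per-batch callback, e.g. __activate_page):

	get_page(page);
	if (!TestClearPageLRU(page)) {
		/* already in some LRU batch (or not on an LRU): skip */
		put_page(page);
		return;
	}
	/* PageLRU() now reads false until the batch is drained, so a
	 * page can sit in at most one LRU batch at a time. */
	if (pagevec_add_and_need_flush(pvec, page))
		pagevec_lru_move_fn(pvec, move_fn);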

This change makes the LRU flag of a page invisible for longer, which
may impact some programs. For example, as long as a page is in an LRU
batch, we cannot isolate it, and we cannot check whether it is an LRU
page. Further, a page can now only be in exactly one LRU batch. This
does not seem to matter much, because when a new page is allocated from
the buddy allocator and added to an LRU batch, or is isolated, its LRU
flag may also be invisible for a long time.

Hygon-SIG: commit 33dfe9204f29 upstream mm/gup: clear the LRU flag of a page before adding to LRU batch
Link: https://lkml.kernel.org/r/1720075944-27201-1-git-send-email-yangge1116@126.com
Link: https://lkml.kernel.org/r/1720008153-16035-1-git-send-email-yangge1116@126.com
Fixes: 9a4e9f3b2d73 ("mm: update get_user_pages_longterm to migrate pages allocated from CMA region")
Signed-off-by: yangge <yangge1116@126.com>
Cc: Aneesh Kumar K.V
Cc: Baolin Wang
Cc: David Hildenbrand
Cc: Barry Song <21cnbao@gmail.com>
Cc: Hugh Dickins
Signed-off-by: Andrew Morton
[ hly: Backport syntax from linux-6.x to this repo. ]
Signed-off-by: hanliyang
Cc: Hygon-arch@list.openanolis.cn
---
 mm/swap.c | 40 ++++++++++++++++++++++++++++++++--------
 1 file changed, 32 insertions(+), 8 deletions(-)

diff --git a/mm/swap.c b/mm/swap.c
index 299d09b2d285..c41880fa6dd9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -210,10 +210,6 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 
-		/* block memcg migration during page moving between lru */
-		if (!TestClearPageLRU(page))
-			continue;
-
 		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
 		(*move_fn)(page, lruvec);
 
@@ -257,11 +253,16 @@ static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
 void rotate_reclaimable_page(struct page *page)
 {
 	if (!PageLocked(page) && !PageDirty(page) &&
-	    !PageUnevictable(page) && PageLRU(page)) {
+	    !PageUnevictable(page)) {
 		struct pagevec *pvec;
 		unsigned long flags;
 
 		get_page(page);
+		if (!TestClearPageLRU(page)) {
+			put_page(page);
+			return;
+		}
+
 		local_lock_irqsave(&lru_rotate.lock, flags);
 		pvec = this_cpu_ptr(&lru_rotate.pvec);
 		if (pagevec_add_and_need_flush(pvec, page))
@@ -349,12 +350,18 @@ static bool need_activate_page_drain(int cpu)
 void activate_page(struct page *page)
 {
 	page = compound_head(page);
-	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+	if (!PageActive(page) && !PageUnevictable(page)) {
 		struct pagevec *pvec;
 
 		local_lock(&lru_pvecs.lock);
 		pvec = this_cpu_ptr(&lru_pvecs.activate_page);
 		get_page(page);
+		if (!TestClearPageLRU(page)) {
+			put_page(page);
+			local_unlock(&lru_pvecs.lock);
+			return;
+		}
+
 		if (pagevec_add_and_need_flush(pvec, page))
 			pagevec_lru_move_fn(pvec, __activate_page);
 		local_unlock(&lru_pvecs.lock);
@@ -707,6 +714,11 @@ void deactivate_file_page(struct page *page)
 	if (likely(get_page_unless_zero(page))) {
 		struct pagevec *pvec;
 
+		if (!TestClearPageLRU(page)) {
+			put_page(page);
+			return;
+		}
+
 		local_lock(&lru_pvecs.lock);
 		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
 
@@ -726,13 +738,19 @@ void deactivate_file_page(struct page *page)
  */
 void deactivate_page(struct page *page)
 {
-	if (PageLRU(page) && !PageUnevictable(page) &&
+	if (!PageUnevictable(page) &&
 	    (PageActive(page) || lru_gen_enabled())) {
 		struct pagevec *pvec;
 
 		local_lock(&lru_pvecs.lock);
 		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
 		get_page(page);
+		if (!TestClearPageLRU(page)) {
+			put_page(page);
+			local_unlock(&lru_pvecs.lock);
+			return;
+		}
+
 		if (pagevec_add_and_need_flush(pvec, page))
 			pagevec_lru_move_fn(pvec, lru_deactivate_fn);
 		local_unlock(&lru_pvecs.lock);
@@ -748,13 +766,19 @@ void deactivate_page(struct page *page)
  */
 void mark_page_lazyfree(struct page *page)
 {
-	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
+	if (PageAnon(page) && PageSwapBacked(page) &&
 	    !PageSwapCache(page) && !PageUnevictable(page)) {
 		struct pagevec *pvec;
 
 		local_lock(&lru_pvecs.lock);
 		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
 		get_page(page);
+		if (!TestClearPageLRU(page)) {
+			put_page(page);
+			local_unlock(&lru_pvecs.lock);
+			return;
+		}
+
 		if (pagevec_add_and_need_flush(pvec, page))
 			pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
 		local_unlock(&lru_pvecs.lock);
-- 
Gitee