| author | Alistair Popple <apopple@nvidia.com> | 2021-06-30 18:54:12 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-07-01 11:06:03 -0700 |
| commit | cd62734ca60dbb2ab5bb19c8d837dd9990955310 | |
| tree | 395ad43b1e316da7658d2b0ee074174735a79716 /mm | |
| parent | 4dd845b5a3e57ad07f26ef808707b064696fe34b | |
mm/rmap: split try_to_munlock from try_to_unmap
The behaviour of try_to_unmap_one() is difficult to follow because it
performs different operations based on a fairly large set of flags used in
different combinations.
TTU_MUNLOCK is one such flag. However, it is exclusively used by
try_to_munlock(), which specifies no other flags. Therefore, rather than
overloading try_to_unmap_one() with unrelated behaviour, split this out into
its own function and remove the flag.
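In concrete terms, the walk keeps the same rmap_walk_control plumbing but swaps the callback. The following is a condensed, illustrative sketch assembled from the diff below; it is not a verbatim copy of mm/rmap.c (the real functions also carry VM_BUG_ON_PAGE assertions and other details omitted here):

/* Before: munlock reused the unmap walker, selecting the behaviour
 * with the TTU_MUNLOCK flag (condensed sketch). */
void try_to_munlock(struct page *page)
{
    struct rmap_walk_control rwc = {
        .rmap_one = try_to_unmap_one,     /* overloaded walker */
        .arg = (void *)TTU_MUNLOCK,       /* flag removed by this patch */
        .done = page_not_mapped,
        .anon_lock = page_lock_anon_vma_read,
    };

    rmap_walk(page, &rwc);
}

/* After: a dedicated callback, page_mlock_one(), re-mlocks the page if a
 * VM_LOCKED vma still maps it; no TTU flag is needed (condensed sketch). */
void page_mlock(struct page *page)
{
    struct rmap_walk_control rwc = {
        .rmap_one = page_mlock_one,       /* munlock-specific callback */
        .done = page_not_mapped,
        .anon_lock = page_lock_anon_vma_read,
    };

    rmap_walk(page, &rwc);
}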
Link: https://lkml.kernel.org/r/20210616105937.23201-4-apopple@nvidia.com
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/mlock.c | 12
-rw-r--r-- | mm/rmap.c | 66
2 files changed, 55 insertions, 23 deletions
diff --git a/mm/mlock.c b/mm/mlock.c
index df590fda5688..4ab757ab6fe8 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -108,7 +108,7 @@ void mlock_vma_page(struct page *page)
 /*
  * Finish munlock after successful page isolation
  *
- * Page must be locked. This is a wrapper for try_to_munlock()
+ * Page must be locked. This is a wrapper for page_mlock()
  * and putback_lru_page() with munlock accounting.
  */
 static void __munlock_isolated_page(struct page *page)
@@ -118,7 +118,7 @@ static void __munlock_isolated_page(struct page *page)
     * and we don't need to check all the other vmas.
     */
    if (page_mapcount(page) > 1)
-       try_to_munlock(page);
+       page_mlock(page);
 
    /* Did try_to_unlock() succeed or punt? */
    if (!PageMlocked(page))
@@ -158,7 +158,7 @@ static void __munlock_isolation_failed(struct page *page)
  * munlock()ed or munmap()ed, we want to check whether other vmas hold the
  * page locked so that we can leave it on the unevictable lru list and not
  * bother vmscan with it. However, to walk the page's rmap list in
- * try_to_munlock() we must isolate the page from the LRU. If some other
+ * page_mlock() we must isolate the page from the LRU. If some other
  * task has removed the page from the LRU, we won't be able to do that.
  * So we clear the PageMlocked as we might not get another chance. If we
  * can't isolate the page, we leave it for putback_lru_page() and vmscan
@@ -168,7 +168,7 @@ unsigned int munlock_vma_page(struct page *page)
 {
    int nr_pages;
 
-   /* For try_to_munlock() and to serialize with page migration */
+   /* For page_mlock() and to serialize with page migration */
    BUG_ON(!PageLocked(page));
 
    VM_BUG_ON_PAGE(PageTail(page), page);
@@ -205,7 +205,7 @@ static int __mlock_posix_error_return(long retval)
  *
  * The fast path is available only for evictable pages with single mapping.
  * Then we can bypass the per-cpu pvec and get better performance.
- * when mapcount > 1 we need try_to_munlock() which can fail.
+ * when mapcount > 1 we need page_mlock() which can fail.
  * when !page_evictable(), we need the full redo logic of putback_lru_page to
  * avoid leaving evictable page in unevictable list.
  *
@@ -414,7 +414,7 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
  *
  * We don't save and restore VM_LOCKED here because pages are
  * still on lru. In unmap path, pages might be scanned by reclaim
- * and re-mlocked by try_to_{munlock|unmap} before we unmap and
+ * and re-mlocked by page_mlock/try_to_unmap before we unmap and
  * free them. This will result in freeing mlocked pages.
  */
 void munlock_vma_pages_range(struct vm_area_struct *vma,
diff --git a/mm/rmap.c b/mm/rmap.c
index b9986c8db524..73c0ff1c73ab 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1411,10 +1411,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
    if (flags & TTU_SYNC)
        pvmw.flags = PVMW_SYNC;
 
-   /* munlock has nothing to gain from examining un-locked vmas */
-   if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
-       return true;
-
    if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
        is_zone_device_page(page) && !is_device_private_page(page))
        return true;
@@ -1476,8 +1472,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                page_vma_mapped_walk_done(&pvmw);
                break;
            }
-           if (flags & TTU_MUNLOCK)
-               continue;
        }
 
        /* Unexpected PMD-mapped THP? */
@@ -1790,20 +1784,58 @@ void try_to_unmap(struct page *page, enum ttu_flags flags)
        rmap_walk(page, &rwc);
 }
 
+/*
+ * Walks the vma's mapping a page and mlocks the page if any locked vma's are
+ * found. Once one is found the page is locked and the scan can be terminated.
+ */
+static bool page_mlock_one(struct page *page, struct vm_area_struct *vma,
+               unsigned long address, void *unused)
+{
+   struct page_vma_mapped_walk pvmw = {
+       .page = page,
+       .vma = vma,
+       .address = address,
+   };
+
+   /* An un-locked vma doesn't have any pages to lock, continue the scan */
+   if (!(vma->vm_flags & VM_LOCKED))
+       return true;
+
+   while (page_vma_mapped_walk(&pvmw)) {
+       /*
+        * Need to recheck under the ptl to serialise with
+        * __munlock_pagevec_fill() after VM_LOCKED is cleared in
+        * munlock_vma_pages_range().
+        */
+       if (vma->vm_flags & VM_LOCKED) {
+           /* PTE-mapped THP are never mlocked */
+           if (!PageTransCompound(page))
+               mlock_vma_page(page);
+           page_vma_mapped_walk_done(&pvmw);
+       }
+
+       /*
+        * no need to continue scanning other vma's if the page has
+        * been locked.
+        */
+       return false;
+   }
+
+   return true;
+}
+
 /**
- * try_to_munlock - try to munlock a page
- * @page: the page to be munlocked
+ * page_mlock - try to mlock a page
+ * @page: the page to be mlocked
  *
- * Called from munlock code. Checks all of the VMAs mapping the page
- * to make sure nobody else has this page mlocked. The page will be
- * returned with PG_mlocked cleared if no other vmas have it mlocked.
+ * Called from munlock code. Checks all of the VMAs mapping the page and mlocks
+ * the page if any are found. The page will be returned with PG_mlocked cleared
+ * if it is not mapped by any locked vmas.
  */
-
-void try_to_munlock(struct page *page)
+void page_mlock(struct page *page)
 {
    struct rmap_walk_control rwc = {
-       .rmap_one = try_to_unmap_one,
-       .arg = (void *)TTU_MUNLOCK,
+       .rmap_one = page_mlock_one,
        .done = page_not_mapped,
        .anon_lock = page_lock_anon_vma_read,
 
@@ -1855,7 +1887,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the anon_vma struct it points to.
  *
- * When called from try_to_munlock(), the mmap_lock of the mm containing the vma
+ * When called from page_mlock(), the mmap_lock of the mm containing the vma
  * where the page was found will be held for write. So, we won't recheck
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * LOCKED.
@@ -1908,7 +1940,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
  * Find all the mappings of a page using the mapping pointer and the vma chains
  * contained in the address_space struct it points to.
  *
- * When called from try_to_munlock(), the mmap_lock of the mm containing the vma
+ * When called from page_mlock(), the mmap_lock of the mm containing the vma
  * where the page was found will be held for write. So, we won't recheck
  * vm_flags for that VMA. That should be OK, because that vma shouldn't be
  * LOCKED.
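For a usage view from the caller side, the munlock path in mm/mlock.c (first hunks above) ends up shaped roughly like this. This is a condensed sketch based on the hunks shown, not a verbatim copy of the upstream file; the accounting and putback calls are assumed unchanged by this patch:

/* Condensed sketch of the munlock caller path after this change. */
static void __munlock_isolated_page(struct page *page)
{
    /*
     * If only one vma maps the page, it is the vma being munlocked,
     * so there is no other mapping left to check.
     */
    if (page_mapcount(page) > 1)
        page_mlock(page);   /* re-mlocks the page if a VM_LOCKED vma remains */

    /* If page_mlock() did not re-mlock the page, account the munlock */
    if (!PageMlocked(page))
        count_vm_event(UNEVICTABLE_PGMUNLOCKED);

    putback_lru_page(page);
}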