diff options
| author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2024-03-28 22:58:28 +0000 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2024-04-25 20:56:31 -0700 |
| commit | 412ad5fbe9285fd8066d3b977db0cd7fb39f671d (patch) | |
| tree | 1689ab1370dfdfd09fd147e5a1a8d8c572384eaf /mm | |
| parent | 7e8347413e5bc4d54712942dad43bfcf2501ab3b (diff) | |
mm: remove vma_address()
Convert the three remaining callers to call vma_pgoff_address() directly.
This removes an ambiguity where we'd check just one page if passed a tail
page and all N pages if passed a head page.
Also add better kernel-doc for vma_pgoff_address().
Link: https://lkml.kernel.org/r/20240328225831.1765286-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/internal.h | 23
-rw-r--r--  mm/rmap.c     | 12

2 files changed, 17 insertions, 18 deletions
diff --git a/mm/internal.h b/mm/internal.h
index cf7799e29391..f4ef48d57b1c 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -804,9 +804,14 @@ void mlock_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
-/*
- * Return the start of user virtual address at the specific offset within
- * a vma.
+/**
+ * vma_pgoff_address - Find the virtual address a page range is mapped at
+ * @pgoff: The page offset within its object.
+ * @nr_pages: The number of pages to consider.
+ * @vma: The vma which maps this object.
+ *
+ * If any page in this range is mapped by this VMA, return the first address
+ * where any of these pages appear. Otherwise, return -EFAULT.
  */
 static inline unsigned long
 vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
@@ -830,18 +835,6 @@ vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
 }
 
 /*
- * Return the start of user virtual address of a page within a vma.
- * Returns -EFAULT if all of the page is outside the range of vma.
- * If page is a compound head, the entire compound page is considered.
- */
-static inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
-{
-	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
-	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
-}
-
-/*
  * Then at what user virtual address will none of the range be found in vma?
  * Assumes that vma_address() already returned a good starting address.
  */
diff --git a/mm/rmap.c b/mm/rmap.c
index 5ee9e338d09b..4b08b1a06688 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -775,6 +775,8 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
 	struct folio *folio = page_folio(page);
+	pgoff_t pgoff;
+
 	if (folio_test_anon(folio)) {
 		struct anon_vma *page__anon_vma = folio_anon_vma(folio);
 		/*
@@ -790,7 +792,9 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 		return -EFAULT;
 	}
 
-	return vma_address(page, vma);
+	/* The !page__anon_vma above handles KSM folios */
+	pgoff = folio->index + folio_page_idx(folio, page);
+	return vma_pgoff_address(pgoff, 1, vma);
 }
 
 /*
@@ -2588,7 +2592,8 @@ static void rmap_walk_anon(struct folio *folio,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
 			pgoff_start, pgoff_end) {
 		struct vm_area_struct *vma = avc->vma;
-		unsigned long address = vma_address(&folio->page, vma);
+		unsigned long address = vma_pgoff_address(pgoff_start,
+				folio_nr_pages(folio), vma);
 
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();
@@ -2649,7 +2654,8 @@ static void rmap_walk_file(struct folio *folio,
 lookup:
 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
 			pgoff_start, pgoff_end) {
-		unsigned long address = vma_address(&folio->page, vma);
+		unsigned long address = vma_pgoff_address(pgoff_start,
+				folio_nr_pages(folio), vma);
 
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();