author     Vishal Moola (Oracle) <vishal.moola@gmail.com>    2023-10-20 11:33:29 -0700
committer  Andrew Morton <akpm@linux-foundation.org>         2023-10-25 16:47:14 -0700
commit     dbf85c21e4aff90912b5d7755d2b25611f9191e9
tree       6743395a1f1817af35f9df274d4dc16159f5a837 /mm/khugepaged.c
parent     5c07ebb372d66423e508ecfb8e00324f8797f072
mm/khugepaged: convert is_refcount_suitable() to use folios
Both callers of is_refcount_suitable() have been converted to use
folios, so convert it to take in a folio. Both callers only operate on
head pages of folios so mapcount/refcount conversions here are trivial.
Removes 3 calls to compound_head() and removes 315 bytes of kernel text.
Link: https://lkml.kernel.org/r/20231020183331.10770-4-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/khugepaged.c')
-rw-r--r--   mm/khugepaged.c   14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6c4b5af43371..9efd8ff68f06 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -524,15 +524,15 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte,
 	}
 }
 
-static bool is_refcount_suitable(struct page *page)
+static bool is_refcount_suitable(struct folio *folio)
 {
 	int expected_refcount;
 
-	expected_refcount = total_mapcount(page);
-	if (PageSwapCache(page))
-		expected_refcount += compound_nr(page);
+	expected_refcount = folio_mapcount(folio);
+	if (folio_test_swapcache(folio))
+		expected_refcount += folio_nr_pages(folio);
 
-	return page_count(page) == expected_refcount;
+	return folio_ref_count(folio) == expected_refcount;
 }
 
 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
@@ -625,7 +625,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 	 * but not from this process. The other process cannot write to
 	 * the page, only trigger CoW.
 	 */
-	if (!is_refcount_suitable(&folio->page)) {
+	if (!is_refcount_suitable(folio)) {
 		folio_unlock(folio);
 		result = SCAN_PAGE_COUNT;
 		goto out;
@@ -1371,7 +1371,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 	 * has excessive GUP pins (i.e. 512). Anyway the same check
 	 * will be done again later the risk seems low.
 	 */
-	if (!is_refcount_suitable(&folio->page)) {
+	if (!is_refcount_suitable(folio)) {
 		result = SCAN_PAGE_COUNT;
 		goto out_unmap;
 	}
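
For context, the sketch below restates the converted helper from the diff with comments spelling out the accounting it performs: page-table mappings and, for folios in the swap cache, one reference per page are the only references khugepaged expects, so any surplus refcount indicates an extra pin (such as a GUP reference) and the callers skip the folio with SCAN_PAGE_COUNT. This is an annotated restatement of the code above, not a substitute for the kernel source.

static bool is_refcount_suitable(struct folio *folio)
{
	int expected_refcount;

	/* Each page-table mapping of the folio holds one reference. */
	expected_refcount = folio_mapcount(folio);

	/* The swap cache holds one additional reference per page. */
	if (folio_test_swapcache(folio))
		expected_refcount += folio_nr_pages(folio);

	/*
	 * Any reference beyond the expected count (e.g. a GUP pin) means
	 * the folio is not safe to collapse; the callers then bail out
	 * with SCAN_PAGE_COUNT.
	 */
	return folio_ref_count(folio) == expected_refcount;
}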