author     Matthew Wilcox (Oracle) <willy@infradead.org>    2024-03-26 20:28:25 +0000
committer  Andrew Morton <akpm@linux-foundation.org>        2024-04-25 20:56:19 -0700
commit     5691753d73a233a97c13f7d389f15d20596d062d
tree       0614a92084ad463a434d638118097bab426ef7ee /mm
parent     b002a7b0a58aceaed0e3c5dd80d6f549df6490b2
mm: convert huge_zero_page to huge_zero_folio
With all callers of is_huge_zero_page() converted, we can now switch the
huge_zero_page itself from being a compound page to a folio.

Link: https://lkml.kernel.org/r/20240326202833.523759-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
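For orientation, here is a minimal before/after sketch of what the conversion means at a call site. The call site itself (a caller holding a vma and using vma->vm_mm) is illustrative and not part of this patch:

	/* Before this patch: the huge zero page API handed back a compound page. */
	struct page *zero_page = mm_get_huge_zero_page(vma->vm_mm);
	unsigned long pfn = page_to_pfn(zero_page);

	/* After this patch: the same API hands back a folio, so folio helpers apply. */
	struct folio *zero_folio = mm_get_huge_zero_folio(vma->vm_mm);
	unsigned long zero_pfn = folio_pfn(zero_folio);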
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c | 28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 712263e3b1f6..6d510d635b1d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -74,7 +74,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
struct shrink_control *sc);
static atomic_t huge_zero_refcount;
-struct page *huge_zero_page __read_mostly;
+struct folio *huge_zero_folio __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
unsigned long huge_anon_orders_always __read_mostly;
unsigned long huge_anon_orders_madvise __read_mostly;
@@ -192,24 +192,24 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
static bool get_huge_zero_page(void)
{
- struct page *zero_page;
+ struct folio *zero_folio;
retry:
if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
return true;
- zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
+ zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
HPAGE_PMD_ORDER);
- if (!zero_page) {
+ if (!zero_folio) {
count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
return false;
}
preempt_disable();
- if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
+ if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
preempt_enable();
- __free_pages(zero_page, compound_order(zero_page));
+ folio_put(zero_folio);
goto retry;
}
- WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));
+ WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));
/* We take additional reference here. It will be put back by shrinker */
atomic_set(&huge_zero_refcount, 2);
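Assembled from the hunk above, the allocation path reads as follows after this patch. The explanatory comments are added here, and the unchanged tail of the function (past the end of the hunk) is elided:

static bool get_huge_zero_page(void)
{
	struct folio *zero_folio;
retry:
	/* Fast path: the zero folio already exists and is still referenced. */
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return true;

	zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_folio) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return false;
	}
	preempt_disable();
	if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) {
		/* Lost the publish race: drop our folio and retry on the winner's. */
		preempt_enable();
		folio_put(zero_folio);
		goto retry;
	}
	WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio));

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	/* ... unchanged tail of the function elided ... */
}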
@@ -227,10 +227,10 @@ static void put_huge_zero_page(void)
BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}
-struct page *mm_get_huge_zero_page(struct mm_struct *mm)
+struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
{
if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
- return READ_ONCE(huge_zero_page);
+ return READ_ONCE(huge_zero_folio);
if (!get_huge_zero_page())
return NULL;
@@ -238,7 +238,7 @@ struct page *mm_get_huge_zero_page(struct mm_struct *mm)
if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
put_huge_zero_page();
- return READ_ONCE(huge_zero_page);
+ return READ_ONCE(huge_zero_folio);
}
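Putting the two hunks above together, the per-mm wrapper now reads as below. The comments are added here to spell out the reference-counting rule: the MMF_HUGE_ZERO_PAGE bit ensures each mm contributes at most one reference to the shared zero folio.

struct folio *mm_get_huge_zero_folio(struct mm_struct *mm)
{
	/* This mm already holds its single reference on the zero folio. */
	if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		return READ_ONCE(huge_zero_folio);

	if (!get_huge_zero_page())
		return NULL;

	/* Another thread of this mm set the bit first; return the extra reference. */
	if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags))
		put_huge_zero_page();

	return READ_ONCE(huge_zero_folio);
}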
void mm_put_huge_zero_page(struct mm_struct *mm)
@@ -258,10 +258,10 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
- struct page *zero_page = xchg(&huge_zero_page, NULL);
- BUG_ON(zero_page == NULL);
+ struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
+ BUG_ON(zero_folio == NULL);
WRITE_ONCE(huge_zero_pfn, ~0UL);
- __free_pages(zero_page, compound_order(zero_page));
+ folio_put(zero_folio);
return HPAGE_PMD_NR;
}
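For readability, the shrinker's scan callback after this patch, assembled from the hunk above; the remainder of the function lies outside the hunk and is elided:

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
		struct shrink_control *sc)
{
	/* Dropping 1 -> 0 means only the shrinker's own reference remained. */
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct folio *zero_folio = xchg(&huge_zero_folio, NULL);
		BUG_ON(zero_folio == NULL);
		WRITE_ONCE(huge_zero_pfn, ~0UL);
		/* folio_put() now frees the folio, replacing __free_pages(). */
		folio_put(zero_folio);
		return HPAGE_PMD_NR;
	}
	/* ... remainder of the function unchanged and elided ... */
}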
@@ -1340,7 +1340,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* since we already have a zero page to copy. It just takes a
* reference.
*/
- mm_get_huge_zero_page(dst_mm);
+ mm_get_huge_zero_folio(dst_mm);
goto out_zero_page;
}
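The final hunk only switches the call in copy_huge_pmd()'s zero-page branch. In context that branch looks roughly like the sketch below; the enclosing is_huge_zero_pmd() check comes from the surrounding kernel code rather than this hunk:

	if (is_huge_zero_pmd(pmd)) {
		/*
		 * mm_get_huge_zero_folio() will never allocate a new folio,
		 * since we already have a zero page to copy.  It just takes
		 * a reference.
		 */
		mm_get_huge_zero_folio(dst_mm);
		goto out_zero_page;
	}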