diff options
author:    Matthew Wilcox (Oracle) <willy@infradead.org>    2023-01-11 14:29:12 +0000
committer: Andrew Morton <akpm@linux-foundation.org>        2023-02-02 22:33:00 -0800
commit:    f8baa6be0368b5d21be34e8bf071b563b0f77584 (patch)
tree:      bb06ad2d853fba9ca96cd5714a1c80da5847f49c /mm/huge_memory.c
parent:    8991de90e99755b13026b1db32d1fa52e94c6a96 (diff)
mm/huge_memory: convert get_deferred_split_queue() to take a folio
Removes a few calls to compound_head().
Link: https://lkml.kernel.org/r/20230111142915.1001531-27-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm/huge_memory.c')
 -rw-r--r--   mm/huge_memory.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7aedfe7cf5df..c23b0e01734b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -559,10 +559,11 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_MEMCG
-static inline struct deferred_split *get_deferred_split_queue(struct page *page)
+static inline
+struct deferred_split *get_deferred_split_queue(struct folio *folio)
 {
-	struct mem_cgroup *memcg = page_memcg(compound_head(page));
-	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+	struct mem_cgroup *memcg = folio_memcg(folio);
+	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
 
 	if (memcg)
 		return &memcg->deferred_split_queue;
@@ -570,9 +571,10 @@ static inline struct deferred_split *get_deferred_split_queue(struct page *page)
 	return &pgdat->deferred_split_queue;
 }
 #else
-static inline struct deferred_split *get_deferred_split_queue(struct page *page)
+static inline
+struct deferred_split *get_deferred_split_queue(struct folio *folio)
 {
-	struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));
+	struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));
 
 	return &pgdat->deferred_split_queue;
 }
@@ -2650,7 +2652,7 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
 int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
 	struct folio *folio = page_folio(page);
-	struct deferred_split *ds_queue = get_deferred_split_queue(&folio->page);
+	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
 	XA_STATE(xas, &folio->mapping->i_pages, folio->index);
 	struct anon_vma *anon_vma = NULL;
 	struct address_space *mapping = NULL;
@@ -2801,7 +2803,7 @@ out:
 void free_transhuge_page(struct page *page)
 {
 	struct folio *folio = (struct folio *)page;
-	struct deferred_split *ds_queue = get_deferred_split_queue(page);
+	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
 	unsigned long flags;
 
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
@@ -2816,7 +2818,7 @@ void free_transhuge_page(struct page *page)
 void deferred_split_huge_page(struct page *page)
 {
 	struct folio *folio = page_folio(page);
-	struct deferred_split *ds_queue = get_deferred_split_queue(page);
+	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
 #ifdef CONFIG_MEMCG
 	struct mem_cgroup *memcg = folio_memcg(folio);
 #endif