author	Alexander Duyck <alexander.h.duyck@linux.intel.com>	2020-12-15 12:34:33 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-15 14:48:04 -0800
commit	2a5e4e340b0fe0f8d402196a466887db6a270b9b (patch)
tree	71aa773a657d70b1ccf7442b3c5605d8c8356d91 /mm
parent	6168d0da2b479ce25a4647de194045de1bdd1f1d (diff)
mm/lru: introduce relock_page_lruvec()
Add relock_page_lruvec() to replace the same open-coded relock sequence repeated at several call sites; no functional change.

When testing whether a relock is needed, we can avoid RCU locking by simply comparing the page's pgdat and memcg pointers against those the lruvec is holding. Doing so avoids the extra pointer walks and accesses of the memory cgroup. In addition, the checks can be skipped entirely when lruvec is currently NULL.

[alex.shi@linux.alibaba.com: use page_memcg()]
Link: https://lkml.kernel.org/r/66d8e79d-7ec6-bfbc-1c82-bf32db3ae5b7@linux.alibaba.com
Link: https://lkml.kernel.org/r/1604566549-62481-19-git-send-email-alex.shi@linux.alibaba.com
Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Chen, Rong A" <rong.a.chen@intel.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mika Penttilä <mika.penttila@nextfour.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
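The helper itself is added in include/linux/memcontrol.h and so falls outside the diffstat below, which is limited to 'mm'. A minimal sketch of its two variants, consistent with the commit message and the call sites in this patch (illustrative, not the verbatim header change):

	/*
	 * Relock the lru_lock only if the page belongs to a different
	 * lruvec than the one currently held; otherwise keep the lock.
	 */
	static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
			struct lruvec *locked_lruvec)
	{
		if (locked_lruvec) {
			/* Pointer comparison only; no RCU locking needed. */
			if (lruvec_holds_page_lru_lock(page, locked_lruvec))
				return locked_lruvec;

			unlock_page_lruvec_irq(locked_lruvec);
		}

		return lock_page_lruvec_irq(page);
	}

	static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
			struct lruvec *locked_lruvec, unsigned long *flags)
	{
		if (locked_lruvec) {
			if (lruvec_holds_page_lru_lock(page, locked_lruvec))
				return locked_lruvec;

			unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
		}

		return lock_page_lruvec_irqsave(page, flags);
	}

Note that a NULL locked_lruvec short-circuits straight to the lock call, which is what lets the converted loops below start with lruvec = NULL and skip the checks on the first page.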
Diffstat (limited to 'mm')
-rw-r--r--	mm/mlock.c	11
-rw-r--r--	mm/swap.c	33
-rw-r--r--	mm/vmscan.c	12
3 files changed, 10 insertions, 46 deletions
diff --git a/mm/mlock.c b/mm/mlock.c
index ab164a675c25..55b3b3672977 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -277,16 +277,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
* so we can spare the get_page() here.
*/
if (TestClearPageLRU(page)) {
- struct lruvec *new_lruvec;
-
- new_lruvec = mem_cgroup_page_lruvec(page,
- page_pgdat(page));
- if (new_lruvec != lruvec) {
- if (lruvec)
- unlock_page_lruvec_irq(lruvec);
- lruvec = lock_page_lruvec_irq(page);
- }
-
+ lruvec = relock_page_lruvec_irq(page, lruvec);
del_page_from_lru_list(page, lruvec,
page_lru(page));
continue;
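All four converted call sites share the same shape: lruvec starts out NULL, each iteration relocks only when the page belongs to a different lruvec, and a single unlock runs after the loop. A condensed caller sketch of that pattern (loop body elided; a pagevec walker is assumed):

	struct lruvec *lruvec = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		/* Takes the lock, keeps it, or switches it, as needed. */
		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);

		/* ... per-page work under lruvec->lru_lock ... */
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);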
diff --git a/mm/swap.c b/mm/swap.c
index ba9fc21b24ea..2cca7141470c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -210,19 +210,12 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
- struct lruvec *new_lruvec;
/* block memcg migration during page moving between lru */
if (!TestClearPageLRU(page))
continue;
- new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
- if (lruvec != new_lruvec) {
- if (lruvec)
- unlock_page_lruvec_irqrestore(lruvec, flags);
- lruvec = lock_page_lruvec_irqsave(page, &flags);
- }
-
+ lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
(*move_fn)(page, lruvec);
SetPageLRU(page);
@@ -918,17 +911,12 @@ void release_pages(struct page **pages, int nr)
}
if (PageLRU(page)) {
- struct lruvec *new_lruvec;
-
- new_lruvec = mem_cgroup_page_lruvec(page,
- page_pgdat(page));
- if (new_lruvec != lruvec) {
- if (lruvec)
- unlock_page_lruvec_irqrestore(lruvec,
- flags);
+ struct lruvec *prev_lruvec = lruvec;
+
+ lruvec = relock_page_lruvec_irqsave(page, lruvec,
+ &flags);
+ if (prev_lruvec != lruvec)
lock_batch = 0;
- lruvec = lock_page_lruvec_irqsave(page, &flags);
- }
VM_BUG_ON_PAGE(!PageLRU(page), page);
__ClearPageLRU(page);
@@ -1033,15 +1021,8 @@ void __pagevec_lru_add(struct pagevec *pvec)
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
- struct lruvec *new_lruvec;
-
- new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
- if (lruvec != new_lruvec) {
- if (lruvec)
- unlock_page_lruvec_irqrestore(lruvec, flags);
- lruvec = lock_page_lruvec_irqsave(page, &flags);
- }
+ lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
__pagevec_lru_add_fn(page, lruvec);
}
if (lruvec)
	unlock_page_lruvec_irqrestore(lruvec, flags);
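The vmscan.c hunk below also rewrites a VM_BUG_ON_PAGE() check in terms of lruvec_holds_page_lru_lock(), another helper defined outside this mm-only diffstat. A sketch of the comparison the commit message describes, using page_memcg() per the fixup note above (illustrative; the memcg-disabled fallback and the container_of() layout are assumptions based on how per-memcg lruvecs are embedded):

	static inline bool lruvec_holds_page_lru_lock(struct page *page,
						      struct lruvec *lruvec)
	{
		pg_data_t *pgdat = page_pgdat(page);
		const struct mem_cgroup *memcg;
		struct mem_cgroup_per_node *mz;

		/* With memcg disabled there is a single lruvec per node. */
		if (mem_cgroup_disabled())
			return lruvec_pgdat(lruvec) == pgdat;

		/* Compare cached pointers; no RCU lock or memcg walk needed. */
		mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
		memcg = page_memcg(page) ? : root_mem_cgroup;

		return lruvec_pgdat(lruvec) == pgdat && mz->memcg == memcg;
	}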
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b27b5dba3fdd..60705ea598ee 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1883,8 +1883,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
* All pages were isolated from the same lruvec (and isolation
* inhibits memcg migration).
*/
- VM_BUG_ON_PAGE(mem_cgroup_page_lruvec(page, page_pgdat(page))
- != lruvec, page);
+ VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
lru = page_lru(page);
nr_pages = thp_nr_pages(page);
@@ -4273,7 +4272,6 @@ void check_move_unevictable_pages(struct pagevec *pvec)
for (i = 0; i < pvec->nr; i++) {
struct page *page = pvec->pages[i];
int nr_pages;
- struct lruvec *new_lruvec;
if (PageTransTail(page))
continue;
@@ -4285,13 +4283,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
if (!TestClearPageLRU(page))
continue;
- new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
- if (lruvec != new_lruvec) {
- if (lruvec)
- unlock_page_lruvec_irq(lruvec);
- lruvec = lock_page_lruvec_irq(page);
- }
-
+ lruvec = relock_page_lruvec_irq(page, lruvec);
if (page_evictable(page) && PageUnevictable(page)) {
enum lru_list lru = page_lru_base_type(page);