Diffstat (limited to 'mm')
-rw-r--r--  mm/mlock.c  | 11
-rw-r--r--  mm/swap.c   | 33
-rw-r--r--  mm/vmscan.c | 12
3 files changed, 10 insertions(+), 46 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index ab164a675c25..55b3b3672977 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -277,16 +277,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
* so we can spare the get_page() here.
*/
if (TestClearPageLRU(page)) {
- struct lruvec *new_lruvec;
-
- new_lruvec = mem_cgroup_page_lruvec(page,
- page_pgdat(page));
- if (new_lruvec != lruvec) {
- if (lruvec)
- unlock_page_lruvec_irq(lruvec);
- lruvec = lock_page_lruvec_irq(page);
- }
-
+ lruvec = relock_page_lruvec_irq(page, lruvec);
del_page_from_lru_list(page, lruvec,
page_lru(page));
continue;
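
For reference, a minimal sketch of what relock_page_lruvec_irq() does,
reconstructed from the open-coded sequence deleted above: take the lru_lock
for the page's lruvec, dropping the previously held lock if it belongs to a
different lruvec. The in-tree helper in include/linux/memcontrol.h may avoid
the redundant lookup, so treat this body as illustrative only.

static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
                struct lruvec *locked_lruvec)
{
        struct lruvec *new_lruvec;

        /* Which lruvec does this page belong to right now? */
        new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
        if (new_lruvec != locked_lruvec) {
                /* Only switch locks when crossing an lruvec boundary. */
                if (locked_lruvec)
                        unlock_page_lruvec_irq(locked_lruvec);
                locked_lruvec = lock_page_lruvec_irq(page);
        }
        return locked_lruvec;
}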
diff --git a/mm/swap.c b/mm/swap.c
index ba9fc21b24ea..2cca7141470c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -210,19 +210,12 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
- struct lruvec *new_lruvec;
/* block memcg migration during page moving between lru */
if (!TestClearPageLRU(page))
continue;
- new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
- if (lruvec != new_lruvec) {
- if (lruvec)
- unlock_page_lruvec_irqrestore(lruvec, flags);
- lruvec = lock_page_lruvec_irqsave(page, &flags);
- }
-
+ lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
(*move_fn)(page, lruvec);
SetPageLRU(page);
@@ -918,17 +911,12 @@ void release_pages(struct page **pages, int nr)
}
if (PageLRU(page)) {
- struct lruvec *new_lruvec;
-
- new_lruvec = mem_cgroup_page_lruvec(page,
- page_pgdat(page));
- if (new_lruvec != lruvec) {
- if (lruvec)
- unlock_page_lruvec_irqrestore(lruvec,
- flags);
+ struct lruvec *prev_lruvec = lruvec;
+
+ lruvec = relock_page_lruvec_irqsave(page, lruvec,
+ &flags);
+ if (prev_lruvec != lruvec)
lock_batch = 0;
- lruvec = lock_page_lruvec_irqsave(page, &flags);
- }
VM_BUG_ON_PAGE(!PageLRU(page), page);
__ClearPageLRU(page);
@@ -1033,15 +1021,8 @@ void __pagevec_lru_add(struct pagevec *pvec)
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
- struct lruvec *new_lruvec;
-
- new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
- if (lruvec != new_lruvec) {
- if (lruvec)
- unlock_page_lruvec_irqrestore(lruvec, flags);
- lruvec = lock_page_lruvec_irqsave(page, &flags);
- }
+ lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
__pagevec_lru_add_fn(page, lruvec);
}
if (lruvec)
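
The irqsave flavour used throughout swap.c follows the same shape, sketched
here from the three call sites above; flags is passed by pointer so the saved
IRQ state survives the unlock/relock pair (again an illustration, not
necessarily the in-tree body):

static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
                struct lruvec *locked_lruvec, unsigned long *flags)
{
        struct lruvec *new_lruvec;

        new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
        if (new_lruvec != locked_lruvec) {
                /* Restore IRQ state from the old lock, save it anew. */
                if (locked_lruvec)
                        unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
                locked_lruvec = lock_page_lruvec_irqsave(page, flags);
        }
        return locked_lruvec;
}

Note how release_pages() additionally compares the returned lruvec against
the previous one, as the hunk above shows, so it can reset its lock_batch
counter whenever the lock changed hands.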
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b27b5dba3fdd..60705ea598ee 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1883,8 +1883,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
* All pages were isolated from the same lruvec (and isolation
* inhibits memcg migration).
*/
- VM_BUG_ON_PAGE(mem_cgroup_page_lruvec(page, page_pgdat(page))
- != lruvec, page);
+ VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
lru = page_lru(page);
nr_pages = thp_nr_pages(page);
@@ -4273,7 +4272,6 @@ void check_move_unevictable_pages(struct pagevec *pvec)
for (i = 0; i < pvec->nr; i++) {
struct page *page = pvec->pages[i];
int nr_pages;
- struct lruvec *new_lruvec;
if (PageTransTail(page))
continue;
@@ -4285,13 +4283,7 @@ void check_move_unevictable_pages(struct pagevec *pvec)
if (!TestClearPageLRU(page))
continue;
- new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
- if (lruvec != new_lruvec) {
- if (lruvec)
- unlock_page_lruvec_irq(lruvec);
- lruvec = lock_page_lruvec_irq(page);
- }
-
+ lruvec = relock_page_lruvec_irq(page, lruvec);
if (page_evictable(page) && PageUnevictable(page)) {
enum lru_list lru = page_lru_base_type(page);
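
Finally, the assertion in move_pages_to_lru() now goes through
lruvec_holds_page_lru_lock(). Judging purely from the check it replaces, it
answers "does this lruvec own the page's LRU lock?"; a naive equivalent
(the real helper likely avoids re-deriving the lruvec) would be:

static inline bool lruvec_holds_page_lru_lock(struct page *page,
                                              struct lruvec *lruvec)
{
        /* Equivalent to the open-coded comparison removed in vmscan.c. */
        return mem_cgroup_page_lruvec(page, page_pgdat(page)) == lruvec;
}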