From 3e32cb2e0a12b6915056ff04601cf1bb9b44f967 Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Wed, 10 Dec 2014 15:42:31 -0800
Subject: mm: memcontrol: lockless page counters

Memory is internally accounted in bytes, using spinlock-protected 64-bit
counters, even though the smallest accounting delta is a page. The counter
interface is also convoluted and does too many things.

Introduce a new lockless word-sized page counter API, then change all memory
accounting over to it. The translation from and to bytes then only happens
when interfacing with userspace.

The removed locking overhead is noticeable when scaling beyond the per-cpu
charge caches: on a 4-socket machine with 144 threads, the following test
shows the performance differences of 288 memcgs concurrently running a page
fault benchmark:

vanilla:

    18631648.500498 task-clock (msec)    # 140.643 CPUs utilized    ( +- 0.33% )
    1,380,638 context-switches           # 0.074 K/sec              ( +- 0.75% )
    24,390 cpu-migrations                # 0.001 K/sec              ( +- 8.44% )
    1,843,305,768 page-faults            # 0.099 M/sec              ( +- 0.00% )
    50,134,994,088,218 cycles            # 2.691 GHz                ( +- 0.33% )
    stalled-cycles-frontend
    stalled-cycles-backend
    8,049,712,224,651 instructions       # 0.16 insns per cycle     ( +- 0.04% )
    1,586,970,584,979 branches           # 85.176 M/sec             ( +- 0.05% )
    1,724,989,949 branch-misses          # 0.11% of all branches    ( +- 0.48% )
    132.474343877 seconds time elapsed   ( +- 0.21% )

lockless:

    12195979.037525 task-clock (msec)    # 133.480 CPUs utilized    ( +- 0.18% )
    832,850 context-switches             # 0.068 K/sec              ( +- 0.54% )
    15,624 cpu-migrations                # 0.001 K/sec              ( +- 10.17% )
    1,843,304,774 page-faults            # 0.151 M/sec              ( +- 0.00% )
    32,811,216,801,141 cycles            # 2.690 GHz                ( +- 0.18% )
    stalled-cycles-frontend
    stalled-cycles-backend
    9,999,265,091,727 instructions       # 0.30 insns per cycle     ( +- 0.10% )
    2,076,759,325,203 branches           # 170.282 M/sec            ( +- 0.12% )
    1,656,917,214 branch-misses          # 0.08% of all branches    ( +- 0.55% )
    91.369330729 seconds time elapsed    ( +- 0.45% )

On top of improved scalability, this also gets rid of the icky long long
types in the very heart of memcg, which is great for 32-bit and also makes
the code a lot more readable.

Notable differences between the old and new API:

- res_counter_charge() and res_counter_charge_nofail() become
  page_counter_try_charge() and page_counter_charge() resp. to match the
  more common kernel naming scheme of try_do()/do()

- res_counter_uncharge_until() is only ever used to cancel a local counter
  and never to uncharge bigger segments of a hierarchy, so it's replaced by
  the simpler page_counter_cancel()

- res_counter_set_limit() is replaced by page_counter_limit(), which expects
  its callers to serialize against themselves

- res_counter_memparse_write_strategy() is replaced by
  page_counter_memparse(), which rounds down to the nearest page size rather
  than up. This is more reasonable for explicitly requested hard upper
  limits.

- to keep charging lightweight, page_counter_try_charge() charges
  speculatively, only to roll back if the result exceeds the limit. Because
  of this, a failing bigger charge can temporarily lock out smaller charges
  that would otherwise succeed. The error is bounded to the difference
  between the smallest and the biggest possible charge size, so for memcg,
  this means that a failing THP charge can send base page charges into
  reclaim up to 2MB (4MB) before the limit would have been reached. This
  should be acceptable.
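To make the last point concrete, here is a minimal sketch of the speculative
charge-and-roll-back scheme described above. It uses the new API's names and
the page_counter fields that appear throughout the diff below, but it omits
the watermark and failcnt bookkeeping and is not a verbatim copy of
mm/page_counter.c:

#include <linux/atomic.h>

struct page_counter {
	atomic_long_t count;		/* pages currently charged */
	unsigned long limit;		/* hard upper limit, in pages */
	struct page_counter *parent;	/* hierarchy parent, NULL for root */
};

int page_counter_try_charge(struct page_counter *counter,
			    unsigned long nr_pages,
			    struct page_counter **fail)
{
	struct page_counter *c;

	for (c = counter; c; c = c->parent) {
		long new;

		/* Charge speculatively with one atomic add per level... */
		new = atomic_long_add_return(nr_pages, &c->count);
		if (new > c->limit) {
			/* ...and roll back if the limit was exceeded. */
			atomic_long_sub(nr_pages, &c->count);
			*fail = c;
			goto failed;
		}
	}
	return 0;

failed:
	/* Undo the speculative charges taken lower in the hierarchy. */
	for (c = counter; c != *fail; c = c->parent)
		atomic_long_sub(nr_pages, &c->count);
	return -ENOMEM;
}

The window in which an oversized charge transiently inflates the counter
before being rolled back is exactly the error that the paragraph above
bounds to the difference between the smallest and largest charge size.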
[akpm@linux-foundation.org: add includes for WARN_ON_ONCE and memparse] [akpm@linux-foundation.org: add includes for WARN_ON_ONCE, memparse, strncmp, and PAGE_SIZE] Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Acked-by: Vladimir Davydov Cc: Tejun Heo Cc: David Rientjes Cc: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 633 ++++++++++++++++++++++++++------------------------------ 1 file changed, 295 insertions(+), 338 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d6ac0e33e150..4129ad74e93b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -25,7 +25,7 @@ * GNU General Public License for more details. */ -#include +#include #include #include #include @@ -165,7 +165,7 @@ struct mem_cgroup_per_zone { struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1]; struct rb_node tree_node; /* RB tree node */ - unsigned long long usage_in_excess;/* Set to the value by which */ + unsigned long usage_in_excess;/* Set to the value by which */ /* the soft limit is exceeded*/ bool on_tree; struct mem_cgroup *memcg; /* Back pointer, we cannot */ @@ -198,7 +198,7 @@ static struct mem_cgroup_tree soft_limit_tree __read_mostly; struct mem_cgroup_threshold { struct eventfd_ctx *eventfd; - u64 threshold; + unsigned long threshold; }; /* For threshold */ @@ -284,10 +284,13 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); */ struct mem_cgroup { struct cgroup_subsys_state css; - /* - * the counter to account for memory usage - */ - struct res_counter res; + + /* Accounted resources */ + struct page_counter memory; + struct page_counter memsw; + struct page_counter kmem; + + unsigned long soft_limit; /* vmpressure notifications */ struct vmpressure vmpressure; @@ -295,15 +298,6 @@ struct mem_cgroup { /* css_online() has been completed */ int initialized; - /* - * the counter to account for mem+swap usage. - */ - struct res_counter memsw; - - /* - * the counter to account for kernel memory usage. - */ - struct res_counter kmem; /* * Should the accounting and control be hierarchical, per subtree? 
*/ @@ -650,7 +644,7 @@ static void disarm_kmem_keys(struct mem_cgroup *memcg) * This check can't live in kmem destruction function, * since the charges will outlive the cgroup */ - WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0); + WARN_ON(page_counter_read(&memcg->kmem)); } #else static void disarm_kmem_keys(struct mem_cgroup *memcg) @@ -706,7 +700,7 @@ soft_limit_tree_from_page(struct page *page) static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz, struct mem_cgroup_tree_per_zone *mctz, - unsigned long long new_usage_in_excess) + unsigned long new_usage_in_excess) { struct rb_node **p = &mctz->rb_root.rb_node; struct rb_node *parent = NULL; @@ -755,10 +749,21 @@ static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz, spin_unlock_irqrestore(&mctz->lock, flags); } +static unsigned long soft_limit_excess(struct mem_cgroup *memcg) +{ + unsigned long nr_pages = page_counter_read(&memcg->memory); + unsigned long soft_limit = ACCESS_ONCE(memcg->soft_limit); + unsigned long excess = 0; + + if (nr_pages > soft_limit) + excess = nr_pages - soft_limit; + + return excess; +} static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) { - unsigned long long excess; + unsigned long excess; struct mem_cgroup_per_zone *mz; struct mem_cgroup_tree_per_zone *mctz; @@ -769,7 +774,7 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) */ for (; memcg; memcg = parent_mem_cgroup(memcg)) { mz = mem_cgroup_page_zoneinfo(memcg, page); - excess = res_counter_soft_limit_excess(&memcg->res); + excess = soft_limit_excess(memcg); /* * We have to update the tree if mz is on RB-tree or * mem is over its softlimit. @@ -825,7 +830,7 @@ retry: * position in the tree. */ __mem_cgroup_remove_exceeded(mz, mctz); - if (!res_counter_soft_limit_excess(&mz->memcg->res) || + if (!soft_limit_excess(mz->memcg) || !css_tryget_online(&mz->memcg->css)) goto retry; done: @@ -1492,7 +1497,7 @@ int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) return inactive * inactive_ratio < active; } -#define mem_cgroup_from_res_counter(counter, member) \ +#define mem_cgroup_from_counter(counter, member) \ container_of(counter, struct mem_cgroup, member) /** @@ -1504,12 +1509,23 @@ int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) */ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) { - unsigned long long margin; + unsigned long margin = 0; + unsigned long count; + unsigned long limit; - margin = res_counter_margin(&memcg->res); - if (do_swap_account) - margin = min(margin, res_counter_margin(&memcg->memsw)); - return margin >> PAGE_SHIFT; + count = page_counter_read(&memcg->memory); + limit = ACCESS_ONCE(memcg->memory.limit); + if (count < limit) + margin = limit - count; + + if (do_swap_account) { + count = page_counter_read(&memcg->memsw); + limit = ACCESS_ONCE(memcg->memsw.limit); + if (count <= limit) + margin = min(margin, limit - count); + } + + return margin; } int mem_cgroup_swappiness(struct mem_cgroup *memcg) @@ -1644,18 +1660,15 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) rcu_read_unlock(); - pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n", - res_counter_read_u64(&memcg->res, RES_USAGE) >> 10, - res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10, - res_counter_read_u64(&memcg->res, RES_FAILCNT)); - pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n", - res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10, - res_counter_read_u64(&memcg->memsw, 
RES_LIMIT) >> 10, - res_counter_read_u64(&memcg->memsw, RES_FAILCNT)); - pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n", - res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10, - res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10, - res_counter_read_u64(&memcg->kmem, RES_FAILCNT)); + pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", + K((u64)page_counter_read(&memcg->memory)), + K((u64)memcg->memory.limit), memcg->memory.failcnt); + pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", + K((u64)page_counter_read(&memcg->memsw)), + K((u64)memcg->memsw.limit), memcg->memsw.failcnt); + pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", + K((u64)page_counter_read(&memcg->kmem)), + K((u64)memcg->kmem.limit), memcg->kmem.failcnt); for_each_mem_cgroup_tree(iter, memcg) { pr_info("Memory cgroup stats for "); @@ -1695,28 +1708,17 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg) /* * Return the memory (and swap, if configured) limit for a memcg. */ -static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) +static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg) { - u64 limit; + unsigned long limit; - limit = res_counter_read_u64(&memcg->res, RES_LIMIT); - - /* - * Do not consider swap space if we cannot swap due to swappiness - */ + limit = memcg->memory.limit; if (mem_cgroup_swappiness(memcg)) { - u64 memsw; + unsigned long memsw_limit; - limit += total_swap_pages << PAGE_SHIFT; - memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); - - /* - * If memsw is finite and limits the amount of swap space - * available to this memcg, return that limit. - */ - limit = min(limit, memsw); + memsw_limit = memcg->memsw.limit; + limit = min(limit + total_swap_pages, memsw_limit); } - return limit; } @@ -1740,7 +1742,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, } check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL); - totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1; + totalpages = mem_cgroup_get_limit(memcg) ? : 1; for_each_mem_cgroup_tree(iter, memcg) { struct css_task_iter it; struct task_struct *task; @@ -1943,7 +1945,7 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, .priority = 0, }; - excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT; + excess = soft_limit_excess(root_memcg); while (1) { victim = mem_cgroup_iter(root_memcg, victim, &reclaim); @@ -1974,7 +1976,7 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false, zone, &nr_scanned); *total_scanned += nr_scanned; - if (!res_counter_soft_limit_excess(&root_memcg->res)) + if (!soft_limit_excess(root_memcg)) break; } mem_cgroup_iter_break(root_memcg, victim); @@ -2316,33 +2318,31 @@ static DEFINE_MUTEX(percpu_charge_mutex); static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) { struct memcg_stock_pcp *stock; - bool ret = true; + bool ret = false; if (nr_pages > CHARGE_BATCH) - return false; + return ret; stock = &get_cpu_var(memcg_stock); - if (memcg == stock->cached && stock->nr_pages >= nr_pages) + if (memcg == stock->cached && stock->nr_pages >= nr_pages) { stock->nr_pages -= nr_pages; - else /* need to call res_counter_charge */ - ret = false; + ret = true; + } put_cpu_var(memcg_stock); return ret; } /* - * Returns stocks cached in percpu to res_counter and reset cached information. + * Returns stocks cached in percpu and reset cached information. 
*/ static void drain_stock(struct memcg_stock_pcp *stock) { struct mem_cgroup *old = stock->cached; if (stock->nr_pages) { - unsigned long bytes = stock->nr_pages * PAGE_SIZE; - - res_counter_uncharge(&old->res, bytes); + page_counter_uncharge(&old->memory, stock->nr_pages); if (do_swap_account) - res_counter_uncharge(&old->memsw, bytes); + page_counter_uncharge(&old->memsw, stock->nr_pages); stock->nr_pages = 0; } stock->cached = NULL; @@ -2371,7 +2371,7 @@ static void __init memcg_stock_init(void) } /* - * Cache charges(val) which is from res_counter, to local per_cpu area. + * Cache charges(val) to local per_cpu area. * This will be consumed by consume_stock() function, later. */ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) @@ -2431,8 +2431,7 @@ out: /* * Tries to drain stocked charges in other cpus. This function is asynchronous * and just put a work per cpu for draining localy on each cpu. Caller can - * expects some charges will be back to res_counter later but cannot wait for - * it. + * expects some charges will be back later but cannot wait for it. */ static void drain_all_stock_async(struct mem_cgroup *root_memcg) { @@ -2506,9 +2505,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, unsigned int batch = max(CHARGE_BATCH, nr_pages); int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; struct mem_cgroup *mem_over_limit; - struct res_counter *fail_res; + struct page_counter *counter; unsigned long nr_reclaimed; - unsigned long long size; bool may_swap = true; bool drained = false; int ret = 0; @@ -2519,16 +2517,15 @@ retry: if (consume_stock(memcg, nr_pages)) goto done; - size = batch * PAGE_SIZE; if (!do_swap_account || - !res_counter_charge(&memcg->memsw, size, &fail_res)) { - if (!res_counter_charge(&memcg->res, size, &fail_res)) + !page_counter_try_charge(&memcg->memsw, batch, &counter)) { + if (!page_counter_try_charge(&memcg->memory, batch, &counter)) goto done_restock; if (do_swap_account) - res_counter_uncharge(&memcg->memsw, size); - mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); + page_counter_uncharge(&memcg->memsw, batch); + mem_over_limit = mem_cgroup_from_counter(counter, memory); } else { - mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); + mem_over_limit = mem_cgroup_from_counter(counter, memsw); may_swap = false; } @@ -2611,32 +2608,12 @@ done: static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) { - unsigned long bytes = nr_pages * PAGE_SIZE; - if (mem_cgroup_is_root(memcg)) return; - res_counter_uncharge(&memcg->res, bytes); + page_counter_uncharge(&memcg->memory, nr_pages); if (do_swap_account) - res_counter_uncharge(&memcg->memsw, bytes); -} - -/* - * Cancel chrages in this cgroup....doesn't propagate to parent cgroup. - * This is useful when moving usage to parent cgroup. 
- */ -static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg, - unsigned int nr_pages) -{ - unsigned long bytes = nr_pages * PAGE_SIZE; - - if (mem_cgroup_is_root(memcg)) - return; - - res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes); - if (do_swap_account) - res_counter_uncharge_until(&memcg->memsw, - memcg->memsw.parent, bytes); + page_counter_uncharge(&memcg->memsw, nr_pages); } /* @@ -2760,8 +2737,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, unlock_page_lru(page, isolated); } -static DEFINE_MUTEX(set_limit_mutex); - #ifdef CONFIG_MEMCG_KMEM /* * The memcg_slab_mutex is held whenever a per memcg kmem cache is created or @@ -2804,16 +2779,17 @@ static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v) } #endif -static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) +static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, + unsigned long nr_pages) { - struct res_counter *fail_res; + struct page_counter *counter; int ret = 0; - ret = res_counter_charge(&memcg->kmem, size, &fail_res); - if (ret) + ret = page_counter_try_charge(&memcg->kmem, nr_pages, &counter); + if (ret < 0) return ret; - ret = try_charge(memcg, gfp, size >> PAGE_SHIFT); + ret = try_charge(memcg, gfp, nr_pages); if (ret == -EINTR) { /* * try_charge() chose to bypass to root due to OOM kill or @@ -2830,25 +2806,25 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) * when the allocation triggers should have been already * directed to the root cgroup in memcontrol.h */ - res_counter_charge_nofail(&memcg->res, size, &fail_res); + page_counter_charge(&memcg->memory, nr_pages); if (do_swap_account) - res_counter_charge_nofail(&memcg->memsw, size, - &fail_res); + page_counter_charge(&memcg->memsw, nr_pages); ret = 0; } else if (ret) - res_counter_uncharge(&memcg->kmem, size); + page_counter_uncharge(&memcg->kmem, nr_pages); return ret; } -static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) +static void memcg_uncharge_kmem(struct mem_cgroup *memcg, + unsigned long nr_pages) { - res_counter_uncharge(&memcg->res, size); + page_counter_uncharge(&memcg->memory, nr_pages); if (do_swap_account) - res_counter_uncharge(&memcg->memsw, size); + page_counter_uncharge(&memcg->memsw, nr_pages); /* Not down to 0 */ - if (res_counter_uncharge(&memcg->kmem, size)) + if (page_counter_uncharge(&memcg->kmem, nr_pages)) return; /* @@ -3124,19 +3100,21 @@ static void memcg_schedule_register_cache(struct mem_cgroup *memcg, int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order) { + unsigned int nr_pages = 1 << order; int res; - res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp, - PAGE_SIZE << order); + res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp, nr_pages); if (!res) - atomic_add(1 << order, &cachep->memcg_params->nr_pages); + atomic_add(nr_pages, &cachep->memcg_params->nr_pages); return res; } void __memcg_uncharge_slab(struct kmem_cache *cachep, int order) { - memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order); - atomic_sub(1 << order, &cachep->memcg_params->nr_pages); + unsigned int nr_pages = 1 << order; + + memcg_uncharge_kmem(cachep->memcg_params->memcg, nr_pages); + atomic_sub(nr_pages, &cachep->memcg_params->nr_pages); } /* @@ -3257,7 +3235,7 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order) return true; } - ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order); + ret = memcg_charge_kmem(memcg, gfp, 1 << order); if 
(!ret) *_memcg = memcg; @@ -3274,7 +3252,7 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, /* The page allocation failed. Revert */ if (!page) { - memcg_uncharge_kmem(memcg, PAGE_SIZE << order); + memcg_uncharge_kmem(memcg, 1 << order); return; } /* @@ -3307,7 +3285,7 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order) return; VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); - memcg_uncharge_kmem(memcg, PAGE_SIZE << order); + memcg_uncharge_kmem(memcg, 1 << order); } #else static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg) @@ -3485,8 +3463,12 @@ static int mem_cgroup_move_parent(struct page *page, ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent); - if (!ret) - __mem_cgroup_cancel_local_charge(child, nr_pages); + if (!ret) { + /* Take charge off the local counters */ + page_counter_cancel(&child->memory, nr_pages); + if (do_swap_account) + page_counter_cancel(&child->memsw, nr_pages); + } if (nr_pages > 1) compound_unlock_irqrestore(page, flags); @@ -3516,7 +3498,7 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, * * Returns 0 on success, -EINVAL on failure. * - * The caller must have charged to @to, IOW, called res_counter_charge() about + * The caller must have charged to @to, IOW, called page_counter_charge() about * both res and memsw, and called css_get(). */ static int mem_cgroup_move_swap_account(swp_entry_t entry, @@ -3532,7 +3514,7 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry, mem_cgroup_swap_statistics(to, true); /* * This function is only called from task migration context now. - * It postpones res_counter and refcount handling till the end + * It postpones page_counter and refcount handling till the end * of task migration(mem_cgroup_clear_mc()) for performance * improvement. But we cannot postpone css_get(to) because if * the process that has been moved to @to does swap-in, the @@ -3590,60 +3572,57 @@ void mem_cgroup_print_bad_page(struct page *page) } #endif +static DEFINE_MUTEX(memcg_limit_mutex); + static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, - unsigned long long val) + unsigned long limit) { + unsigned long curusage; + unsigned long oldusage; + bool enlarge = false; int retry_count; - int ret = 0; - int children = mem_cgroup_count_children(memcg); - u64 curusage, oldusage; - int enlarge; + int ret; /* * For keeping hierarchical_reclaim simple, how long we should retry * is depends on callers. We set our retry-count to be function * of # of children which we should visit in this loop. */ - retry_count = MEM_CGROUP_RECLAIM_RETRIES * children; + retry_count = MEM_CGROUP_RECLAIM_RETRIES * + mem_cgroup_count_children(memcg); - oldusage = res_counter_read_u64(&memcg->res, RES_USAGE); + oldusage = page_counter_read(&memcg->memory); - enlarge = 0; - while (retry_count) { + do { if (signal_pending(current)) { ret = -EINTR; break; } - /* - * Rather than hide all in some function, I do this in - * open coded manner. You see what this really does. - * We have to guarantee memcg->res.limit <= memcg->memsw.limit. 
- */ - mutex_lock(&set_limit_mutex); - if (res_counter_read_u64(&memcg->memsw, RES_LIMIT) < val) { + + mutex_lock(&memcg_limit_mutex); + if (limit > memcg->memsw.limit) { + mutex_unlock(&memcg_limit_mutex); ret = -EINVAL; - mutex_unlock(&set_limit_mutex); break; } - - if (res_counter_read_u64(&memcg->res, RES_LIMIT) < val) - enlarge = 1; - - ret = res_counter_set_limit(&memcg->res, val); - mutex_unlock(&set_limit_mutex); + if (limit > memcg->memory.limit) + enlarge = true; + ret = page_counter_limit(&memcg->memory, limit); + mutex_unlock(&memcg_limit_mutex); if (!ret) break; try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true); - curusage = res_counter_read_u64(&memcg->res, RES_USAGE); + curusage = page_counter_read(&memcg->memory); /* Usage is reduced ? */ if (curusage >= oldusage) retry_count--; else oldusage = curusage; - } + } while (retry_count); + if (!ret && enlarge) memcg_oom_recover(memcg); @@ -3651,52 +3630,53 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, } static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, - unsigned long long val) + unsigned long limit) { + unsigned long curusage; + unsigned long oldusage; + bool enlarge = false; int retry_count; - u64 oldusage, curusage; - int children = mem_cgroup_count_children(memcg); - int ret = -EBUSY; - int enlarge = 0; + int ret; /* see mem_cgroup_resize_res_limit */ - retry_count = children * MEM_CGROUP_RECLAIM_RETRIES; - oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); - while (retry_count) { + retry_count = MEM_CGROUP_RECLAIM_RETRIES * + mem_cgroup_count_children(memcg); + + oldusage = page_counter_read(&memcg->memsw); + + do { if (signal_pending(current)) { ret = -EINTR; break; } - /* - * Rather than hide all in some function, I do this in - * open coded manner. You see what this really does. - * We have to guarantee memcg->res.limit <= memcg->memsw.limit. - */ - mutex_lock(&set_limit_mutex); - if (res_counter_read_u64(&memcg->res, RES_LIMIT) > val) { + + mutex_lock(&memcg_limit_mutex); + if (limit < memcg->memory.limit) { + mutex_unlock(&memcg_limit_mutex); ret = -EINVAL; - mutex_unlock(&set_limit_mutex); break; } - if (res_counter_read_u64(&memcg->memsw, RES_LIMIT) < val) - enlarge = 1; - ret = res_counter_set_limit(&memcg->memsw, val); - mutex_unlock(&set_limit_mutex); + if (limit > memcg->memsw.limit) + enlarge = true; + ret = page_counter_limit(&memcg->memsw, limit); + mutex_unlock(&memcg_limit_mutex); if (!ret) break; try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false); - curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); + curusage = page_counter_read(&memcg->memsw); /* Usage is reduced ? */ if (curusage >= oldusage) retry_count--; else oldusage = curusage; - } + } while (retry_count); + if (!ret && enlarge) memcg_oom_recover(memcg); + return ret; } @@ -3709,7 +3689,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, unsigned long reclaimed; int loop = 0; struct mem_cgroup_tree_per_zone *mctz; - unsigned long long excess; + unsigned long excess; unsigned long nr_scanned; if (order > 0) @@ -3763,7 +3743,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, } while (1); } __mem_cgroup_remove_exceeded(mz, mctz); - excess = res_counter_soft_limit_excess(&mz->memcg->res); + excess = soft_limit_excess(mz->memcg); /* * One school of thought says that we should not add * back the node to the tree if reclaim returns 0. 
@@ -3856,7 +3836,6 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg, static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg) { int node, zid; - u64 usage; do { /* This is for making all *used* pages to be on LRU. */ @@ -3888,9 +3867,8 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg) * right after the check. RES_USAGE should be safe as we always * charge before adding to the LRU. */ - usage = res_counter_read_u64(&memcg->res, RES_USAGE) - - res_counter_read_u64(&memcg->kmem, RES_USAGE); - } while (usage > 0); + } while (page_counter_read(&memcg->memory) - + page_counter_read(&memcg->kmem) > 0); } /* @@ -3930,7 +3908,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg) /* we call try-to-free pages for make this cgroup empty */ lru_add_drain_all(); /* try to free all pages in this cgroup */ - while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) { + while (nr_retries && page_counter_read(&memcg->memory)) { int progress; if (signal_pending(current)) @@ -4001,8 +3979,8 @@ out: return retval; } -static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg, - enum mem_cgroup_stat_index idx) +static unsigned long tree_stat(struct mem_cgroup *memcg, + enum mem_cgroup_stat_index idx) { struct mem_cgroup *iter; long val = 0; @@ -4020,55 +3998,72 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) { u64 val; - if (!mem_cgroup_is_root(memcg)) { + if (mem_cgroup_is_root(memcg)) { + val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE); + val += tree_stat(memcg, MEM_CGROUP_STAT_RSS); + if (swap) + val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP); + } else { if (!swap) - return res_counter_read_u64(&memcg->res, RES_USAGE); + val = page_counter_read(&memcg->memory); else - return res_counter_read_u64(&memcg->memsw, RES_USAGE); + val = page_counter_read(&memcg->memsw); } - - /* - * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS - * as well as in MEM_CGROUP_STAT_RSS_HUGE. 
- */ - val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE); - val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS); - - if (swap) - val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP); - return val << PAGE_SHIFT; } +enum { + RES_USAGE, + RES_LIMIT, + RES_MAX_USAGE, + RES_FAILCNT, + RES_SOFT_LIMIT, +}; static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); - enum res_type type = MEMFILE_TYPE(cft->private); - int name = MEMFILE_ATTR(cft->private); + struct page_counter *counter; - switch (type) { + switch (MEMFILE_TYPE(cft->private)) { case _MEM: - if (name == RES_USAGE) - return mem_cgroup_usage(memcg, false); - return res_counter_read_u64(&memcg->res, name); + counter = &memcg->memory; + break; case _MEMSWAP: - if (name == RES_USAGE) - return mem_cgroup_usage(memcg, true); - return res_counter_read_u64(&memcg->memsw, name); + counter = &memcg->memsw; + break; case _KMEM: - return res_counter_read_u64(&memcg->kmem, name); + counter = &memcg->kmem; break; default: BUG(); } + + switch (MEMFILE_ATTR(cft->private)) { + case RES_USAGE: + if (counter == &memcg->memory) + return mem_cgroup_usage(memcg, false); + if (counter == &memcg->memsw) + return mem_cgroup_usage(memcg, true); + return (u64)page_counter_read(counter) * PAGE_SIZE; + case RES_LIMIT: + return (u64)counter->limit * PAGE_SIZE; + case RES_MAX_USAGE: + return (u64)counter->watermark * PAGE_SIZE; + case RES_FAILCNT: + return counter->failcnt; + case RES_SOFT_LIMIT: + return (u64)memcg->soft_limit * PAGE_SIZE; + default: + BUG(); + } } #ifdef CONFIG_MEMCG_KMEM /* should be called with activate_kmem_mutex held */ static int __memcg_activate_kmem(struct mem_cgroup *memcg, - unsigned long long limit) + unsigned long nr_pages) { int err = 0; int memcg_id; @@ -4115,7 +4110,7 @@ static int __memcg_activate_kmem(struct mem_cgroup *memcg, * We couldn't have accounted to this cgroup, because it hasn't got the * active bit set yet, so this should succeed. */ - err = res_counter_set_limit(&memcg->kmem, limit); + err = page_counter_limit(&memcg->kmem, nr_pages); VM_BUG_ON(err); static_key_slow_inc(&memcg_kmem_enabled_key); @@ -4131,25 +4126,27 @@ out: } static int memcg_activate_kmem(struct mem_cgroup *memcg, - unsigned long long limit) + unsigned long nr_pages) { int ret; mutex_lock(&activate_kmem_mutex); - ret = __memcg_activate_kmem(memcg, limit); + ret = __memcg_activate_kmem(memcg, nr_pages); mutex_unlock(&activate_kmem_mutex); return ret; } static int memcg_update_kmem_limit(struct mem_cgroup *memcg, - unsigned long long val) + unsigned long limit) { int ret; + mutex_lock(&memcg_limit_mutex); if (!memcg_kmem_is_active(memcg)) - ret = memcg_activate_kmem(memcg, val); + ret = memcg_activate_kmem(memcg, limit); else - ret = res_counter_set_limit(&memcg->kmem, val); + ret = page_counter_limit(&memcg->kmem, limit); + mutex_unlock(&memcg_limit_mutex); return ret; } @@ -4167,13 +4164,13 @@ static int memcg_propagate_kmem(struct mem_cgroup *memcg) * after this point, because it has at least one child already. 
*/ if (memcg_kmem_is_active(parent)) - ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX); + ret = __memcg_activate_kmem(memcg, PAGE_COUNTER_MAX); mutex_unlock(&activate_kmem_mutex); return ret; } #else static int memcg_update_kmem_limit(struct mem_cgroup *memcg, - unsigned long long val) + unsigned long limit) { return -EINVAL; } @@ -4187,110 +4184,69 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); - enum res_type type; - int name; - unsigned long long val; + unsigned long nr_pages; int ret; buf = strstrip(buf); - type = MEMFILE_TYPE(of_cft(of)->private); - name = MEMFILE_ATTR(of_cft(of)->private); + ret = page_counter_memparse(buf, &nr_pages); + if (ret) + return ret; - switch (name) { + switch (MEMFILE_ATTR(of_cft(of)->private)) { case RES_LIMIT: if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ ret = -EINVAL; break; } - /* This function does all necessary parse...reuse it */ - ret = res_counter_memparse_write_strategy(buf, &val); - if (ret) + switch (MEMFILE_TYPE(of_cft(of)->private)) { + case _MEM: + ret = mem_cgroup_resize_limit(memcg, nr_pages); break; - if (type == _MEM) - ret = mem_cgroup_resize_limit(memcg, val); - else if (type == _MEMSWAP) - ret = mem_cgroup_resize_memsw_limit(memcg, val); - else if (type == _KMEM) - ret = memcg_update_kmem_limit(memcg, val); - else - return -EINVAL; - break; - case RES_SOFT_LIMIT: - ret = res_counter_memparse_write_strategy(buf, &val); - if (ret) + case _MEMSWAP: + ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages); break; - /* - * For memsw, soft limits are hard to implement in terms - * of semantics, for now, we support soft limits for - * control without swap - */ - if (type == _MEM) - ret = res_counter_set_soft_limit(&memcg->res, val); - else - ret = -EINVAL; + case _KMEM: + ret = memcg_update_kmem_limit(memcg, nr_pages); + break; + } break; - default: - ret = -EINVAL; /* should be BUG() ? 
*/ + case RES_SOFT_LIMIT: + memcg->soft_limit = nr_pages; + ret = 0; break; } return ret ?: nbytes; } -static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg, - unsigned long long *mem_limit, unsigned long long *memsw_limit) -{ - unsigned long long min_limit, min_memsw_limit, tmp; - - min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT); - min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); - if (!memcg->use_hierarchy) - goto out; - - while (memcg->css.parent) { - memcg = mem_cgroup_from_css(memcg->css.parent); - if (!memcg->use_hierarchy) - break; - tmp = res_counter_read_u64(&memcg->res, RES_LIMIT); - min_limit = min(min_limit, tmp); - tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT); - min_memsw_limit = min(min_memsw_limit, tmp); - } -out: - *mem_limit = min_limit; - *memsw_limit = min_memsw_limit; -} - static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); - int name; - enum res_type type; + struct page_counter *counter; - type = MEMFILE_TYPE(of_cft(of)->private); - name = MEMFILE_ATTR(of_cft(of)->private); + switch (MEMFILE_TYPE(of_cft(of)->private)) { + case _MEM: + counter = &memcg->memory; + break; + case _MEMSWAP: + counter = &memcg->memsw; + break; + case _KMEM: + counter = &memcg->kmem; + break; + default: + BUG(); + } - switch (name) { + switch (MEMFILE_ATTR(of_cft(of)->private)) { case RES_MAX_USAGE: - if (type == _MEM) - res_counter_reset_max(&memcg->res); - else if (type == _MEMSWAP) - res_counter_reset_max(&memcg->memsw); - else if (type == _KMEM) - res_counter_reset_max(&memcg->kmem); - else - return -EINVAL; + page_counter_reset_watermark(counter); break; case RES_FAILCNT: - if (type == _MEM) - res_counter_reset_failcnt(&memcg->res); - else if (type == _MEMSWAP) - res_counter_reset_failcnt(&memcg->memsw); - else if (type == _KMEM) - res_counter_reset_failcnt(&memcg->kmem); - else - return -EINVAL; + counter->failcnt = 0; break; + default: + BUG(); } return nbytes; @@ -4387,6 +4343,7 @@ static inline void mem_cgroup_lru_names_not_uptodate(void) static int memcg_stat_show(struct seq_file *m, void *v) { struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + unsigned long memory, memsw; struct mem_cgroup *mi; unsigned int i; @@ -4406,14 +4363,16 @@ static int memcg_stat_show(struct seq_file *m, void *v) mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); /* Hierarchical information */ - { - unsigned long long limit, memsw_limit; - memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit); - seq_printf(m, "hierarchical_memory_limit %llu\n", limit); - if (do_swap_account) - seq_printf(m, "hierarchical_memsw_limit %llu\n", - memsw_limit); + memory = memsw = PAGE_COUNTER_MAX; + for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { + memory = min(memory, mi->memory.limit); + memsw = min(memsw, mi->memsw.limit); } + seq_printf(m, "hierarchical_memory_limit %llu\n", + (u64)memory * PAGE_SIZE); + if (do_swap_account) + seq_printf(m, "hierarchical_memsw_limit %llu\n", + (u64)memsw * PAGE_SIZE); for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { long long val = 0; @@ -4497,7 +4456,7 @@ static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) { struct mem_cgroup_threshold_ary *t; - u64 usage; + unsigned long usage; int i; rcu_read_lock(); @@ -4596,10 +4555,11 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, { struct mem_cgroup_thresholds 
*thresholds; struct mem_cgroup_threshold_ary *new; - u64 threshold, usage; + unsigned long threshold; + unsigned long usage; int i, size, ret; - ret = res_counter_memparse_write_strategy(args, &threshold); + ret = page_counter_memparse(args, &threshold); if (ret) return ret; @@ -4689,7 +4649,7 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, { struct mem_cgroup_thresholds *thresholds; struct mem_cgroup_threshold_ary *new; - u64 usage; + unsigned long usage; int i, j, size; mutex_lock(&memcg->thresholds_lock); @@ -4883,7 +4843,7 @@ static void kmem_cgroup_css_offline(struct mem_cgroup *memcg) memcg_kmem_mark_dead(memcg); - if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0) + if (page_counter_read(&memcg->kmem)) return; if (memcg_kmem_test_and_clear_dead(memcg)) @@ -5363,9 +5323,9 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) */ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) { - if (!memcg->res.parent) + if (!memcg->memory.parent) return NULL; - return mem_cgroup_from_res_counter(memcg->res.parent, res); + return mem_cgroup_from_counter(memcg->memory.parent, memory); } EXPORT_SYMBOL(parent_mem_cgroup); @@ -5410,9 +5370,9 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) /* root ? */ if (parent_css == NULL) { root_mem_cgroup = memcg; - res_counter_init(&memcg->res, NULL); - res_counter_init(&memcg->memsw, NULL); - res_counter_init(&memcg->kmem, NULL); + page_counter_init(&memcg->memory, NULL); + page_counter_init(&memcg->memsw, NULL); + page_counter_init(&memcg->kmem, NULL); } memcg->last_scanned_node = MAX_NUMNODES; @@ -5451,18 +5411,18 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) memcg->swappiness = mem_cgroup_swappiness(parent); if (parent->use_hierarchy) { - res_counter_init(&memcg->res, &parent->res); - res_counter_init(&memcg->memsw, &parent->memsw); - res_counter_init(&memcg->kmem, &parent->kmem); + page_counter_init(&memcg->memory, &parent->memory); + page_counter_init(&memcg->memsw, &parent->memsw); + page_counter_init(&memcg->kmem, &parent->kmem); /* * No need to take a reference to the parent because cgroup * core guarantees its existence. */ } else { - res_counter_init(&memcg->res, NULL); - res_counter_init(&memcg->memsw, NULL); - res_counter_init(&memcg->kmem, NULL); + page_counter_init(&memcg->memory, NULL); + page_counter_init(&memcg->memsw, NULL); + page_counter_init(&memcg->kmem, NULL); /* * Deeper hierachy with use_hierarchy == false doesn't make * much sense so let cgroup subsystem know about this @@ -5544,7 +5504,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css) /* * XXX: css_offline() would be where we should reparent all * memory to prepare the cgroup for destruction. However, - * memcg does not do css_tryget_online() and res_counter charging + * memcg does not do css_tryget_online() and page_counter charging * under the same RCU lock region, which means that charging * could race with offlining. 
Offlining only happens to * cgroups with no tasks in them but charges can show up @@ -5564,7 +5524,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css) * call_rcu() * offline_css() * reparent_charges() - * res_counter_charge() + * page_counter_try_charge() * css_put() * css_free() * pc->mem_cgroup = dead memcg @@ -5599,10 +5559,10 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); - mem_cgroup_resize_limit(memcg, ULLONG_MAX); - mem_cgroup_resize_memsw_limit(memcg, ULLONG_MAX); - memcg_update_kmem_limit(memcg, ULLONG_MAX); - res_counter_set_soft_limit(&memcg->res, ULLONG_MAX); + mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); + mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); + memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); + memcg->soft_limit = 0; } #ifdef CONFIG_MMU @@ -5916,19 +5876,18 @@ static void __mem_cgroup_clear_mc(void) if (mc.moved_swap) { /* uncharge swap account from the old cgroup */ if (!mem_cgroup_is_root(mc.from)) - res_counter_uncharge(&mc.from->memsw, - PAGE_SIZE * mc.moved_swap); - - for (i = 0; i < mc.moved_swap; i++) - css_put(&mc.from->css); + page_counter_uncharge(&mc.from->memsw, mc.moved_swap); /* - * we charged both to->res and to->memsw, so we should - * uncharge to->res. + * we charged both to->memory and to->memsw, so we + * should uncharge to->memory. */ if (!mem_cgroup_is_root(mc.to)) - res_counter_uncharge(&mc.to->res, - PAGE_SIZE * mc.moved_swap); + page_counter_uncharge(&mc.to->memory, mc.moved_swap); + + for (i = 0; i < mc.moved_swap; i++) + css_put(&mc.from->css); + /* we've already done css_get(mc.to) */ mc.moved_swap = 0; } @@ -6294,7 +6253,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry) memcg = mem_cgroup_lookup(id); if (memcg) { if (!mem_cgroup_is_root(memcg)) - res_counter_uncharge(&memcg->memsw, PAGE_SIZE); + page_counter_uncharge(&memcg->memsw, 1); mem_cgroup_swap_statistics(memcg, false); css_put(&memcg->css); } @@ -6460,11 +6419,9 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, if (!mem_cgroup_is_root(memcg)) { if (nr_mem) - res_counter_uncharge(&memcg->res, - nr_mem * PAGE_SIZE); + page_counter_uncharge(&memcg->memory, nr_mem); if (nr_memsw) - res_counter_uncharge(&memcg->memsw, - nr_memsw * PAGE_SIZE); + page_counter_uncharge(&memcg->memsw, nr_memsw); memcg_oom_recover(memcg); } -- cgit v1.2.3-70-g09d2 From 5ac8fb31ad2ebd6492d1c5e8f31846b532f03945 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:42:39 -0800 Subject: mm: memcontrol: convert reclaim iterator to simple css refcounting The memcg reclaim iterators use a complicated weak reference scheme to prevent pinning cgroups indefinitely in the absence of memory pressure. However, during the ongoing cgroup core rework, css lifetime has been decoupled such that a pinned css no longer interferes with removal of the user-visible cgroup, and all this complexity is now unnecessary. 
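The shared-iterator protocol that replaces the weak references boils down to
very little code. The following is a condensed, illustrative sketch of the
caching that the diff below does inline in mem_cgroup_iter(); the helper
names are made up for the sketch, but the protocol matches the patch: the
cached position holds a plain css reference, readers revalidate it with
css_tryget(), and writers install a new position with cmpxchg() so that only
the winning updater transfers references.

static struct mem_cgroup *iter_load_position(struct reclaim_iter *iter)
{
	struct mem_cgroup *pos;

	do {
		pos = ACCESS_ONCE(iter->position);
		/*
		 * A racing update may swap in a new position and drop
		 * the cache's reference; css_tryget() then fails and
		 * the loop rereads the updated position.
		 */
	} while (pos && !css_tryget(&pos->css));

	return pos;	/* caller now holds a reference on pos, if any */
}

static void iter_store_position(struct reclaim_iter *iter,
				struct mem_cgroup *pos,
				struct mem_cgroup *next)
{
	/* Only the winning updater transfers the cache's reference. */
	if (cmpxchg(&iter->position, pos, next) == pos) {
		if (next)
			css_get(&next->css);	/* now owned by the cache */
		if (pos)
			css_put(&pos->css);	/* cache's old reference */
	}
	if (pos)
		css_put(&pos->css);	/* pairs with css_tryget() above */
}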
[mhocko@suse.cz: ensure that the cached reference is always released] Signed-off-by: Johannes Weiner Cc: Vladimir Davydov Cc: David Rientjes Cc: Tejun Heo Signed-off-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 258 ++++++++++++++++++-------------------------------------- 1 file changed, 84 insertions(+), 174 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 4129ad74e93b..c3cd3bb77dd9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -143,14 +143,8 @@ struct mem_cgroup_stat_cpu { unsigned long targets[MEM_CGROUP_NTARGETS]; }; -struct mem_cgroup_reclaim_iter { - /* - * last scanned hierarchy member. Valid only if last_dead_count - * matches memcg->dead_count of the hierarchy root group. - */ - struct mem_cgroup *last_visited; - int last_dead_count; - +struct reclaim_iter { + struct mem_cgroup *position; /* scan generation, increased every round-trip */ unsigned int generation; }; @@ -162,7 +156,7 @@ struct mem_cgroup_per_zone { struct lruvec lruvec; unsigned long lru_size[NR_LRU_LISTS]; - struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1]; + struct reclaim_iter iter[DEF_PRIORITY + 1]; struct rb_node tree_node; /* RB tree node */ unsigned long usage_in_excess;/* Set to the value by which */ @@ -346,7 +340,6 @@ struct mem_cgroup { struct mem_cgroup_stat_cpu nocpu_base; spinlock_t pcp_counter_lock; - atomic_t dead_count; #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) struct cg_proto tcp_mem; #endif @@ -1067,122 +1060,6 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) return memcg; } -/* - * Returns a next (in a pre-order walk) alive memcg (with elevated css - * ref. count) or NULL if the whole root's subtree has been visited. - * - * helper function to be used by mem_cgroup_iter - */ -static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root, - struct mem_cgroup *last_visited) -{ - struct cgroup_subsys_state *prev_css, *next_css; - - prev_css = last_visited ? &last_visited->css : NULL; -skip_node: - next_css = css_next_descendant_pre(prev_css, &root->css); - - /* - * Even if we found a group we have to make sure it is - * alive. css && !memcg means that the groups should be - * skipped and we should continue the tree walk. - * last_visited css is safe to use because it is - * protected by css_get and the tree walk is rcu safe. - * - * We do not take a reference on the root of the tree walk - * because we might race with the root removal when it would - * be the only node in the iterated hierarchy and mem_cgroup_iter - * would end up in an endless loop because it expects that at - * least one valid node will be returned. Root cannot disappear - * because caller of the iterator should hold it already so - * skipping css reference should be safe. - */ - if (next_css) { - struct mem_cgroup *memcg = mem_cgroup_from_css(next_css); - - if (next_css == &root->css) - return memcg; - - if (css_tryget_online(next_css)) { - /* - * Make sure the memcg is initialized: - * mem_cgroup_css_online() orders the the - * initialization against setting the flag. - */ - if (smp_load_acquire(&memcg->initialized)) - return memcg; - css_put(next_css); - } - - prev_css = next_css; - goto skip_node; - } - - return NULL; -} - -static void mem_cgroup_iter_invalidate(struct mem_cgroup *root) -{ - /* - * When a group in the hierarchy below root is destroyed, the - * hierarchy iterator can no longer be trusted since it might - * have pointed to the destroyed group. 
Invalidate it. - */ - atomic_inc(&root->dead_count); -} - -static struct mem_cgroup * -mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter, - struct mem_cgroup *root, - int *sequence) -{ - struct mem_cgroup *position = NULL; - /* - * A cgroup destruction happens in two stages: offlining and - * release. They are separated by a RCU grace period. - * - * If the iterator is valid, we may still race with an - * offlining. The RCU lock ensures the object won't be - * released, tryget will fail if we lost the race. - */ - *sequence = atomic_read(&root->dead_count); - if (iter->last_dead_count == *sequence) { - smp_rmb(); - position = iter->last_visited; - - /* - * We cannot take a reference to root because we might race - * with root removal and returning NULL would end up in - * an endless loop on the iterator user level when root - * would be returned all the time. - */ - if (position && position != root && - !css_tryget_online(&position->css)) - position = NULL; - } - return position; -} - -static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter, - struct mem_cgroup *last_visited, - struct mem_cgroup *new_position, - struct mem_cgroup *root, - int sequence) -{ - /* root reference counting symmetric to mem_cgroup_iter_load */ - if (last_visited && last_visited != root) - css_put(&last_visited->css); - /* - * We store the sequence count from the time @last_visited was - * loaded successfully instead of rereading it here so that we - * don't lose destruction events in between. We could have - * raced with the destruction of @new_position after all. - */ - iter->last_visited = new_position; - smp_wmb(); - iter->last_dead_count = sequence; -} - /** * mem_cgroup_iter - iterate over memory cgroup hierarchy * @root: hierarchy root @@ -1204,8 +1081,10 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, struct mem_cgroup *prev, struct mem_cgroup_reclaim_cookie *reclaim) { + struct reclaim_iter *uninitialized_var(iter); + struct cgroup_subsys_state *css = NULL; struct mem_cgroup *memcg = NULL; - struct mem_cgroup *last_visited = NULL; + struct mem_cgroup *pos = NULL; if (mem_cgroup_disabled()) return NULL; @@ -1214,50 +1093,101 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, root = root_mem_cgroup; if (prev && !reclaim) - last_visited = prev; + pos = prev; if (!root->use_hierarchy && root != root_mem_cgroup) { if (prev) - goto out_css_put; + goto out; return root; } rcu_read_lock(); - while (!memcg) { - struct mem_cgroup_reclaim_iter *uninitialized_var(iter); - int uninitialized_var(seq); - - if (reclaim) { - struct mem_cgroup_per_zone *mz; - - mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone); - iter = &mz->reclaim_iter[reclaim->priority]; - if (prev && reclaim->generation != iter->generation) { - iter->last_visited = NULL; - goto out_unlock; - } - last_visited = mem_cgroup_iter_load(iter, root, &seq); + if (reclaim) { + struct mem_cgroup_per_zone *mz; + + mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone); + iter = &mz->iter[reclaim->priority]; + + if (prev && reclaim->generation != iter->generation) + goto out_unlock; + + do { + pos = ACCESS_ONCE(iter->position); + /* + * A racing update may change the position and + * put the last reference, hence css_tryget(), + * or retry to see the updated position. 
+ */ + } while (pos && !css_tryget(&pos->css)); + } + + if (pos) + css = &pos->css; + + for (;;) { + css = css_next_descendant_pre(css, &root->css); + if (!css) { + /* + * Reclaimers share the hierarchy walk, and a + * new one might jump in right at the end of + * the hierarchy - make sure they see at least + * one group and restart from the beginning. + */ + if (!prev) + continue; + break; } - memcg = __mem_cgroup_iter_next(root, last_visited); + /* + * Verify the css and acquire a reference. The root + * is provided by the caller, so we know it's alive + * and kicking, and don't take an extra reference. + */ + memcg = mem_cgroup_from_css(css); - if (reclaim) { - mem_cgroup_iter_update(iter, last_visited, memcg, root, - seq); + if (css == &root->css) + break; - if (!memcg) - iter->generation++; - else if (!prev && memcg) - reclaim->generation = iter->generation; + if (css_tryget_online(css)) { + /* + * Make sure the memcg is initialized: + * mem_cgroup_css_online() orders the the + * initialization against setting the flag. + */ + if (smp_load_acquire(&memcg->initialized)) + break; + + css_put(css); } - if (prev && !memcg) - goto out_unlock; + memcg = NULL; + } + + if (reclaim) { + if (cmpxchg(&iter->position, pos, memcg) == pos) { + if (memcg) + css_get(&memcg->css); + if (pos) + css_put(&pos->css); + } + + /* + * pairs with css_tryget when dereferencing iter->position + * above. + */ + if (pos) + css_put(&pos->css); + + if (!memcg) + iter->generation++; + else if (!prev) + reclaim->generation = iter->generation; } + out_unlock: rcu_read_unlock(); -out_css_put: +out: if (prev && prev != root) css_put(&prev->css); @@ -5447,24 +5377,6 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) return 0; } -/* - * Announce all parents that a group from their hierarchy is gone. - */ -static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg) -{ - struct mem_cgroup *parent = memcg; - - while ((parent = parent_mem_cgroup(parent))) - mem_cgroup_iter_invalidate(parent); - - /* - * if the root memcg is not hierarchical we have to check it - * explicitely. - */ - if (!root_mem_cgroup->use_hierarchy) - mem_cgroup_iter_invalidate(root_mem_cgroup); -} - static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); @@ -5485,8 +5397,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) kmem_cgroup_css_offline(memcg); - mem_cgroup_invalidate_reclaim_iterators(memcg); - /* * This requires that offlining is serialized. Right now that is * guaranteed because css_killed_work_fn() holds the cgroup_mutex. -- cgit v1.2.3-70-g09d2 From e8ea14cc6eadfe2ea63e9989e16e62625a2619f8 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:42:42 -0800 Subject: mm: memcontrol: take a css reference for each charged page Charges currently pin the css indirectly by playing tricks during css_offline(): user pages stall the offlining process until all of them have been reparented, whereas kmemcg acquires a keep-alive reference if outstanding kernel pages are detected at that point. In preparation for removing all this complexity, make the pinning explicit and acquire a css references for every charged page. 
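In practice the explicit pinning pairs up as sketched below. This is
illustrative only: it ignores the per-cpu stock batching and the root-cgroup
and swap-accounting special cases visible in the diff, and the helper names
are invented for the sketch.

/* Illustrative only: every charged page holds one css reference. */
static int charge_sketch(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct page_counter *counter;
	int ret;

	ret = page_counter_try_charge(&memcg->memory, nr_pages, &counter);
	if (ret)
		return ret;

	/* Pin the css once per charged page, taken in a single batch. */
	css_get_many(&memcg->css, nr_pages);
	return 0;
}

static void uncharge_sketch(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	page_counter_uncharge(&memcg->memory, nr_pages);

	/* Drop the references taken at charge time, also in one batch. */
	css_put_many(&memcg->css, nr_pages);
}

css_get_many()/css_put_many() are thin wrappers around the
percpu_ref_get_many()/percpu_ref_put_many() primitives added in the
include/linux changes below, so a batched pin costs a single per-cpu add
rather than nr_pages individual increments.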
Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Acked-by: Michal Hocko Cc: David Rientjes Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cgroup.h | 26 +++++++++++++++++++++++ include/linux/percpu-refcount.h | 47 +++++++++++++++++++++++++++++++++-------- mm/memcontrol.c | 21 ++++++++++++++---- 3 files changed, 81 insertions(+), 13 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 1d5196889048..9f96b25965c2 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -112,6 +112,19 @@ static inline void css_get(struct cgroup_subsys_state *css) percpu_ref_get(&css->refcnt); } +/** + * css_get_many - obtain references on the specified css + * @css: target css + * @n: number of references to get + * + * The caller must already have a reference. + */ +static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_get_many(&css->refcnt, n); +} + /** * css_tryget - try to obtain a reference on the specified css * @css: target css @@ -159,6 +172,19 @@ static inline void css_put(struct cgroup_subsys_state *css) percpu_ref_put(&css->refcnt); } +/** + * css_put_many - put css references + * @css: target css + * @n: number of references to put + * + * Put references obtained via css_get() and css_tryget_online(). + */ +static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_put_many(&css->refcnt, n); +} + /* bits in struct cgroup flags field */ enum { /* Control Group requires release notifications to userspace */ diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 51ce60c35f4c..530b249f7ea4 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -147,27 +147,41 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, } /** - * percpu_ref_get - increment a percpu refcount + * percpu_ref_get_many - increment a percpu refcount * @ref: percpu_ref to get + * @nr: number of references to get * - * Analagous to atomic_long_inc(). + * Analogous to atomic_long_add(). * * This function is safe to call as long as @ref is between init and exit. */ -static inline void percpu_ref_get(struct percpu_ref *ref) +static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) - this_cpu_inc(*percpu_count); + this_cpu_add(*percpu_count, nr); else - atomic_long_inc(&ref->count); + atomic_long_add(nr, &ref->count); rcu_read_unlock_sched(); } +/** + * percpu_ref_get - increment a percpu refcount + * @ref: percpu_ref to get + * + * Analagous to atomic_long_inc(). + * + * This function is safe to call as long as @ref is between init and exit. + */ +static inline void percpu_ref_get(struct percpu_ref *ref) +{ + percpu_ref_get_many(ref, 1); +} + /** * percpu_ref_tryget - try to increment a percpu refcount * @ref: percpu_ref to try-get @@ -231,28 +245,43 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) } /** - * percpu_ref_put - decrement a percpu refcount + * percpu_ref_put_many - decrement a percpu refcount * @ref: percpu_ref to put + * @nr: number of references to put * * Decrement the refcount, and if 0, call the release function (which was passed * to percpu_ref_init()) * * This function is safe to call as long as @ref is between init and exit. 
*/ -static inline void percpu_ref_put(struct percpu_ref *ref) +static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) - this_cpu_dec(*percpu_count); - else if (unlikely(atomic_long_dec_and_test(&ref->count))) + this_cpu_sub(*percpu_count, nr); + else if (unlikely(atomic_long_sub_and_test(nr, &ref->count))) ref->release(ref); rcu_read_unlock_sched(); } +/** + * percpu_ref_put - decrement a percpu refcount + * @ref: percpu_ref to put + * + * Decrement the refcount, and if 0, call the release function (which was passed + * to percpu_ref_init()) + * + * This function is safe to call as long as @ref is between init and exit. + */ +static inline void percpu_ref_put(struct percpu_ref *ref) +{ + percpu_ref_put_many(ref, 1); +} + /** * percpu_ref_is_zero - test whether a percpu refcount reached zero * @ref: percpu_ref to test diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c3cd3bb77dd9..f69da2ac6323 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2273,6 +2273,7 @@ static void drain_stock(struct memcg_stock_pcp *stock) page_counter_uncharge(&old->memory, stock->nr_pages); if (do_swap_account) page_counter_uncharge(&old->memsw, stock->nr_pages); + css_put_many(&old->css, stock->nr_pages); stock->nr_pages = 0; } stock->cached = NULL; @@ -2530,6 +2531,7 @@ bypass: return -EINTR; done_restock: + css_get_many(&memcg->css, batch); if (batch > nr_pages) refill_stock(memcg, batch - nr_pages); done: @@ -2544,6 +2546,8 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) page_counter_uncharge(&memcg->memory, nr_pages); if (do_swap_account) page_counter_uncharge(&memcg->memsw, nr_pages); + + css_put_many(&memcg->css, nr_pages); } /* @@ -2739,6 +2743,7 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, page_counter_charge(&memcg->memory, nr_pages); if (do_swap_account) page_counter_charge(&memcg->memsw, nr_pages); + css_get_many(&memcg->css, nr_pages); ret = 0; } else if (ret) page_counter_uncharge(&memcg->kmem, nr_pages); @@ -2754,8 +2759,10 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, page_counter_uncharge(&memcg->memsw, nr_pages); /* Not down to 0 */ - if (page_counter_uncharge(&memcg->kmem, nr_pages)) + if (page_counter_uncharge(&memcg->kmem, nr_pages)) { + css_put_many(&memcg->css, nr_pages); return; + } /* * Releases a reference taken in kmem_cgroup_css_offline in case @@ -2767,6 +2774,8 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, */ if (memcg_kmem_test_and_clear_dead(memcg)) css_put(&memcg->css); + + css_put_many(&memcg->css, nr_pages); } /* @@ -3394,10 +3403,13 @@ static int mem_cgroup_move_parent(struct page *page, ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent); if (!ret) { + if (!mem_cgroup_is_root(parent)) + css_get_many(&parent->css, nr_pages); /* Take charge off the local counters */ page_counter_cancel(&child->memory, nr_pages); if (do_swap_account) page_counter_cancel(&child->memsw, nr_pages); + css_put_many(&child->css, nr_pages); } if (nr_pages > 1) @@ -5767,7 +5779,6 @@ static void __mem_cgroup_clear_mc(void) { struct mem_cgroup *from = mc.from; struct mem_cgroup *to = mc.to; - int i; /* we must uncharge all the leftover precharges from mc.to */ if (mc.precharge) { @@ -5795,8 +5806,7 @@ static void __mem_cgroup_clear_mc(void) if (!mem_cgroup_is_root(mc.to)) page_counter_uncharge(&mc.to->memory, mc.moved_swap); - for (i = 0; i < mc.moved_swap; i++) - 
css_put(&mc.from->css); + css_put_many(&mc.from->css, mc.moved_swap); /* we've already done css_get(mc.to) */ mc.moved_swap = 0; @@ -6343,6 +6353,9 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, __this_cpu_add(memcg->stat->nr_page_events, nr_anon + nr_file); memcg_check_events(memcg, dummy_page); local_irq_restore(flags); + + if (!mem_cgroup_is_root(memcg)) + css_put_many(&memcg->css, max(nr_mem, nr_memsw)); } static void uncharge_list(struct list_head *page_list) -- cgit v1.2.3-70-g09d2 From 64f2199389414341ed3a570663f23616c131ba25 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:42:45 -0800 Subject: mm: memcontrol: remove obsolete kmemcg pinning tricks As charges now pin the css explicitely, there is no more need for kmemcg to acquire a proxy reference for outstanding pages during offlining, or maintain state to identify such "dead" groups. This was the last user of the uncharge functions' return values, so remove them as well. Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Acked-by: Michal Hocko Cc: David Rientjes Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page_counter.h | 4 +-- mm/memcontrol.c | 74 +------------------------------------------- mm/page_counter.c | 23 +++----------- 3 files changed, 7 insertions(+), 94 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index 7cce3be99ff3..955421575d16 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -34,12 +34,12 @@ static inline unsigned long page_counter_read(struct page_counter *counter) return atomic_long_read(&counter->count); } -int page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); +void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); int page_counter_try_charge(struct page_counter *counter, unsigned long nr_pages, struct page_counter **fail); -int page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); +void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); int page_counter_limit(struct page_counter *counter, unsigned long limit); int page_counter_memparse(const char *buf, unsigned long *nr_pages); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f69da2ac6323..0e6484ea268d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -369,7 +369,6 @@ struct mem_cgroup { /* internal only representation about the status of kmem accounting. */ enum { KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */ - KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */ }; #ifdef CONFIG_MEMCG_KMEM @@ -383,22 +382,6 @@ static bool memcg_kmem_is_active(struct mem_cgroup *memcg) return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags); } -static void memcg_kmem_mark_dead(struct mem_cgroup *memcg) -{ - /* - * Our caller must use css_get() first, because memcg_uncharge_kmem() - * will call css_put() if it sees the memcg is dead. - */ - smp_wmb(); - if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags)) - set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags); -} - -static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg) -{ - return test_and_clear_bit(KMEM_ACCOUNTED_DEAD, - &memcg->kmem_account_flags); -} #endif /* Stuffs for move charges at task migration. 
*/ @@ -2758,22 +2741,7 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, if (do_swap_account) page_counter_uncharge(&memcg->memsw, nr_pages); - /* Not down to 0 */ - if (page_counter_uncharge(&memcg->kmem, nr_pages)) { - css_put_many(&memcg->css, nr_pages); - return; - } - - /* - * Releases a reference taken in kmem_cgroup_css_offline in case - * this last uncharge is racing with the offlining code or it is - * outliving the memcg existence. - * - * The memory barrier imposed by test&clear is paired with the - * explicit one in memcg_kmem_mark_dead(). - */ - if (memcg_kmem_test_and_clear_dead(memcg)) - css_put(&memcg->css); + page_counter_uncharge(&memcg->kmem, nr_pages); css_put_many(&memcg->css, nr_pages); } @@ -4757,40 +4725,6 @@ static void memcg_destroy_kmem(struct mem_cgroup *memcg) { mem_cgroup_sockets_destroy(memcg); } - -static void kmem_cgroup_css_offline(struct mem_cgroup *memcg) -{ - if (!memcg_kmem_is_active(memcg)) - return; - - /* - * kmem charges can outlive the cgroup. In the case of slab - * pages, for instance, a page contain objects from various - * processes. As we prevent from taking a reference for every - * such allocation we have to be careful when doing uncharge - * (see memcg_uncharge_kmem) and here during offlining. - * - * The idea is that that only the _last_ uncharge which sees - * the dead memcg will drop the last reference. An additional - * reference is taken here before the group is marked dead - * which is then paired with css_put during uncharge resp. here. - * - * Although this might sound strange as this path is called from - * css_offline() when the referencemight have dropped down to 0 and - * shouldn't be incremented anymore (css_tryget_online() would - * fail) we do not have other options because of the kmem - * allocations lifetime. - */ - css_get(&memcg->css); - - memcg_kmem_mark_dead(memcg); - - if (page_counter_read(&memcg->kmem)) - return; - - if (memcg_kmem_test_and_clear_dead(memcg)) - css_put(&memcg->css); -} #else static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) { @@ -4800,10 +4734,6 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) static void memcg_destroy_kmem(struct mem_cgroup *memcg) { } - -static void kmem_cgroup_css_offline(struct mem_cgroup *memcg) -{ -} #endif /* @@ -5407,8 +5337,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) } spin_unlock(&memcg->event_list_lock); - kmem_cgroup_css_offline(memcg); - /* * This requires that offlining is serialized. Right now that is * guaranteed because css_killed_work_fn() holds the cgroup_mutex. diff --git a/mm/page_counter.c b/mm/page_counter.c index f0cbc0825426..a009574fbba9 100644 --- a/mm/page_counter.c +++ b/mm/page_counter.c @@ -16,19 +16,14 @@ * page_counter_cancel - take pages out of the local counter * @counter: counter * @nr_pages: number of pages to cancel - * - * Returns whether there are remaining pages in the counter. */ -int page_counter_cancel(struct page_counter *counter, unsigned long nr_pages) +void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages) { long new; new = atomic_long_sub_return(nr_pages, &counter->count); - /* More uncharges than charges? */ WARN_ON_ONCE(new < 0); - - return new > 0; } /** @@ -117,23 +112,13 @@ failed: * page_counter_uncharge - hierarchically uncharge pages * @counter: counter * @nr_pages: number of pages to uncharge - * - * Returns whether there are remaining charges in @counter. 
*/ -int page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages) +void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages) { struct page_counter *c; - int ret = 1; - for (c = counter; c; c = c->parent) { - int remainder; - - remainder = page_counter_cancel(c, nr_pages); - if (c == counter && !remainder) - ret = 0; - } - - return ret; + for (c = counter; c; c = c->parent) + page_counter_cancel(c, nr_pages); } /** -- cgit v1.2.3-70-g09d2 From b2052564e66da2f0551d34a09488411919cfa14d Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:42:48 -0800 Subject: mm: memcontrol: continue cache reclaim from offlined groups On cgroup deletion, outstanding page cache charges are moved to the parent group so that they're not lost and can be reclaimed during pressure on/inside said parent. But this reparenting is fairly tricky and its synchroneous nature has led to several lock-ups in the past. Since c2931b70a32c ("cgroup: iterate cgroup_subsys_states directly") css iterators now also include offlined css, so memcg iterators can be changed to include offlined children during reclaim of a group, and leftover cache can just stay put. There is a slight change of behavior in that charges of deleted groups no longer show up as local charges in the parent. But they are still included in the parent's hierarchical statistics. Signed-off-by: Johannes Weiner Acked-by: Vladimir Davydov Acked-by: Michal Hocko Cc: David Rientjes Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 218 +------------------------------------------------------- 1 file changed, 1 insertion(+), 217 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0e6484ea268d..f90e43c1499f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1132,7 +1132,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, if (css == &root->css) break; - if (css_tryget_online(css)) { + if (css_tryget(css)) { /* * Make sure the memcg is initialized: * mem_cgroup_css_online() orders the the @@ -3316,79 +3316,6 @@ out: return ret; } -/** - * mem_cgroup_move_parent - moves page to the parent group - * @page: the page to move - * @pc: page_cgroup of the page - * @child: page's cgroup - * - * move charges to its parent or the root cgroup if the group has no - * parent (aka use_hierarchy==0). - * Although this might fail (get_page_unless_zero, isolate_lru_page or - * mem_cgroup_move_account fails) the failure is always temporary and - * it signals a race with a page removal/uncharge or migration. In the - * first case the page is on the way out and it will vanish from the LRU - * on the next attempt and the call should be retried later. - * Isolation from the LRU fails only if page has been isolated from - * the LRU since we looked at it and that usually means either global - * reclaim or migration going on. The page will either get back to the - * LRU or vanish. - * Finaly mem_cgroup_move_account fails only if the page got uncharged - * (!PageCgroupUsed) or moved to a different group. The page will - * disappear in the next attempt. 
- */ -static int mem_cgroup_move_parent(struct page *page, - struct page_cgroup *pc, - struct mem_cgroup *child) -{ - struct mem_cgroup *parent; - unsigned int nr_pages; - unsigned long uninitialized_var(flags); - int ret; - - VM_BUG_ON(mem_cgroup_is_root(child)); - - ret = -EBUSY; - if (!get_page_unless_zero(page)) - goto out; - if (isolate_lru_page(page)) - goto put; - - nr_pages = hpage_nr_pages(page); - - parent = parent_mem_cgroup(child); - /* - * If no parent, move charges to root cgroup. - */ - if (!parent) - parent = root_mem_cgroup; - - if (nr_pages > 1) { - VM_BUG_ON_PAGE(!PageTransHuge(page), page); - flags = compound_lock_irqsave(page); - } - - ret = mem_cgroup_move_account(page, nr_pages, - pc, child, parent); - if (!ret) { - if (!mem_cgroup_is_root(parent)) - css_get_many(&parent->css, nr_pages); - /* Take charge off the local counters */ - page_counter_cancel(&child->memory, nr_pages); - if (do_swap_account) - page_counter_cancel(&child->memsw, nr_pages); - css_put_many(&child->css, nr_pages); - } - - if (nr_pages > 1) - compound_unlock_irqrestore(page, flags); - putback_lru_page(page); -put: - put_page(page); -out: - return ret; -} - #ifdef CONFIG_MEMCG_SWAP static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, bool charge) @@ -3682,105 +3609,6 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, return nr_reclaimed; } -/** - * mem_cgroup_force_empty_list - clears LRU of a group - * @memcg: group to clear - * @node: NUMA node - * @zid: zone id - * @lru: lru to to clear - * - * Traverse a specified page_cgroup list and try to drop them all. This doesn't - * reclaim the pages page themselves - pages are moved to the parent (or root) - * group. - */ -static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg, - int node, int zid, enum lru_list lru) -{ - struct lruvec *lruvec; - unsigned long flags; - struct list_head *list; - struct page *busy; - struct zone *zone; - - zone = &NODE_DATA(node)->node_zones[zid]; - lruvec = mem_cgroup_zone_lruvec(zone, memcg); - list = &lruvec->lists[lru]; - - busy = NULL; - do { - struct page_cgroup *pc; - struct page *page; - - spin_lock_irqsave(&zone->lru_lock, flags); - if (list_empty(list)) { - spin_unlock_irqrestore(&zone->lru_lock, flags); - break; - } - page = list_entry(list->prev, struct page, lru); - if (busy == page) { - list_move(&page->lru, list); - busy = NULL; - spin_unlock_irqrestore(&zone->lru_lock, flags); - continue; - } - spin_unlock_irqrestore(&zone->lru_lock, flags); - - pc = lookup_page_cgroup(page); - - if (mem_cgroup_move_parent(page, pc, memcg)) { - /* found lock contention or "pc" is obsolete. */ - busy = page; - } else - busy = NULL; - cond_resched(); - } while (!list_empty(list)); -} - -/* - * make mem_cgroup's charge to be 0 if there is no task by moving - * all the charges and pages to the parent. - * This enables deleting this mem_cgroup. - * - * Caller is responsible for holding css reference on the memcg. - */ -static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg) -{ - int node, zid; - - do { - /* This is for making all *used* pages to be on LRU. 
*/ - lru_add_drain_all(); - drain_all_stock_sync(memcg); - mem_cgroup_start_move(memcg); - for_each_node_state(node, N_MEMORY) { - for (zid = 0; zid < MAX_NR_ZONES; zid++) { - enum lru_list lru; - for_each_lru(lru) { - mem_cgroup_force_empty_list(memcg, - node, zid, lru); - } - } - } - mem_cgroup_end_move(memcg); - memcg_oom_recover(memcg); - cond_resched(); - - /* - * Kernel memory may not necessarily be trackable to a specific - * process. So they are not migrated, and therefore we can't - * expect their value to drop to 0 here. - * Having res filled up with kmem only is enough. - * - * This is a safety check because mem_cgroup_force_empty_list - * could have raced with mem_cgroup_replace_page_cache callers - * so the lru seemed empty but the page could have been added - * right after the check. RES_USAGE should be safe as we always - * charge before adding to the LRU. - */ - } while (page_counter_read(&memcg->memory) - - page_counter_read(&memcg->kmem) > 0); -} - /* * Test whether @memcg has children, dead or alive. Note that this * function doesn't care whether @memcg has use_hierarchy enabled and @@ -5323,7 +5151,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup_event *event, *tmp; - struct cgroup_subsys_state *iter; /* * Unregister events and notify userspace. @@ -5337,13 +5164,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) } spin_unlock(&memcg->event_list_lock); - /* - * This requires that offlining is serialized. Right now that is - * guaranteed because css_killed_work_fn() holds the cgroup_mutex. - */ - css_for_each_descendant_post(iter, css) - mem_cgroup_reparent_charges(mem_cgroup_from_css(iter)); - memcg_unregister_all_caches(memcg); vmpressure_cleanup(&memcg->vmpressure); } @@ -5351,42 +5171,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) static void mem_cgroup_css_free(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); - /* - * XXX: css_offline() would be where we should reparent all - * memory to prepare the cgroup for destruction. However, - * memcg does not do css_tryget_online() and page_counter charging - * under the same RCU lock region, which means that charging - * could race with offlining. Offlining only happens to - * cgroups with no tasks in them but charges can show up - * without any tasks from the swapin path when the target - * memcg is looked up from the swapout record and not from the - * current task as it usually is. A race like this can leak - * charges and put pages with stale cgroup pointers into - * circulation: - * - * #0 #1 - * lookup_swap_cgroup_id() - * rcu_read_lock() - * mem_cgroup_lookup() - * css_tryget_online() - * rcu_read_unlock() - * disable css_tryget_online() - * call_rcu() - * offline_css() - * reparent_charges() - * page_counter_try_charge() - * css_put() - * css_free() - * pc->mem_cgroup = dead memcg - * add page to lru - * - * The bulk of the charges are still moved in offline_css() to - * avoid pinning a lot of pages in case a long-term reference - * like a swapout record is deferring the css_free() to long - * after offlining. 
But this makes sure we catch any charges - * made after offlining: - */ - mem_cgroup_reparent_charges(memcg); memcg_destroy_kmem(memcg); __mem_cgroup_free(memcg); -- cgit v1.2.3-70-g09d2 From 6d3d6aa22af30580cde0d2e23890027bb47a3544 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:42:50 -0800 Subject: mm: memcontrol: remove synchronous stock draining code With charge reparenting, the last synchronous stock drainer left. Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Acked-by: Michal Hocko Cc: David Rientjes Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 46 ++++++---------------------------------------- 1 file changed, 6 insertions(+), 40 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f90e43c1499f..3a628435f36a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -634,8 +634,6 @@ static void disarm_static_keys(struct mem_cgroup *memcg) disarm_kmem_keys(memcg); } -static void drain_all_stock_async(struct mem_cgroup *memcg); - static struct mem_cgroup_per_zone * mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) { @@ -2302,13 +2300,15 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) /* * Drains all per-CPU charge caches for given root_memcg resp. subtree - * of the hierarchy under it. sync flag says whether we should block - * until the work is done. + * of the hierarchy under it. */ -static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync) +static void drain_all_stock(struct mem_cgroup *root_memcg) { int cpu, curcpu; + /* If someone's already draining, avoid adding running more workers. */ + if (!mutex_trylock(&percpu_charge_mutex)) + return; /* Notify other cpus that system-wide "drain" is running */ get_online_cpus(); curcpu = get_cpu(); @@ -2329,41 +2329,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync) } } put_cpu(); - - if (!sync) - goto out; - - for_each_online_cpu(cpu) { - struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); - if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) - flush_work(&stock->work); - } -out: put_online_cpus(); -} - -/* - * Tries to drain stocked charges in other cpus. This function is asynchronous - * and just put a work per cpu for draining localy on each cpu. Caller can - * expects some charges will be back later but cannot wait for it. - */ -static void drain_all_stock_async(struct mem_cgroup *root_memcg) -{ - /* - * If someone calls draining, avoid adding more kworker runs. - */ - if (!mutex_trylock(&percpu_charge_mutex)) - return; - drain_all_stock(root_memcg, false); - mutex_unlock(&percpu_charge_mutex); -} - -/* This is a synchronous drain interface. */ -static void drain_all_stock_sync(struct mem_cgroup *root_memcg) -{ - /* called when force_empty is called */ - mutex_lock(&percpu_charge_mutex); - drain_all_stock(root_memcg, true); mutex_unlock(&percpu_charge_mutex); } @@ -2472,7 +2438,7 @@ retry: goto retry; if (!drained) { - drain_all_stock_async(mem_over_limit); + drain_all_stock(mem_over_limit); drained = true; goto retry; } -- cgit v1.2.3-70-g09d2 From bc2f2e7ffe5b6292c74ee1206d6ca303e13886b2 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 10 Dec 2014 15:43:40 -0800 Subject: memcg: simplify unreclaimable groups handling in soft limit reclaim If we fail to reclaim anything from a cgroup during a soft reclaim pass we want to get the next largest cgroup exceeding its soft limit. 
To achieve this, we should obviously remove the current group from the tree and then pick the largest group. Currently we have a weird loop instead. Let's simplify it. Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 26 ++++---------------------- 1 file changed, 4 insertions(+), 22 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3a628435f36a..975207a9cc65 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3518,34 +3518,16 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, nr_reclaimed += reclaimed; *total_scanned += nr_scanned; spin_lock_irq(&mctz->lock); + __mem_cgroup_remove_exceeded(mz, mctz); /* * If we failed to reclaim anything from this memory cgroup * it is time to move on to the next cgroup */ next_mz = NULL; - if (!reclaimed) { - do { - /* - * Loop until we find yet another one. - * - * By the time we get the soft_limit lock - * again, someone might have aded the - * group back on the RB tree. Iterate to - * make sure we get a different mem. - * mem_cgroup_largest_soft_limit_node returns - * NULL if no other cgroup is present on - * the tree - */ - next_mz = - __mem_cgroup_largest_soft_limit_node(mctz); - if (next_mz == mz) - css_put(&next_mz->memcg->css); - else /* next_mz == NULL or other memcg */ - break; - } while (1); - } - __mem_cgroup_remove_exceeded(mz, mctz); + if (!reclaimed) + next_mz = __mem_cgroup_largest_soft_limit_node(mctz); + excess = soft_limit_excess(mz->memcg); /* * One school of thought says that we should not add -- cgit v1.2.3-70-g09d2 From dfe0e773d0258a4d7dfd763e1fda04aa27680b90 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:43:43 -0800 Subject: mm: memcontrol: update mem_cgroup_page_lruvec() documentation Commit 7512102cf64d ("memcg: fix GPF when cgroup removal races with last exit") added a pc->mem_cgroup reset into mem_cgroup_page_lruvec() to prevent a crash where an anon page gets uncharged on unmap, the memcg is released, and then the final LRU isolation on free dereferences the stale pc->mem_cgroup pointer. But since commit 0a31bc97c80c ("mm: memcontrol: rewrite uncharge API"), pages are only uncharged AFTER that final LRU isolation, which guarantees the memcg's lifetime until then. pc->mem_cgroup now only needs to be reset for swapcache readahead pages. Update the comment and callsite requirements accordingly. Signed-off-by: Johannes Weiner Cc: Hugh Dickins Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 975207a9cc65..b495f29d4746 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1262,9 +1262,13 @@ out: } /** - * mem_cgroup_page_lruvec - return lruvec for adding an lru page + * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page * @page: the page * @zone: zone of the page + * + * This function is only safe when following the LRU page isolation + * and putback protocol: the LRU lock must be held, and the page must + * either be PageLRU() or the caller must have isolated/allocated it. 
*/ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) { @@ -1282,13 +1286,9 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) memcg = pc->mem_cgroup; /* - * Surreptitiously switch any uncharged offlist page to root: - * an uncharged page off lru does nothing to secure - * its former mem_cgroup from sudden removal. - * - * Our caller holds lru_lock, and PageCgroupUsed is updated - * under page_cgroup lock: between them, they make all uses - * of pc->mem_cgroup safe. + * Swapcache readahead pages are added to the LRU - and + * possibly migrated - before they are charged. Ensure + * pc->mem_cgroup is sane. */ if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup) pc->mem_cgroup = memcg = root_mem_cgroup; -- cgit v1.2.3-70-g09d2 From 7d5e324573b0ffd7098ab880c82096ca29a11f7f Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:43:46 -0800 Subject: mm: memcontrol: clarify migration where old page is uncharged Better explain re-entrant migration when compaction races with reclaim, and also mention swapcache readahead pages as possible uncharged migration sources. Signed-off-by: Johannes Weiner Cc: Hugh Dickins Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b495f29d4746..a0ae64ca55bf 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6157,7 +6157,12 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, if (PageCgroupUsed(pc)) return; - /* Re-entrant migration: old page already uncharged? */ + /* + * Swapcache readahead pages can get migrated before being + * charged, and migration from compaction can happen to an + * uncharged page when the PFN walker finds a page that + * reclaim just put back on the LRU but has not released yet. + */ pc = lookup_page_cgroup(oldpage); if (!PageCgroupUsed(pc)) return; -- cgit v1.2.3-70-g09d2 From 8c0145b62ef7e9019ab39284ed88873c483c8003 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 10 Dec 2014 15:43:48 -0800 Subject: memcg: remove activate_kmem_mutex The activate_kmem_mutex is used to serialize memcg.kmem.limit updates, but we already serialize them with memcg_limit_mutex so let's remove the former. Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a0ae64ca55bf..420461bcaefd 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2627,8 +2627,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, */ static DEFINE_MUTEX(memcg_slab_mutex); -static DEFINE_MUTEX(activate_kmem_mutex); - /* * This is a bit cumbersome, but it is rarely used and avoids a backpointer * in the memcg_cache_params struct. 
@@ -3747,9 +3745,8 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, } #ifdef CONFIG_MEMCG_KMEM -/* should be called with activate_kmem_mutex held */ -static int __memcg_activate_kmem(struct mem_cgroup *memcg, - unsigned long nr_pages) +static int memcg_activate_kmem(struct mem_cgroup *memcg, + unsigned long nr_pages) { int err = 0; int memcg_id; @@ -3811,17 +3808,6 @@ out: return err; } -static int memcg_activate_kmem(struct mem_cgroup *memcg, - unsigned long nr_pages) -{ - int ret; - - mutex_lock(&activate_kmem_mutex); - ret = __memcg_activate_kmem(memcg, nr_pages); - mutex_unlock(&activate_kmem_mutex); - return ret; -} - static int memcg_update_kmem_limit(struct mem_cgroup *memcg, unsigned long limit) { @@ -3844,14 +3830,14 @@ static int memcg_propagate_kmem(struct mem_cgroup *memcg) if (!parent) return 0; - mutex_lock(&activate_kmem_mutex); + mutex_lock(&memcg_limit_mutex); /* * If the parent cgroup is not kmem-active now, it cannot be activated * after this point, because it has at least one child already. */ if (memcg_kmem_is_active(parent)) - ret = __memcg_activate_kmem(memcg, PAGE_COUNTER_MAX); - mutex_unlock(&activate_kmem_mutex); + ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX); + mutex_unlock(&memcg_limit_mutex); return ret; } #else -- cgit v1.2.3-70-g09d2 From b9982f8d27f893de2e8e98a25c68bb838b5311a4 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 10 Dec 2014 15:43:51 -0800 Subject: mm: memcontrol: micro-optimize mem_cgroup_split_huge_fixup() Don't call lookup_page_cgroup() when memcg is disabled. Cc: Johannes Weiner Cc: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 420461bcaefd..8c10d4ca9c3f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3174,7 +3174,7 @@ static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg) */ void mem_cgroup_split_huge_fixup(struct page *head) { - struct page_cgroup *head_pc = lookup_page_cgroup(head); + struct page_cgroup *head_pc; struct page_cgroup *pc; struct mem_cgroup *memcg; int i; @@ -3182,6 +3182,8 @@ void mem_cgroup_split_huge_fixup(struct page *head) if (mem_cgroup_disabled()) return; + head_pc = lookup_page_cgroup(head); + memcg = head_pc->mem_cgroup; for (i = 1; i < HPAGE_PMD_NR; i++) { pc = head_pc + i; -- cgit v1.2.3-70-g09d2 From 7bdd143c37e591c254d0991ac398a53f3f9ef1af Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:43:54 -0800 Subject: mm: memcontrol: uncharge pages on swapout This series gets rid of the remaining page_cgroup flags, thus cutting the memcg per-page overhead down to one pointer. This patch (of 4): mem_cgroup_swapout() is called with exclusive access to the page at the end of the page's lifetime. Instead of clearing the PCG_MEMSW flag and deferring the uncharge, just do it right away. This allows follow-up patches to simplify the uncharge code. 
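For reference, the reworked function condenses to the following shape (a sketch assembled from the hunks below; the early returns for !do_swap_account and for readahead pages that were never charged are omitted here):

	void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
	{
		struct page_cgroup *pc = lookup_page_cgroup(page);
		struct mem_cgroup *memcg = pc->mem_cgroup;
		unsigned short oldid;

		/* hand the charge over to the swap entry's record */
		oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
		VM_BUG_ON_PAGE(oldid, page);
		mem_cgroup_swap_statistics(memcg, true);

		/* exclusive access: uncharge right away instead of deferring */
		pc->flags = 0;
		if (!mem_cgroup_is_root(memcg))
			page_counter_uncharge(&memcg->memory, 1);

		/* the caller holds the IRQ-safe mapping->tree_lock */
		VM_BUG_ON(!irqs_disabled());
		mem_cgroup_charge_statistics(memcg, page, -1);
		memcg_check_events(memcg, page);
	}
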
Signed-off-by: Johannes Weiner Cc: Hugh Dickins Acked-by: Michal Hocko Acked-by: Vladimir Davydov Reviewed-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 8c10d4ca9c3f..266a440c89f9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5777,6 +5777,7 @@ static void __init enable_swap_cgroup(void) */ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) { + struct mem_cgroup *memcg; struct page_cgroup *pc; unsigned short oldid; @@ -5793,13 +5794,22 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) return; VM_BUG_ON_PAGE(!(pc->flags & PCG_MEMSW), page); + memcg = pc->mem_cgroup; - oldid = swap_cgroup_record(entry, mem_cgroup_id(pc->mem_cgroup)); + oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); VM_BUG_ON_PAGE(oldid, page); + mem_cgroup_swap_statistics(memcg, true); + + pc->flags = 0; - pc->flags &= ~PCG_MEMSW; - css_get(&pc->mem_cgroup->css); - mem_cgroup_swap_statistics(pc->mem_cgroup, true); + if (!mem_cgroup_is_root(memcg)) + page_counter_uncharge(&memcg->memory, 1); + + /* XXX: caller holds IRQ-safe mapping->tree_lock */ + VM_BUG_ON(!irqs_disabled()); + + mem_cgroup_charge_statistics(memcg, page, -1); + memcg_check_events(memcg, page); } /** -- cgit v1.2.3-70-g09d2 From 18eca2e636f921e6350dc31b5b450bb4102d664f Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:43:57 -0800 Subject: mm: memcontrol: remove unnecessary PCG_MEMSW memory+swap charge flag Now that mem_cgroup_swapout() fully uncharges the page, every page that is still in use when reaching mem_cgroup_uncharge() is known to carry both the memory and the memory+swap charge. Simplify the uncharge path and remove the PCG_MEMSW page flag accordingly. Signed-off-by: Johannes Weiner Cc: Hugh Dickins Acked-by: Michal Hocko Reviewed-by: Vladimir Davydov Acked-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page_cgroup.h | 1 - mm/memcontrol.c | 34 ++++++++++++---------------------- 2 files changed, 12 insertions(+), 23 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 5c831f1eca79..da62ee2be28b 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -5,7 +5,6 @@ enum { /* flags for mem_cgroup */ PCG_USED = 0x01, /* This page is charged to a memcg */ PCG_MEM = 0x02, /* This page holds a memory charge */ - PCG_MEMSW = 0x04, /* This page holds a memory+swap charge */ }; struct pglist_data; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 266a440c89f9..baf3b535b180 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2614,7 +2614,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, * have the page locked */ pc->mem_cgroup = memcg; - pc->flags = PCG_USED | PCG_MEM | (do_swap_account ? 
PCG_MEMSW : 0); + pc->flags = PCG_USED | PCG_MEM; if (lrucare) unlock_page_lru(page, isolated); @@ -5793,7 +5793,6 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) if (!PageCgroupUsed(pc)) return; - VM_BUG_ON_PAGE(!(pc->flags & PCG_MEMSW), page); memcg = pc->mem_cgroup; oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); @@ -5989,17 +5988,16 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg) } static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, - unsigned long nr_mem, unsigned long nr_memsw, unsigned long nr_anon, unsigned long nr_file, unsigned long nr_huge, struct page *dummy_page) { + unsigned long nr_pages = nr_anon + nr_file; unsigned long flags; if (!mem_cgroup_is_root(memcg)) { - if (nr_mem) - page_counter_uncharge(&memcg->memory, nr_mem); - if (nr_memsw) - page_counter_uncharge(&memcg->memsw, nr_memsw); + page_counter_uncharge(&memcg->memory, nr_pages); + if (do_swap_account) + page_counter_uncharge(&memcg->memsw, nr_pages); memcg_oom_recover(memcg); } @@ -6008,23 +6006,21 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); - __this_cpu_add(memcg->stat->nr_page_events, nr_anon + nr_file); + __this_cpu_add(memcg->stat->nr_page_events, nr_pages); memcg_check_events(memcg, dummy_page); local_irq_restore(flags); if (!mem_cgroup_is_root(memcg)) - css_put_many(&memcg->css, max(nr_mem, nr_memsw)); + css_put_many(&memcg->css, nr_pages); } static void uncharge_list(struct list_head *page_list) { struct mem_cgroup *memcg = NULL; - unsigned long nr_memsw = 0; unsigned long nr_anon = 0; unsigned long nr_file = 0; unsigned long nr_huge = 0; unsigned long pgpgout = 0; - unsigned long nr_mem = 0; struct list_head *next; struct page *page; @@ -6051,10 +6047,9 @@ static void uncharge_list(struct list_head *page_list) if (memcg != pc->mem_cgroup) { if (memcg) { - uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw, - nr_anon, nr_file, nr_huge, page); - pgpgout = nr_mem = nr_memsw = 0; - nr_anon = nr_file = nr_huge = 0; + uncharge_batch(memcg, pgpgout, nr_anon, nr_file, + nr_huge, page); + pgpgout = nr_anon = nr_file = nr_huge = 0; } memcg = pc->mem_cgroup; } @@ -6070,18 +6065,14 @@ static void uncharge_list(struct list_head *page_list) else nr_file += nr_pages; - if (pc->flags & PCG_MEM) - nr_mem += nr_pages; - if (pc->flags & PCG_MEMSW) - nr_memsw += nr_pages; pc->flags = 0; pgpgout++; } while (next != page_list); if (memcg) - uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw, - nr_anon, nr_file, nr_huge, page); + uncharge_batch(memcg, pgpgout, nr_anon, nr_file, + nr_huge, page); } /** @@ -6166,7 +6157,6 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, return; VM_BUG_ON_PAGE(!(pc->flags & PCG_MEM), oldpage); - VM_BUG_ON_PAGE(do_swap_account && !(pc->flags & PCG_MEMSW), oldpage); if (lrucare) lock_page_lru(oldpage, &isolated); -- cgit v1.2.3-70-g09d2 From f4aaa8b43d90294ca7546317997c452600e9a8a7 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:00 -0800 Subject: mm: memcontrol: remove unnecessary PCG_MEM memory charge flag PCG_MEM is a remnant from an earlier version of 0a31bc97c80c ("mm: memcontrol: rewrite uncharge API"), used to tell whether migration cleared a charge while leaving pc->mem_cgroup valid and PCG_USED set. 
But in the final version, mem_cgroup_migrate() directly uncharges the source page, rendering this distinction unnecessary. Remove it. Signed-off-by: Johannes Weiner Cc: Hugh Dickins Acked-by: Michal Hocko Reviewed-by: Vladimir Davydov Acked-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page_cgroup.h | 1 - mm/memcontrol.c | 4 +--- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index da62ee2be28b..97536e685843 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -4,7 +4,6 @@ enum { /* flags for mem_cgroup */ PCG_USED = 0x01, /* This page is charged to a memcg */ - PCG_MEM = 0x02, /* This page holds a memory charge */ }; struct pglist_data; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index baf3b535b180..3dfb56a93117 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2614,7 +2614,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, * have the page locked */ pc->mem_cgroup = memcg; - pc->flags = PCG_USED | PCG_MEM; + pc->flags = PCG_USED; if (lrucare) unlock_page_lru(page, isolated); @@ -6156,8 +6156,6 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, if (!PageCgroupUsed(pc)) return; - VM_BUG_ON_PAGE(!(pc->flags & PCG_MEM), oldpage); - if (lrucare) lock_page_lru(oldpage, &isolated); -- cgit v1.2.3-70-g09d2 From 2983331575bfb248abfb02efb5140b4a299e3f45 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:02 -0800 Subject: mm: memcontrol: remove unnecessary PCG_USED pc->mem_cgroup valid flag pc->mem_cgroup had to be left intact after uncharge for the final LRU removal, and !PCG_USED indicated whether the page was uncharged. But since commit 0a31bc97c80c ("mm: memcontrol: rewrite uncharge API") pages are uncharged after the final LRU removal. Uncharge can simply clear the pointer and the PCG_USED/PageCgroupUsed sites can test that instead. Because this is the last page_cgroup flag, this patch reduces the memcg per-page overhead to a single pointer. [akpm@linux-foundation.org: remove unneeded initialization of `memcg', per Michal] Signed-off-by: Johannes Weiner Cc: Hugh Dickins Acked-by: Michal Hocko Reviewed-by: Vladimir Davydov Acked-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page_cgroup.h | 10 ----- mm/memcontrol.c | 107 +++++++++++++++++--------------------------- 2 files changed, 41 insertions(+), 76 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 97536e685843..1289be6b436c 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -1,11 +1,6 @@ #ifndef __LINUX_PAGE_CGROUP_H #define __LINUX_PAGE_CGROUP_H -enum { - /* flags for mem_cgroup */ - PCG_USED = 0x01, /* This page is charged to a memcg */ -}; - struct pglist_data; #ifdef CONFIG_MEMCG @@ -19,7 +14,6 @@ struct mem_cgroup; * then the page cgroup for pfn always exists. 
*/ struct page_cgroup { - unsigned long flags; struct mem_cgroup *mem_cgroup; }; @@ -39,10 +33,6 @@ static inline void page_cgroup_init(void) struct page_cgroup *lookup_page_cgroup(struct page *page); -static inline int PageCgroupUsed(struct page_cgroup *pc) -{ - return !!(pc->flags & PCG_USED); -} #else /* !CONFIG_MEMCG */ struct page_cgroup; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3dfb56a93117..09fece0eb9f1 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1284,14 +1284,12 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) pc = lookup_page_cgroup(page); memcg = pc->mem_cgroup; - /* * Swapcache readahead pages are added to the LRU - and - * possibly migrated - before they are charged. Ensure - * pc->mem_cgroup is sane. + * possibly migrated - before they are charged. */ - if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup) - pc->mem_cgroup = memcg = root_mem_cgroup; + if (!memcg) + memcg = root_mem_cgroup; mz = mem_cgroup_page_zoneinfo(memcg, page); lruvec = &mz->lruvec; @@ -2151,7 +2149,7 @@ struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, pc = lookup_page_cgroup(page); again: memcg = pc->mem_cgroup; - if (unlikely(!memcg || !PageCgroupUsed(pc))) + if (unlikely(!memcg)) return NULL; *locked = false; @@ -2159,7 +2157,7 @@ again: return memcg; move_lock_mem_cgroup(memcg, flags); - if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) { + if (memcg != pc->mem_cgroup) { move_unlock_mem_cgroup(memcg, flags); goto again; } @@ -2525,7 +2523,7 @@ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) */ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) { - struct mem_cgroup *memcg = NULL; + struct mem_cgroup *memcg; struct page_cgroup *pc; unsigned short id; swp_entry_t ent; @@ -2533,9 +2531,10 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) VM_BUG_ON_PAGE(!PageLocked(page), page); pc = lookup_page_cgroup(page); - if (PageCgroupUsed(pc)) { - memcg = pc->mem_cgroup; - if (memcg && !css_tryget_online(&memcg->css)) + memcg = pc->mem_cgroup; + + if (memcg) { + if (!css_tryget_online(&memcg->css)) memcg = NULL; } else if (PageSwapCache(page)) { ent.val = page_private(page); @@ -2586,7 +2585,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, struct page_cgroup *pc = lookup_page_cgroup(page); int isolated; - VM_BUG_ON_PAGE(PageCgroupUsed(pc), page); + VM_BUG_ON_PAGE(pc->mem_cgroup, page); /* * we don't need page_cgroup_lock about tail pages, becase they are not * accessed by any other context at this point. @@ -2601,7 +2600,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, /* * Nobody should be changing or seriously looking at - * pc->mem_cgroup and pc->flags at this point: + * pc->mem_cgroup at this point: * * - the page is uncharged * @@ -2614,7 +2613,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, * have the page locked */ pc->mem_cgroup = memcg; - pc->flags = PCG_USED; if (lrucare) unlock_page_lru(page, isolated); @@ -3126,37 +3124,22 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, memcg_uncharge_kmem(memcg, 1 << order); return; } - /* - * The page is freshly allocated and not visible to any - * outside callers yet. Set up pc non-atomically. 
- */ pc = lookup_page_cgroup(page); pc->mem_cgroup = memcg; - pc->flags = PCG_USED; } void __memcg_kmem_uncharge_pages(struct page *page, int order) { - struct mem_cgroup *memcg = NULL; - struct page_cgroup *pc; - - - pc = lookup_page_cgroup(page); - if (!PageCgroupUsed(pc)) - return; - - memcg = pc->mem_cgroup; - pc->flags = 0; + struct page_cgroup *pc = lookup_page_cgroup(page); + struct mem_cgroup *memcg = pc->mem_cgroup; - /* - * We trust that only if there is a memcg associated with the page, it - * is a valid allocation - */ if (!memcg) return; VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); + memcg_uncharge_kmem(memcg, 1 << order); + pc->mem_cgroup = NULL; } #else static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg) @@ -3174,23 +3157,16 @@ static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg) */ void mem_cgroup_split_huge_fixup(struct page *head) { - struct page_cgroup *head_pc; - struct page_cgroup *pc; - struct mem_cgroup *memcg; + struct page_cgroup *pc = lookup_page_cgroup(head); int i; if (mem_cgroup_disabled()) return; - head_pc = lookup_page_cgroup(head); + for (i = 1; i < HPAGE_PMD_NR; i++) + pc[i].mem_cgroup = pc[0].mem_cgroup; - memcg = head_pc->mem_cgroup; - for (i = 1; i < HPAGE_PMD_NR; i++) { - pc = head_pc + i; - pc->mem_cgroup = memcg; - pc->flags = head_pc->flags; - } - __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], + __this_cpu_sub(pc[0].mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE], HPAGE_PMD_NR); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -3240,7 +3216,7 @@ static int mem_cgroup_move_account(struct page *page, goto out; ret = -EINVAL; - if (!PageCgroupUsed(pc) || pc->mem_cgroup != from) + if (pc->mem_cgroup != from) goto out_unlock; move_lock_mem_cgroup(from, &flags); @@ -3350,7 +3326,7 @@ static struct page_cgroup *lookup_page_cgroup_used(struct page *page) * the first time, i.e. during boot or memory hotplug; * or when mem_cgroup_disabled(). */ - if (likely(pc) && PageCgroupUsed(pc)) + if (likely(pc) && pc->mem_cgroup) return pc; return NULL; } @@ -3368,10 +3344,8 @@ void mem_cgroup_print_bad_page(struct page *page) struct page_cgroup *pc; pc = lookup_page_cgroup_used(page); - if (pc) { - pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n", - pc, pc->flags, pc->mem_cgroup); - } + if (pc) + pr_alert("pc:%p pc->mem_cgroup:%p\n", pc, pc->mem_cgroup); } #endif @@ -5308,7 +5282,7 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, * mem_cgroup_move_account() checks the pc is valid or * not under LRU exclusion. 
*/ - if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { + if (pc->mem_cgroup == mc.from) { ret = MC_TARGET_PAGE; if (target) target->page = page; @@ -5344,7 +5318,7 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, if (!move_anon()) return ret; pc = lookup_page_cgroup(page); - if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { + if (pc->mem_cgroup == mc.from) { ret = MC_TARGET_PAGE; if (target) { get_page(page); @@ -5788,18 +5762,17 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) return; pc = lookup_page_cgroup(page); + memcg = pc->mem_cgroup; /* Readahead page, never charged */ - if (!PageCgroupUsed(pc)) + if (!memcg) return; - memcg = pc->mem_cgroup; - oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); VM_BUG_ON_PAGE(oldid, page); mem_cgroup_swap_statistics(memcg, true); - pc->flags = 0; + pc->mem_cgroup = NULL; if (!mem_cgroup_is_root(memcg)) page_counter_uncharge(&memcg->memory, 1); @@ -5874,7 +5847,7 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, * the page lock, which serializes swap cache removal, which * in turn serializes uncharging. */ - if (PageCgroupUsed(pc)) + if (pc->mem_cgroup) goto out; } @@ -6036,13 +6009,13 @@ static void uncharge_list(struct list_head *page_list) VM_BUG_ON_PAGE(page_count(page), page); pc = lookup_page_cgroup(page); - if (!PageCgroupUsed(pc)) + if (!pc->mem_cgroup) continue; /* * Nobody should be changing or seriously looking at - * pc->mem_cgroup and pc->flags at this point, we have - * fully exclusive access to the page. + * pc->mem_cgroup at this point, we have fully + * exclusive access to the page. */ if (memcg != pc->mem_cgroup) { @@ -6065,7 +6038,7 @@ static void uncharge_list(struct list_head *page_list) else nr_file += nr_pages; - pc->flags = 0; + pc->mem_cgroup = NULL; pgpgout++; } while (next != page_list); @@ -6091,7 +6064,7 @@ void mem_cgroup_uncharge(struct page *page) /* Don't touch page->lru of any random page, pre-check: */ pc = lookup_page_cgroup(page); - if (!PageCgroupUsed(pc)) + if (!pc->mem_cgroup) return; INIT_LIST_HEAD(&page->lru); @@ -6127,6 +6100,7 @@ void mem_cgroup_uncharge_list(struct list_head *page_list) void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, bool lrucare) { + struct mem_cgroup *memcg; struct page_cgroup *pc; int isolated; @@ -6143,7 +6117,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, /* Page cache replacement: new page already charged? */ pc = lookup_page_cgroup(newpage); - if (PageCgroupUsed(pc)) + if (pc->mem_cgroup) return; /* @@ -6153,18 +6127,19 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, * reclaim just put back on the LRU but has not released yet. */ pc = lookup_page_cgroup(oldpage); - if (!PageCgroupUsed(pc)) + memcg = pc->mem_cgroup; + if (!memcg) return; if (lrucare) lock_page_lru(oldpage, &isolated); - pc->flags = 0; + pc->mem_cgroup = NULL; if (lrucare) unlock_page_lru(oldpage, isolated); - commit_charge(newpage, pc->mem_cgroup, lrucare); + commit_charge(newpage, memcg, lrucare); } /* -- cgit v1.2.3-70-g09d2 From 354a4783a2ee5ba1cb5a1442cca8ecd4c0ac6d66 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:05 -0800 Subject: mm: memcontrol: inline memcg->move_lock locking The wrappers around taking and dropping the memcg->move_lock spinlock add nothing of value. Inline the spinlock calls into the callsites. 
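In other words (a sketch condensed from the diff below, not additional code in the patch), this wrapper goes away:

	static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
					 unsigned long *flags)
	{
		spin_lock_irqsave(&memcg->move_lock, *flags);
	}

and each callsite takes the lock directly, e.g. in mem_cgroup_move_account():

	spin_lock_irqsave(&from->move_lock, flags);
	/* ... move the page's statistics from @from to @to ... */
	spin_unlock_irqrestore(&from->move_lock, flags);
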
Signed-off-by: Johannes Weiner Acked-by: Vladimir Davydov Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 09fece0eb9f1..a5c9aa4688e8 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1522,23 +1522,6 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) return false; } -/* - * Take this lock when - * - a code tries to modify page's memcg while it's USED. - * - a code tries to modify page state accounting in a memcg. - */ -static void move_lock_mem_cgroup(struct mem_cgroup *memcg, - unsigned long *flags) -{ - spin_lock_irqsave(&memcg->move_lock, *flags); -} - -static void move_unlock_mem_cgroup(struct mem_cgroup *memcg, - unsigned long *flags) -{ - spin_unlock_irqrestore(&memcg->move_lock, *flags); -} - #define K(x) ((x) << (PAGE_SHIFT-10)) /** * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller. @@ -2156,9 +2139,9 @@ again: if (atomic_read(&memcg->moving_account) <= 0) return memcg; - move_lock_mem_cgroup(memcg, flags); + spin_lock_irqsave(&memcg->move_lock, *flags); if (memcg != pc->mem_cgroup) { - move_unlock_mem_cgroup(memcg, flags); + spin_unlock_irqrestore(&memcg->move_lock, *flags); goto again; } *locked = true; @@ -2176,7 +2159,7 @@ void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked, unsigned long flags) { if (memcg && locked) - move_unlock_mem_cgroup(memcg, &flags); + spin_unlock_irqrestore(&memcg->move_lock, flags); rcu_read_unlock(); } @@ -3219,7 +3202,7 @@ static int mem_cgroup_move_account(struct page *page, if (pc->mem_cgroup != from) goto out_unlock; - move_lock_mem_cgroup(from, &flags); + spin_lock_irqsave(&from->move_lock, flags); if (!PageAnon(page) && page_mapped(page)) { __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], @@ -3243,7 +3226,8 @@ static int mem_cgroup_move_account(struct page *page, /* caller should have done css_get */ pc->mem_cgroup = to; - move_unlock_mem_cgroup(from, &flags); + spin_unlock_irqrestore(&from->move_lock, flags); + ret = 0; local_irq_disable(); -- cgit v1.2.3-70-g09d2 From 4e2f245d38ba86e3922c6c188fe4a0d0688aed88 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:08 -0800 Subject: mm: memcontrol: don't pass a NULL memcg to mem_cgroup_end_move() mem_cgroup_end_move() checks if the passed memcg is NULL, along with a lengthy comment to explain why this seemingly non-sensical situation is even possible. Check in cancel_attach() itself whether can_attach() set up the move context or not, it's a lot more obvious from there. Then remove the check and comment in mem_cgroup_end_move(). Signed-off-by: Johannes Weiner Acked-by: Vladimir Davydov Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a5c9aa4688e8..3cd4f1e0bfb3 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1469,12 +1469,7 @@ static void mem_cgroup_start_move(struct mem_cgroup *memcg) static void mem_cgroup_end_move(struct mem_cgroup *memcg) { - /* - * Now, mem_cgroup_clear_mc() may call this function with NULL. - * We check NULL in callee rather than caller. 
- */ - if (memcg) - atomic_dec(&memcg->moving_account); + atomic_dec(&memcg->moving_account); } /* @@ -5489,7 +5484,8 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, struct cgroup_taskset *tset) { - mem_cgroup_clear_mc(); + if (mc.to) + mem_cgroup_clear_mc(); } static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, -- cgit v1.2.3-70-g09d2 From 247b1447b6ccb2890cefc370f8e204592a70774d Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:11 -0800 Subject: mm: memcontrol: fold mem_cgroup_start_move()/mem_cgroup_end_move() Having these functions and their documentation split out and somewhere makes it harder, not easier, to follow what's going on. Inline them directly where charge moving is prepared and finished, and put an explanation right next to it. Signed-off-by: Johannes Weiner Cc: Michal Hocko Cc: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 40 ++++++++++++---------------------------- 1 file changed, 12 insertions(+), 28 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3cd4f1e0bfb3..3734fd6d1132 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1446,32 +1446,6 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg) return memcg->swappiness; } -/* - * memcg->moving_account is used for checking possibility that some thread is - * calling move_account(). When a thread on CPU-A starts moving pages under - * a memcg, other threads should check memcg->moving_account under - * rcu_read_lock(), like this: - * - * CPU-A CPU-B - * rcu_read_lock() - * memcg->moving_account+1 if (memcg->mocing_account) - * take heavy locks. - * synchronize_rcu() update something. - * rcu_read_unlock() - * start move here. - */ - -static void mem_cgroup_start_move(struct mem_cgroup *memcg) -{ - atomic_inc(&memcg->moving_account); - synchronize_rcu(); -} - -static void mem_cgroup_end_move(struct mem_cgroup *memcg) -{ - atomic_dec(&memcg->moving_account); -} - /* * A routine for checking "mem" is under move_account() or not. * @@ -5431,7 +5405,8 @@ static void mem_cgroup_clear_mc(void) mc.from = NULL; mc.to = NULL; spin_unlock(&mc.lock); - mem_cgroup_end_move(from); + + atomic_dec(&from->moving_account); } static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, @@ -5464,7 +5439,16 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, VM_BUG_ON(mc.precharge); VM_BUG_ON(mc.moved_charge); VM_BUG_ON(mc.moved_swap); - mem_cgroup_start_move(from); + + /* + * Signal mem_cgroup_begin_page_stat() to take + * the memcg's move_lock while we're moving + * its pages to another memcg. Then wait for + * already started RCU-only updates to finish. + */ + atomic_inc(&from->moving_account); + synchronize_rcu(); + spin_lock(&mc.lock); mc.from = from; mc.to = memcg; -- cgit v1.2.3-70-g09d2 From 4ef461e8f4dd13a2e64c6c8f00c420d62294e2d4 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 10 Dec 2014 15:44:16 -0800 Subject: memcg: remove mem_cgroup_reclaimable check from soft reclaim mem_cgroup_reclaimable() checks whether a cgroup has reclaimable pages on *any* NUMA node. However, the only place where it's called is mem_cgroup_soft_reclaim(), which tries to reclaim memory from a *specific* zone. So the way it is used is incorrect - it will return true even if the cgroup doesn't have pages on the zone we're scanning. 
I think we can get rid of this check completely, because mem_cgroup_shrink_node_zone(), which is called by mem_cgroup_soft_reclaim() if mem_cgroup_reclaimable() returns true, is equivalent to shrink_lruvec(), which exits almost immediately if the lruvec passed to it is empty. So there's no need to optimize anything here. Besides, we don't have such a check in the general scan path (shrink_zone) either. Signed-off-by: Vladimir Davydov Acked-by: Michal Hocko Acked-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 43 ------------------------------------------- 1 file changed, 43 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3734fd6d1132..32e3b191857d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1743,52 +1743,11 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) memcg->last_scanned_node = node; return node; } - -/* - * Check all nodes whether it contains reclaimable pages or not. - * For quick scan, we make use of scan_nodes. This will allow us to skip - * unused nodes. But scan_nodes is lazily updated and may not cotain - * enough new information. We need to do double check. - */ -static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap) -{ - int nid; - - /* - * quick check...making use of scan_node. - * We can skip unused nodes. - */ - if (!nodes_empty(memcg->scan_nodes)) { - for (nid = first_node(memcg->scan_nodes); - nid < MAX_NUMNODES; - nid = next_node(nid, memcg->scan_nodes)) { - - if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap)) - return true; - } - } - /* - * Check rest of nodes. - */ - for_each_node_state(nid, N_MEMORY) { - if (node_isset(nid, memcg->scan_nodes)) - continue; - if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap)) - return true; - } - return false; -} - #else int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) { return 0; } - -static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap) -{ - return test_mem_cgroup_node_reclaimable(memcg, 0, noswap); -} #endif static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, @@ -1832,8 +1791,6 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, } continue; } - if (!mem_cgroup_reclaimable(victim, false)) - continue; total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false, zone, &nr_scanned); *total_scanned += nr_scanned; -- cgit v1.2.3-70-g09d2 From b047501cd9f11d5e1d54ea0f90e2b10754021a0e Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 10 Dec 2014 15:44:19 -0800 Subject: memcg: use generic slab iterators for showing slabinfo Let's use generic slab_start/next/stop for showing memcg caches info. In contrast to the current implementation, this will work even if all memcg caches' info doesn't fit into a seq buffer (a page), plus it simply looks neater. Actually, the main reason I do this isn't mere cleanup. I'm going to zap the memcg_slab_caches list, because I find it useless provided we have the slab_caches list, and this patch is a step in this direction. It should be noted that before this patch an attempt to read memory.kmem.slabinfo of a cgroup that doesn't have kmem limit set resulted in -EIO, while after this patch it will silently show nothing except the header, but I don't think it will frustrate anyone. 
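The resulting wiring is short (a sketch of the relevant pieces from the diff below): memory.kmem.slabinfo reuses the generic slab_caches iterator, and only the ->seq_show callback filters for caches that belong to the cgroup:

	/* mem_cgroup_files[] entry in mm/memcontrol.c */
	{
		.name = "kmem.slabinfo",
		.seq_start = slab_start,
		.seq_next = slab_next,
		.seq_stop = slab_stop,
		.seq_show = memcg_slab_show,
	},

	/* mm/slab_common.c */
	int memcg_slab_show(struct seq_file *m, void *p)
	{
		struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
		struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

		if (p == slab_caches.next)
			print_slabinfo_header(m);
		if (!is_root_cache(s) && s->memcg_params->memcg == memcg)
			cache_show(s, m);
		return 0;
	}
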
Signed-off-by: Vladimir Davydov Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 4 ---- mm/memcontrol.c | 25 ++++--------------------- mm/slab.h | 1 + mm/slab_common.c | 25 +++++++++++++++++++------ 4 files changed, 24 insertions(+), 31 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/include/linux/slab.h b/include/linux/slab.h index c265bec6a57d..8a2457d42fc8 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -513,10 +513,6 @@ struct memcg_cache_params { int memcg_update_all_caches(int num_memcgs); -struct seq_file; -int cache_show(struct kmem_cache *s, struct seq_file *m); -void print_slabinfo_header(struct seq_file *m); - /** * kmalloc_array - allocate memory for an array. * @n: number of elements. diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 32e3b191857d..9d30129b0d4a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2547,26 +2547,6 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p) return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg)); } -#ifdef CONFIG_SLABINFO -static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v) -{ - struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); - struct memcg_cache_params *params; - - if (!memcg_kmem_is_active(memcg)) - return -EIO; - - print_slabinfo_header(m); - - mutex_lock(&memcg_slab_mutex); - list_for_each_entry(params, &memcg->memcg_slab_caches, list) - cache_show(memcg_params_to_cache(params), m); - mutex_unlock(&memcg_slab_mutex); - - return 0; -} -#endif - static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, unsigned long nr_pages) { @@ -4708,7 +4688,10 @@ static struct cftype mem_cgroup_files[] = { #ifdef CONFIG_SLABINFO { .name = "kmem.slabinfo", - .seq_show = mem_cgroup_slabinfo_read, + .seq_start = slab_start, + .seq_next = slab_next, + .seq_stop = slab_stop, + .seq_show = memcg_slab_show, }, #endif #endif diff --git a/mm/slab.h b/mm/slab.h index 078acbcf64e8..1cf4005482dd 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -360,5 +360,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) void *slab_start(struct seq_file *m, loff_t *pos); void *slab_next(struct seq_file *m, void *p, loff_t *pos); void slab_stop(struct seq_file *m, void *p); +int memcg_slab_show(struct seq_file *m, void *p); #endif /* MM_SLAB_H */ diff --git a/mm/slab_common.c b/mm/slab_common.c index 2a3f5ff410cf..e03dd6f2a272 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -811,7 +811,7 @@ EXPORT_SYMBOL(kmalloc_order_trace); #define SLABINFO_RIGHTS S_IRUSR #endif -void print_slabinfo_header(struct seq_file *m) +static void print_slabinfo_header(struct seq_file *m) { /* * Output format version, so at least we can change it @@ -876,7 +876,7 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info) } } -int cache_show(struct kmem_cache *s, struct seq_file *m) +static void cache_show(struct kmem_cache *s, struct seq_file *m) { struct slabinfo sinfo; @@ -895,7 +895,6 @@ int cache_show(struct kmem_cache *s, struct seq_file *m) sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail); slabinfo_show_stats(m, s); seq_putc(m, '\n'); - return 0; } static int slab_show(struct seq_file *m, void *p) @@ -904,10 +903,24 @@ static int slab_show(struct seq_file *m, void *p) if (p == slab_caches.next) print_slabinfo_header(m); - if (!is_root_cache(s)) - return 0; - return cache_show(s, m); + if 
(is_root_cache(s)) + cache_show(s, m); + return 0; +} + +#ifdef CONFIG_MEMCG_KMEM +int memcg_slab_show(struct seq_file *m, void *p) +{ + struct kmem_cache *s = list_entry(p, struct kmem_cache, list); + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + + if (p == slab_caches.next) + print_slabinfo_header(m); + if (!is_root_cache(s) && s->memcg_params->memcg == memcg) + cache_show(s, m); + return 0; +} +#endif /* * slabinfo_op - iterator that generates /proc/slabinfo -- cgit v1.2.3-70-g09d2 From 312722cbb2a6e12b74177f025a8ee7189816b04b Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:25 -0800 Subject: mm: memcontrol: shorten the page statistics update slowpath While moving charges from one memcg to another, page stat updates must acquire the old memcg's move_lock to prevent double accounting. That situation is denoted by an increased memcg->moving_account. However, the charge moving code declares this way too early for now, even before summing up the RSS and pre-allocating destination charges. Shorten this slowpath mode by increasing memcg->moving_account only right before walking the task's address space with the intention of actually moving the pages. Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Reviewed-by: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9d30129b0d4a..9073d07c1149 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5333,8 +5333,6 @@ static void __mem_cgroup_clear_mc(void) static void mem_cgroup_clear_mc(void) { - struct mem_cgroup *from = mc.from; - /* * we must clear moving_task before waking up waiters at the end of * task migration. */ @@ -5345,8 +5343,6 @@ static void mem_cgroup_clear_mc(void) mc.from = NULL; mc.to = NULL; spin_unlock(&mc.lock); - - atomic_dec(&from->moving_account); } static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, @@ -5380,15 +5376,6 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, VM_BUG_ON(mc.moved_charge); VM_BUG_ON(mc.moved_swap); - /* - * Signal mem_cgroup_begin_page_stat() to take - * the memcg's move_lock while we're moving - * its pages to another memcg. Then wait for - * already started RCU-only updates to finish. - */ - atomic_inc(&from->moving_account); - synchronize_rcu(); - spin_lock(&mc.lock); mc.from = from; mc.to = memcg; @@ -5520,6 +5507,13 @@ static void mem_cgroup_move_charge(struct mm_struct *mm) struct vm_area_struct *vma; lru_add_drain_all(); + /* + * Signal mem_cgroup_begin_page_stat() to take the memcg's + * move_lock while we're moving its pages to another memcg. + * Then wait for already started RCU-only updates to finish. + */ + atomic_inc(&mc.from->moving_account); + synchronize_rcu(); retry: if (unlikely(!down_read_trylock(&mm->mmap_sem))) { /* @@ -5552,6 +5546,7 @@ retry: break; } up_read(&mm->mmap_sem); + atomic_dec(&mc.from->moving_account); } static void mem_cgroup_move_task(struct cgroup_subsys_state *css, -- cgit v1.2.3-70-g09d2 From c01f46c7c725f0a1330673bc3c767b89a3b2dbc0 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:27 -0800 Subject: mm: memcontrol: remove bogus NULL check after mem_cgroup_from_task() That function acts like a typecast - unless NULL is passed in, no NULL can come out. task_in_mem_cgroup() callers don't pass NULL tasks.
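To illustrate the "acts like a typecast" point, here is a minimal standalone sketch of a container_of-style conversion (simplified types and invented names, not the real memcg helpers): the conversion is plain pointer arithmetic, so checking its result for NULL is dead code whenever the input is known to be non-NULL.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins for the real structures. */
struct css { int refcnt; };

struct memcg {
    long usage;
    struct css css;
};

/* Acts like a typecast: a non-NULL css can only yield a non-NULL memcg. */
static struct memcg *memcg_from_css(struct css *css)
{
    return container_of(css, struct memcg, css);
}

int main(void)
{
    struct memcg m = { .usage = 42 };
    struct memcg *back = memcg_from_css(&m.css);

    /* An "if (back)" test here could never fail for non-NULL input. */
    printf("usage=%ld\n", back->usage);
    return 0;
}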
Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Acked-by: Michal Hocko Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9073d07c1149..367cc57df362 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1356,7 +1356,7 @@ static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, bool task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg) { - struct mem_cgroup *curr = NULL; + struct mem_cgroup *curr; struct task_struct *p; bool ret; @@ -1372,8 +1372,7 @@ bool task_in_mem_cgroup(struct task_struct *task, */ rcu_read_lock(); curr = mem_cgroup_from_task(task); - if (curr) - css_get(&curr->css); + css_get(&curr->css); rcu_read_unlock(); } /* -- cgit v1.2.3-70-g09d2 From 413918bb61b4fa027baa3e79546c47f15e4b9ea8 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:30 -0800 Subject: mm: memcontrol: pull the NULL check from __mem_cgroup_same_or_subtree() The NULL in mm_match_cgroup() comes from a possibly exiting mm->owner. It makes a lot more sense to check where it's looked up, rather than check for it in __mem_cgroup_same_or_subtree() where it's unexpected. No other callsite passes NULL to __mem_cgroup_same_or_subtree(). Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 5 +++-- mm/memcontrol.c | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ea007615e8f9..e32ab948f589 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -83,11 +83,12 @@ static inline bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) { struct mem_cgroup *task_memcg; - bool match; + bool match = false; rcu_read_lock(); task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); - match = __mem_cgroup_same_or_subtree(memcg, task_memcg); + if (task_memcg) + match = __mem_cgroup_same_or_subtree(memcg, task_memcg); rcu_read_unlock(); return match; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 367cc57df362..e5dcebd71dfb 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1337,7 +1337,7 @@ bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, { if (root_memcg == memcg) return true; - if (!root_memcg->use_hierarchy || !memcg) + if (!root_memcg->use_hierarchy) return false; return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup); } -- cgit v1.2.3-70-g09d2 From 2314b42db67be30b747122d65c6cd2c85da34538 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:33 -0800 Subject: mm: memcontrol: drop bogus RCU locking from mem_cgroup_same_or_subtree() None of the mem_cgroup_same_or_subtree() callers actually require it to take the RCU lock, either because they hold it themselves or they have css references. Remove it. To make the API change clear, rename the leftover helper to mem_cgroup_is_descendant() to match cgroup_is_descendant(). 
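The semantics of the renamed helper can be sketched as a plain parent-pointer walk in a standalone program (invented struct and names; the real helper defers to cgroup_is_descendant() and additionally honours use_hierarchy): a group counts as a descendant of itself, and nothing in the walk needs RCU as long as the caller holds references on both ends.

#include <stdbool.h>
#include <stdio.h>

/* Toy hierarchy node, standing in for a cgroup/memcg. */
struct node {
    const char *name;
    struct node *parent;
};

/* True if n is root itself or sits anywhere below it. */
static bool is_descendant(const struct node *n, const struct node *root)
{
    for (; n; n = n->parent)
        if (n == root)
            return true;
    return false;
}

int main(void)
{
    struct node root = { "root", NULL };
    struct node a = { "A", &root };
    struct node aa = { "A/A", &a };

    printf("%d %d %d\n",
           is_descendant(&aa, &root),   /* 1 */
           is_descendant(&a, &a),       /* 1: self counts */
           is_descendant(&root, &a));   /* 0 */
    return 0;
}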
Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 13 +++++----- mm/memcontrol.c | 59 +++++++++++++--------------------------------- mm/oom_kill.c | 4 ++-- 3 files changed, 24 insertions(+), 52 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e32ab948f589..d4575a1d6e99 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -68,10 +68,9 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); -bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, - struct mem_cgroup *memcg); -bool task_in_mem_cgroup(struct task_struct *task, - const struct mem_cgroup *memcg); +bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, + struct mem_cgroup *root); +bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); @@ -79,8 +78,8 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); -static inline -bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) +static inline bool mm_match_cgroup(struct mm_struct *mm, + struct mem_cgroup *memcg) { struct mem_cgroup *task_memcg; bool match = false; @@ -88,7 +87,7 @@ bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) rcu_read_lock(); task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); if (task_memcg) - match = __mem_cgroup_same_or_subtree(memcg, task_memcg); + match = mem_cgroup_is_descendant(task_memcg, memcg); rcu_read_unlock(); return match; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e5dcebd71dfb..b841bf430179 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1328,41 +1328,24 @@ void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, VM_BUG_ON((long)(*lru_size) < 0); } -/* - * Checks whether given mem is same or in the root_mem_cgroup's - * hierarchy subtree - */ -bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, - struct mem_cgroup *memcg) +bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root) { - if (root_memcg == memcg) + if (root == memcg) return true; - if (!root_memcg->use_hierarchy) + if (!root->use_hierarchy) return false; - return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup); -} - -static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, - struct mem_cgroup *memcg) -{ - bool ret; - - rcu_read_lock(); - ret = __mem_cgroup_same_or_subtree(root_memcg, memcg); - rcu_read_unlock(); - return ret; + return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup); } -bool task_in_mem_cgroup(struct task_struct *task, - const struct mem_cgroup *memcg) +bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) { - struct mem_cgroup *curr; + struct mem_cgroup *task_memcg; struct task_struct *p; bool ret; p = find_lock_task_mm(task); if (p) { - curr = get_mem_cgroup_from_mm(p->mm); + task_memcg = get_mem_cgroup_from_mm(p->mm); task_unlock(p); } else { 
/* @@ -1371,18 +1354,12 @@ bool task_in_mem_cgroup(struct task_struct *task, * killed to prevent needlessly killing additional tasks. */ rcu_read_lock(); - curr = mem_cgroup_from_task(task); - css_get(&curr->css); + task_memcg = mem_cgroup_from_task(task); + css_get(&task_memcg->css); rcu_read_unlock(); } - /* - * We should check use_hierarchy of "memcg" not "curr". Because checking - * use_hierarchy of "curr" here make this function true if hierarchy is - * enabled in "curr" and "curr" is a child of "memcg" in *cgroup* - * hierarchy(even if use_hierarchy is disabled in "memcg"). - */ - ret = mem_cgroup_same_or_subtree(memcg, curr); - css_put(&curr->css); + ret = mem_cgroup_is_descendant(task_memcg, memcg); + css_put(&task_memcg->css); return ret; } @@ -1467,8 +1444,8 @@ static bool mem_cgroup_under_move(struct mem_cgroup *memcg) if (!from) goto unlock; - ret = mem_cgroup_same_or_subtree(memcg, from) - || mem_cgroup_same_or_subtree(memcg, to); + ret = mem_cgroup_is_descendant(from, memcg) || + mem_cgroup_is_descendant(to, memcg); unlock: spin_unlock(&mc.lock); return ret; @@ -1900,12 +1877,8 @@ static int memcg_oom_wake_function(wait_queue_t *wait, oom_wait_info = container_of(wait, struct oom_wait_info, wait); oom_wait_memcg = oom_wait_info->memcg; - /* - * Both of oom_wait_info->memcg and wake_memcg are stable under us. - * Then we can use css_is_ancestor without taking care of RCU. - */ - if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg) - && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg)) + if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && + !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) return 0; return autoremove_wake_function(wait, mode, sync, arg); } @@ -2225,7 +2198,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) memcg = stock->cached; if (!memcg || !stock->nr_pages) continue; - if (!mem_cgroup_same_or_subtree(root_memcg, memcg)) + if (!mem_cgroup_is_descendant(memcg, root_memcg)) continue; if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { if (cpu == curcpu) diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 5340f6b91312..3b014d326151 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -119,7 +119,7 @@ found: /* return true if the task is not adequate as candidate victim task. */ static bool oom_unkillable_task(struct task_struct *p, - const struct mem_cgroup *memcg, const nodemask_t *nodemask) + struct mem_cgroup *memcg, const nodemask_t *nodemask) { if (is_global_init(p)) return true; @@ -353,7 +353,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes, * swapents, oom_score_adj value, and name. */ -static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask) +static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) { struct task_struct *p; struct task_struct *task; -- cgit v1.2.3-70-g09d2 From e4bd6a0248b2a026e07c19995c41a4cb5a49d797 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 10 Dec 2014 15:44:39 -0800 Subject: mm, memcg: fix potential undefined behaviour in page stat accounting Since commit d7365e783edb ("mm: memcontrol: fix missed end-writeback page accounting") mem_cgroup_end_page_stat consumes locked and flags variables directly rather than via pointers which might trigger C undefined behavior as those variables are initialized only in the slow path of mem_cgroup_begin_page_stat. 
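Concretely, the shape of the problem and of the fix might look like the standalone sketch below (simplified signatures and invented names, not the kernel functions): begin() fills *locked and *flags only on its slow path, so a caller that later passes those locals by value performs the indeterminate load itself, whereas passing their addresses defers the load to end(), which reads them only when it knows they are valid.

#include <stdbool.h>
#include <stdio.h>

struct memcg { int dummy; };

/*
 * Sketch of the begin/end pair: *locked and *flags are written only on
 * the slow path, so on the fast path the caller's locals stay
 * uninitialized.
 */
static struct memcg *begin_page_stat(bool slowpath, bool *locked,
                                     unsigned long *flags)
{
    static struct memcg m;

    if (!slowpath)
        return NULL;    /* fast path: outputs untouched */
    *locked = true;
    *flags = 0x2;
    return &m;
}

/*
 * Fixed shape: take pointers and dereference them only when memcg is
 * set, so the caller never has to load the locals itself.
 */
static void end_page_stat(struct memcg *memcg, bool *locked,
                          unsigned long *flags)
{
    if (memcg && *locked)
        printf("unlock, flags=%lx\n", *flags);
}

int main(void)
{
    bool locked;            /* left uninitialized, like the callers */
    unsigned long flags;
    struct memcg *memcg = begin_page_stat(false, &locked, &flags);

    /*
     * Passing 'locked' and 'flags' by value here would already read
     * indeterminate values; passing their addresses does not.
     */
    end_page_stat(memcg, &locked, &flags);
    return 0;
}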
Although mem_cgroup_end_page_stat handles parameters correctly and touches them only when they hold a sensible value it is caller which loads a potentially uninitialized value which then might allow compiler to do crazy things. I haven't seen any warning from gcc and it seems that the current version (4.9) doesn't exploit this type undefined behavior but Sasha has reported the following: UBSan: Undefined behaviour in mm/rmap.c:1084:2 load of value 255 is not a valid value for type '_Bool' CPU: 4 PID: 8304 Comm: rngd Not tainted 3.18.0-rc2-next-20141029-sasha-00039-g77ed13d-dirty #1427 Call Trace: dump_stack (lib/dump_stack.c:52) ubsan_epilogue (lib/ubsan.c:159) __ubsan_handle_load_invalid_value (lib/ubsan.c:482) page_remove_rmap (mm/rmap.c:1084 mm/rmap.c:1096) unmap_page_range (./arch/x86/include/asm/atomic.h:27 include/linux/mm.h:463 mm/memory.c:1146 mm/memory.c:1258 mm/memory.c:1279 mm/memory.c:1303) unmap_single_vma (mm/memory.c:1348) unmap_vmas (mm/memory.c:1377 (discriminator 3)) exit_mmap (mm/mmap.c:2837) mmput (kernel/fork.c:659) do_exit (./arch/x86/include/asm/thread_info.h:168 kernel/exit.c:462 kernel/exit.c:747) do_group_exit (include/linux/sched.h:775 kernel/exit.c:873) SyS_exit_group (kernel/exit.c:901) tracesys_phase2 (arch/x86/kernel/entry_64.S:529) Fix this by using pointer parameters for both locked and flags and be more robust for future compiler changes even though the current code is implemented correctly. Signed-off-by: Michal Hocko Reported-by: Sasha Levin Acked-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 6 +++--- mm/memcontrol.c | 8 ++++---- mm/page-writeback.c | 4 ++-- mm/rmap.c | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index d4575a1d6e99..de018766be45 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -141,8 +141,8 @@ static inline bool mem_cgroup_disabled(void) struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked, unsigned long *flags); -void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked, - unsigned long flags); +void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked, + unsigned long *flags); void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx, int val); @@ -297,7 +297,7 @@ static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, } static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, - bool locked, unsigned long flags) + bool *locked, unsigned long *flags) { } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b841bf430179..031ca345677b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2053,11 +2053,11 @@ again: * @locked: value received from mem_cgroup_begin_page_stat() * @flags: value received from mem_cgroup_begin_page_stat() */ -void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked, - unsigned long flags) +void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked, + unsigned long *flags) { - if (memcg && locked) - spin_unlock_irqrestore(&memcg->move_lock, flags); + if (memcg && *locked) + spin_unlock_irqrestore(&memcg->move_lock, *flags); rcu_read_unlock(); } diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 19ceae87522d..d5d81f5384d1 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2357,7 +2357,7 @@ int test_clear_page_writeback(struct page *page) 
dec_zone_page_state(page, NR_WRITEBACK); inc_zone_page_state(page, NR_WRITTEN); } - mem_cgroup_end_page_stat(memcg, locked, memcg_flags); + mem_cgroup_end_page_stat(memcg, &locked, &memcg_flags); return ret; } @@ -2399,7 +2399,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write) mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); inc_zone_page_state(page, NR_WRITEBACK); } - mem_cgroup_end_page_stat(memcg, locked, memcg_flags); + mem_cgroup_end_page_stat(memcg, &locked, &memcg_flags); return ret; } diff --git a/mm/rmap.c b/mm/rmap.c index 3e4c7213210c..45eba36fd673 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1053,7 +1053,7 @@ void page_add_file_rmap(struct page *page) __inc_zone_page_state(page, NR_FILE_MAPPED); mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED); } - mem_cgroup_end_page_stat(memcg, locked, flags); + mem_cgroup_end_page_stat(memcg, &locked, &flags); } static void page_remove_file_rmap(struct page *page) @@ -1083,7 +1083,7 @@ static void page_remove_file_rmap(struct page *page) if (unlikely(PageMlocked(page))) clear_page_mlock(page); out: - mem_cgroup_end_page_stat(memcg, locked, flags); + mem_cgroup_end_page_stat(memcg, &locked, &flags); } /** -- cgit v1.2.3-70-g09d2 From 22811c6bc3c764d8935383ad0ddd7a96b45d75dc Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:50 -0800 Subject: mm: memcontrol: remove stale page_cgroup_lock comment There is no cgroup-specific page lock anymore. Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 031ca345677b..78cb3b05a9fa 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2467,10 +2467,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, int isolated; VM_BUG_ON_PAGE(pc->mem_cgroup, page); - /* - * we don't need page_cgroup_lock about tail pages, becase they are not - * accessed by any other context at this point. - */ /* * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page -- cgit v1.2.3-70-g09d2 From 1306a85aed3ec3db98945aafb7dfbe5648a1203c Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:52 -0800 Subject: mm: embed the memcg pointer directly into struct page Memory cgroups used to have 5 per-page pointers. To allow users to disable that amount of overhead during runtime, those pointers were allocated in a separate array, with a translation layer between them and struct page. There is now only one page pointer remaining: the memcg pointer, that indicates which cgroup the page is associated with when charged. The complexity of runtime allocation and the runtime translation overhead is no longer justified to save that *potential* 0.19% of memory. With CONFIG_SLUB, page->mem_cgroup actually sits in the doubleword padding after the page->private member and doesn't even increase struct page, and then this patch actually saves space. Remaining users that care can still compile their kernels without CONFIG_MEMCG. text data bss dec hex filename 8828345 1725264 983040 11536649 b00909 vmlinux.old 8827425 1725264 966656 11519345 afc571 vmlinux.new [mhocko@suse.cz: update Documentation/cgroups/memory.txt] Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Acked-by: Vladimir Davydov Acked-by: David S. Miller Acked-by: KAMEZAWA Hiroyuki Cc: "Kirill A. 
Shutemov" Cc: Michal Hocko Cc: Vladimir Davydov Cc: Tejun Heo Cc: Joonsoo Kim Acked-by: Konstantin Khlebnikov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/cgroups/memory.txt | 5 + include/linux/memcontrol.h | 6 +- include/linux/mm_types.h | 5 + include/linux/mmzone.h | 12 -- include/linux/page_cgroup.h | 53 ------- init/main.c | 7 - mm/memcontrol.c | 124 +++++---------- mm/page_alloc.c | 2 - mm/page_cgroup.c | 319 --------------------------------------- 9 files changed, 46 insertions(+), 487 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt index 67613ff0270c..46b2b5080317 100644 --- a/Documentation/cgroups/memory.txt +++ b/Documentation/cgroups/memory.txt @@ -1,5 +1,10 @@ Memory Resource Controller +NOTE: This document is hopelessly outdated and it asks for a complete + rewrite. It still contains a useful information so we are keeping it + here but make sure to check the current code if you need a deeper + understanding. + NOTE: The Memory Resource Controller has generically been referred to as the memory controller in this document. Do not confuse memory controller used here with the memory controller that is used in hardware. diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index de018766be45..c4d080875164 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -25,7 +25,6 @@ #include struct mem_cgroup; -struct page_cgroup; struct page; struct mm_struct; struct kmem_cache; @@ -466,8 +465,6 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) * memcg_kmem_uncharge_pages: uncharge pages from memcg * @page: pointer to struct page being freed * @order: allocation order. - * - * there is no need to specify memcg here, since it is embedded in page_cgroup */ static inline void memcg_kmem_uncharge_pages(struct page *page, int order) @@ -484,8 +481,7 @@ memcg_kmem_uncharge_pages(struct page *page, int order) * * Needs to be called after memcg_kmem_newpage_charge, regardless of success or * failure of the allocation. if @page is NULL, this function will revert the - * charges. Otherwise, it will commit the memcg given by @memcg to the - * corresponding page_cgroup. + * charges. Otherwise, it will commit @page to @memcg. */ static inline void memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 004e9d17b47e..bf9f57529dcf 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -22,6 +22,7 @@ #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) struct address_space; +struct mem_cgroup; #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ @@ -167,6 +168,10 @@ struct page { struct page *first_page; /* Compound tail pages */ }; +#ifdef CONFIG_MEMCG + struct mem_cgroup *mem_cgroup; +#endif + /* * On machines where all RAM is mapped into kernel address space, * we can simply calculate the virtual address. 
On machines with diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ffe66e381c04..3879d7664dfc 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -722,9 +722,6 @@ typedef struct pglist_data { int nr_zones; #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ struct page *node_mem_map; -#ifdef CONFIG_MEMCG - struct page_cgroup *node_page_cgroup; -#endif #endif #ifndef CONFIG_NO_BOOTMEM struct bootmem_data *bdata; @@ -1078,7 +1075,6 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn) #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) struct page; -struct page_cgroup; struct mem_section { /* * This is, logically, a pointer to an array of struct @@ -1096,14 +1092,6 @@ struct mem_section { /* See declaration of similar field in struct zone */ unsigned long *pageblock_flags; -#ifdef CONFIG_MEMCG - /* - * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use - * section. (see memcontrol.h/page_cgroup.h about this.) - */ - struct page_cgroup *page_cgroup; - unsigned long pad; -#endif /* * WARNING: mem_section must be a power-of-2 in size for the * calculation and use of SECTION_ROOT_MASK to make sense. diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 1289be6b436c..65be35785c86 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -1,59 +1,6 @@ #ifndef __LINUX_PAGE_CGROUP_H #define __LINUX_PAGE_CGROUP_H -struct pglist_data; - -#ifdef CONFIG_MEMCG -struct mem_cgroup; - -/* - * Page Cgroup can be considered as an extended mem_map. - * A page_cgroup page is associated with every page descriptor. The - * page_cgroup helps us identify information about the cgroup - * All page cgroups are allocated at boot or memory hotplug event, - * then the page cgroup for pfn always exists. - */ -struct page_cgroup { - struct mem_cgroup *mem_cgroup; -}; - -extern void pgdat_page_cgroup_init(struct pglist_data *pgdat); - -#ifdef CONFIG_SPARSEMEM -static inline void page_cgroup_init_flatmem(void) -{ -} -extern void page_cgroup_init(void); -#else -extern void page_cgroup_init_flatmem(void); -static inline void page_cgroup_init(void) -{ -} -#endif - -struct page_cgroup *lookup_page_cgroup(struct page *page); - -#else /* !CONFIG_MEMCG */ -struct page_cgroup; - -static inline void pgdat_page_cgroup_init(struct pglist_data *pgdat) -{ -} - -static inline struct page_cgroup *lookup_page_cgroup(struct page *page) -{ - return NULL; -} - -static inline void page_cgroup_init(void) -{ -} - -static inline void page_cgroup_init_flatmem(void) -{ -} -#endif /* CONFIG_MEMCG */ - #include #ifdef CONFIG_MEMCG_SWAP diff --git a/init/main.c b/init/main.c index 321d0ceb26d3..d2e4ead4891f 100644 --- a/init/main.c +++ b/init/main.c @@ -51,7 +51,6 @@ #include #include #include -#include #include #include #include @@ -485,11 +484,6 @@ void __init __weak thread_info_cache_init(void) */ static void __init mm_init(void) { - /* - * page_cgroup requires contiguous pages, - * bigger than MAX_ORDER unless SPARSEMEM. 
- */ - page_cgroup_init_flatmem(); mem_init(); kmem_cache_init(); percpu_init_late(); @@ -627,7 +621,6 @@ asmlinkage __visible void __init start_kernel(void) initrd_start = 0; } #endif - page_cgroup_init(); debug_objects_mem_init(); kmemleak_init(); setup_per_cpu_pageset(); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 78cb3b05a9fa..b864067791dc 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1274,7 +1274,6 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) { struct mem_cgroup_per_zone *mz; struct mem_cgroup *memcg; - struct page_cgroup *pc; struct lruvec *lruvec; if (mem_cgroup_disabled()) { @@ -1282,8 +1281,7 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) goto out; } - pc = lookup_page_cgroup(page); - memcg = pc->mem_cgroup; + memcg = page->mem_cgroup; /* * Swapcache readahead pages are added to the LRU - and * possibly migrated - before they are charged. @@ -2020,16 +2018,13 @@ struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, unsigned long *flags) { struct mem_cgroup *memcg; - struct page_cgroup *pc; rcu_read_lock(); if (mem_cgroup_disabled()) return NULL; - - pc = lookup_page_cgroup(page); again: - memcg = pc->mem_cgroup; + memcg = page->mem_cgroup; if (unlikely(!memcg)) return NULL; @@ -2038,7 +2033,7 @@ again: return memcg; spin_lock_irqsave(&memcg->move_lock, *flags); - if (memcg != pc->mem_cgroup) { + if (memcg != page->mem_cgroup) { spin_unlock_irqrestore(&memcg->move_lock, *flags); goto again; } @@ -2405,15 +2400,12 @@ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) { struct mem_cgroup *memcg; - struct page_cgroup *pc; unsigned short id; swp_entry_t ent; VM_BUG_ON_PAGE(!PageLocked(page), page); - pc = lookup_page_cgroup(page); - memcg = pc->mem_cgroup; - + memcg = page->mem_cgroup; if (memcg) { if (!css_tryget_online(&memcg->css)) memcg = NULL; @@ -2463,10 +2455,9 @@ static void unlock_page_lru(struct page *page, int isolated) static void commit_charge(struct page *page, struct mem_cgroup *memcg, bool lrucare) { - struct page_cgroup *pc = lookup_page_cgroup(page); int isolated; - VM_BUG_ON_PAGE(pc->mem_cgroup, page); + VM_BUG_ON_PAGE(page->mem_cgroup, page); /* * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page @@ -2477,7 +2468,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, /* * Nobody should be changing or seriously looking at - * pc->mem_cgroup at this point: + * page->mem_cgroup at this point: * * - the page is uncharged * @@ -2489,7 +2480,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, * - a page cache insertion, a swapin fault, or a migration * have the page locked */ - pc->mem_cgroup = memcg; + page->mem_cgroup = memcg; if (lrucare) unlock_page_lru(page, isolated); @@ -2972,8 +2963,6 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order) void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) { - struct page_cgroup *pc; - VM_BUG_ON(mem_cgroup_is_root(memcg)); /* The page allocation failed. 
Revert */ @@ -2981,14 +2970,12 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, memcg_uncharge_kmem(memcg, 1 << order); return; } - pc = lookup_page_cgroup(page); - pc->mem_cgroup = memcg; + page->mem_cgroup = memcg; } void __memcg_kmem_uncharge_pages(struct page *page, int order) { - struct page_cgroup *pc = lookup_page_cgroup(page); - struct mem_cgroup *memcg = pc->mem_cgroup; + struct mem_cgroup *memcg = page->mem_cgroup; if (!memcg) return; @@ -2996,7 +2983,7 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order) VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); memcg_uncharge_kmem(memcg, 1 << order); - pc->mem_cgroup = NULL; + page->mem_cgroup = NULL; } #else static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg) @@ -3014,16 +3001,15 @@ static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg) */ void mem_cgroup_split_huge_fixup(struct page *head) { - struct page_cgroup *pc = lookup_page_cgroup(head); int i; if (mem_cgroup_disabled()) return; for (i = 1; i < HPAGE_PMD_NR; i++) - pc[i].mem_cgroup = pc[0].mem_cgroup; + head[i].mem_cgroup = head->mem_cgroup; - __this_cpu_sub(pc[0].mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE], + __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE], HPAGE_PMD_NR); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -3032,7 +3018,6 @@ void mem_cgroup_split_huge_fixup(struct page *head) * mem_cgroup_move_account - move account of the page * @page: the page * @nr_pages: number of regular pages (>1 for huge pages) - * @pc: page_cgroup of the page. * @from: mem_cgroup which the page is moved from. * @to: mem_cgroup which the page is moved to. @from != @to. * @@ -3045,7 +3030,6 @@ void mem_cgroup_split_huge_fixup(struct page *head) */ static int mem_cgroup_move_account(struct page *page, unsigned int nr_pages, - struct page_cgroup *pc, struct mem_cgroup *from, struct mem_cgroup *to) { @@ -3065,7 +3049,7 @@ static int mem_cgroup_move_account(struct page *page, goto out; /* - * Prevent mem_cgroup_migrate() from looking at pc->mem_cgroup + * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup * of its source page while we change it: page migration takes * both pages off the LRU, but page cache replacement doesn't. */ @@ -3073,7 +3057,7 @@ static int mem_cgroup_move_account(struct page *page, goto out; ret = -EINVAL; - if (pc->mem_cgroup != from) + if (page->mem_cgroup != from) goto out_unlock; spin_lock_irqsave(&from->move_lock, flags); @@ -3093,13 +3077,13 @@ static int mem_cgroup_move_account(struct page *page, } /* - * It is safe to change pc->mem_cgroup here because the page + * It is safe to change page->mem_cgroup here because the page * is referenced, charged, and isolated - we can't race with * uncharging, charging, migration, or LRU putback. */ /* caller should have done css_get */ - pc->mem_cgroup = to; + page->mem_cgroup = to; spin_unlock_irqrestore(&from->move_lock, flags); ret = 0; @@ -3174,36 +3158,17 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry, #endif #ifdef CONFIG_DEBUG_VM -static struct page_cgroup *lookup_page_cgroup_used(struct page *page) -{ - struct page_cgroup *pc; - - pc = lookup_page_cgroup(page); - /* - * Can be NULL while feeding pages into the page allocator for - * the first time, i.e. during boot or memory hotplug; - * or when mem_cgroup_disabled(). 
- */ - if (likely(pc) && pc->mem_cgroup) - return pc; - return NULL; -} - bool mem_cgroup_bad_page_check(struct page *page) { if (mem_cgroup_disabled()) return false; - return lookup_page_cgroup_used(page) != NULL; + return page->mem_cgroup != NULL; } void mem_cgroup_print_bad_page(struct page *page) { - struct page_cgroup *pc; - - pc = lookup_page_cgroup_used(page); - if (pc) - pr_alert("pc:%p pc->mem_cgroup:%p\n", pc, pc->mem_cgroup); + pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup); } #endif @@ -5123,7 +5088,6 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, unsigned long addr, pte_t ptent, union mc_target *target) { struct page *page = NULL; - struct page_cgroup *pc; enum mc_target_type ret = MC_TARGET_NONE; swp_entry_t ent = { .val = 0 }; @@ -5137,13 +5101,12 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, if (!page && !ent.val) return ret; if (page) { - pc = lookup_page_cgroup(page); /* * Do only loose check w/o serialization. - * mem_cgroup_move_account() checks the pc is valid or + * mem_cgroup_move_account() checks the page is valid or * not under LRU exclusion. */ - if (pc->mem_cgroup == mc.from) { + if (page->mem_cgroup == mc.from) { ret = MC_TARGET_PAGE; if (target) target->page = page; @@ -5171,15 +5134,13 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd, union mc_target *target) { struct page *page = NULL; - struct page_cgroup *pc; enum mc_target_type ret = MC_TARGET_NONE; page = pmd_page(pmd); VM_BUG_ON_PAGE(!page || !PageHead(page), page); if (!move_anon()) return ret; - pc = lookup_page_cgroup(page); - if (pc->mem_cgroup == mc.from) { + if (page->mem_cgroup == mc.from) { ret = MC_TARGET_PAGE; if (target) { get_page(page); @@ -5378,7 +5339,6 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, enum mc_target_type target_type; union mc_target target; struct page *page; - struct page_cgroup *pc; /* * We don't take compound_lock() here but no race with splitting thp @@ -5399,9 +5359,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, if (target_type == MC_TARGET_PAGE) { page = target.page; if (!isolate_lru_page(page)) { - pc = lookup_page_cgroup(page); if (!mem_cgroup_move_account(page, HPAGE_PMD_NR, - pc, mc.from, mc.to)) { + mc.from, mc.to)) { mc.precharge -= HPAGE_PMD_NR; mc.moved_charge += HPAGE_PMD_NR; } @@ -5429,9 +5388,7 @@ retry: page = target.page; if (isolate_lru_page(page)) goto put; - pc = lookup_page_cgroup(page); - if (!mem_cgroup_move_account(page, 1, pc, - mc.from, mc.to)) { + if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) { mc.precharge--; /* we uncharge from mc.from later. 
*/ mc.moved_charge++; @@ -5619,7 +5576,6 @@ static void __init enable_swap_cgroup(void) void mem_cgroup_swapout(struct page *page, swp_entry_t entry) { struct mem_cgroup *memcg; - struct page_cgroup *pc; unsigned short oldid; VM_BUG_ON_PAGE(PageLRU(page), page); @@ -5628,8 +5584,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) if (!do_swap_account) return; - pc = lookup_page_cgroup(page); - memcg = pc->mem_cgroup; + memcg = page->mem_cgroup; /* Readahead page, never charged */ if (!memcg) @@ -5639,7 +5594,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) VM_BUG_ON_PAGE(oldid, page); mem_cgroup_swap_statistics(memcg, true); - pc->mem_cgroup = NULL; + page->mem_cgroup = NULL; if (!mem_cgroup_is_root(memcg)) page_counter_uncharge(&memcg->memory, 1); @@ -5706,7 +5661,6 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, goto out; if (PageSwapCache(page)) { - struct page_cgroup *pc = lookup_page_cgroup(page); /* * Every swap fault against a single page tries to charge the * page, bail as early as possible. shmem_unuse() encounters @@ -5714,7 +5668,7 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, * the page lock, which serializes swap cache removal, which * in turn serializes uncharging. */ - if (pc->mem_cgroup) + if (page->mem_cgroup) goto out; } @@ -5867,7 +5821,6 @@ static void uncharge_list(struct list_head *page_list) next = page_list->next; do { unsigned int nr_pages = 1; - struct page_cgroup *pc; page = list_entry(next, struct page, lru); next = page->lru.next; @@ -5875,23 +5828,22 @@ static void uncharge_list(struct list_head *page_list) VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(page_count(page), page); - pc = lookup_page_cgroup(page); - if (!pc->mem_cgroup) + if (!page->mem_cgroup) continue; /* * Nobody should be changing or seriously looking at - * pc->mem_cgroup at this point, we have fully + * page->mem_cgroup at this point, we have fully * exclusive access to the page. */ - if (memcg != pc->mem_cgroup) { + if (memcg != page->mem_cgroup) { if (memcg) { uncharge_batch(memcg, pgpgout, nr_anon, nr_file, nr_huge, page); pgpgout = nr_anon = nr_file = nr_huge = 0; } - memcg = pc->mem_cgroup; + memcg = page->mem_cgroup; } if (PageTransHuge(page)) { @@ -5905,7 +5857,7 @@ static void uncharge_list(struct list_head *page_list) else nr_file += nr_pages; - pc->mem_cgroup = NULL; + page->mem_cgroup = NULL; pgpgout++; } while (next != page_list); @@ -5924,14 +5876,11 @@ static void uncharge_list(struct list_head *page_list) */ void mem_cgroup_uncharge(struct page *page) { - struct page_cgroup *pc; - if (mem_cgroup_disabled()) return; /* Don't touch page->lru of any random page, pre-check: */ - pc = lookup_page_cgroup(page); - if (!pc->mem_cgroup) + if (!page->mem_cgroup) return; INIT_LIST_HEAD(&page->lru); @@ -5968,7 +5917,6 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, bool lrucare) { struct mem_cgroup *memcg; - struct page_cgroup *pc; int isolated; VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); @@ -5983,8 +5931,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, return; /* Page cache replacement: new page already charged? */ - pc = lookup_page_cgroup(newpage); - if (pc->mem_cgroup) + if (newpage->mem_cgroup) return; /* @@ -5993,15 +5940,14 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, * uncharged page when the PFN walker finds a page that * reclaim just put back on the LRU but has not released yet. 
*/ - pc = lookup_page_cgroup(oldpage); - memcg = pc->mem_cgroup; + memcg = oldpage->mem_cgroup; if (!memcg) return; if (lrucare) lock_page_lru(oldpage, &isolated); - pc->mem_cgroup = NULL; + oldpage->mem_cgroup = NULL; if (lrucare) unlock_page_lru(oldpage, isolated); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 97b6966816e5..22cfdeffbf69 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -48,7 +48,6 @@ #include #include #include -#include #include #include #include @@ -4853,7 +4852,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, #endif init_waitqueue_head(&pgdat->kswapd_wait); init_waitqueue_head(&pgdat->pfmemalloc_wait); - pgdat_page_cgroup_init(pgdat); for (j = 0; j < MAX_NR_ZONES; j++) { struct zone *zone = pgdat->node_zones + j; diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 5331c2bd85a2..f0f31c1d4d0c 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -1,326 +1,7 @@ #include -#include -#include -#include #include -#include -#include -#include #include -#include #include -#include - -static unsigned long total_usage; - -#if !defined(CONFIG_SPARSEMEM) - - -void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat) -{ - pgdat->node_page_cgroup = NULL; -} - -struct page_cgroup *lookup_page_cgroup(struct page *page) -{ - unsigned long pfn = page_to_pfn(page); - unsigned long offset; - struct page_cgroup *base; - - base = NODE_DATA(page_to_nid(page))->node_page_cgroup; -#ifdef CONFIG_DEBUG_VM - /* - * The sanity checks the page allocator does upon freeing a - * page can reach here before the page_cgroup arrays are - * allocated when feeding a range of pages to the allocator - * for the first time during bootup or memory hotplug. - */ - if (unlikely(!base)) - return NULL; -#endif - offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn; - return base + offset; -} - -static int __init alloc_node_page_cgroup(int nid) -{ - struct page_cgroup *base; - unsigned long table_size; - unsigned long nr_pages; - - nr_pages = NODE_DATA(nid)->node_spanned_pages; - if (!nr_pages) - return 0; - - table_size = sizeof(struct page_cgroup) * nr_pages; - - base = memblock_virt_alloc_try_nid_nopanic( - table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), - BOOTMEM_ALLOC_ACCESSIBLE, nid); - if (!base) - return -ENOMEM; - NODE_DATA(nid)->node_page_cgroup = base; - total_usage += table_size; - return 0; -} - -void __init page_cgroup_init_flatmem(void) -{ - - int nid, fail; - - if (mem_cgroup_disabled()) - return; - - for_each_online_node(nid) { - fail = alloc_node_page_cgroup(nid); - if (fail) - goto fail; - } - printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage); - printk(KERN_INFO "please try 'cgroup_disable=memory' option if you" - " don't want memory cgroups\n"); - return; -fail: - printk(KERN_CRIT "allocation of page_cgroup failed.\n"); - printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n"); - panic("Out of memory"); -} - -#else /* CONFIG_FLAT_NODE_MEM_MAP */ - -struct page_cgroup *lookup_page_cgroup(struct page *page) -{ - unsigned long pfn = page_to_pfn(page); - struct mem_section *section = __pfn_to_section(pfn); -#ifdef CONFIG_DEBUG_VM - /* - * The sanity checks the page allocator does upon freeing a - * page can reach here before the page_cgroup arrays are - * allocated when feeding a range of pages to the allocator - * for the first time during bootup or memory hotplug. 
- */ - if (!section->page_cgroup) - return NULL; -#endif - return section->page_cgroup + pfn; -} - -static void *__meminit alloc_page_cgroup(size_t size, int nid) -{ - gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN; - void *addr = NULL; - - addr = alloc_pages_exact_nid(nid, size, flags); - if (addr) { - kmemleak_alloc(addr, size, 1, flags); - return addr; - } - - if (node_state(nid, N_HIGH_MEMORY)) - addr = vzalloc_node(size, nid); - else - addr = vzalloc(size); - - return addr; -} - -static int __meminit init_section_page_cgroup(unsigned long pfn, int nid) -{ - struct mem_section *section; - struct page_cgroup *base; - unsigned long table_size; - - section = __pfn_to_section(pfn); - - if (section->page_cgroup) - return 0; - - table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION; - base = alloc_page_cgroup(table_size, nid); - - /* - * The value stored in section->page_cgroup is (base - pfn) - * and it does not point to the memory block allocated above, - * causing kmemleak false positives. - */ - kmemleak_not_leak(base); - - if (!base) { - printk(KERN_ERR "page cgroup allocation failure\n"); - return -ENOMEM; - } - - /* - * The passed "pfn" may not be aligned to SECTION. For the calculation - * we need to apply a mask. - */ - pfn &= PAGE_SECTION_MASK; - section->page_cgroup = base - pfn; - total_usage += table_size; - return 0; -} -#ifdef CONFIG_MEMORY_HOTPLUG -static void free_page_cgroup(void *addr) -{ - if (is_vmalloc_addr(addr)) { - vfree(addr); - } else { - struct page *page = virt_to_page(addr); - size_t table_size = - sizeof(struct page_cgroup) * PAGES_PER_SECTION; - - BUG_ON(PageReserved(page)); - kmemleak_free(addr); - free_pages_exact(addr, table_size); - } -} - -static void __free_page_cgroup(unsigned long pfn) -{ - struct mem_section *ms; - struct page_cgroup *base; - - ms = __pfn_to_section(pfn); - if (!ms || !ms->page_cgroup) - return; - base = ms->page_cgroup + pfn; - free_page_cgroup(base); - ms->page_cgroup = NULL; -} - -static int __meminit online_page_cgroup(unsigned long start_pfn, - unsigned long nr_pages, - int nid) -{ - unsigned long start, end, pfn; - int fail = 0; - - start = SECTION_ALIGN_DOWN(start_pfn); - end = SECTION_ALIGN_UP(start_pfn + nr_pages); - - if (nid == -1) { - /* - * In this case, "nid" already exists and contains valid memory. - * "start_pfn" passed to us is a pfn which is an arg for - * online__pages(), and start_pfn should exist. 
- */ - nid = pfn_to_nid(start_pfn); - VM_BUG_ON(!node_state(nid, N_ONLINE)); - } - - for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) { - if (!pfn_present(pfn)) - continue; - fail = init_section_page_cgroup(pfn, nid); - } - if (!fail) - return 0; - - /* rollback */ - for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) - __free_page_cgroup(pfn); - - return -ENOMEM; -} - -static int __meminit offline_page_cgroup(unsigned long start_pfn, - unsigned long nr_pages, int nid) -{ - unsigned long start, end, pfn; - - start = SECTION_ALIGN_DOWN(start_pfn); - end = SECTION_ALIGN_UP(start_pfn + nr_pages); - - for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) - __free_page_cgroup(pfn); - return 0; - -} - -static int __meminit page_cgroup_callback(struct notifier_block *self, - unsigned long action, void *arg) -{ - struct memory_notify *mn = arg; - int ret = 0; - switch (action) { - case MEM_GOING_ONLINE: - ret = online_page_cgroup(mn->start_pfn, - mn->nr_pages, mn->status_change_nid); - break; - case MEM_OFFLINE: - offline_page_cgroup(mn->start_pfn, - mn->nr_pages, mn->status_change_nid); - break; - case MEM_CANCEL_ONLINE: - offline_page_cgroup(mn->start_pfn, - mn->nr_pages, mn->status_change_nid); - break; - case MEM_GOING_OFFLINE: - break; - case MEM_ONLINE: - case MEM_CANCEL_OFFLINE: - break; - } - - return notifier_from_errno(ret); -} - -#endif - -void __init page_cgroup_init(void) -{ - unsigned long pfn; - int nid; - - if (mem_cgroup_disabled()) - return; - - for_each_node_state(nid, N_MEMORY) { - unsigned long start_pfn, end_pfn; - - start_pfn = node_start_pfn(nid); - end_pfn = node_end_pfn(nid); - /* - * start_pfn and end_pfn may not be aligned to SECTION and the - * page->flags of out of node pages are not initialized. So we - * scan [start_pfn, the biggest section's pfn < end_pfn) here. - */ - for (pfn = start_pfn; - pfn < end_pfn; - pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) { - - if (!pfn_valid(pfn)) - continue; - /* - * Nodes's pfns can be overlapping. - * We know some arch can have a nodes layout such as - * -------------pfn--------------> - * N0 | N1 | N2 | N0 | N1 | N2|.... - */ - if (pfn_to_nid(pfn) != nid) - continue; - if (init_section_page_cgroup(pfn, nid)) - goto oom; - } - } - hotplug_memory_notifier(page_cgroup_callback, 0); - printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage); - printk(KERN_INFO "please try 'cgroup_disable=memory' option if you " - "don't want memory cgroups\n"); - return; -oom: - printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n"); - panic("Out of memory"); -} - -void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat) -{ - return; -} - -#endif - #ifdef CONFIG_MEMCG_SWAP -- cgit v1.2.3-70-g09d2 From 5d1ea48bdde67898e87d6d8f511fd097fa64c749 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:55 -0800 Subject: mm: page_cgroup: rename file to mm/swap_cgroup.c Now that the external page_cgroup data structure and its lookup is gone, the only code remaining in there is swap slot accounting. Rename it and move the conditional compilation into mm/Makefile. Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Acked-by: Vladimir Davydov Acked-by: David S. Miller Acked-by: KAMEZAWA Hiroyuki Cc: "Kirill A. 
Shutemov" Cc: Tejun Heo Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 2 +- include/linux/page_cgroup.h | 40 --------- include/linux/swap_cgroup.h | 42 +++++++++ mm/Makefile | 3 +- mm/memcontrol.c | 2 +- mm/page_cgroup.c | 211 -------------------------------------------- mm/swap_cgroup.c | 208 +++++++++++++++++++++++++++++++++++++++++++ mm/swap_state.c | 1 - mm/swapfile.c | 2 +- 9 files changed, 255 insertions(+), 256 deletions(-) delete mode 100644 include/linux/page_cgroup.h create mode 100644 include/linux/swap_cgroup.h delete mode 100644 mm/page_cgroup.c create mode 100644 mm/swap_cgroup.c (limited to 'mm/memcontrol.c') diff --git a/MAINTAINERS b/MAINTAINERS index 0d6469a2cf70..0aedd3e1804b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2606,7 +2606,7 @@ L: cgroups@vger.kernel.org L: linux-mm@kvack.org S: Maintained F: mm/memcontrol.c -F: mm/page_cgroup.c +F: mm/swap_cgroup.c CORETEMP HARDWARE MONITORING DRIVER M: Fenghua Yu diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h deleted file mode 100644 index 65be35785c86..000000000000 --- a/include/linux/page_cgroup.h +++ /dev/null @@ -1,40 +0,0 @@ -#ifndef __LINUX_PAGE_CGROUP_H -#define __LINUX_PAGE_CGROUP_H - -#include - -#ifdef CONFIG_MEMCG_SWAP -extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, - unsigned short old, unsigned short new); -extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); -extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); -extern int swap_cgroup_swapon(int type, unsigned long max_pages); -extern void swap_cgroup_swapoff(int type); -#else - -static inline -unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) -{ - return 0; -} - -static inline -unsigned short lookup_swap_cgroup_id(swp_entry_t ent) -{ - return 0; -} - -static inline int -swap_cgroup_swapon(int type, unsigned long max_pages) -{ - return 0; -} - -static inline void swap_cgroup_swapoff(int type) -{ - return; -} - -#endif /* CONFIG_MEMCG_SWAP */ - -#endif /* __LINUX_PAGE_CGROUP_H */ diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h new file mode 100644 index 000000000000..145306bdc92f --- /dev/null +++ b/include/linux/swap_cgroup.h @@ -0,0 +1,42 @@ +#ifndef __LINUX_SWAP_CGROUP_H +#define __LINUX_SWAP_CGROUP_H + +#include + +#ifdef CONFIG_MEMCG_SWAP + +extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, + unsigned short old, unsigned short new); +extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); +extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); +extern int swap_cgroup_swapon(int type, unsigned long max_pages); +extern void swap_cgroup_swapoff(int type); + +#else + +static inline +unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) +{ + return 0; +} + +static inline +unsigned short lookup_swap_cgroup_id(swp_entry_t ent) +{ + return 0; +} + +static inline int +swap_cgroup_swapon(int type, unsigned long max_pages) +{ + return 0; +} + +static inline void swap_cgroup_swapoff(int type) +{ + return; +} + +#endif /* CONFIG_MEMCG_SWAP */ + +#endif /* __LINUX_SWAP_CGROUP_H */ diff --git a/mm/Makefile b/mm/Makefile index 6d9f40e922f7..b3c6ce932c64 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -56,7 +56,8 @@ obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_QUICKLIST) += quicklist.o obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o obj-$(CONFIG_PAGE_COUNTER) += page_counter.o -obj-$(CONFIG_MEMCG) += memcontrol.o page_cgroup.o vmpressure.o 
+obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o +obj-$(CONFIG_MEMCG_SWAP) += swap_cgroup.o obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b864067791dc..ab270e34ba3e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -51,7 +51,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c deleted file mode 100644 index f0f31c1d4d0c..000000000000 --- a/mm/page_cgroup.c +++ /dev/null @@ -1,211 +0,0 @@ -#include -#include -#include -#include - -#ifdef CONFIG_MEMCG_SWAP - -static DEFINE_MUTEX(swap_cgroup_mutex); -struct swap_cgroup_ctrl { - struct page **map; - unsigned long length; - spinlock_t lock; -}; - -static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; - -struct swap_cgroup { - unsigned short id; -}; -#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup)) - -/* - * SwapCgroup implements "lookup" and "exchange" operations. - * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge - * against SwapCache. At swap_free(), this is accessed directly from swap. - * - * This means, - * - we have no race in "exchange" when we're accessed via SwapCache because - * SwapCache(and its swp_entry) is under lock. - * - When called via swap_free(), there is no user of this entry and no race. - * Then, we don't need lock around "exchange". - * - * TODO: we can push these buffers out to HIGHMEM. - */ - -/* - * allocate buffer for swap_cgroup. - */ -static int swap_cgroup_prepare(int type) -{ - struct page *page; - struct swap_cgroup_ctrl *ctrl; - unsigned long idx, max; - - ctrl = &swap_cgroup_ctrl[type]; - - for (idx = 0; idx < ctrl->length; idx++) { - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) - goto not_enough_page; - ctrl->map[idx] = page; - } - return 0; -not_enough_page: - max = idx; - for (idx = 0; idx < max; idx++) - __free_page(ctrl->map[idx]); - - return -ENOMEM; -} - -static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, - struct swap_cgroup_ctrl **ctrlp) -{ - pgoff_t offset = swp_offset(ent); - struct swap_cgroup_ctrl *ctrl; - struct page *mappage; - struct swap_cgroup *sc; - - ctrl = &swap_cgroup_ctrl[swp_type(ent)]; - if (ctrlp) - *ctrlp = ctrl; - - mappage = ctrl->map[offset / SC_PER_PAGE]; - sc = page_address(mappage); - return sc + offset % SC_PER_PAGE; -} - -/** - * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry. - * @ent: swap entry to be cmpxchged - * @old: old id - * @new: new id - * - * Returns old id at success, 0 at failure. - * (There is no mem_cgroup using 0 as its id) - */ -unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, - unsigned short old, unsigned short new) -{ - struct swap_cgroup_ctrl *ctrl; - struct swap_cgroup *sc; - unsigned long flags; - unsigned short retval; - - sc = lookup_swap_cgroup(ent, &ctrl); - - spin_lock_irqsave(&ctrl->lock, flags); - retval = sc->id; - if (retval == old) - sc->id = new; - else - retval = 0; - spin_unlock_irqrestore(&ctrl->lock, flags); - return retval; -} - -/** - * swap_cgroup_record - record mem_cgroup for this swp_entry. - * @ent: swap entry to be recorded into - * @id: mem_cgroup to be recorded - * - * Returns old value at success, 0 at failure. - * (Of course, old value can be 0.) 
- */ -unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) -{ - struct swap_cgroup_ctrl *ctrl; - struct swap_cgroup *sc; - unsigned short old; - unsigned long flags; - - sc = lookup_swap_cgroup(ent, &ctrl); - - spin_lock_irqsave(&ctrl->lock, flags); - old = sc->id; - sc->id = id; - spin_unlock_irqrestore(&ctrl->lock, flags); - - return old; -} - -/** - * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry - * @ent: swap entry to be looked up. - * - * Returns ID of mem_cgroup at success. 0 at failure. (0 is invalid ID) - */ -unsigned short lookup_swap_cgroup_id(swp_entry_t ent) -{ - return lookup_swap_cgroup(ent, NULL)->id; -} - -int swap_cgroup_swapon(int type, unsigned long max_pages) -{ - void *array; - unsigned long array_size; - unsigned long length; - struct swap_cgroup_ctrl *ctrl; - - if (!do_swap_account) - return 0; - - length = DIV_ROUND_UP(max_pages, SC_PER_PAGE); - array_size = length * sizeof(void *); - - array = vzalloc(array_size); - if (!array) - goto nomem; - - ctrl = &swap_cgroup_ctrl[type]; - mutex_lock(&swap_cgroup_mutex); - ctrl->length = length; - ctrl->map = array; - spin_lock_init(&ctrl->lock); - if (swap_cgroup_prepare(type)) { - /* memory shortage */ - ctrl->map = NULL; - ctrl->length = 0; - mutex_unlock(&swap_cgroup_mutex); - vfree(array); - goto nomem; - } - mutex_unlock(&swap_cgroup_mutex); - - return 0; -nomem: - printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n"); - printk(KERN_INFO - "swap_cgroup can be disabled by swapaccount=0 boot option\n"); - return -ENOMEM; -} - -void swap_cgroup_swapoff(int type) -{ - struct page **map; - unsigned long i, length; - struct swap_cgroup_ctrl *ctrl; - - if (!do_swap_account) - return; - - mutex_lock(&swap_cgroup_mutex); - ctrl = &swap_cgroup_ctrl[type]; - map = ctrl->map; - length = ctrl->length; - ctrl->map = NULL; - ctrl->length = 0; - mutex_unlock(&swap_cgroup_mutex); - - if (map) { - for (i = 0; i < length; i++) { - struct page *page = map[i]; - if (page) - __free_page(page); - } - vfree(map); - } -} - -#endif diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c new file mode 100644 index 000000000000..b5f7f24b8dd1 --- /dev/null +++ b/mm/swap_cgroup.c @@ -0,0 +1,208 @@ +#include +#include +#include + +#include /* depends on mm.h include */ + +static DEFINE_MUTEX(swap_cgroup_mutex); +struct swap_cgroup_ctrl { + struct page **map; + unsigned long length; + spinlock_t lock; +}; + +static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; + +struct swap_cgroup { + unsigned short id; +}; +#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup)) + +/* + * SwapCgroup implements "lookup" and "exchange" operations. + * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge + * against SwapCache. At swap_free(), this is accessed directly from swap. + * + * This means, + * - we have no race in "exchange" when we're accessed via SwapCache because + * SwapCache(and its swp_entry) is under lock. + * - When called via swap_free(), there is no user of this entry and no race. + * Then, we don't need lock around "exchange". + * + * TODO: we can push these buffers out to HIGHMEM. + */ + +/* + * allocate buffer for swap_cgroup. 
+ */ +static int swap_cgroup_prepare(int type) +{ + struct page *page; + struct swap_cgroup_ctrl *ctrl; + unsigned long idx, max; + + ctrl = &swap_cgroup_ctrl[type]; + + for (idx = 0; idx < ctrl->length; idx++) { + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + goto not_enough_page; + ctrl->map[idx] = page; + } + return 0; +not_enough_page: + max = idx; + for (idx = 0; idx < max; idx++) + __free_page(ctrl->map[idx]); + + return -ENOMEM; +} + +static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, + struct swap_cgroup_ctrl **ctrlp) +{ + pgoff_t offset = swp_offset(ent); + struct swap_cgroup_ctrl *ctrl; + struct page *mappage; + struct swap_cgroup *sc; + + ctrl = &swap_cgroup_ctrl[swp_type(ent)]; + if (ctrlp) + *ctrlp = ctrl; + + mappage = ctrl->map[offset / SC_PER_PAGE]; + sc = page_address(mappage); + return sc + offset % SC_PER_PAGE; +} + +/** + * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry. + * @ent: swap entry to be cmpxchged + * @old: old id + * @new: new id + * + * Returns old id at success, 0 at failure. + * (There is no mem_cgroup using 0 as its id) + */ +unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, + unsigned short old, unsigned short new) +{ + struct swap_cgroup_ctrl *ctrl; + struct swap_cgroup *sc; + unsigned long flags; + unsigned short retval; + + sc = lookup_swap_cgroup(ent, &ctrl); + + spin_lock_irqsave(&ctrl->lock, flags); + retval = sc->id; + if (retval == old) + sc->id = new; + else + retval = 0; + spin_unlock_irqrestore(&ctrl->lock, flags); + return retval; +} + +/** + * swap_cgroup_record - record mem_cgroup for this swp_entry. + * @ent: swap entry to be recorded into + * @id: mem_cgroup to be recorded + * + * Returns old value at success, 0 at failure. + * (Of course, old value can be 0.) + */ +unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) +{ + struct swap_cgroup_ctrl *ctrl; + struct swap_cgroup *sc; + unsigned short old; + unsigned long flags; + + sc = lookup_swap_cgroup(ent, &ctrl); + + spin_lock_irqsave(&ctrl->lock, flags); + old = sc->id; + sc->id = id; + spin_unlock_irqrestore(&ctrl->lock, flags); + + return old; +} + +/** + * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry + * @ent: swap entry to be looked up. + * + * Returns ID of mem_cgroup at success. 0 at failure. 
(0 is invalid ID) + */ +unsigned short lookup_swap_cgroup_id(swp_entry_t ent) +{ + return lookup_swap_cgroup(ent, NULL)->id; +} + +int swap_cgroup_swapon(int type, unsigned long max_pages) +{ + void *array; + unsigned long array_size; + unsigned long length; + struct swap_cgroup_ctrl *ctrl; + + if (!do_swap_account) + return 0; + + length = DIV_ROUND_UP(max_pages, SC_PER_PAGE); + array_size = length * sizeof(void *); + + array = vzalloc(array_size); + if (!array) + goto nomem; + + ctrl = &swap_cgroup_ctrl[type]; + mutex_lock(&swap_cgroup_mutex); + ctrl->length = length; + ctrl->map = array; + spin_lock_init(&ctrl->lock); + if (swap_cgroup_prepare(type)) { + /* memory shortage */ + ctrl->map = NULL; + ctrl->length = 0; + mutex_unlock(&swap_cgroup_mutex); + vfree(array); + goto nomem; + } + mutex_unlock(&swap_cgroup_mutex); + + return 0; +nomem: + printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n"); + printk(KERN_INFO + "swap_cgroup can be disabled by swapaccount=0 boot option\n"); + return -ENOMEM; +} + +void swap_cgroup_swapoff(int type) +{ + struct page **map; + unsigned long i, length; + struct swap_cgroup_ctrl *ctrl; + + if (!do_swap_account) + return; + + mutex_lock(&swap_cgroup_mutex); + ctrl = &swap_cgroup_ctrl[type]; + map = ctrl->map; + length = ctrl->length; + ctrl->map = NULL; + ctrl->length = 0; + mutex_unlock(&swap_cgroup_mutex); + + if (map) { + for (i = 0; i < length; i++) { + struct page *page = map[i]; + if (page) + __free_page(page); + } + vfree(map); + } +} diff --git a/mm/swap_state.c b/mm/swap_state.c index 154444918685..9711342987a0 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -17,7 +17,6 @@ #include #include #include -#include #include diff --git a/mm/swapfile.c b/mm/swapfile.c index 8798b2e0ac59..63f55ccb9b26 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -38,7 +38,7 @@ #include #include #include -#include +#include static bool swap_count_continued(struct swap_info_struct *, pgoff_t, unsigned char); -- cgit v1.2.3-70-g09d2 From 9edad6ea0f1416415f6fe31cc9d1dbc3817803ed Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:58 -0800 Subject: mm: move page->mem_cgroup bad page handling into generic code Now that the external page_cgroup data structure and its lookup is gone, let the generic bad_page() check for page->mem_cgroup sanity. Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Acked-by: Vladimir Davydov Acked-by: David S. Miller Cc: KAMEZAWA Hiroyuki Cc: "Kirill A. 
Shutemov" Cc: Tejun Heo Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 17 ----------------- init/Kconfig | 12 ------------ mm/debug.c | 5 ++++- mm/memcontrol.c | 15 --------------- mm/page_alloc.c | 12 ++++++++---- 5 files changed, 12 insertions(+), 49 deletions(-) (limited to 'mm/memcontrol.c') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index c4d080875164..6ea9f919e888 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -173,10 +173,6 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, void mem_cgroup_split_huge_fixup(struct page *head); #endif -#ifdef CONFIG_DEBUG_VM -bool mem_cgroup_bad_page_check(struct page *page); -void mem_cgroup_print_bad_page(struct page *page); -#endif #else /* CONFIG_MEMCG */ struct mem_cgroup; @@ -346,19 +342,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) } #endif /* CONFIG_MEMCG */ -#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM) -static inline bool -mem_cgroup_bad_page_check(struct page *page) -{ - return false; -} - -static inline void -mem_cgroup_print_bad_page(struct page *page) -{ -} -#endif - enum { UNDER_LIMIT, SOFT_LIMIT, diff --git a/init/Kconfig b/init/Kconfig index 46768752130d..7e9fbd48e2ab 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -983,18 +983,6 @@ config MEMCG Provides a memory resource controller that manages both anonymous memory and page cache. (See Documentation/cgroups/memory.txt) - Note that setting this option increases fixed memory overhead - associated with each page of memory in the system. By this, - 8(16)bytes/PAGE_SIZE on 32(64)bit system will be occupied by memory - usage tracking struct at boot. Total amount of this is printed out - at boot. - - Only enable when you're ok with these trade offs and really - sure you need the memory resource controller. Even when you enable - this, you can set "cgroup_disable=memory" at your boot option to - disable memory resource controller and you can avoid overheads. 
- (and lose benefits of memory resource controller) - config MEMCG_SWAP bool "Memory Resource Controller Swap Extension" depends on MEMCG && SWAP diff --git a/mm/debug.c b/mm/debug.c index 5ce45c9a29b5..0e58f3211f89 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -95,7 +95,10 @@ void dump_page_badflags(struct page *page, const char *reason, dump_flags(page->flags & badflags, pageflag_names, ARRAY_SIZE(pageflag_names)); } - mem_cgroup_print_bad_page(page); +#ifdef CONFIG_MEMCG + if (page->mem_cgroup) + pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup); +#endif } void dump_page(struct page *page, const char *reason) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ab270e34ba3e..1869cb64d089 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3157,21 +3157,6 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry, } #endif -#ifdef CONFIG_DEBUG_VM -bool mem_cgroup_bad_page_check(struct page *page) -{ - if (mem_cgroup_disabled()) - return false; - - return page->mem_cgroup != NULL; -} - -void mem_cgroup_print_bad_page(struct page *page) -{ - pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup); -} -#endif - static DEFINE_MUTEX(memcg_limit_mutex); static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 22cfdeffbf69..a7198c065999 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -640,8 +640,10 @@ static inline int free_pages_check(struct page *page) bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; bad_flags = PAGE_FLAGS_CHECK_AT_FREE; } - if (unlikely(mem_cgroup_bad_page_check(page))) - bad_reason = "cgroup check failed"; +#ifdef CONFIG_MEMCG + if (unlikely(page->mem_cgroup)) + bad_reason = "page still charged to cgroup"; +#endif if (unlikely(bad_reason)) { bad_page(page, bad_reason, bad_flags); return 1; @@ -900,8 +902,10 @@ static inline int check_new_page(struct page *page) bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; bad_flags = PAGE_FLAGS_CHECK_AT_PREP; } - if (unlikely(mem_cgroup_bad_page_check(page))) - bad_reason = "cgroup check failed"; +#ifdef CONFIG_MEMCG + if (unlikely(page->mem_cgroup)) + bad_reason = "page still charged to cgroup"; +#endif if (unlikely(bad_reason)) { bad_page(page, bad_reason, bad_flags); return 1; -- cgit v1.2.3-70-g09d2
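A note on the data structure the first patch carries over into mm/swap_cgroup.c: the swap cgroup map is a two-level array of pages, one unsigned short memcg id per swap slot, and lookup_swap_cgroup() turns a swap offset into (map page, slot within page) using SC_PER_PAGE. The standalone C sketch below models only that sizing and index arithmetic so it can be compiled and run outside the kernel; demo_ctrl, demo_swapon() and demo_lookup() are names invented for this illustration, and the fixed PAGE_SIZE, the use of calloc() instead of vzalloc()/alloc_page(), and the absence of the per-control spinlock are all simplifications, not kernel behaviour.

/*
 * Illustrative sketch (not kernel code): one unsigned short id per swap
 * slot, stored in an array of "pages", indexed as in lookup_swap_cgroup().
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL                        /* assumed for the demo */
struct swap_cgroup { unsigned short id; };
#define SC_PER_PAGE (PAGE_SIZE / sizeof(struct swap_cgroup))

struct demo_ctrl {
	unsigned short **map;   /* one block of SC_PER_PAGE ids per entry */
	unsigned long length;   /* number of such blocks */
};

/* mirrors swap_cgroup_swapon(): size the map to cover max_pages slots */
static int demo_swapon(struct demo_ctrl *ctrl, unsigned long max_pages)
{
	unsigned long i, length = (max_pages + SC_PER_PAGE - 1) / SC_PER_PAGE;

	ctrl->map = calloc(length, sizeof(*ctrl->map));
	if (!ctrl->map)
		return -1;
	for (i = 0; i < length; i++) {
		ctrl->map[i] = calloc(SC_PER_PAGE, sizeof(unsigned short));
		if (!ctrl->map[i])
			return -1;      /* real code unwinds and frees here */
	}
	ctrl->length = length;
	return 0;
}

/* mirrors lookup_swap_cgroup(): two-level index into the id array */
static unsigned short *demo_lookup(struct demo_ctrl *ctrl, unsigned long offset)
{
	return &ctrl->map[offset / SC_PER_PAGE][offset % SC_PER_PAGE];
}

int main(void)
{
	struct demo_ctrl ctrl;
	unsigned long offset = 5000;    /* some swap slot */

	if (demo_swapon(&ctrl, 8192))
		return 1;

	/* record, then look up, the owning memcg id for that slot */
	*demo_lookup(&ctrl, offset) = 42;
	printf("slot %lu -> memcg id %u (map page %lu, index %lu)\n",
	       offset, (unsigned)*demo_lookup(&ctrl, offset),
	       offset / SC_PER_PAGE, offset % SC_PER_PAGE);
	return 0;
}

In the kernel proper, swap_cgroup_record() and swap_cgroup_cmpxchg() additionally take ctrl->lock around the read-modify-write of sc->id; the sketch omits that because it is single-threaded.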
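The second patch replaces the memcg-private mem_cgroup_bad_page_check()/mem_cgroup_print_bad_page() pair with a direct test of page->mem_cgroup in the generic free/prep checks. The sketch below is only a stand-in to show the shape of that check: struct demo_page and demo_free_pages_check() are invented names, and printing to stderr stands in for bad_page(); the real code in mm/page_alloc.c sets bad_reason under #ifdef CONFIG_MEMCG alongside the other flag checks.

/* Sketch of the free-time sanity check: a page reaching the allocator with
 * a non-NULL mem_cgroup pointer is reported as still charged to a cgroup. */
#include <stdio.h>

struct demo_page {
	unsigned long flags;
	void *mem_cgroup;       /* owning memcg, NULL once fully uncharged */
};

static int demo_free_pages_check(const struct demo_page *page)
{
	const char *bad_reason = NULL;

	if (page->mem_cgroup)   /* mirrors the new CONFIG_MEMCG branch */
		bad_reason = "page still charged to cgroup";
	if (bad_reason) {
		fprintf(stderr, "BUG: bad page: %s\n", bad_reason);
		return 1;       /* caller would refuse to free the page */
	}
	return 0;
}

int main(void)
{
	struct demo_page ok = { .flags = 0, .mem_cgroup = NULL };
	struct demo_page leaked = { .flags = 0, .mem_cgroup = (void *)&ok };

	printf("clean page:  %d\n", demo_free_pages_check(&ok));
	printf("leaked page: %d\n", demo_free_pages_check(&leaked));
	return 0;
}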