From b38a872596dad80bd77d98f5fdbc58cc8f438dbb Mon Sep 17 00:00:00 2001
From: Xishi Qiu
Date: Tue, 12 Nov 2013 15:07:20 -0800
Subject: mm: use populated_zone() instead of if(zone->present_pages)

Use "if (populated_zone(zone))" instead of "if (zone->present_pages)".
Simplify the code, no functional change.

Signed-off-by: Xishi Qiu
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'mm/page_alloc.c')

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 73d812f16dde..3d1d75a6629f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4266,7 +4266,7 @@ static __meminit void zone_pcp_init(struct zone *zone)
 	 */
 	zone->pageset = &boot_pageset;
 
-	if (zone->present_pages)
+	if (populated_zone(zone))
 		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
 			zone->name, zone->present_pages,
 					 zone_batchsize(zone));
@@ -5160,7 +5160,7 @@ static void check_for_memory(pg_data_t *pgdat, int nid)
 
 	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
 		struct zone *zone = &pgdat->node_zones[zone_type];
-		if (zone->present_pages) {
+		if (populated_zone(zone)) {
 			node_set_state(nid, N_HIGH_MEMORY);
 			if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
 			    zone_type <= ZONE_NORMAL)
--
cgit v1.2.3-70-g09d2

From b9921ecdee66984b00c38c00a358ef3f611d2b50 Mon Sep 17 00:00:00 2001
From: Qiang Huang
Date: Tue, 12 Nov 2013 15:07:22 -0800
Subject: mm: add a helper function to check may oom condition

Use a helper function to check whether we need to deal with the oom
condition.

Signed-off-by: Qiang Huang
Acked-by: David Rientjes
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/oom.h | 5 +++++
 mm/memcontrol.c     | 9 +--------
 mm/page_alloc.c     | 2 +-
 3 files changed, 7 insertions(+), 9 deletions(-)

(limited to 'mm/page_alloc.c')

diff --git a/include/linux/oom.h b/include/linux/oom.h
index da60007075b5..4cd62677feb9 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -82,6 +82,11 @@ static inline void oom_killer_enable(void)
 	oom_killer_disabled = false;
 }
 
+static inline bool oom_gfp_allowed(gfp_t gfp_mask)
+{
+	return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
+}
+
 extern struct task_struct *find_lock_task_mm(struct task_struct *p);
 
 /* sysctls */
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 13b9d0f221b8..3427de9897a5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2984,21 +2984,14 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
 	struct res_counter *fail_res;
 	struct mem_cgroup *_memcg;
 	int ret = 0;
-	bool may_oom;
 
 	ret = res_counter_charge(&memcg->kmem, size, &fail_res);
 	if (ret)
 		return ret;
 
-	/*
-	 * Conditions under which we can wait for the oom_killer. Those are
-	 * the same conditions tested by the core page allocator
-	 */
-	may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
-
 	_memcg = memcg;
 	ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
-				      &_memcg, may_oom);
+				      &_memcg, oom_gfp_allowed(gfp));
 
 	if (ret == -EINTR) {
 		/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3d1d75a6629f..e0412c026e0d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2593,7 +2593,7 @@ rebalance:
 	 * running out of options and have to consider going OOM
 	 */
 	if (!did_some_progress) {
-		if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+		if (oom_gfp_allowed(gfp_mask)) {
 			if (oom_killer_disabled)
 				goto nopage;
 			/* Coredumps can quickly deplete all memory reserves */
--
cgit v1.2.3-70-g09d2

From bfc4f9d5206739d0612782508a39e93b0793609d Mon Sep 17 00:00:00 2001
From: Zhang Yanfei
Date: Tue, 12 Nov 2013 15:07:48 -0800
Subject: mm/page_alloc.c: remove unused macro LONG_ALIGN

Signed-off-by: Zhang Yanfei
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'mm/page_alloc.c')

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e0412c026e0d..770dbb43465f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3881,8 +3881,6 @@ static inline unsigned long wait_table_bits(unsigned long size)
 	return ffz(~size);
 }
 
-#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
-
 /*
  * Check if a pageblock contains reserved pages
  */
--
cgit v1.2.3-70-g09d2

From 5d0f3f72efb1c1968ce1f56c58f9e3e6495effa6 Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Tue, 12 Nov 2013 15:08:18 -0800
Subject: mm: fix page_group_by_mobility_disabled breakage

Currently, set_pageblock_migratetype() screws up MIGRATE_CMA and
MIGRATE_ISOLATE if page_group_by_mobility_disabled is true.  It rewrites
the argument to MIGRATE_UNMOVABLE, and these attributes are lost.

The problem was introduced by commit 49255c619fbd ("page allocator: move
check for disabled anti-fragmentation out of fastpath").  A four-year-old
issue probably means that nobody uses page_group_by_mobility_disabled,
but this patch fixes the problem anyway.

Signed-off-by: KOSAKI Motohiro
Acked-by: Mel Gorman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'mm/page_alloc.c')

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 770dbb43465f..5a9883614d99 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -234,8 +234,8 @@ int page_group_by_mobility_disabled __read_mostly;
 
 void set_pageblock_migratetype(struct page *page, int migratetype)
 {
-
-	if (unlikely(page_group_by_mobility_disabled))
+	if (unlikely(page_group_by_mobility_disabled &&
+		     migratetype < MIGRATE_PCPTYPES))
 		migratetype = MIGRATE_UNMOVABLE;
 
 	set_pageblock_flags_group(page, (unsigned long)migratetype,
--
cgit v1.2.3-70-g09d2

From 52c8f6a5aeb0bdd396849ecaa72d96f8175528f5 Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Tue, 12 Nov 2013 15:08:19 -0800
Subject: mm: get rid of unnecessary overhead of trace_mm_page_alloc_extfrag()

In general, every tracepoint should have zero overhead when it is
disabled.  However, trace_mm_page_alloc_extfrag() is one exception: it
evaluates "new_type == start_migratetype" even when the tracepoint is
disabled.

However, the computation can be moved into the tracepoint's
TP_fast_assign(), and TP_fast_assign() exists for exactly this purpose.
This patch does so.
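
As an illustration of the pattern (a sketch assembled from the diff
below, not additional code in this patch): an expression passed as a
tracepoint argument is computed at the call site, while an expression
inside TP_fast_assign() is computed only when the event actually fires.

	/* before: the comparison is computed on every call */
	trace_mm_page_alloc_extfrag(page, order,
		current_order, start_migratetype, migratetype,
		new_type == start_migratetype);

	/* after: pass the raw value; compare it in TP_fast_assign(),
	 * which only runs when the tracepoint is enabled and hit
	 */
	trace_mm_page_alloc_extfrag(page, order, current_order,
		start_migratetype, migratetype, new_type);
	...
	__entry->change_ownership = (new_migratetype == alloc_migratetype);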
Signed-off-by: KOSAKI Motohiro
Acked-by: Mel Gorman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/trace/events/kmem.h | 10 ++++------
 mm/page_alloc.c             |  5 ++---
 2 files changed, 6 insertions(+), 9 deletions(-)

(limited to 'mm/page_alloc.c')

diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index d0c613476620..aece1346ceb7 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -267,14 +267,12 @@ DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
 TRACE_EVENT(mm_page_alloc_extfrag,
 
 	TP_PROTO(struct page *page,
-		int alloc_order, int fallback_order,
-		int alloc_migratetype, int fallback_migratetype,
-		int change_ownership),
+		int alloc_order, int fallback_order,
+		int alloc_migratetype, int fallback_migratetype, int new_migratetype),
 
 	TP_ARGS(page,
 		alloc_order, fallback_order,
-		alloc_migratetype, fallback_migratetype,
-		change_ownership),
+		alloc_migratetype, fallback_migratetype, new_migratetype),
 
 	TP_STRUCT__entry(
 		__field(	struct page *,	page			)
@@ -291,7 +289,7 @@ TRACE_EVENT(mm_page_alloc_extfrag,
 		__entry->fallback_order	= fallback_order;
 		__entry->alloc_migratetype	= alloc_migratetype;
 		__entry->fallback_migratetype	= fallback_migratetype;
-		__entry->change_ownership	= change_ownership;
+		__entry->change_ownership	= (new_migratetype == alloc_migratetype);
 	),
 
 	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5a9883614d99..442f1298f9a7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1103,9 +1103,8 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			       is_migrate_cma(migratetype)
 			     ? migratetype : start_migratetype);
 
-			trace_mm_page_alloc_extfrag(page, order,
-				current_order, start_migratetype, migratetype,
-				new_type == start_migratetype);
+			trace_mm_page_alloc_extfrag(page, order, current_order,
+				start_migratetype, migratetype, new_type);
 
 			return page;
 		}
--
cgit v1.2.3-70-g09d2

From 0cbef29a782162a3896487901eca4550bfa397ef Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro
Date: Tue, 12 Nov 2013 15:08:20 -0800
Subject: mm: __rmqueue_fallback() should respect pageblock type

When __rmqueue_fallback() doesn't find a free block of the required
size, it splits a larger page and puts the rest of the page back onto
the free list.

But it has one serious mistake: when putting the rest back,
__rmqueue_fallback() always uses start_migratetype if the type is not
CMA.  However, __rmqueue_fallback() is only called when every
start_migratetype queue is empty.  That is, __rmqueue_fallback() always
puts the leftover memory back on the wrong queue, except when
try_to_steal_freepages() has changed the pageblock type (i.e. when the
requested size is smaller than half a pageblock).  The end result is
that the anti-fragmentation framework increases fragmentation instead
of decreasing it.

Mel's original anti-fragmentation code did the right thing, but commit
47118af076f6 ("mm: mmzone: MIGRATE_CMA migration type added") broke it.

This patch restores the sane old behavior.  It also removes an
incorrect comment which was introduced by commit fef903efcf0c
("mm/page_alloc.c: restructure free-page stealing code and fix a bug").
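
To make the failure mode concrete (an illustrative walk-through, not
code from this patch): suppose an allocation for MIGRATE_UNMOVABLE
falls back to a MIGRATE_MOVABLE pageblock and try_to_steal_freepages()
decides not to change the pageblock's type.  The old call

	expand(zone, page, order, current_order, area,
	       is_migrate_cma(migratetype)
	     ? migratetype : start_migratetype);

still filed the leftover buddy pages under start_migratetype
(MIGRATE_UNMOVABLE here), so movable pages landed on the unmovable free
list, which is exactly the mixing the anti-fragmentation framework is
meant to prevent.  Passing new_type, the type try_to_steal_freepages()
actually settled on, keeps the leftovers on the queue that matches
their pageblock.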
Signed-off-by: KOSAKI Motohiro
Cc: Mel Gorman
Cc: Michal Nazarewicz
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

(limited to 'mm/page_alloc.c')

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 442f1298f9a7..cb17e8b800d6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1027,6 +1027,10 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,
 {
 	int current_order = page_order(page);
 
+	/*
+	 * When borrowing from MIGRATE_CMA, we need to release the excess
+	 * buddy pages to CMA itself.
+	 */
 	if (is_migrate_cma(fallback_type))
 		return fallback_type;
 
@@ -1091,17 +1095,8 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			list_del(&page->lru);
 			rmv_page_order(page);
 
-			/*
-			 * Borrow the excess buddy pages as well, irrespective
-			 * of whether we stole freepages, or took ownership of
-			 * the pageblock or not.
-			 *
-			 * Exception: When borrowing from MIGRATE_CMA, release
-			 * the excess buddy pages to CMA itself.
-			 */
 			expand(zone, page, order, current_order, area,
-			       is_migrate_cma(migratetype)
-			     ? migratetype : start_migratetype);
+			       new_type);
 
 			trace_mm_page_alloc_extfrag(page, order, current_order,
 				start_migratetype, migratetype, new_type);
--
cgit v1.2.3-70-g09d2

From a1aeb65a4c80d8e97eca8bcde02a5aeae11f6201 Mon Sep 17 00:00:00 2001
From: Zhi Yong Wu
Date: Tue, 12 Nov 2013 15:08:29 -0800
Subject: mm/page_alloc.c: fix comment in zlc_setup()

Signed-off-by: Zhi Yong Wu
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page_alloc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/page_alloc.c')

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cb17e8b800d6..580a5f075ed0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1705,7 +1705,7 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
  * comments in mmzone.h. Reduces cache footprint of zonelist scans
  * that have to skip over a lot of full or unallowed zones.
  *
- * If the zonelist cache is present in the passed in zonelist, then
+ * If the zonelist cache is present in the passed zonelist, then
  * returns a pointer to the allowed node mask (either the current
  * tasks mems_allowed, or node_states[N_MEMORY].)
 *
--
cgit v1.2.3-70-g09d2
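
A closing note on the first patch in this series: populated_zone() is a
trivial wrapper around the same zone field, which is why that change is
a cleanup with no functional effect.  Its definition (quoted from
include/linux/mmzone.h of this era; verify against the exact tree) is:

	static inline int populated_zone(struct zone *zone)
	{
		return (!!zone->present_pages);
	}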