Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	32
1 file changed, 20 insertions, 12 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a919ba5cb3c8..2ec9cc407216 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4061,17 +4061,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	int reserve_flags;
 
 	/*
-	 * In the slowpath, we sanity check order to avoid ever trying to
-	 * reclaim >= MAX_ORDER areas which will never succeed. Callers may
-	 * be using allocators in order of preference for an area that is
-	 * too large.
-	 */
-	if (order >= MAX_ORDER) {
-		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
-		return NULL;
-	}
-
-	/*
 	 * We also sanity check to catch abuse of atomic reserves being used by
 	 * callers that are not in atomic context.
 	 */
@@ -4364,6 +4353,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
 	struct alloc_context ac = { };
 
+	/*
+	 * There are several places where we assume that the order value is sane
+	 * so bail out early if the request is out of bound.
+	 */
+	if (unlikely(order >= MAX_ORDER)) {
+		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
+		return NULL;
+	}
+
 	gfp_mask &= gfp_allowed_mask;
 	alloc_mask = gfp_mask;
 	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
@@ -5815,8 +5813,10 @@ void __meminit init_currently_empty_zone(struct zone *zone,
 					unsigned long size)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
+	int zone_idx = zone_idx(zone) + 1;
 
-	pgdat->nr_zones = zone_idx(zone) + 1;
+	if (zone_idx > pgdat->nr_zones)
+		pgdat->nr_zones = zone_idx;
 
 	zone->zone_start_pfn = zone_start_pfn;
 
@@ -7789,6 +7789,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 			goto unmovable;
 
 		/*
+		 * If the zone is movable and we have ruled out all reserved
+		 * pages then it should be reasonably safe to assume the rest
+		 * is movable.
+		 */
+		if (zone_idx(zone) == ZONE_MOVABLE)
+			continue;
+
+		/*
 		 * Hugepages are not in LRU lists, but they're movable.
 		 * We need not scan over tail pages bacause we don't
 		 * handle each tail page individually in migration.
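
The first two hunks above move the order sanity check from the slow path to the allocation entry point, so every downstream helper can assume the order is in range. Below is a minimal user-space sketch of that pattern, not kernel code: the names alloc_entry(), slow_path() and MAX_ORDER_SKETCH are made up for illustration.

#include <stdio.h>
#include <stdlib.h>

#define MAX_ORDER_SKETCH 11	/* stand-in for the kernel's MAX_ORDER */

/* Later stages can assume the order has already been range-checked. */
static void *slow_path(unsigned int order)
{
	return malloc((size_t)1 << order);	/* placeholder for the real reclaim/alloc work */
}

/*
 * Hypothetical entry point mirroring the patch: reject out-of-range
 * orders before any fast- or slow-path work happens.
 */
static void *alloc_entry(unsigned int order)
{
	if (order >= MAX_ORDER_SKETCH)
		return NULL;	/* bail out early, as the patch does */
	return slow_path(order);
}

int main(void)
{
	void *ok  = alloc_entry(3);	/* within range: proceeds to slow_path() */
	void *bad = alloc_entry(42);	/* out of range: rejected up front */

	printf("order 3  -> %s\n", ok  ? "allocated" : "rejected");
	printf("order 42 -> %s\n", bad ? "allocated" : "rejected");
	free(ok);
	return 0;
}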
