Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e3758a09a009..3bac76ae4b30 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -369,9 +369,11 @@ void prep_compound_page(struct page *page, unsigned long order)
 	__SetPageHead(page);
 	for (i = 1; i < nr_pages; i++) {
 		struct page *p = page + i;
-		__SetPageTail(p);
 		set_page_count(p, 0);
 		p->first_page = page;
+		/* Make sure p->first_page is always valid for PageTail() */
+		smp_wmb();
+		__SetPageTail(p);
 	}
 }
 
@@ -1236,6 +1238,15 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 	}
 	local_irq_restore(flags);
 }
+static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+{
+	return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
+}
+#else
+static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+{
+	return false;
+}
 #endif
 
 /*
@@ -1572,7 +1583,13 @@ again:
 					  get_pageblock_migratetype(page));
 	}
 
-	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+	/*
+	 * NOTE: GFP_THISNODE allocations do not partake in the kswapd
+	 * aging protocol, so they can't be fair.
+	 */
+	if (!gfp_thisnode_allocation(gfp_flags))
+		__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
 	zone_statistics(preferred_zone, zone, gfp_flags);
 	local_irq_restore(flags);
@@ -1944,8 +1961,12 @@ zonelist_scan:
 		 * ultimately fall back to remote zones that do not
 		 * partake in the fairness round-robin cycle of this
 		 * zonelist.
+		 *
+		 * NOTE: GFP_THISNODE allocations do not partake in
+		 * the kswapd aging protocol, so they can't be fair.
 		 */
-		if (alloc_flags & ALLOC_WMARK_LOW) {
+		if ((alloc_flags & ALLOC_WMARK_LOW) &&
+		    !gfp_thisnode_allocation(gfp_mask)) {
 			if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
 				continue;
 			if (!zone_local(preferred_zone, zone))
@@ -2501,8 +2522,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * allowed per node queues are empty and that nodes are
 	 * over allocated.
 	 */
-	if (IS_ENABLED(CONFIG_NUMA) &&
-			(gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+	if (gfp_thisnode_allocation(gfp_mask))
 		goto nopage;
 
 restart:
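Note on the prep_compound_page() hunk: the smp_wmb() orders the writer's stores so that the tail flag can never become visible before p->first_page is valid; a racing PageTail() reader that observes the flag is then guaranteed to see a valid first_page. A minimal userspace analogue of that publish/observe pattern, using C11 release/acquire in place of smp_wmb() and the dependent read (struct layout and the publish_tail/read_head names are illustrative, not kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct page {
		struct page *first_page;	/* payload published by the writer */
		atomic_bool tail;		/* flag standing in for PG_tail */
	};

	/* Writer: make first_page visible *before* the flag, mirroring
	 * the p->first_page store + smp_wmb() + __SetPageTail() order. */
	static void publish_tail(struct page *p, struct page *head)
	{
		p->first_page = head;
		atomic_store_explicit(&p->tail, true, memory_order_release);
	}

	/* Reader: if the flag is observed, the acquire load forbids the
	 * first_page read from being hoisted above it, so a stale
	 * pointer can never be returned. */
	static struct page *read_head(struct page *p)
	{
		if (atomic_load_explicit(&p->tail, memory_order_acquire))
			return p->first_page;
		return NULL;
	}

The pre-patch ordering, with __SetPageTail() issued before the first_page assignment and no barrier, is exactly the reordering this pattern rules out.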

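Note on the gfp_thisnode_allocation() helper: it compares against the full composite mask rather than testing a single bit because, in gfp.h of this era, GFP_THISNODE is built from several __GFP flags on CONFIG_NUMA kernels and defined as 0 otherwise; with a zero mask, (gfp_mask & GFP_THISNODE) == GFP_THISNODE would hold for every allocation, which is why the #else branch in the hunk above supplies a stub that simply returns false. A standalone sketch of the mask test (the bit values below are illustrative stand-ins, not the real gfp.h constants):

	#include <stdbool.h>
	#include <stdio.h>

	typedef unsigned int gfp_t;

	#define __GFP_NOWARN	0x200u		/* illustrative bit values */
	#define __GFP_NORETRY	0x1000u
	#define __GFP_THISNODE	0x40000u
	#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NORETRY | __GFP_NOWARN)

	/* All bits of the composite must be present, as in the patch; a
	 * bare (gfp_mask & GFP_THISNODE) != 0 test would also fire for
	 * an unrelated allocation that merely passed __GFP_NOWARN. */
	static bool gfp_thisnode_allocation(gfp_t gfp_mask)
	{
		return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
	}

	int main(void)
	{
		printf("%d\n", gfp_thisnode_allocation(GFP_THISNODE));	/* 1 */
		printf("%d\n", gfp_thisnode_allocation(__GFP_NOWARN));	/* 0 */
		return 0;
	}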