Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	51
1 file changed, 31 insertions, 20 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1334ede667a8..738065f765ab 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -644,30 +644,40 @@ release:
  *	    available
  * never: never stall for any thp allocation
  */
-static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
+static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr)
 {
 	const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
+	gfp_t this_node = 0;
+
+#ifdef CONFIG_NUMA
+	struct mempolicy *pol;
+	/*
+	 * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not
+	 * specified, to express a general desire to stay on the current
+	 * node for optimistic allocation attempts. If the defrag mode
+	 * and/or madvise hint requires the direct reclaim then we prefer
+	 * to fallback to other node rather than node reclaim because that
+	 * can lead to excessive reclaim even though there is free memory
+	 * on other nodes. We expect that NUMA preferences are specified
+	 * by memory policies.
+	 */
+	pol = get_vma_policy(vma, addr);
+	if (pol->mode != MPOL_BIND)
+		this_node = __GFP_THISNODE;
+	mpol_cond_put(pol);
+#endif
 
-	/* Always do synchronous compaction */
 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
 		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
-
-	/* Kick kcompactd and fail quickly */
 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
-		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
-
-	/* Synchronous compaction if madvised, otherwise kick kcompactd */
+		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node;
 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
-		return GFP_TRANSHUGE_LIGHT |
-			(vma_madvised ? __GFP_DIRECT_RECLAIM :
-					__GFP_KSWAPD_RECLAIM);
-
-	/* Only do synchronous compaction if madvised */
+		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
+							     __GFP_KSWAPD_RECLAIM | this_node);
 	if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
-		return GFP_TRANSHUGE_LIGHT |
-		       (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
-
-	return GFP_TRANSHUGE_LIGHT;
+		return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
+							     this_node);
+	return GFP_TRANSHUGE_LIGHT | this_node;
 }
 
 /* Caller must hold page table lock. */
@@ -739,8 +749,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
 			pte_free(vma->vm_mm, pgtable);
 		return ret;
 	}
-	gfp = alloc_hugepage_direct_gfpmask(vma);
-	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
+	gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
+	page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id());
 	if (unlikely(!page)) {
 		count_vm_event(THP_FAULT_FALLBACK);
 		return VM_FAULT_FALLBACK;
@@ -1347,8 +1357,9 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
 alloc:
 	if (__transparent_hugepage_enabled(vma) &&
 	    !transparent_hugepage_debug_cow()) {
-		huge_gfp = alloc_hugepage_direct_gfpmask(vma);
-		new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
+		huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
+		new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma,
+				haddr, numa_node_id());
 	} else
 		new_page = NULL;
 
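For readability, the gfp mask that alloc_hugepage_direct_gfpmask() returns for each defrag mode after this patch, summarized from the first hunk above (assuming the usual mapping of the TRANSPARENT_HUGEPAGE_DEFRAG_* flags to the sysfs defrag mode names; "madvised" means VM_HUGEPAGE is set on the VMA, and this_node is __GFP_THISNODE unless the VMA's mempolicy is MPOL_BIND or CONFIG_NUMA is disabled, in which case it is 0):

	always:		GFP_TRANSHUGE | (madvised ? 0 : __GFP_NORETRY)
	defer:		GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node
	defer+madvise:	GFP_TRANSHUGE_LIGHT | (madvised ? __GFP_DIRECT_RECLAIM
						        : __GFP_KSWAPD_RECLAIM | this_node)
	madvise:	GFP_TRANSHUGE_LIGHT | (madvised ? __GFP_DIRECT_RECLAIM : this_node)
	never:		GFP_TRANSHUGE_LIGHT | this_node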
