|  |  |  |
|---|---|---|
| author | Arnaldo Carvalho de Melo <acme@redhat.com> | 2021-03-08 10:11:33 -0300 |
| committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2021-03-08 10:11:33 -0300 |
| commit | 009ef05f98129aa91c62c3baab859ba593a15bb2 (patch) | |
| tree | f3414f08d636a597545b1e4f443b373b9d6d8f4b /mm/compaction.c | |
| parent | 2777b81b379df772defd654bc4d3fa82dca17a4b (diff) | |
| parent | 144c79ef33536b4ecb4951e07dbc1f2b7fa99d32 (diff) | |
Merge remote-tracking branch 'torvalds/master' into perf/core
To pick up the fixes sent for v5.12 and continue development based on
v5.12-rc2, i.e. without the swap on file bug.
This also gets a slightly newer and better tools/perf/arch/arm/util/cs-etm.c
patch version, using the BIT() macro, that had already been slated to
v5.13 but ended up going to v5.12-rc1 on an older version.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'mm/compaction.c')
| -rw-r--r-- | mm/compaction.c | 73 |

1 file changed, 40 insertions, 33 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 190ccdaa6c19..e04f4476e68e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -137,7 +137,6 @@ EXPORT_SYMBOL(__SetPageMovable);
 
 void __ClearPageMovable(struct page *page)
 {
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(!PageMovable(page), page);
 	/*
 	 * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE
@@ -988,14 +987,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		if (unlikely(!get_page_unless_zero(page)))
 			goto isolate_fail;
 
-		if (__isolate_lru_page_prepare(page, isolate_mode) != 0)
+		if (!__isolate_lru_page_prepare(page, isolate_mode))
 			goto isolate_fail_put;
 
 		/* Try isolate the page */
 		if (!TestClearPageLRU(page))
 			goto isolate_fail_put;
 
-		rcu_read_lock();
 		lruvec = mem_cgroup_page_lruvec(page, pgdat);
 
 		/* If we already hold the lock, we can skip some rechecking */
@@ -1005,7 +1003,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
 			locked = lruvec;
-			rcu_read_unlock();
 
 			lruvec_memcg_debug(lruvec, page);
 
@@ -1026,15 +1023,14 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 				SetPageLRU(page);
 				goto isolate_fail_put;
 			}
-		} else
-			rcu_read_unlock();
+		}
 
 		/* The whole page is taken off the LRU; skip the tail pages. */
 		if (PageCompound(page))
 			low_pfn += compound_nr(page) - 1;
 
 		/* Successfully isolated */
-		del_page_from_lru_list(page, lruvec, page_lru(page));
+		del_page_from_lru_list(page, lruvec);
 		mod_node_page_state(page_pgdat(page),
 				NR_ISOLATED_ANON + page_is_file_lru(page),
 				thp_nr_pages(page));
@@ -1288,7 +1284,7 @@ static void
 fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
 {
 	unsigned long start_pfn, end_pfn;
-	struct page *page = pfn_to_page(pfn);
+	struct page *page;
 
 	/* Do not search around if there are enough pages already */
 	if (cc->nr_freepages >= cc->nr_migratepages)
@@ -1299,8 +1295,12 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long
 		return;
 
 	/* Pageblock boundaries */
-	start_pfn = pageblock_start_pfn(pfn);
-	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;
+	start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
+	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
+
+	page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
+	if (!page)
+		return;
 
 	/* Scan before */
 	if (start_pfn != pfn) {
@@ -1402,7 +1402,8 @@ fast_isolate_freepages(struct compact_control *cc)
 
 			pfn = page_to_pfn(freepage);
 			if (pfn >= highest)
-				highest = pageblock_start_pfn(pfn);
+				highest = max(pageblock_start_pfn(pfn),
+					      cc->zone->zone_start_pfn);
 
 			if (pfn >= low_pfn) {
 				cc->fast_search_fail = 0;
@@ -1472,7 +1473,8 @@ fast_isolate_freepages(struct compact_control *cc)
 			} else {
 				if (cc->direct_compaction && pfn_valid(min_pfn)) {
 					page = pageblock_pfn_to_page(min_pfn,
-						pageblock_end_pfn(min_pfn),
+						min(pageblock_end_pfn(min_pfn),
+						    zone_end_pfn(cc->zone)),
 						cc->zone);
 					cc->free_pfn = min_pfn;
 				}
@@ -1702,6 +1704,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 	unsigned long pfn = cc->migrate_pfn;
 	unsigned long high_pfn;
 	int order;
+	bool found_block = false;
 
 	/* Skip hints are relied on to avoid repeats on the fast search */
 	if (cc->ignore_skip_hint)
@@ -1744,7 +1747,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
 
 	for (order = cc->order - 1;
-	     order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit;
+	     order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
 	     order--) {
 		struct free_area *area = &cc->zone->free_area[order];
 		struct list_head *freelist;
@@ -1759,7 +1762,11 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 		list_for_each_entry(freepage, freelist, lru) {
 			unsigned long free_pfn;
 
-			nr_scanned++;
+			if (nr_scanned++ >= limit) {
+				move_freelist_tail(freelist, freepage);
+				break;
+			}
+
 			free_pfn = page_to_pfn(freepage);
 			if (free_pfn < high_pfn) {
 				/*
@@ -1768,12 +1775,8 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 				 * the list assumes an entry is deleted, not
 				 * reordered.
 				 */
-				if (get_pageblock_skip(freepage)) {
-					if (list_is_last(freelist, &freepage->lru))
-						break;
-
+				if (get_pageblock_skip(freepage))
 					continue;
-				}
 
 				/* Reorder to so a future search skips recent pages */
 				move_freelist_tail(freelist, freepage);
@@ -1781,15 +1784,10 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 				update_fast_start_pfn(cc, free_pfn);
 				pfn = pageblock_start_pfn(free_pfn);
 				cc->fast_search_fail = 0;
+				found_block = true;
 				set_pageblock_skip(freepage);
 				break;
 			}
-
-			if (nr_scanned >= limit) {
-				cc->fast_search_fail++;
-				move_freelist_tail(freelist, freepage);
-				break;
-			}
 		}
 		spin_unlock_irqrestore(&cc->zone->lock, flags);
 	}
@@ -1800,9 +1798,10 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 	/*
 	 * If fast scanning failed then use a cached entry for a page block
	 * that had free pages as the basis for starting a linear scan.
	 */
-	if (pfn == cc->migrate_pfn)
+	if (!found_block) {
+		cc->fast_search_fail++;
 		pfn = reinit_migrate_pfn(cc);
-
+	}
 	return pfn;
 }
@@ -1926,20 +1925,28 @@ static bool kswapd_is_running(pg_data_t *pgdat)
 
 /*
  * A zone's fragmentation score is the external fragmentation wrt to the
- * COMPACTION_HPAGE_ORDER scaled by the zone's size. It returns a value
- * in the range [0, 100].
+ * COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100].
+ */
+static unsigned int fragmentation_score_zone(struct zone *zone)
+{
+	return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
+}
+
+/*
+ * A weighted zone's fragmentation score is the external fragmentation
+ * wrt to the COMPACTION_HPAGE_ORDER scaled by the zone's size. It
+ * returns a value in the range [0, 100].
  *
  * The scaling factor ensures that proactive compaction focuses on larger
  * zones like ZONE_NORMAL, rather than smaller, specialized zones like
  * ZONE_DMA32. For smaller zones, the score value remains close to zero,
  * and thus never exceeds the high threshold for proactive compaction.
  */
-static unsigned int fragmentation_score_zone(struct zone *zone)
+static unsigned int fragmentation_score_zone_weighted(struct zone *zone)
 {
 	unsigned long score;
 
-	score = zone->present_pages *
-			extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
+	score = zone->present_pages * fragmentation_score_zone(zone);
 	return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
 }
 
@@ -1959,7 +1966,7 @@ static unsigned int fragmentation_score_node(pg_data_t *pgdat)
 		struct zone *zone;
 
 		zone = &pgdat->node_zones[zoneid];
-		score += fragmentation_score_zone(zone);
+		score += fragmentation_score_zone_weighted(zone);
 	}
 
 	return score;
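A recurring theme in the hunks above is clamping pageblock-derived pfn ranges to the zone's own boundaries before use, since a pageblock can straddle a zone boundary; fast_isolate_around() additionally re-derives its struct page through pageblock_pfn_to_page(), so an out-of-zone pfn is never dereferenced. A minimal userspace sketch of that clamping arithmetic, with a made-up pageblock size and plain integers standing in for the zone fields (everything here is illustrative, not kernel API):

```c
#include <stdio.h>

#define PAGEBLOCK_NR 512UL	/* hypothetical pages per pageblock */

static unsigned long pb_start(unsigned long pfn) { return pfn & ~(PAGEBLOCK_NR - 1); }
static unsigned long pb_end(unsigned long pfn)   { return pb_start(pfn) + PAGEBLOCK_NR; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }
static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }

int main(void)
{
	/* Hypothetical zone that starts mid-pageblock. */
	unsigned long zone_start = 600, zone_end = 10000, pfn = 700;

	/* Unclamped, pb_start(700) = 512 falls below zone_start. */
	unsigned long start = max_ul(pb_start(pfn), zone_start);
	unsigned long end   = min_ul(pb_end(pfn), zone_end);

	printf("scan [%lu, %lu) instead of [%lu, %lu)\n",
	       start, end, pb_start(pfn), pb_end(pfn));
	return 0;
}
```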
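The fast_find_migrateblock() hunks change two related things: the nr_scanned budget is now checked before each freelist entry is inspected, so the limit bounds the total scan across all orders rather than being tested only after a candidate fails, and a found_block flag replaces the indirect pfn == cc->migrate_pfn test, so cc->fast_search_fail is bumped exactly once, when the whole search comes up empty. A toy sketch of that control flow, using a plain array in place of the kernel's per-order freelists (names and types here are stand-ins):

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for one freelist entry. */
struct candidate { unsigned long pfn; bool skip; };

static unsigned long fast_search_fail;	/* mirrors cc->fast_search_fail */

/* Returns the chosen pfn, or 0 when the bounded search found nothing. */
static unsigned long find_block(struct candidate *c, int n, unsigned long limit)
{
	unsigned long nr_scanned = 0;
	unsigned long pfn = 0;
	bool found_block = false;

	for (int i = 0; i < n && !found_block; i++) {
		/* Budget check up front: caps the total scanning work. */
		if (nr_scanned++ >= limit)
			break;
		if (c[i].skip)
			continue;
		pfn = c[i].pfn;
		found_block = true;
	}

	/* One failure counted for the search as a whole, as in the patch. */
	if (!found_block)
		fast_search_fail++;
	return pfn;
}

int main(void)
{
	struct candidate list[] = { {100, true}, {200, true}, {300, false} };

	printf("limit 2 -> pfn %lu (fails=%lu)\n", find_block(list, 3, 2), fast_search_fail);
	printf("limit 8 -> pfn %lu (fails=%lu)\n", find_block(list, 3, 8), fast_search_fail);
	return 0;
}
```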
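Finally, the split of fragmentation_score_zone() into a raw and a weighted variant keeps the per-zone extfrag value reusable on its own, while the node-level score still weights each zone by its share of the node's pages. A userspace sketch of that arithmetic with hypothetical numbers (the struct and its values are stand-ins, not kernel code):

```c
#include <stdio.h>

/* Stand-ins for struct zone fields (hypothetical values). */
struct zone_sketch {
	unsigned long present_pages;      /* pages in this zone */
	unsigned long node_present_pages; /* pages in the whole node */
	unsigned int extfrag;             /* extfrag_for_order() result, 0..100 */
};

/* Raw score: external fragmentation wrt COMPACTION_HPAGE_ORDER, 0..100. */
static unsigned int score_zone(const struct zone_sketch *z)
{
	return z->extfrag;
}

/* Weighted score: raw score scaled by the zone's share of the node. */
static unsigned int score_zone_weighted(const struct zone_sketch *z)
{
	unsigned long score = z->present_pages * score_zone(z);

	return score / (z->node_present_pages + 1);
}

int main(void)
{
	/* A small, badly fragmented zone vs. a large, mildly fragmented one. */
	struct zone_sketch dma32  = { 1UL << 18, 1UL << 24, 90 };
	struct zone_sketch normal = { 15UL << 20, 1UL << 24, 40 };

	/* Prints raw 90 / weighted 1 vs. raw 40 / weighted 37: the small
	 * zone can no longer trip the proactive-compaction threshold alone. */
	printf("dma32:  raw %u weighted %u\n", score_zone(&dma32), score_zone_weighted(&dma32));
	printf("normal: raw %u weighted %u\n", score_zone(&normal), score_zone_weighted(&normal));
	return 0;
}
```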
