| author | Arnd Bergmann <arnd@arndb.de> | 2011-09-20 21:45:56 +0200 |
|---|---|---|
| committer | Arnd Bergmann <arnd@arndb.de> | 2011-09-20 21:45:56 +0200 |
| commit | 1fdb4888e45f1413972a8e9da55f3ffc08b9abcb | |
| tree | 635ef73cdff38d21a529bbdcab4cd2cb39a29484 /mm/vmscan.c | |
| parent | 1884af9365a96314164f4110d4528d425e5dd843 | |
| parent | ceb1c532ba6220900e61ec7073a9234661efa450 | |
Merge branch 'omap/cleanup' into next/cleanup
Diffstat (limited to 'mm/vmscan.c')
| -rw-r--r-- | mm/vmscan.c | 19 |
|---|---|---|

1 file changed, 11 insertions, 8 deletions
```diff
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7ef69124fa3e..b7719ec10dc5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2283,7 +2283,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 		.mem_cgroup = mem,
 		.memcg_record = rec,
 	};
-	unsigned long start, end;
+	ktime_t start, end;
 
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -2292,7 +2292,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 						      sc.may_writepage,
 						      sc.gfp_mask);
 
-	start = sched_clock();
+	start = ktime_get();
 	/*
 	 * NOTE: Although we can get the priority field, using it
 	 * here is not a good idea, since it limits the pages we can scan.
@@ -2301,10 +2301,10 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 	 * the priority and make it zero.
 	 */
 	shrink_zone(0, zone, &sc);
-	end = sched_clock();
+	end = ktime_get();
 
 	if (rec)
-		rec->elapsed += end - start;
+		rec->elapsed += ktime_to_ns(ktime_sub(end, start));
 	*scanned = sc.nr_scanned;
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
@@ -2319,7 +2319,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 {
 	struct zonelist *zonelist;
 	unsigned long nr_reclaimed;
-	unsigned long start, end;
+	ktime_t start, end;
 	int nid;
 	struct scan_control sc = {
 		.may_writepage = !laptop_mode,
@@ -2337,7 +2337,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.gfp_mask = sc.gfp_mask,
 	};
 
-	start = sched_clock();
+	start = ktime_get();
 	/*
 	 * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't
 	 * take care of from where we get pages. So the node where we start the
@@ -2352,9 +2352,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 					    sc.gfp_mask);
 
 	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
-	end = sched_clock();
+	end = ktime_get();
 
 	if (rec)
-		rec->elapsed += end - start;
+		rec->elapsed += ktime_to_ns(ktime_sub(end, start));
 
 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
@@ -2529,6 +2529,9 @@ loop_again:
 					high_wmark_pages(zone), 0, 0)) {
 				end_zone = i;
 				break;
+			} else {
+				/* If balanced, clear the congested flag */
+				zone_clear_flag(zone, ZONE_CONGESTED);
 			}
 		}
 		if (i < 0)
```
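The substantive change carried into this merge swaps `sched_clock()` for `ktime_get()` when accumulating memcg reclaim time: `sched_clock()` is a fast scheduler clock that need not be stable or comparable across CPUs, while `ktime_get()` returns a proper monotonic `ktime_t` whose delta can be safely converted to nanoseconds. Below is a minimal, self-contained sketch of that timing idiom as a trivial kernel module; the module name and the `msleep()` stand-in workload are illustrative assumptions, not part of the patch.

```c
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/delay.h>

static int __init ktime_demo_init(void)
{
	ktime_t start, end;

	start = ktime_get();	/* monotonic clock, as in the patch */
	msleep(10);		/* hypothetical stand-in for the shrink_zone() work being timed */
	end = ktime_get();

	/* same accumulation idiom the patch uses for rec->elapsed */
	pr_info("elapsed: %lld ns\n", ktime_to_ns(ktime_sub(end, start)));
	return 0;
}

static void __exit ktime_demo_exit(void)
{
}

module_init(ktime_demo_init);
module_exit(ktime_demo_exit);
MODULE_LICENSE("GPL");
```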
