diff options
Diffstat (limited to 'include/linux/mmzone.h')
| -rw-r--r-- | include/linux/mmzone.h | 38 | 
1 files changed, 19 insertions, 19 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index db023a92f3a4..cc4a507d7ca4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -65,7 +65,7 @@ enum migratetype {
 };
 
 /* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
-extern char * const migratetype_names[MIGRATE_TYPES];
+extern const char * const migratetype_names[MIGRATE_TYPES];
 
 #ifdef CONFIG_CMA
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
@@ -269,9 +269,10 @@ enum zone_watermarks {
 	NR_WMARK
 };
 
-#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
-#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
-#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
+#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
+#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
+#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
+#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
 
 struct per_cpu_pages {
 	int count;		/* number of pages in the list */
@@ -314,7 +315,7 @@ enum zone_type {
 	 * Architecture		Limit
 	 * ---------------------------
	 * parisc, ia64, sparc	<4G
-	 * s390			<2G
+	 * s390, powerpc	<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
@@ -362,7 +363,8 @@ struct zone {
 	/* Read-mostly fields */
 
 	/* zone watermarks, access with *_wmark_pages(zone) macros */
-	unsigned long watermark[NR_WMARK];
+	unsigned long _watermark[NR_WMARK];
+	unsigned long watermark_boost;
 
 	unsigned long nr_reserved_highatomic;
 
@@ -428,14 +430,8 @@ struct zone {
 	 * Write access to present_pages at runtime should be protected by
 	 * mem_hotplug_begin/end(). Any reader who can't tolerant drift of
 	 * present_pages should get_online_mems() to get a stable value.
-	 *
-	 * Read access to managed_pages should be safe because it's unsigned
-	 * long. Write access to zone->managed_pages and totalram_pages are
-	 * protected by managed_page_count_lock at runtime. Idealy only
-	 * adjust_managed_page_count() should be used instead of directly
-	 * touching zone->managed_pages and totalram_pages.
 	 */
-	unsigned long		managed_pages;
+	atomic_long_t		managed_pages;
 	unsigned long		spanned_pages;
 	unsigned long		present_pages;
 
@@ -524,6 +520,11 @@ enum pgdat_flags {
 	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
 };
 
+static inline unsigned long zone_managed_pages(struct zone *zone)
+{
+	return (unsigned long)atomic_long_read(&zone->managed_pages);
+}
+
 static inline unsigned long zone_end_pfn(const struct zone *zone)
 {
 	return zone->zone_start_pfn + zone->spanned_pages;
@@ -635,9 +636,8 @@ typedef struct pglist_data {
 #endif
 #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
 	/*
-	 * Must be held any time you expect node_start_pfn, node_present_pages
-	 * or node_spanned_pages stay constant.  Holding this will also
-	 * guarantee that any pfn_valid() stays that way.
+	 * Must be held any time you expect node_start_pfn,
+	 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
 	 *
 	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
 	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
@@ -691,8 +691,6 @@ typedef struct pglist_data {
 	 * is the first PFN that needs to be initialised.
 	 */
 	unsigned long first_deferred_pfn;
-	/* Number of non-deferred pages */
-	unsigned long static_init_pgcnt;
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -820,7 +818,7 @@ static inline bool is_dev_zone(const struct zone *zone)
  */
 static inline bool managed_zone(struct zone *zone)
 {
-	return zone->managed_pages;
+	return zone_managed_pages(zone);
 }
 
 /* Returns true if a zone has memory */
@@ -890,6 +888,8 @@ static inline int is_highmem(struct zone *zone)
 struct ctl_table;
 int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
+int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
+					void __user *, size_t *, loff_t *);
 int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
 					void __user *, size_t *, loff_t *);
 extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
