Diffstat (limited to 'include/linux')
-rw-r--r-- | include/linux/fault-inject.h   |  5
-rw-r--r-- | include/linux/kasan.h          |  4
-rw-r--r-- | include/linux/list_lru.h       |  3
-rw-r--r-- | include/linux/memblock.h       | 10
-rw-r--r-- | include/linux/memory.h         |  3
-rw-r--r-- | include/linux/memory_hotplug.h | 53
-rw-r--r-- | include/linux/migrate.h        |  2
-rw-r--r-- | include/linux/mm.h             | 56
-rw-r--r-- | include/linux/mm_types.h       |  2
-rw-r--r-- | include/linux/mmdebug.h        |  8
-rw-r--r-- | include/linux/mmzone.h         |  8
-rw-r--r-- | include/linux/node.h           |  4
-rw-r--r-- | include/linux/page-flags.h     | 22
-rw-r--r-- | include/linux/page_ref.h       |  3
-rw-r--r-- | include/linux/slab.h           | 20
-rw-r--r-- | include/linux/slab_def.h       |  4
-rw-r--r-- | include/linux/slub_def.h       | 28
-rw-r--r-- | include/linux/swap.h           | 38
-rw-r--r-- | include/linux/zsmalloc.h       |  2
19 files changed, 156 insertions(+), 119 deletions(-)
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index c3c95d18bf43..7e6c77740413 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -64,10 +64,11 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name,
 
 struct kmem_cache;
 
+int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
 #ifdef CONFIG_FAILSLAB
-extern bool should_failslab(struct kmem_cache *s, gfp_t gfpflags);
+extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags);
 #else
-static inline bool should_failslab(struct kmem_cache *s, gfp_t gfpflags)
+static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags)
 {
 	return false;
 }
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d6459bd1376d..de784fd11d12 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -43,7 +43,7 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark);
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 			slab_flags_t *flags);
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -92,7 +92,7 @@ static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
 static inline void kasan_cache_create(struct kmem_cache *cache,
-				      size_t *size,
+				      unsigned int *size,
 				      slab_flags_t *flags) {}
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index bb8129a3474d..96def9d15b1b 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -32,6 +32,7 @@ struct list_lru_one {
 };
 
 struct list_lru_memcg {
+	struct rcu_head		rcu;
 	/* array of per cgroup lists, indexed by memcg_cache_id */
 	struct list_lru_one	*lru[0];
 };
@@ -43,7 +44,7 @@ struct list_lru_node {
 	struct list_lru_one	lru;
 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
-	struct list_lru_memcg	*memcg_lrus;
+	struct list_lru_memcg	__rcu *memcg_lrus;
 #endif
 	long nr_items;
 } ____cacheline_aligned_in_smp;
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f92ea7783652..0257aee7ab4b 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -416,21 +416,11 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
 {
 }
 #endif
-
-extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
-		phys_addr_t end_addr);
 #else
 static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
 {
 	return 0;
 }
-
-static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
-		phys_addr_t end_addr)
-{
-	return 0;
-}
-
 #endif /* CONFIG_HAVE_MEMBLOCK */
 
 #endif /* __KERNEL__ */
diff --git a/include/linux/memory.h b/include/linux/memory.h
index f71e732c77b2..31ca3e28b0eb 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -33,6 +33,7 @@ struct memory_block {
 	void *hw;			/* optional pointer to fw/hw data */
 	int (*phys_callback)(struct memory_block *);
 	struct device dev;
+	int nid;			/* NID for this memory block */
 };
 
 int arch_get_memory_phys_device(unsigned long start_pfn);
@@ -109,7 +110,7 @@ extern int register_memory_notifier(struct notifier_block *nb);
 extern void unregister_memory_notifier(struct notifier_block *nb);
 extern int register_memory_isolate_notifier(struct notifier_block *nb);
 extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
-extern int register_new_memory(int, struct mem_section *);
+int hotplug_memory_register(int nid, struct mem_section *section);
 #ifdef CONFIG_MEMORY_HOTREMOVE
 extern int unregister_memory_section(struct mem_section *);
 #endif
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index aba5f86eb038..2b0265265c28 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -52,24 +52,6 @@ enum {
 };
 
 /*
- * pgdat resizing functions
- */
-static inline
-void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
-{
-	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
-}
-static inline
-void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
-{
-	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
-}
-static inline
-void pgdat_resize_init(struct pglist_data *pgdat)
-{
-	spin_lock_init(&pgdat->node_size_lock);
-}
-/*
  * Zone resizing functions
  *
  * Note: any attempt to resize a zone should has pgdat_resize_lock()
@@ -246,13 +228,6 @@ extern void clear_zone_contiguous(struct zone *zone);
 	___page;				\
 })
 
-/*
- * Stub functions for when hotplug is off
- */
-static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
-static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
-static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
-
 static inline unsigned zone_span_seqbegin(struct zone *zone)
 {
 	return 0;
@@ -293,6 +268,34 @@ static inline bool movable_node_is_enabled(void)
 }
 #endif /* ! CONFIG_MEMORY_HOTPLUG */
 
+#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
+/*
+ * pgdat resizing functions
+ */
+static inline
+void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
+{
+	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
+}
+static inline
+void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
+{
+	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
+}
+static inline
+void pgdat_resize_init(struct pglist_data *pgdat)
+{
+	spin_lock_init(&pgdat->node_size_lock);
+}
+#else /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
+/*
+ * Stub functions for when hotplug is off
+ */
+static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
+static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
+static inline void pgdat_resize_init(struct pglist_data *pgdat) {}
+#endif /* !(CONFIG_MEMORY_HOTPLUG || CONFIG_DEFERRED_STRUCT_PAGE_INIT) */
+
 #ifdef CONFIG_MEMORY_HOTREMOVE
 
 extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index a2246cf670ba..ab45f8a0d288 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -25,7 +25,7 @@ enum migrate_reason {
 	MR_SYSCALL,		/* also applies to cpusets */
 	MR_MEMPOLICY_MBIND,
 	MR_NUMA_MISPLACED,
-	MR_CMA,
+	MR_CONTIG_RANGE,
 	MR_TYPES
 };
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f945dff34925..3ad632366973 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -386,17 +386,19 @@ struct vm_operations_struct {
 	void (*close)(struct vm_area_struct * area);
 	int (*split)(struct vm_area_struct * area, unsigned long addr);
 	int (*mremap)(struct vm_area_struct * area);
-	int (*fault)(struct vm_fault *vmf);
-	int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
+	vm_fault_t (*fault)(struct vm_fault *vmf);
+	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
+			enum page_entry_size pe_size);
 	void (*map_pages)(struct vm_fault *vmf,
 			pgoff_t start_pgoff, pgoff_t end_pgoff);
+	unsigned long (*pagesize)(struct vm_area_struct * area);
 
 	/* notification that a previously read-only page is about to become
 	 * writable, if an error is returned it will cause a SIGBUS */
-	int (*page_mkwrite)(struct vm_fault *vmf);
+	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
 
 	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
-	int (*pfn_mkwrite)(struct vm_fault *vmf);
+	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
 
 	/* called by access_process_vm when get_user_pages() fails, typically
 	 * for use by special VMAs that can switch between memory and hardware
@@ -903,7 +905,9 @@ extern int page_to_nid(const struct page *page);
 #else
 static inline int page_to_nid(const struct page *page)
 {
-	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
+	struct page *p = (struct page *)page;
+
+	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
 }
 #endif
 
@@ -1152,6 +1156,7 @@ static inline pgoff_t page_index(struct page *page)
 
 bool page_mapped(struct page *page);
 struct address_space *page_mapping(struct page *page);
+struct address_space *page_mapping_file(struct page *page);
 
 /*
  * Return true only if the page has been allocated with
@@ -2420,6 +2425,44 @@ int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
 			pfn_t pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
 
+static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
+				unsigned long addr, struct page *page)
+{
+	int err = vm_insert_page(vma, addr, page);
+
+	if (err == -ENOMEM)
+		return VM_FAULT_OOM;
+	if (err < 0 && err != -EBUSY)
+		return VM_FAULT_SIGBUS;
+
+	return VM_FAULT_NOPAGE;
+}
+
+static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
+				unsigned long addr, pfn_t pfn)
+{
+	int err = vm_insert_mixed(vma, addr, pfn);
+
+	if (err == -ENOMEM)
+		return VM_FAULT_OOM;
+	if (err < 0 && err != -EBUSY)
+		return VM_FAULT_SIGBUS;
+
+	return VM_FAULT_NOPAGE;
+}
+
+static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
+			unsigned long addr, unsigned long pfn)
+{
+	int err = vm_insert_pfn(vma, addr, pfn);
+
+	if (err == -ENOMEM)
+		return VM_FAULT_OOM;
+	if (err < 0 && err != -EBUSY)
+		return VM_FAULT_SIGBUS;
+
+	return VM_FAULT_NOPAGE;
+}
 
 struct page *follow_page_mask(struct vm_area_struct *vma,
 			      unsigned long address, unsigned int foll_flags,
@@ -2589,7 +2632,7 @@ extern int get_hwpoison_page(struct page *page);
 extern int sysctl_memory_failure_early_kill;
 extern int sysctl_memory_failure_recovery;
 extern void shake_page(struct page *p, int access);
-extern atomic_long_t num_poisoned_pages;
+extern atomic_long_t num_poisoned_pages __read_mostly;
 extern int soft_offline_page(struct page *page, int flags);
 
 
@@ -2611,6 +2654,7 @@ enum mf_action_page_type {
 	MF_MSG_POISONED_HUGE,
 	MF_MSG_HUGE,
 	MF_MSG_FREE_HUGE,
+	MF_MSG_NON_PMD_HUGE,
 	MF_MSG_UNMAP_FAILED,
 	MF_MSG_DIRTY_SWAPCACHE,
 	MF_MSG_CLEAN_SWAPCACHE,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fd1af6b9591d..21612347d311 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -22,6 +22,8 @@
 #endif
 #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
 
+typedef int vm_fault_t;
+
 struct address_space;
 struct mem_cgroup;
 struct hmm;
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 57b0030d3800..2ad72d2c8cc5 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -37,10 +37,10 @@ void dump_mm(const struct mm_struct *mm);
 			BUG();						\
 		}							\
 	} while (0)
-#define VM_WARN_ON(cond) WARN_ON(cond)
-#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
-#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
-#define VM_WARN(cond, format...) WARN(cond, format)
+#define VM_WARN_ON(cond) (void)WARN_ON(cond)
+#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
+#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
+#define VM_WARN(cond, format...) (void)WARN(cond, format)
 #else
 #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
 #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a2db4576e499..f11ae29005f1 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -633,14 +633,15 @@ typedef struct pglist_data {
 #ifndef CONFIG_NO_BOOTMEM
 	struct bootmem_data *bdata;
 #endif
-#ifdef CONFIG_MEMORY_HOTPLUG
+#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
 	/*
 	 * Must be held any time you expect node_start_pfn, node_present_pages
 	 * or node_spanned_pages stay constant. Holding this will also
 	 * guarantee that any pfn_valid() stays that way.
 	 *
 	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
-	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
+	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
+	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
 	 *
 	 * Nests above zone->lock and zone->span_seqlock
 	 */
@@ -775,7 +776,8 @@ static inline bool is_dev_zone(const struct zone *zone)
 
 #include <linux/memory_hotplug.h>
 void build_all_zonelists(pg_data_t *pgdat);
-void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
+void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
+		   enum zone_type classzone_idx);
 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 		int classzone_idx, unsigned int alloc_flags,
 		long free_pages);
diff --git a/include/linux/node.h b/include/linux/node.h
index 4ece0fee0ffc..41f171861dcc 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -67,7 +67,7 @@ extern void unregister_one_node(int nid);
 extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
 extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
 extern int register_mem_sect_under_node(struct memory_block *mem_blk,
-						int nid);
+						int nid, bool check_nid);
 extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
 					   unsigned long phys_index);
 
@@ -97,7 +97,7 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 	return 0;
 }
 static inline int register_mem_sect_under_node(struct memory_block *mem_blk,
-							int nid)
+							int nid, bool check_nid)
 {
 	return 0;
 }
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 50c2b8786831..e34a27727b9a 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -156,9 +156,18 @@ static __always_inline int PageCompound(struct page *page)
 	return test_bit(PG_head, &page->flags) || PageTail(page);
 }
 
+#define	PAGE_POISON_PATTERN	-1l
+static inline int PagePoisoned(const struct page *page)
+{
+	return page->flags == PAGE_POISON_PATTERN;
+}
+
 /*
  * Page flags policies wrt compound pages
  *
+ * PF_POISONED_CHECK
+ *     check if this struct page poisoned/uninitialized
+ *
  * PF_ANY:
  *     the page flag is relevant for small, head and tail pages.
  *
@@ -176,17 +185,20 @@ static __always_inline int PageCompound(struct page *page)
  * PF_NO_COMPOUND:
  *     the page flag is not relevant for compound pages.
  */
-#define PF_ANY(page, enforce)	page
-#define PF_HEAD(page, enforce)	compound_head(page)
+#define PF_POISONED_CHECK(page) ({					\
+		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
+		page; })
+#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
+#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
 #define PF_ONLY_HEAD(page, enforce) ({					\
 		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
-		page;})
+		PF_POISONED_CHECK(page); })
 #define PF_NO_TAIL(page, enforce) ({					\
 		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
-		compound_head(page);})
+		PF_POISONED_CHECK(compound_head(page)); })
 #define PF_NO_COMPOUND(page, enforce) ({				\
 		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page); \
-		page;})
+		PF_POISONED_CHECK(page); })
 
 /*
  * Macros to create function definitions for page flags
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 760d74a0e9a9..14d14beb1f7f 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -175,8 +175,7 @@ static inline void page_ref_unfreeze(struct page *page, int count)
 	VM_BUG_ON_PAGE(page_count(page) != 0, page);
 	VM_BUG_ON(count == 0);
 
-	smp_mb();
-	atomic_set(&page->_refcount, count);
+	atomic_set_release(&page->_refcount, count);
 	if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
 		__page_ref_unfreeze(page, count);
 }
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 231abc8976c5..81ebd71f8c03 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -125,7 +125,6 @@
 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
 				(unsigned long)ZERO_SIZE_PTR)
 
-#include <linux/kmemleak.h>
 #include <linux/kasan.h>
 
 struct mem_cgroup;
@@ -137,12 +136,13 @@ bool slab_is_available(void);
 
 extern bool usercopy_fallback;
 
-struct kmem_cache *kmem_cache_create(const char *name, size_t size,
-			size_t align, slab_flags_t flags,
+struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
+			unsigned int align, slab_flags_t flags,
 			void (*ctor)(void *));
 struct kmem_cache *kmem_cache_create_usercopy(const char *name,
-			size_t size, size_t align, slab_flags_t flags,
-			size_t useroffset, size_t usersize,
+			unsigned int size, unsigned int align,
+			slab_flags_t flags,
+			unsigned int useroffset, unsigned int usersize,
 			void (*ctor)(void *));
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
@@ -308,7 +308,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
 *	2 = 129 .. 192 bytes
 *	n = 2^(n-1)+1 .. 2^n
 */
-static __always_inline int kmalloc_index(size_t size)
+static __always_inline unsigned int kmalloc_index(size_t size)
 {
 	if (!size)
 		return 0;
@@ -504,7 +504,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 			return kmalloc_large(size, flags);
 #ifndef CONFIG_SLOB
 		if (!(flags & GFP_DMA)) {
-			int index = kmalloc_index(size);
+			unsigned int index = kmalloc_index(size);
 
 			if (!index)
 				return ZERO_SIZE_PTR;
@@ -522,11 +522,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 * return size or 0 if a kmalloc cache for that
 * size does not exist
 */
-static __always_inline int kmalloc_size(int n)
+static __always_inline unsigned int kmalloc_size(unsigned int n)
 {
 #ifndef CONFIG_SLOB
 	if (n > 2)
-		return 1 << n;
+		return 1U << n;
 
 	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
 		return 96;
@@ -542,7 +542,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 #ifndef CONFIG_SLOB
 	if (__builtin_constant_p(size) &&
 		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
-		int i = kmalloc_index(size);
+		unsigned int i = kmalloc_index(size);
 
 		if (!i)
 			return ZERO_SIZE_PTR;
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 7385547c04b1..d9228e4d0320 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -85,8 +85,8 @@ struct kmem_cache {
 	unsigned int *random_seq;
 #endif
 
-	size_t useroffset;		/* Usercopy region offset */
-	size_t usersize;		/* Usercopy region size */
+	unsigned int useroffset;	/* Usercopy region offset */
+	unsigned int usersize;		/* Usercopy region size */
 
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 8ad99c47b19c..3773e26c08c1 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -73,7 +73,7 @@ struct kmem_cache_cpu {
 * given order would contain.
 */
 struct kmem_cache_order_objects {
-	unsigned long x;
+	unsigned int x;
 };
 
 /*
@@ -84,11 +84,12 @@ struct kmem_cache {
 	/* Used for retriving partial slabs etc */
 	slab_flags_t flags;
 	unsigned long min_partial;
-	int size;		/* The size of an object including meta data */
-	int object_size;	/* The size of an object without meta data */
-	int offset;		/* Free pointer offset. */
+	unsigned int size;	/* The size of an object including meta data */
+	unsigned int object_size;/* The size of an object without meta data */
+	unsigned int offset;	/* Free pointer offset. */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
-	int cpu_partial;	/* Number of per cpu partial objects to keep around */
+	/* Number of per cpu partial objects to keep around */
+	unsigned int cpu_partial;
 #endif
 	struct kmem_cache_order_objects oo;
 
@@ -98,10 +99,10 @@ struct kmem_cache {
 	gfp_t allocflags;	/* gfp flags to use on each alloc */
 	int refcount;		/* Refcount for slab cache destroy */
 	void (*ctor)(void *);
-	int inuse;		/* Offset to metadata */
-	int align;		/* Alignment */
-	int reserved;		/* Reserved bytes at the end of slabs */
-	int red_left_pad;	/* Left redzone padding size */
+	unsigned int inuse;		/* Offset to metadata */
+	unsigned int align;		/* Alignment */
+	unsigned int reserved;		/* Reserved bytes at the end of slabs */
+	unsigned int red_left_pad;	/* Left redzone padding size */
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
 #ifdef CONFIG_SYSFS
@@ -110,7 +111,8 @@ struct kmem_cache {
 #endif
 #ifdef CONFIG_MEMCG
 	struct memcg_cache_params memcg_params;
-	int max_attr_size; /* for propagation, maximum size of a stored attr */
+	/* for propagation, maximum size of a stored attr */
+	unsigned int max_attr_size;
 #ifdef CONFIG_SYSFS
 	struct kset *memcg_kset;
 #endif
@@ -124,7 +126,7 @@ struct kmem_cache {
 	/*
 	 * Defragmentation by allocating from a remote node.
 	 */
-	int remote_node_defrag_ratio;
+	unsigned int remote_node_defrag_ratio;
 #endif
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
@@ -135,8 +137,8 @@ struct kmem_cache {
 	struct kasan_cache kasan_info;
 #endif
 
-	size_t useroffset;		/* Usercopy region offset */
-	size_t usersize;		/* Usercopy region size */
+	unsigned int useroffset;	/* Usercopy region offset */
+	unsigned int usersize;		/* Usercopy region size */
 
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a1a3f4ed94ce..2417d288e016 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -400,7 +400,6 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *,
 #define SWAP_ADDRESS_SPACE_SHIFT	14
 #define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
 extern struct address_space *swapper_spaces[];
-extern bool swap_vma_readahead;
 #define swap_address_space(entry)			    \
 	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
 		>> SWAP_ADDRESS_SPACE_SHIFT])
@@ -422,14 +421,10 @@ extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
 extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr,
 			bool *new_page_allocated);
-extern struct page *swapin_readahead(swp_entry_t, gfp_t,
-			struct vm_area_struct *vma, unsigned long addr);
-
-extern struct page *swap_readahead_detect(struct vm_fault *vmf,
-					  struct vma_swap_readahead *swap_ra);
-extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
-					    struct vm_fault *vmf,
-					    struct vma_swap_readahead *swap_ra);
+extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
+				struct vm_fault *vmf);
+extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
+				struct vm_fault *vmf);
 
 /* linux/mm/swapfile.c */
 extern atomic_long_t nr_swap_pages;
@@ -437,11 +432,6 @@ extern long total_swap_pages;
 extern atomic_t nr_rotate_swap;
 extern bool has_usable_swap(void);
 
-static inline bool swap_use_vma_readahead(void)
-{
-	return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
-}
-
 /* Swap 50% full? Release swapcache more aggressively.. */
 static inline bool vm_swap_full(void)
 {
@@ -537,26 +527,14 @@ static inline void put_swap_page(struct page *page, swp_entry_t swp)
 {
 }
 
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
-			struct vm_area_struct *vma, unsigned long addr)
+static inline struct page *swap_cluster_readahead(swp_entry_t entry,
+				gfp_t gfp_mask, struct vm_fault *vmf)
 {
 	return NULL;
 }
 
-static inline bool swap_use_vma_readahead(void)
-{
-	return false;
-}
-
-static inline struct page *swap_readahead_detect(
-	struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
-{
-	return NULL;
-}
-
-static inline struct page *do_swap_page_readahead(
-	swp_entry_t fentry, gfp_t gfp_mask,
-	struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
+static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
+			struct vm_fault *vmf)
 {
 	return NULL;
 }
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 57a8e98f2708..2219cce81ca4 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -47,6 +47,8 @@ void zs_destroy_pool(struct zs_pool *pool);
 unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
 void zs_free(struct zs_pool *pool, unsigned long obj);
 
+size_t zs_huge_class_size(struct zs_pool *pool);
+
 void *zs_map_object(struct zs_pool *pool, unsigned long handle,
 			enum zs_mapmode mm);
 void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
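The zs_huge_class_size() declaration added to zsmalloc.h above reports the object size beyond which zsmalloc places an allocation in a "huge" size class (one object per zspage). A minimal sketch of how a caller might use it; the pool and comp_len variables and the store-uncompressed fallback are illustrative (modelled on what a compressed-RAM backend does), not part of this diff:

	/* Query once; the threshold is a fixed property of the pool. */
	size_t huge_class_size = zs_huge_class_size(pool);

	/*
	 * If compression still leaves the object at or above the huge-class
	 * threshold, it will consume a full zspage anyway, so store the
	 * page as-is and skip the compression overhead on the read side.
	 */
	if (comp_len >= huge_class_size)
		comp_len = PAGE_SIZE;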
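Similarly, the vm_fault_t typedef added to mm_types.h and the vmf_insert_page()/vmf_insert_mixed()/vmf_insert_pfn() wrappers added to mm.h let ->fault handlers return VM_FAULT_* codes directly instead of translating errnos by hand. A minimal sketch of a hypothetical driver fault handler written against the new helpers (mydrv_fault, mydrv_vm_ops and the base_pfn field are invented for illustration and are not part of this diff):

	static vm_fault_t mydrv_fault(struct vm_fault *vmf)
	{
		struct mydrv *drv = vmf->vma->vm_private_data;

		/* vmf_insert_pfn() maps the errno to a VM_FAULT_* code */
		return vmf_insert_pfn(vmf->vma, vmf->address,
				      drv->base_pfn + vmf->pgoff);
	}

	static const struct vm_operations_struct mydrv_vm_ops = {
		.fault = mydrv_fault,
	};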