From 397b080bb70f30d6b9778b0e607b67efcf64aa6e Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 26 Jul 2016 15:20:59 -0700 Subject: arm: get rid of superfluous __GFP_REPEAT __GFP_REPEAT has a rather weak semantic but since it has been introduced around 2.6.12 it has been ignored for low order allocations. PGALLOC_GFP uses __GFP_REPEAT but none of the allocation which uses this flag is for more than order-2. This means that this flag has never been actually useful here because it has always been used only for PAGE_ALLOC_COSTLY requests. Link: http://lkml.kernel.org/r/1464599699-30131-5-git-send-email-mhocko@kernel.org Signed-off-by: Michal Hocko Cc: Russell King Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/include/asm/pgalloc.h | 2 +- arch/arm/mm/pgd.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 20febb368844..b2902a5cd780 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -57,7 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) extern pgd_t *pgd_alloc(struct mm_struct *mm); extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); -#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) +#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) static inline void clean_pte_table(pte_t *pte) { diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index b8d477321730..c1c1a5c67da1 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c @@ -23,7 +23,7 @@ #define __pgd_alloc() kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL) #define __pgd_free(pgd) kfree(pgd) #else -#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, 2) +#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2) #define __pgd_free(pgd) free_pages((unsigned long)pgd, 2) #endif -- cgit v1.2.3-70-g09d2 From 221c7dc881b3a2a6267d6246f8aca072514e98c0 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 26 Jul 2016 15:21:02 -0700 Subject: dax: some small updates to dax.txt documentation These are originally from Matthew Wilcox and were part of his huge "mm,fs,dax: Change ->pmd_fault to ->huge_fault" patch that was part of PUD support. I'm breaking these small changes out as they stand on their own and add useful information to Documentation/filesystems/dax.txt. Link: http://lkml.kernel.org/r/20160714214049.20075-1-ross.zwisler@linux.intel.com Signed-off-by: Ross Zwisler Cc: "Theodore Ts'o" Cc: Alexander Viro Cc: Andreas Dilger Cc: Dan Williams Cc: Dave Chinner Cc: Jan Kara Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/dax.txt | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt index ce4587d257d2..0c16a22521a8 100644 --- a/Documentation/filesystems/dax.txt +++ b/Documentation/filesystems/dax.txt @@ -49,6 +49,7 @@ These block devices may be used for inspiration: - axonram: Axon DDR2 device driver - brd: RAM backed block device driver - dcssblk: s390 dcss block device driver +- pmem: NVDIMM persistent memory driver Implementation Tips for Filesystem Writers @@ -75,8 +76,9 @@ calls to get_block() (for example by a page-fault racing with a read() or a write()) work correctly. 
These filesystems may be used for inspiration: -- ext2: the second extended filesystem, see Documentation/filesystems/ext2.txt -- ext4: the fourth extended filesystem, see Documentation/filesystems/ext4.txt +- ext2: see Documentation/filesystems/ext2.txt +- ext4: see Documentation/filesystems/ext4.txt +- xfs: see Documentation/filesystems/xfs.txt Handling Media Errors -- cgit v1.2.3-70-g09d2 From 6b524995a71d49ae032dba308d117dbf2a18d175 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Tue, 26 Jul 2016 15:21:05 -0700 Subject: dax: remote unused fault wrappers Remove the unused wrappers dax_fault() and dax_pmd_fault(). After this removal, rename __dax_fault() and __dax_pmd_fault() to dax_fault() and dax_pmd_fault() respectively, and update all callers. The dax_fault() and dax_pmd_fault() wrappers were initially intended to capture some filesystem independent functionality around page faults (calling sb_start_pagefault() & sb_end_pagefault(), updating file mtime and ctime). However, the following commits: 5726b27b09cc ("ext2: Add locking for DAX faults") ea3d7209ca01 ("ext4: fix races between page faults and hole punching") added locking to the ext2 and ext4 filesystems after these common operations but before __dax_fault() and __dax_pmd_fault() were called. This means that these wrappers are no longer used, and are unlikely to be used in the future. XFS has had locking analogous to what was recently added to ext2 and ext4 since DAX support was initially introduced by: 6b698edeeef0 ("xfs: add DAX file operations support") Link: http://lkml.kernel.org/r/20160714214049.20075-2-ross.zwisler@linux.intel.com Signed-off-by: Ross Zwisler Cc: "Theodore Ts'o" Cc: Alexander Viro Cc: Andreas Dilger Cc: Dan Williams Cc: Dave Chinner Reviewed-by: Jan Kara Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dax.c | 73 ++++++++++------------------------------------------- fs/ext2/file.c | 4 +-- fs/ext4/file.c | 4 +-- fs/xfs/xfs_file.c | 6 ++--- include/linux/dax.h | 5 ---- 5 files changed, 21 insertions(+), 71 deletions(-) diff --git a/fs/dax.c b/fs/dax.c index e207f8f9b700..432b9e6dd63b 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -819,16 +819,16 @@ static int dax_insert_mapping(struct address_space *mapping, } /** - * __dax_fault - handle a page fault on a DAX file + * dax_fault - handle a page fault on a DAX file * @vma: The virtual memory area where the fault occurred * @vmf: The description of the fault * @get_block: The filesystem method used to translate file offsets to blocks * * When a page fault occurs, filesystems may call this helper in their - * fault handler for DAX files. __dax_fault() assumes the caller has done all + * fault handler for DAX files. dax_fault() assumes the caller has done all * the necessary locking for the page fault to proceed successfully. 
*/ -int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, +int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, get_block_t get_block) { struct file *file = vma->vm_file; @@ -913,33 +913,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, return VM_FAULT_SIGBUS | major; return VM_FAULT_NOPAGE | major; } -EXPORT_SYMBOL(__dax_fault); - -/** - * dax_fault - handle a page fault on a DAX file - * @vma: The virtual memory area where the fault occurred - * @vmf: The description of the fault - * @get_block: The filesystem method used to translate file offsets to blocks - * - * When a page fault occurs, filesystems may call this helper in their - * fault handler for DAX files. - */ -int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, - get_block_t get_block) -{ - int result; - struct super_block *sb = file_inode(vma->vm_file)->i_sb; - - if (vmf->flags & FAULT_FLAG_WRITE) { - sb_start_pagefault(sb); - file_update_time(vma->vm_file); - } - result = __dax_fault(vma, vmf, get_block); - if (vmf->flags & FAULT_FLAG_WRITE) - sb_end_pagefault(sb); - - return result; -} EXPORT_SYMBOL_GPL(dax_fault); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) @@ -967,7 +940,16 @@ static void __dax_dbg(struct buffer_head *bh, unsigned long address, #define dax_pmd_dbg(bh, address, reason) __dax_dbg(bh, address, reason, "dax_pmd") -int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, +/** + * dax_pmd_fault - handle a PMD fault on a DAX file + * @vma: The virtual memory area where the fault occurred + * @vmf: The description of the fault + * @get_block: The filesystem method used to translate file offsets to blocks + * + * When a page fault occurs, filesystems may call this helper in their + * pmd_fault handler for DAX files. + */ +int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags, get_block_t get_block) { struct file *file = vma->vm_file; @@ -1119,7 +1101,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, * * The PMD path doesn't have an equivalent to * dax_pfn_mkwrite(), though, so for a read followed by a - * write we traverse all the way through __dax_pmd_fault() + * write we traverse all the way through dax_pmd_fault() * twice. This means we can just skip inserting a radix tree * entry completely on the initial read and just wait until * the write to insert a dirty entry. @@ -1148,33 +1130,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, result = VM_FAULT_FALLBACK; goto out; } -EXPORT_SYMBOL_GPL(__dax_pmd_fault); - -/** - * dax_pmd_fault - handle a PMD fault on a DAX file - * @vma: The virtual memory area where the fault occurred - * @vmf: The description of the fault - * @get_block: The filesystem method used to translate file offsets to blocks - * - * When a page fault occurs, filesystems may call this helper in their - * pmd_fault handler for DAX files. 
- */ -int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, - pmd_t *pmd, unsigned int flags, get_block_t get_block) -{ - int result; - struct super_block *sb = file_inode(vma->vm_file)->i_sb; - - if (flags & FAULT_FLAG_WRITE) { - sb_start_pagefault(sb); - file_update_time(vma->vm_file); - } - result = __dax_pmd_fault(vma, address, pmd, flags, get_block); - if (flags & FAULT_FLAG_WRITE) - sb_end_pagefault(sb); - - return result; -} EXPORT_SYMBOL_GPL(dax_pmd_fault); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 868c02317b05..5efeefe17abb 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -51,7 +51,7 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } down_read(&ei->dax_sem); - ret = __dax_fault(vma, vmf, ext2_get_block); + ret = dax_fault(vma, vmf, ext2_get_block); up_read(&ei->dax_sem); if (vmf->flags & FAULT_FLAG_WRITE) @@ -72,7 +72,7 @@ static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, } down_read(&ei->dax_sem); - ret = __dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block); + ret = dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block); up_read(&ei->dax_sem); if (flags & FAULT_FLAG_WRITE) diff --git a/fs/ext4/file.c b/fs/ext4/file.c index df44c877892a..6664f9c82005 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -202,7 +202,7 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (IS_ERR(handle)) result = VM_FAULT_SIGBUS; else - result = __dax_fault(vma, vmf, ext4_dax_get_block); + result = dax_fault(vma, vmf, ext4_dax_get_block); if (write) { if (!IS_ERR(handle)) @@ -237,7 +237,7 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, if (IS_ERR(handle)) result = VM_FAULT_SIGBUS; else - result = __dax_pmd_fault(vma, addr, pmd, flags, + result = dax_pmd_fault(vma, addr, pmd, flags, ext4_dax_get_block); if (write) { diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 47fc63295422..1b3dc9dd8861 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1551,7 +1551,7 @@ xfs_filemap_page_mkwrite( xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); if (IS_DAX(inode)) { - ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault); + ret = dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault); } else { ret = block_page_mkwrite(vma, vmf, xfs_get_blocks); ret = block_page_mkwrite_return(ret); @@ -1585,7 +1585,7 @@ xfs_filemap_fault( * changes to xfs_get_blocks_direct() to map unwritten extent * ioend for conversion on read-only mappings. 
*/ - ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault); + ret = dax_fault(vma, vmf, xfs_get_blocks_dax_fault); } else ret = filemap_fault(vma, vmf); xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); @@ -1622,7 +1622,7 @@ xfs_filemap_pmd_fault( } xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); - ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault); + ret = dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault); xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); if (flags & FAULT_FLAG_WRITE) diff --git a/include/linux/dax.h b/include/linux/dax.h index 43d5f0b799c7..9c6dc7704043 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -14,7 +14,6 @@ ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); int dax_truncate_page(struct inode *, loff_t from, get_block_t); int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); -int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); void dax_wake_mapping_entry_waiter(struct address_space *mapping, pgoff_t index, bool wake_all); @@ -46,19 +45,15 @@ static inline int __dax_zero_page_range(struct block_device *bdev, #if defined(CONFIG_TRANSPARENT_HUGEPAGE) int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, unsigned int flags, get_block_t); -int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, - unsigned int flags, get_block_t); #else static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags, get_block_t gb) { return VM_FAULT_FALLBACK; } -#define __dax_pmd_fault dax_pmd_fault #endif int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); #define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) -#define __dax_mkwrite(vma, vmf, gb) __dax_fault(vma, vmf, gb) static inline bool vma_is_dax(struct vm_area_struct *vma) { -- cgit v1.2.3-70-g09d2 From d5dfc80f80dbb3bf94e5e9efa694670ea78cd84d Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Tue, 26 Jul 2016 15:21:08 -0700 Subject: dma-debug: track bucket lock state for static checkers get_hash_bucket() and put_hash_bucket() acquire and release the same spinlock, but this confuses static checkers such as sparse lib/dma-debug.c:254:27: warning: context imbalance in 'get_hash_bucket' - wrong count at exit lib/dma-debug.c:268:13: warning: context imbalance in 'put_hash_bucket' - unexpected unlock Add the appropriate acquire and release statements so that checkers can properly track the lock state. 
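
For reference, a minimal standalone sketch (illustrative only; the lock and helper names below are made up, not taken from lib/dma-debug.c) of how these annotations let sparse pair an acquire in one helper with a release in another. Outside of sparse runs (__CHECKER__ unset) __acquires()/__releases() expand to nothing, so they do not change the compiled kernel; sparse itself is typically invoked with "make C=1" (changed files) or "make C=2" (all files).

/*
 * Sketch: annotate helpers that take a lock in one function and drop it
 * in another, so sparse's context tracking stays balanced across the
 * pair. Names are illustrative.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_lock_bucket(unsigned long *flags)
	__acquires(&demo_lock)
{
	unsigned long __flags;

	spin_lock_irqsave(&demo_lock, __flags);
	*flags = __flags;
}

static void demo_unlock_bucket(unsigned long *flags)
	__releases(&demo_lock)
{
	spin_unlock_irqrestore(&demo_lock, *flags);
}
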
Link: http://lkml.kernel.org/r/20160701191552.24295-1-sboyd@codeaurora.org Signed-off-by: Stephen Boyd Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/dma-debug.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 51a76af25c66..fcfa1939ac41 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -253,6 +253,7 @@ static int hash_fn(struct dma_debug_entry *entry) */ static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, unsigned long *flags) + __acquires(&dma_entry_hash[idx].lock) { int idx = hash_fn(entry); unsigned long __flags; @@ -267,6 +268,7 @@ static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, */ static void put_hash_bucket(struct hash_bucket *bucket, unsigned long *flags) + __releases(&bucket->lock) { unsigned long __flags = *flags; -- cgit v1.2.3-70-g09d2 From 3bd9646334d8a3e7f91a94b9c217657f726de7ee Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 26 Jul 2016 15:21:11 -0700 Subject: fbmon: remove unused function argument When building with "make W=1", we get a warning about an empty stub function that does nothing but reassign its one of its arguments: drivers/video/fbdev/core/fbmon.c: In function 'fb_edid_to_monspecs': drivers/video/fbdev/core/fbmon.c:1497:67: error: parameter 'specs' set but not used [-Werror=unused-but-set-parameter] We can simply make that function completely empty to avoid the warning. This prevents a warning which everyone will see after "CFLAGS: add -Wunused-but-set-parameter" is merged. Link: http://lkml.kernel.org/r/20160715203229.1771162-1-arnd@arndb.de Signed-off-by: Arnd Bergmann Cc: Jean-Christophe Plagniol-Villard Cc: Tomi Valkeinen Cc: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/fbdev/core/fbmon.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 47c3191ec313..62c0cf79674f 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c @@ -1496,7 +1496,6 @@ int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var) } void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs) { - specs = NULL; } void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs) { -- cgit v1.2.3-70-g09d2 From c965b105bf1509beefbb78d33f721d92240a770c Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 26 Jul 2016 15:21:17 -0700 Subject: kbuild: abort build on bad stack protector flag Before, the stack protector flag was sanity checked before .config had been reprocessed. This meant the build couldn't be aborted early, and only a warning could be emitted followed later by the compiler blowing up with an unknown flag. This has caused a lot of confusion over time, so this splits the flag selection from sanity checking and performs the sanity checking after the make has been restarted from a reprocessed .config, so builds can be aborted as early as possible now. Additionally moves the x86-specific sanity check to the same location, since it suffered from the same warn-then-wait-for-compiler-failure problem. 
Link: http://lkml.kernel.org/r/20160712223043.GA11664@www.outflux.net Signed-off-by: Kees Cook Cc: Michal Marek Cc: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Makefile | 69 +++++++++++++++++++++++++++++++++---------------------- arch/x86/Makefile | 8 ------- 2 files changed, 42 insertions(+), 35 deletions(-) diff --git a/Makefile b/Makefile index e1a5605b01fc..ce8ef5d0bd29 100644 --- a/Makefile +++ b/Makefile @@ -647,41 +647,28 @@ ifneq ($(CONFIG_FRAME_WARN),0) KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN}) endif -# Handle stack protector mode. -# -# Since kbuild can potentially perform two passes (first with the old -# .config values and then with updated .config values), we cannot error out -# if a desired compiler option is unsupported. If we were to error, kbuild -# could never get to the second pass and actually notice that we changed -# the option to something that was supported. -# -# Additionally, we don't want to fallback and/or silently change which compiler -# flags will be used, since that leads to producing kernels with different -# security feature characteristics depending on the compiler used. ("But I -# selected CC_STACKPROTECTOR_STRONG! Why did it build with _REGULAR?!") -# -# The middle ground is to warn here so that the failed option is obvious, but -# to let the build fail with bad compiler flags so that we can't produce a -# kernel when there is a CONFIG and compiler mismatch. -# +# This selects the stack protector compiler flag. Testing it is delayed +# until after .config has been reprocessed, in the prepare-compiler-check +# target. ifdef CONFIG_CC_STACKPROTECTOR_REGULAR stackp-flag := -fstack-protector - ifeq ($(call cc-option, $(stackp-flag)),) - $(warning Cannot use CONFIG_CC_STACKPROTECTOR_REGULAR: \ - -fstack-protector not supported by compiler) - endif + stackp-name := REGULAR else ifdef CONFIG_CC_STACKPROTECTOR_STRONG stackp-flag := -fstack-protector-strong - ifeq ($(call cc-option, $(stackp-flag)),) - $(warning Cannot use CONFIG_CC_STACKPROTECTOR_STRONG: \ - -fstack-protector-strong not supported by compiler) - endif + stackp-name := STRONG else # Force off for distro compilers that enable stack protector by default. stackp-flag := $(call cc-option, -fno-stack-protector) endif endif +# Find arch-specific stack protector compiler sanity-checking script. +ifdef CONFIG_CC_STACKPROTECTOR + stackp-path := $(srctree)/scripts/gcc-$(ARCH)_$(BITS)-has-stack-protector.sh + ifneq ($(wildcard $(stackp-path)),) + stackp-check := $(stackp-path) + endif +endif KBUILD_CFLAGS += $(stackp-flag) ifdef CONFIG_KCOV @@ -1017,8 +1004,10 @@ ifneq ($(KBUILD_SRC),) fi; endif -# prepare2 creates a makefile if using a separate output directory -prepare2: prepare3 outputmakefile asm-generic +# prepare2 creates a makefile if using a separate output directory. +# From this point forward, .config has been reprocessed, so any rules +# that need to depend on updated CONFIG_* values can be checked here. +prepare2: prepare3 prepare-compiler-check outputmakefile asm-generic prepare1: prepare2 $(version_h) include/generated/utsrelease.h \ include/config/auto.conf @@ -1049,6 +1038,32 @@ endif PHONY += prepare-objtool prepare-objtool: $(objtool_target) +# Check for CONFIG flags that require compiler support. Abort the build +# after .config has been processed, but before the kernel build starts. 
+# +# For security-sensitive CONFIG options, we don't want to fallback and/or +# silently change which compiler flags will be used, since that leads to +# producing kernels with different security feature characteristics +# depending on the compiler used. (For example, "But I selected +# CC_STACKPROTECTOR_STRONG! Why did it build with _REGULAR?!") +PHONY += prepare-compiler-check +prepare-compiler-check: FORCE +# Make sure compiler supports requested stack protector flag. +ifdef stackp-name + ifeq ($(call cc-option, $(stackp-flag)),) + @echo Cannot use CONFIG_CC_STACKPROTECTOR_$(stackp-name): \ + $(stackp-flag) not supported by compiler >&2 && exit 1 + endif +endif +# Make sure compiler does not have buggy stack-protector support. +ifdef stackp-check + ifneq ($(shell $(CONFIG_SHELL) $(stackp-check) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y) + @echo Cannot use CONFIG_CC_STACKPROTECTOR_$(stackp-name): \ + $(stackp-flag) available but compiler is broken >&2 && exit 1 + endif +endif + @: + # Generate some files # --------------------------------------------------------------------------- diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 6fce7f096b88..830ed391e7ef 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -126,14 +126,6 @@ else KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args) endif -# Make sure compiler does not have buggy stack-protector support. -ifdef CONFIG_CC_STACKPROTECTOR - cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh - ifneq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y) - $(warning stack-protector enabled but compiler support broken) - endif -endif - ifdef CONFIG_X86_X32 x32_ld_ok := $(call try-run,\ /bin/echo -e '1: .quad 1b' | \ -- cgit v1.2.3-70-g09d2 From 8cde0daf6c4cca8babec4861a1ce7f8875edf879 Mon Sep 17 00:00:00 2001 From: Riku Voipio Date: Tue, 26 Jul 2016 15:21:20 -0700 Subject: scripts/bloat-o-meter: fix percent on <1% changes Python divisions are integer divisions unless at least one parameter is a float. The current bloat-o-meter fails to print sub-percentage changes: Total: Before=10515408, After=10604060, chg 0.000000% Force float division by using one float and pretty the print to two significant decimals: Total: Before=10515408, After=10604060, chg +0.84% Link: http://lkml.kernel.org/r/1465980311-23814-1-git-send-email-riku.voipio@linaro.org Signed-off-by: Riku Voipio Reviewed-by: Josh Triplett Cc: Vineet Gupta Cc: Michal Marek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- scripts/bloat-o-meter | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter index 0254f3ba0dba..19f5adfd877d 100755 --- a/scripts/bloat-o-meter +++ b/scripts/bloat-o-meter @@ -67,5 +67,5 @@ print("%-40s %7s %7s %+7s" % ("function", "old", "new", "delta")) for d, n in delta: if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d)) -print("Total: Before=%d, After=%d, chg %f%%" % \ - (otot, ntot, (ntot - otot)*100/otot)) +print("Total: Before=%d, After=%d, chg %+.2f%%" % \ + (otot, ntot, (ntot - otot)*100.0/otot)) -- cgit v1.2.3-70-g09d2 From a44ce52363a886d45cf08b4b896cb65f0a9553eb Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Tue, 26 Jul 2016 15:21:23 -0700 Subject: m32r: add __ucmpdi2 to fix build failure We are having build failure with m32r and the error message being: ERROR: "__ucmpdi2" [lib/842/842_decompress.ko] undefined! ERROR: "__ucmpdi2" [fs/btrfs/btrfs.ko] undefined! 
ERROR: "__ucmpdi2" [drivers/scsi/sd_mod.ko] undefined! ERROR: "__ucmpdi2" [drivers/media/i2c/adv7842.ko] undefined! ERROR: "__ucmpdi2" [drivers/md/bcache/bcache.ko] undefined! ERROR: "__ucmpdi2" [drivers/iio/imu/inv_mpu6050/inv-mpu6050.ko] undefined! __ucmpdi2 is introduced to m32r architecture taking example from other architectures like h8300, microblaze, mips. Link: http://lkml.kernel.org/r/1465509213-4280-1-git-send-email-sudipm.mukherjee@gmail.com Signed-off-by: Sudip Mukherjee Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/m32r/kernel/m32r_ksyms.c | 3 +++ arch/m32r/lib/Makefile | 4 ++-- arch/m32r/lib/libgcc.h | 23 +++++++++++++++++++++++ arch/m32r/lib/ucmpdi2.c | 17 +++++++++++++++++ 4 files changed, 45 insertions(+), 2 deletions(-) create mode 100644 arch/m32r/lib/libgcc.h create mode 100644 arch/m32r/lib/ucmpdi2.c diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c index b727e693c805..23f26f4adfff 100644 --- a/arch/m32r/kernel/m32r_ksyms.c +++ b/arch/m32r/kernel/m32r_ksyms.c @@ -41,6 +41,9 @@ EXPORT_SYMBOL(cpu_data); EXPORT_SYMBOL(smp_flush_tlb_page); #endif +extern int __ucmpdi2(unsigned long long a, unsigned long long b); +EXPORT_SYMBOL(__ucmpdi2); + /* compiler generated symbol */ extern void __ashldi3(void); extern void __ashrdi3(void); diff --git a/arch/m32r/lib/Makefile b/arch/m32r/lib/Makefile index d16b4e40d1ae..5889eb9610b5 100644 --- a/arch/m32r/lib/Makefile +++ b/arch/m32r/lib/Makefile @@ -3,5 +3,5 @@ # lib-y := checksum.o ashxdi3.o memset.o memcpy.o \ - delay.o strlen.o usercopy.o csum_partial_copy.o - + delay.o strlen.o usercopy.o csum_partial_copy.o \ + ucmpdi2.o diff --git a/arch/m32r/lib/libgcc.h b/arch/m32r/lib/libgcc.h new file mode 100644 index 000000000000..267aa435bc35 --- /dev/null +++ b/arch/m32r/lib/libgcc.h @@ -0,0 +1,23 @@ +#ifndef __ASM_LIBGCC_H +#define __ASM_LIBGCC_H + +#include + +#ifdef __BIG_ENDIAN +struct DWstruct { + int high, low; +}; +#elif defined(__LITTLE_ENDIAN) +struct DWstruct { + int low, high; +}; +#else +#error I feel sick. 
+#endif + +typedef union { + struct DWstruct s; + long long ll; +} DWunion; + +#endif /* __ASM_LIBGCC_H */ diff --git a/arch/m32r/lib/ucmpdi2.c b/arch/m32r/lib/ucmpdi2.c new file mode 100644 index 000000000000..9d3c682c89b5 --- /dev/null +++ b/arch/m32r/lib/ucmpdi2.c @@ -0,0 +1,17 @@ +#include "libgcc.h" + +int __ucmpdi2(unsigned long long a, unsigned long long b) +{ + const DWunion au = {.ll = a}; + const DWunion bu = {.ll = b}; + + if ((unsigned int)au.s.high < (unsigned int)bu.s.high) + return 0; + else if ((unsigned int)au.s.high > (unsigned int)bu.s.high) + return 2; + if ((unsigned int)au.s.low < (unsigned int)bu.s.low) + return 0; + else if ((unsigned int)au.s.low > (unsigned int)bu.s.low) + return 2; + return 1; +} -- cgit v1.2.3-70-g09d2 From 17359a80b9315670f56cd016a92c083665e80ae1 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 26 Jul 2016 15:21:26 -0700 Subject: debugobjects.h: fix trivial kernel doc warning Add ':' to fix trivial kernel-doc warning in : ..//include/linux/debugobjects.h:63: warning: No description found for parameter 'is_static_object' Link: http://lkml.kernel.org/r/575B01B8.5060600@infradead.org Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/debugobjects.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 46056cb161fc..d82bf1994485 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h @@ -38,7 +38,7 @@ struct debug_obj { * @name: name of the object typee * @debug_hint: function returning address, which have associated * kernel symbol, to allow identify the object - * @is_static_object return true if the obj is static, otherwise return false + * @is_static_object: return true if the obj is static, otherwise return false * @fixup_init: fixup function, which is called when the init check * fails. All fixup functions must return true if fixup * was successful, otherwise return false -- cgit v1.2.3-70-g09d2 From 191df2b51303d62f95ccd8552e09dcd78eaa1a05 Mon Sep 17 00:00:00 2001 From: Eric Ren Date: Tue, 26 Jul 2016 15:21:29 -0700 Subject: ocfs2: fix a redundant re-initialization Obviously, memset() has zeroed the whole struct locking_max_version. So, it's no need to zero its two fields individually. Link: http://lkml.kernel.org/r/1463970605-18354-1-git-send-email-zren@suse.com Signed-off-by: Eric Ren Reviewed-by: Joseph Qi Reviewed-by: Gang He Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/stackglue.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index 13219ed73e1d..52c07346bea3 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -735,8 +735,6 @@ static void __exit ocfs2_stack_glue_exit(void) { memset(&locking_max_version, 0, sizeof(struct ocfs2_protocol_version)); - locking_max_version.pv_major = 0; - locking_max_version.pv_minor = 0; ocfs2_sysfs_exit(); if (ocfs2_table_header) unregister_sysctl_table(ocfs2_table_header); -- cgit v1.2.3-70-g09d2 From 0b492f68bb28c1ecb45cfdc4522074df26c6109d Mon Sep 17 00:00:00 2001 From: Junxiao Bi Date: Tue, 26 Jul 2016 15:21:32 -0700 Subject: ocfs2: improve recovery performance Journal replay will be run when performing recovery for a dead node. To avoid the stale cache impact, all blocks of dead node's journal inode were reloaded from disk. This hurts the performance. 
Check whether one block is cached before reloading it can improve performance a lot. In my test env, the time doing recovery was improved from 120s to 1s. [akpm@linux-foundation.org: clean up the for loop p_blkno handling] Link: http://lkml.kernel.org/r/1466155682-24656-1-git-send-email-junxiao.bi@oracle.com Signed-off-by: Junxiao Bi Reviewed-by: Joseph Qi Cc: "Gang He" Cc: Mark Fasheh Cc: Joel Becker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/journal.c | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index e607419cdfa4..a244f14c6b87 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1159,10 +1159,8 @@ static int ocfs2_force_read_journal(struct inode *inode) int status = 0; int i; u64 v_blkno, p_blkno, p_blocks, num_blocks; -#define CONCURRENT_JOURNAL_FILL 32ULL - struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL]; - - memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); + struct buffer_head *bh = NULL; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); v_blkno = 0; @@ -1174,29 +1172,32 @@ static int ocfs2_force_read_journal(struct inode *inode) goto bail; } - if (p_blocks > CONCURRENT_JOURNAL_FILL) - p_blocks = CONCURRENT_JOURNAL_FILL; - - /* We are reading journal data which should not - * be put in the uptodate cache */ - status = ocfs2_read_blocks_sync(OCFS2_SB(inode->i_sb), - p_blkno, p_blocks, bhs); - if (status < 0) { - mlog_errno(status); - goto bail; - } + for (i = 0; i < p_blocks; i++, p_blkno++) { + bh = __find_get_block(osb->sb->s_bdev, p_blkno, + osb->sb->s_blocksize); + /* block not cached. */ + if (!bh) + continue; + + brelse(bh); + bh = NULL; + /* We are reading journal data which should not + * be put in the uptodate cache. + */ + status = ocfs2_read_blocks_sync(osb, p_blkno, 1, &bh); + if (status < 0) { + mlog_errno(status); + goto bail; + } - for(i = 0; i < p_blocks; i++) { - brelse(bhs[i]); - bhs[i] = NULL; + brelse(bh); + bh = NULL; } v_blkno += p_blocks; } bail: - for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++) - brelse(bhs[i]); return status; } -- cgit v1.2.3-70-g09d2 From a8f24f1b3f0820ca6fe4b363e360f3fe7887647e Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Tue, 26 Jul 2016 15:21:35 -0700 Subject: ocfs2: cleanup unneeded goto in ocfs2_create_new_inode_locks The last goto is unneeded, so remove it. Link: http://lkml.kernel.org/r/576213D3.6080002@huawei.com Signed-off-by: Joseph Qi Reviewed-by: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/dlmglue.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 1eaa9100c889..fc5443226675 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -1665,10 +1665,8 @@ int ocfs2_create_new_inode_locks(struct inode *inode) } ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0); - if (ret) { + if (ret) mlog_errno(ret); - goto bail; - } bail: return ret; -- cgit v1.2.3-70-g09d2 From 8ec7b17a668403435a7ab09b952e7f2eca61cf20 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Tue, 26 Jul 2016 15:21:38 -0700 Subject: ocfs2/dlm: fix memory leak of dlm_debug_ctxt dlm_debug_ctxt->debug_refcnt is initialized to 1 and then increased to 2 by dlm_debug_get in dlm_debug_init. 
But dlm_debug_put is called only once in dlm_debug_shutdown during unregister dlm, which leads to dlm_debug_ctxt leaked. Link: http://lkml.kernel.org/r/577BB755.4030900@huawei.com Signed-off-by: Joseph Qi Reviewed-by: Jiufei Xue Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/dlm/dlmdebug.c | 26 ++------------------------ fs/ocfs2/dlm/dlmdebug.h | 1 - 2 files changed, 2 insertions(+), 25 deletions(-) diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 825136070d2c..e7b760deefae 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -347,26 +347,6 @@ static struct dentry *dlm_debugfs_root; #define DLM_DEBUGFS_PURGE_LIST "purge_list" /* begin - utils funcs */ -static void dlm_debug_free(struct kref *kref) -{ - struct dlm_debug_ctxt *dc; - - dc = container_of(kref, struct dlm_debug_ctxt, debug_refcnt); - - kfree(dc); -} - -static void dlm_debug_put(struct dlm_debug_ctxt *dc) -{ - if (dc) - kref_put(&dc->debug_refcnt, dlm_debug_free); -} - -static void dlm_debug_get(struct dlm_debug_ctxt *dc) -{ - kref_get(&dc->debug_refcnt); -} - static int debug_release(struct inode *inode, struct file *file) { free_page((unsigned long)file->private_data); @@ -932,11 +912,9 @@ int dlm_debug_init(struct dlm_ctxt *dlm) goto bail; } - dlm_debug_get(dc); return 0; bail: - dlm_debug_shutdown(dlm); return -ENOMEM; } @@ -949,7 +927,8 @@ void dlm_debug_shutdown(struct dlm_ctxt *dlm) debugfs_remove(dc->debug_mle_dentry); debugfs_remove(dc->debug_lockres_dentry); debugfs_remove(dc->debug_state_dentry); - dlm_debug_put(dc); + kfree(dc); + dc = NULL; } } @@ -969,7 +948,6 @@ int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm) mlog_errno(-ENOMEM); goto bail; } - kref_init(&dlm->dlm_debug_ctxt->debug_refcnt); return 0; bail: diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h index 1f27c4812d1a..5ced5482e7d3 100644 --- a/fs/ocfs2/dlm/dlmdebug.h +++ b/fs/ocfs2/dlm/dlmdebug.h @@ -30,7 +30,6 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle); #ifdef CONFIG_DEBUG_FS struct dlm_debug_ctxt { - struct kref debug_refcnt; struct dentry *debug_state_dentry; struct dentry *debug_lockres_dentry; struct dentry *debug_mle_dentry; -- cgit v1.2.3-70-g09d2 From 698d44b43a258006516538c27c8c4a8ab202e7d2 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Tue, 26 Jul 2016 15:21:40 -0700 Subject: ocfs2: cleanup implemented prototypes Several prototypes in inode.h are just defined but not actually implemented and used, so remove them. 
Link: http://lkml.kernel.org/r/57763787.4020706@huawei.com Signed-off-by: Joseph Qi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/inode.h | 7 ------- fs/ocfs2/super.c | 1 - 2 files changed, 8 deletions(-) diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index d8f3fc8d2551..50cc55047443 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h @@ -145,22 +145,15 @@ int ocfs2_drop_inode(struct inode *inode); struct inode *ocfs2_ilookup(struct super_block *sb, u64 feoff); struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 feoff, unsigned flags, int sysfile_type); -int ocfs2_inode_init_private(struct inode *inode); int ocfs2_inode_revalidate(struct dentry *dentry); void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe, int create_ino); -void ocfs2_read_inode(struct inode *inode); -void ocfs2_read_inode2(struct inode *inode, void *opaque); -ssize_t ocfs2_rw_direct(int rw, struct file *filp, char *buf, - size_t size, loff_t *offp); void ocfs2_sync_blockdev(struct super_block *sb); void ocfs2_refresh_inode(struct inode *inode, struct ocfs2_dinode *fe); int ocfs2_mark_inode_dirty(handle_t *handle, struct inode *inode, struct buffer_head *bh); -struct buffer_head *ocfs2_bread(struct inode *inode, - int block, int *err, int reada); void ocfs2_set_inode_flags(struct inode *inode); void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index d7cae3327de5..d97de212c4db 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -2072,7 +2072,6 @@ static int ocfs2_initialize_super(struct super_block *sb, osb->osb_dx_seed[3] = le32_to_cpu(di->id2.i_super.s_uuid_hash); osb->sb = sb; - /* Save off for ocfs2_rw_direct */ osb->s_sectsize_bits = blksize_bits(sector_size); BUG_ON(!osb->s_sectsize_bits); -- cgit v1.2.3-70-g09d2 From e81f1c5c4a6e2c72add88ec596aaf942e9fcf30b Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Tue, 26 Jul 2016 15:21:44 -0700 Subject: ocfs2: remove obscure BUG_ON in dlmglue These BUG_ON(!inode) are obscure because we have already used inode to get osb. And actually we can guarantee here inode is valid in the context. So we can safely remove them. Link: http://lkml.kernel.org/r/5776336A.6030104@huawei.com Signed-off-by: Joseph Qi Reviewed-by: Eric Ren Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/dlmglue.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index fc5443226675..83d576f6a287 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -1635,7 +1635,6 @@ int ocfs2_create_new_inode_locks(struct inode *inode) int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - BUG_ON(!inode); BUG_ON(!ocfs2_inode_is_new(inode)); mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); @@ -1678,8 +1677,6 @@ int ocfs2_rw_lock(struct inode *inode, int write) struct ocfs2_lock_res *lockres; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - BUG_ON(!inode); - mlog(0, "inode %llu take %s RW lock\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, write ? 
"EXMODE" : "PRMODE"); @@ -1722,8 +1719,6 @@ int ocfs2_open_lock(struct inode *inode) struct ocfs2_lock_res *lockres; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - BUG_ON(!inode); - mlog(0, "inode %llu take PRMODE open lock\n", (unsigned long long)OCFS2_I(inode)->ip_blkno); @@ -1747,8 +1742,6 @@ int ocfs2_try_open_lock(struct inode *inode, int write) struct ocfs2_lock_res *lockres; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - BUG_ON(!inode); - mlog(0, "inode %llu try to take %s open lock\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, write ? "EXMODE" : "PRMODE"); @@ -2326,8 +2319,6 @@ int ocfs2_inode_lock_full_nested(struct inode *inode, struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct buffer_head *local_bh = NULL; - BUG_ON(!inode); - mlog(0, "inode %llu, take %s META lock\n", (unsigned long long)OCFS2_I(inode)->ip_blkno, ex ? "EXMODE" : "PRMODE"); -- cgit v1.2.3-70-g09d2 From 7d65b27448a90e08270537234819563e07936f76 Mon Sep 17 00:00:00 2001 From: piaojun Date: Tue, 26 Jul 2016 15:21:47 -0700 Subject: ocfs2/cluster: clean up unnecessary assignment for 'ret' Clean up unnecessary assignment for 'ret'. Link: http://lkml.kernel.org/r/578C61F6.4080403@huawei.com Signed-off-by: Jun Piao Reviewed-by: Joseph Qi Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/cluster/tcp.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 4238eb28889f..1d67fcbf7160 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -1618,16 +1618,12 @@ static void o2net_start_connect(struct work_struct *work) /* watch for racing with tearing a node down */ node = o2nm_get_node_by_num(o2net_num_from_nn(nn)); - if (node == NULL) { - ret = 0; + if (node == NULL) goto out; - } mynode = o2nm_get_node_by_num(o2nm_this_node()); - if (mynode == NULL) { - ret = 0; + if (mynode == NULL) goto out; - } spin_lock(&nn->nn_lock); /* -- cgit v1.2.3-70-g09d2 From 6c60d2b5746cf23025ffe71bd7ff9075048fc90c Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 26 Jul 2016 15:21:50 -0700 Subject: fs/fs-writeback.c: add a new writeback list for sync MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit wait_sb_inodes() currently does a walk of all inodes in the filesystem to find dirty one to wait on during sync. This is highly inefficient and wastes a lot of CPU when there are lots of clean cached inodes that we don't need to wait on. To avoid this "all inode" walk, we need to track inodes that are currently under writeback that we need to wait for. We do this by adding inodes to a writeback list on the sb when the mapping is first tagged as having pages under writeback. wait_sb_inodes() can then walk this list of "inodes under IO" and wait specifically just for the inodes that the current sync(2) needs to wait for. Define a couple helpers to add/remove an inode from the writeback list and call them when the overall mapping is tagged for or cleared from writeback. Update wait_sb_inodes() to walk only the inodes under writeback due to the sync. With this change, filesystem sync times are significantly reduced for fs' with largely populated inode caches and otherwise no other work to do. For example, on a 16xcpu 2GHz x86-64 server, 10TB XFS filesystem with a ~10m entry inode cache, sync times are reduced from ~7.3s to less than 0.1s when the filesystem is fully clean. 
Link: http://lkml.kernel.org/r/1466594593-6757-2-git-send-email-bfoster@redhat.com Signed-off-by: Dave Chinner Signed-off-by: Josef Bacik Signed-off-by: Brian Foster Reviewed-by: Jan Kara Tested-by: Holger Hoffstätte Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fs-writeback.c | 106 +++++++++++++++++++++++++++++++++++----------- fs/inode.c | 2 + fs/super.c | 2 + include/linux/fs.h | 4 ++ include/linux/writeback.h | 3 ++ mm/page-writeback.c | 18 ++++++++ 6 files changed, 110 insertions(+), 25 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index fe7e83a45eff..1fcce8345da3 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -980,6 +980,37 @@ void inode_io_list_del(struct inode *inode) spin_unlock(&wb->list_lock); } +/* + * mark an inode as under writeback on the sb + */ +void sb_mark_inode_writeback(struct inode *inode) +{ + struct super_block *sb = inode->i_sb; + unsigned long flags; + + if (list_empty(&inode->i_wb_list)) { + spin_lock_irqsave(&sb->s_inode_wblist_lock, flags); + if (list_empty(&inode->i_wb_list)) + list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb); + spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags); + } +} + +/* + * clear an inode as under writeback on the sb + */ +void sb_clear_inode_writeback(struct inode *inode) +{ + struct super_block *sb = inode->i_sb; + unsigned long flags; + + if (!list_empty(&inode->i_wb_list)) { + spin_lock_irqsave(&sb->s_inode_wblist_lock, flags); + list_del_init(&inode->i_wb_list); + spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags); + } +} + /* * Redirty an inode: set its when-it-was dirtied timestamp and move it to the * furthest end of its superblock's dirty-inode list. @@ -2154,7 +2185,7 @@ EXPORT_SYMBOL(__mark_inode_dirty); */ static void wait_sb_inodes(struct super_block *sb) { - struct inode *inode, *old_inode = NULL; + LIST_HEAD(sync_list); /* * We need to be protected against the filesystem going from @@ -2163,38 +2194,60 @@ static void wait_sb_inodes(struct super_block *sb) WARN_ON(!rwsem_is_locked(&sb->s_umount)); mutex_lock(&sb->s_sync_lock); - spin_lock(&sb->s_inode_list_lock); /* - * Data integrity sync. Must wait for all pages under writeback, - * because there may have been pages dirtied before our sync - * call, but which had writeout started before we write it out. - * In which case, the inode may not be on the dirty list, but - * we still have to wait for that writeout. + * Splice the writeback list onto a temporary list to avoid waiting on + * inodes that have started writeback after this point. + * + * Use rcu_read_lock() to keep the inodes around until we have a + * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as + * the local list because inodes can be dropped from either by writeback + * completion. + */ + rcu_read_lock(); + spin_lock_irq(&sb->s_inode_wblist_lock); + list_splice_init(&sb->s_inodes_wb, &sync_list); + + /* + * Data integrity sync. Must wait for all pages under writeback, because + * there may have been pages dirtied before our sync call, but which had + * writeout started before we write it out. In which case, the inode + * may not be on the dirty list, but we still have to wait for that + * writeout. 
*/ - list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { + while (!list_empty(&sync_list)) { + struct inode *inode = list_first_entry(&sync_list, struct inode, + i_wb_list); struct address_space *mapping = inode->i_mapping; + /* + * Move each inode back to the wb list before we drop the lock + * to preserve consistency between i_wb_list and the mapping + * writeback tag. Writeback completion is responsible to remove + * the inode from either list once the writeback tag is cleared. + */ + list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb); + + /* + * The mapping can appear untagged while still on-list since we + * do not have the mapping lock. Skip it here, wb completion + * will remove it. + */ + if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) + continue; + + spin_unlock_irq(&sb->s_inode_wblist_lock); + spin_lock(&inode->i_lock); - if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || - (mapping->nrpages == 0)) { + if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) { spin_unlock(&inode->i_lock); + + spin_lock_irq(&sb->s_inode_wblist_lock); continue; } __iget(inode); spin_unlock(&inode->i_lock); - spin_unlock(&sb->s_inode_list_lock); - - /* - * We hold a reference to 'inode' so it couldn't have been - * removed from s_inodes list while we dropped the - * s_inode_list_lock. We cannot iput the inode now as we can - * be holding the last reference and we cannot iput it under - * s_inode_list_lock. So we keep the reference and iput it - * later. - */ - iput(old_inode); - old_inode = inode; + rcu_read_unlock(); /* * We keep the error status of individual mapping so that @@ -2205,10 +2258,13 @@ static void wait_sb_inodes(struct super_block *sb) cond_resched(); - spin_lock(&sb->s_inode_list_lock); + iput(inode); + + rcu_read_lock(); + spin_lock_irq(&sb->s_inode_wblist_lock); } - spin_unlock(&sb->s_inode_list_lock); - iput(old_inode); + spin_unlock_irq(&sb->s_inode_wblist_lock); + rcu_read_unlock(); mutex_unlock(&sb->s_sync_lock); } diff --git a/fs/inode.c b/fs/inode.c index 4ccbc21b30ce..e171f7b5f9e4 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -365,6 +365,7 @@ void inode_init_once(struct inode *inode) INIT_HLIST_NODE(&inode->i_hash); INIT_LIST_HEAD(&inode->i_devices); INIT_LIST_HEAD(&inode->i_io_list); + INIT_LIST_HEAD(&inode->i_wb_list); INIT_LIST_HEAD(&inode->i_lru); address_space_init_once(&inode->i_data); i_size_ordered_init(inode); @@ -507,6 +508,7 @@ void clear_inode(struct inode *inode) BUG_ON(!list_empty(&inode->i_data.private_list)); BUG_ON(!(inode->i_state & I_FREEING)); BUG_ON(inode->i_state & I_CLEAR); + BUG_ON(!list_empty(&inode->i_wb_list)); /* don't need i_lock here, no concurrent mods to i_state */ inode->i_state = I_FREEING | I_CLEAR; } diff --git a/fs/super.c b/fs/super.c index d78b9847e6cb..5806ffd45563 100644 --- a/fs/super.c +++ b/fs/super.c @@ -206,6 +206,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) mutex_init(&s->s_sync_lock); INIT_LIST_HEAD(&s->s_inodes); spin_lock_init(&s->s_inode_list_lock); + INIT_LIST_HEAD(&s->s_inodes_wb); + spin_lock_init(&s->s_inode_wblist_lock); if (list_lru_init_memcg(&s->s_dentry_lru)) goto fail; diff --git a/include/linux/fs.h b/include/linux/fs.h index dd288148a6b1..0c9ebf530d9e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -665,6 +665,7 @@ struct inode { #endif struct list_head i_lru; /* inode LRU list */ struct list_head i_sb_list; + struct list_head i_wb_list; /* backing dev writeback list */ union { struct hlist_head i_dentry; struct rcu_head i_rcu; @@ -1448,6 +1449,9 @@ 
struct super_block { /* s_inode_list_lock protects s_inodes */ spinlock_t s_inode_list_lock ____cacheline_aligned_in_smp; struct list_head s_inodes; /* all inodes */ + + spinlock_t s_inode_wblist_lock; + struct list_head s_inodes_wb; /* writeback inodes */ }; extern struct timespec current_fs_time(struct super_block *sb); diff --git a/include/linux/writeback.h b/include/linux/writeback.h index d0b5ca5d4e08..717e6149e753 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -384,4 +384,7 @@ void tag_pages_for_writeback(struct address_space *mapping, void account_page_redirty(struct page *page); +void sb_mark_inode_writeback(struct inode *inode); +void sb_clear_inode_writeback(struct inode *inode); + #endif /* WRITEBACK_H */ diff --git a/mm/page-writeback.c b/mm/page-writeback.c index e2481949494c..8195eb454411 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2747,6 +2747,11 @@ int test_clear_page_writeback(struct page *page) __wb_writeout_inc(wb); } } + + if (mapping->host && !mapping_tagged(mapping, + PAGECACHE_TAG_WRITEBACK)) + sb_clear_inode_writeback(mapping->host); + spin_unlock_irqrestore(&mapping->tree_lock, flags); } else { ret = TestClearPageWriteback(page); @@ -2774,11 +2779,24 @@ int __test_set_page_writeback(struct page *page, bool keep_write) spin_lock_irqsave(&mapping->tree_lock, flags); ret = TestSetPageWriteback(page); if (!ret) { + bool on_wblist; + + on_wblist = mapping_tagged(mapping, + PAGECACHE_TAG_WRITEBACK); + radix_tree_tag_set(&mapping->page_tree, page_index(page), PAGECACHE_TAG_WRITEBACK); if (bdi_cap_account_writeback(bdi)) __inc_wb_stat(inode_to_wb(inode), WB_WRITEBACK); + + /* + * We can come through here when swapping anonymous + * pages, so we don't necessarily have an inode to track + * for sync. + */ + if (mapping->host && !on_wblist) + sb_mark_inode_writeback(mapping->host); } if (!PageDirty(page)) radix_tree_tag_clear(&mapping->page_tree, -- cgit v1.2.3-70-g09d2 From 9a46b04f16a032c26bbf0ece61d6cd1e7ba9f627 Mon Sep 17 00:00:00 2001 From: Brian Foster Date: Tue, 26 Jul 2016 15:21:53 -0700 Subject: fs/fs-writeback.c: inode writeback list tracking tracepoints MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The per-sb inode writeback list tracks inodes currently under writeback to facilitate efficient sync processing. In particular, it ensures that sync only needs to walk through a list of inodes that were cleaned by the sync. Add a couple tracepoints to help identify when inodes are added/removed to and from the writeback lists. Piggyback off of the writeback lazytime tracepoint template as it already tracks the relevant inode information. 
Link: http://lkml.kernel.org/r/1466594593-6757-3-git-send-email-bfoster@redhat.com Signed-off-by: Brian Foster Reviewed-by: Jan Kara Cc: Dave Chinner cc: Josef Bacik Cc: Holger Hoffstätte Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fs-writeback.c | 9 +++++++-- include/trace/events/writeback.h | 22 ++++++++++++++++++---- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 1fcce8345da3..6f9c9f6f5157 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -990,8 +990,10 @@ void sb_mark_inode_writeback(struct inode *inode) if (list_empty(&inode->i_wb_list)) { spin_lock_irqsave(&sb->s_inode_wblist_lock, flags); - if (list_empty(&inode->i_wb_list)) + if (list_empty(&inode->i_wb_list)) { list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb); + trace_sb_mark_inode_writeback(inode); + } spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags); } } @@ -1006,7 +1008,10 @@ void sb_clear_inode_writeback(struct inode *inode) if (!list_empty(&inode->i_wb_list)) { spin_lock_irqsave(&sb->s_inode_wblist_lock, flags); - list_del_init(&inode->i_wb_list); + if (!list_empty(&inode->i_wb_list)) { + list_del_init(&inode->i_wb_list); + trace_sb_clear_inode_writeback(inode); + } spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags); } } diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 73614ce1d204..531f5811ff6b 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -696,7 +696,7 @@ DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode, TP_ARGS(inode, wbc, nr_to_write) ); -DECLARE_EVENT_CLASS(writeback_lazytime_template, +DECLARE_EVENT_CLASS(writeback_inode_template, TP_PROTO(struct inode *inode), TP_ARGS(inode), @@ -723,25 +723,39 @@ DECLARE_EVENT_CLASS(writeback_lazytime_template, show_inode_state(__entry->state), __entry->mode) ); -DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime, +DEFINE_EVENT(writeback_inode_template, writeback_lazytime, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); -DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput, +DEFINE_EVENT(writeback_inode_template, writeback_lazytime_iput, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); -DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue, +DEFINE_EVENT(writeback_inode_template, writeback_dirty_inode_enqueue, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); +/* + * Inode writeback list tracking. + */ + +DEFINE_EVENT(writeback_inode_template, sb_mark_inode_writeback, + TP_PROTO(struct inode *inode), + TP_ARGS(inode) +); + +DEFINE_EVENT(writeback_inode_template, sb_clear_inode_writeback, + TP_PROTO(struct inode *inode), + TP_ARGS(inode) +); + #endif /* _TRACE_WRITEBACK_H */ /* This part must be outside protection */ -- cgit v1.2.3-70-g09d2 From 7c00fce98c3e15334a603925b41aa49f76e83227 Mon Sep 17 00:00:00 2001 From: Thomas Garnier Date: Tue, 26 Jul 2016 15:21:56 -0700 Subject: mm: reorganize SLAB freelist randomization The kernel heap allocators are using a sequential freelist making their allocation predictable. This predictability makes kernel heap overflow easier to exploit. An attacker can careful prepare the kernel heap to control the following chunk overflowed. 
For example these attacks exploit the predictability of the heap: - Linux Kernel CAN SLUB overflow (https://goo.gl/oMNWkU) - Exploiting Linux Kernel Heap corruptions (http://goo.gl/EXLn95) ***Problems that needed solving: - Randomize the Freelist (singled linked) used in the SLUB allocator. - Ensure good performance to encourage usage. - Get best entropy in early boot stage. ***Parts: - 01/02 Reorganize the SLAB Freelist randomization to share elements with the SLUB implementation. - 02/02 The SLUB Freelist randomization implementation. Similar approach than the SLAB but tailored to the singled freelist used in SLUB. ***Performance data: slab_test impact is between 3% to 4% on average for 100000 attempts without smp. It is a very focused testing, kernbench show the overall impact on the system is way lower. Before: Single thread testing ===================== 1. Kmalloc: Repeatedly allocate then free test 100000 times kmalloc(8) -> 49 cycles kfree -> 77 cycles 100000 times kmalloc(16) -> 51 cycles kfree -> 79 cycles 100000 times kmalloc(32) -> 53 cycles kfree -> 83 cycles 100000 times kmalloc(64) -> 62 cycles kfree -> 90 cycles 100000 times kmalloc(128) -> 81 cycles kfree -> 97 cycles 100000 times kmalloc(256) -> 98 cycles kfree -> 121 cycles 100000 times kmalloc(512) -> 95 cycles kfree -> 122 cycles 100000 times kmalloc(1024) -> 96 cycles kfree -> 126 cycles 100000 times kmalloc(2048) -> 115 cycles kfree -> 140 cycles 100000 times kmalloc(4096) -> 149 cycles kfree -> 171 cycles 2. Kmalloc: alloc/free test 100000 times kmalloc(8)/kfree -> 70 cycles 100000 times kmalloc(16)/kfree -> 70 cycles 100000 times kmalloc(32)/kfree -> 70 cycles 100000 times kmalloc(64)/kfree -> 70 cycles 100000 times kmalloc(128)/kfree -> 70 cycles 100000 times kmalloc(256)/kfree -> 69 cycles 100000 times kmalloc(512)/kfree -> 70 cycles 100000 times kmalloc(1024)/kfree -> 73 cycles 100000 times kmalloc(2048)/kfree -> 72 cycles 100000 times kmalloc(4096)/kfree -> 71 cycles After: Single thread testing ===================== 1. Kmalloc: Repeatedly allocate then free test 100000 times kmalloc(8) -> 57 cycles kfree -> 78 cycles 100000 times kmalloc(16) -> 61 cycles kfree -> 81 cycles 100000 times kmalloc(32) -> 76 cycles kfree -> 93 cycles 100000 times kmalloc(64) -> 83 cycles kfree -> 94 cycles 100000 times kmalloc(128) -> 106 cycles kfree -> 107 cycles 100000 times kmalloc(256) -> 118 cycles kfree -> 117 cycles 100000 times kmalloc(512) -> 114 cycles kfree -> 116 cycles 100000 times kmalloc(1024) -> 115 cycles kfree -> 118 cycles 100000 times kmalloc(2048) -> 147 cycles kfree -> 131 cycles 100000 times kmalloc(4096) -> 214 cycles kfree -> 161 cycles 2. 
Kmalloc: alloc/free test 100000 times kmalloc(8)/kfree -> 66 cycles 100000 times kmalloc(16)/kfree -> 66 cycles 100000 times kmalloc(32)/kfree -> 66 cycles 100000 times kmalloc(64)/kfree -> 66 cycles 100000 times kmalloc(128)/kfree -> 65 cycles 100000 times kmalloc(256)/kfree -> 67 cycles 100000 times kmalloc(512)/kfree -> 67 cycles 100000 times kmalloc(1024)/kfree -> 64 cycles 100000 times kmalloc(2048)/kfree -> 67 cycles 100000 times kmalloc(4096)/kfree -> 67 cycles Kernbench, before: Average Optimal load -j 12 Run (std deviation): Elapsed Time 101.873 (1.16069) User Time 1045.22 (1.60447) System Time 88.969 (0.559195) Percent CPU 1112.9 (13.8279) Context Switches 189140 (2282.15) Sleeps 99008.6 (768.091) After: Average Optimal load -j 12 Run (std deviation): Elapsed Time 102.47 (0.562732) User Time 1045.3 (1.34263) System Time 88.311 (0.342554) Percent CPU 1105.8 (6.49444) Context Switches 189081 (2355.78) Sleeps 99231.5 (800.358) This patch (of 2): This commit reorganizes the previous SLAB freelist randomization to prepare for the SLUB implementation. It moves functions that will be shared to slab_common. The entropy functions are changed to align with the SLUB implementation, now using get_random_(int|long) functions. These functions were chosen because they provide a bit more entropy early on boot and better performance when specific arch instructions are not available. [akpm@linux-foundation.org: fix build] Link: http://lkml.kernel.org/r/1464295031-26375-2-git-send-email-thgarnie@google.com Signed-off-by: Thomas Garnier Reviewed-by: Kees Cook Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab_def.h | 2 +- mm/slab.c | 80 ++++++++++++------------------------------------ mm/slab.h | 14 +++++++++ mm/slab_common.c | 47 ++++++++++++++++++++++++++++ 4 files changed, 82 insertions(+), 61 deletions(-) diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 8694f7a5d92b..339ba027ade9 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -81,7 +81,7 @@ struct kmem_cache { #endif #ifdef CONFIG_SLAB_FREELIST_RANDOM - void *random_seq; + unsigned int *random_seq; #endif struct kmem_cache_node *node[MAX_NUMNODES]; diff --git a/mm/slab.c b/mm/slab.c index cc8bbc1e6bc9..763096a247f6 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1236,61 +1236,6 @@ static void __init set_up_node(struct kmem_cache *cachep, int index) } } -#ifdef CONFIG_SLAB_FREELIST_RANDOM -static void freelist_randomize(struct rnd_state *state, freelist_idx_t *list, - size_t count) -{ - size_t i; - unsigned int rand; - - for (i = 0; i < count; i++) - list[i] = i; - - /* Fisher-Yates shuffle */ - for (i = count - 1; i > 0; i--) { - rand = prandom_u32_state(state); - rand %= (i + 1); - swap(list[i], list[rand]); - } -} - -/* Create a random sequence per cache */ -static int cache_random_seq_create(struct kmem_cache *cachep, gfp_t gfp) -{ - unsigned int seed, count = cachep->num; - struct rnd_state state; - - if (count < 2) - return 0; - - /* If it fails, we will just use the global lists */ - cachep->random_seq = kcalloc(count, sizeof(freelist_idx_t), gfp); - if (!cachep->random_seq) - return -ENOMEM; - - /* Get best entropy at this stage */ - get_random_bytes_arch(&seed, sizeof(seed)); - prandom_seed_state(&state, seed); - - freelist_randomize(&state, cachep->random_seq, count); - return 0; -} - -/* Destroy the per-cache random freelist sequence */ -static void cache_random_seq_destroy(struct kmem_cache 
*cachep) -{ - kfree(cachep->random_seq); - cachep->random_seq = NULL; -} -#else -static inline int cache_random_seq_create(struct kmem_cache *cachep, gfp_t gfp) -{ - return 0; -} -static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { } -#endif /* CONFIG_SLAB_FREELIST_RANDOM */ - - /* * Initialisation. Called after the page allocator have been initialised and * before smp_init(). @@ -2535,7 +2480,7 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page) union freelist_init_state { struct { unsigned int pos; - freelist_idx_t *list; + unsigned int *list; unsigned int count; unsigned int rand; }; @@ -2554,7 +2499,7 @@ static bool freelist_state_initialize(union freelist_init_state *state, unsigned int rand; /* Use best entropy available to define a random shift */ - get_random_bytes_arch(&rand, sizeof(rand)); + rand = get_random_int(); /* Use a random state if the pre-computed list is not available */ if (!cachep->random_seq) { @@ -2576,13 +2521,20 @@ static freelist_idx_t next_random_slot(union freelist_init_state *state) return (state->list[state->pos++] + state->rand) % state->count; } +/* Swap two freelist entries */ +static void swap_free_obj(struct page *page, unsigned int a, unsigned int b) +{ + swap(((freelist_idx_t *)page->freelist)[a], + ((freelist_idx_t *)page->freelist)[b]); +} + /* * Shuffle the freelist initialization state based on pre-computed lists. * return true if the list was successfully shuffled, false otherwise. */ static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page) { - unsigned int objfreelist = 0, i, count = cachep->num; + unsigned int objfreelist = 0, i, rand, count = cachep->num; union freelist_init_state state; bool precomputed; @@ -2607,7 +2559,15 @@ static bool shuffle_freelist(struct kmem_cache *cachep, struct page *page) * Later use a pre-computed list for speed. */ if (!precomputed) { - freelist_randomize(&state.rnd_state, page->freelist, count); + for (i = 0; i < count; i++) + set_free_obj(page, i, i); + + /* Fisher-Yates shuffle */ + for (i = count - 1; i > 0; i--) { + rand = prandom_u32_state(&state.rnd_state); + rand %= (i + 1); + swap_free_obj(page, i, rand); + } } else { for (i = 0; i < count; i++) set_free_obj(page, i, next_random_slot(&state)); @@ -3979,7 +3939,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) int shared = 0; int batchcount = 0; - err = cache_random_seq_create(cachep, gfp); + err = cache_random_seq_create(cachep, cachep->num, gfp); if (err) goto end; diff --git a/mm/slab.h b/mm/slab.h index dedb1a920fb8..5fa8b8f20eb1 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -42,6 +42,7 @@ struct kmem_cache { #include #include #include +#include /* * State of the slab allocator. 
@@ -464,4 +465,17 @@ int memcg_slab_show(struct seq_file *m, void *p); void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr); +#ifdef CONFIG_SLAB_FREELIST_RANDOM +int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count, + gfp_t gfp); +void cache_random_seq_destroy(struct kmem_cache *cachep); +#else +static inline int cache_random_seq_create(struct kmem_cache *cachep, + unsigned int count, gfp_t gfp) +{ + return 0; +} +static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { } +#endif /* CONFIG_SLAB_FREELIST_RANDOM */ + #endif /* MM_SLAB_H */ diff --git a/mm/slab_common.c b/mm/slab_common.c index 82317abb03ed..da88c1588752 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1030,6 +1030,53 @@ void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) EXPORT_SYMBOL(kmalloc_order_trace); #endif +#ifdef CONFIG_SLAB_FREELIST_RANDOM +/* Randomize a generic freelist */ +static void freelist_randomize(struct rnd_state *state, unsigned int *list, + size_t count) +{ + size_t i; + unsigned int rand; + + for (i = 0; i < count; i++) + list[i] = i; + + /* Fisher-Yates shuffle */ + for (i = count - 1; i > 0; i--) { + rand = prandom_u32_state(state); + rand %= (i + 1); + swap(list[i], list[rand]); + } +} + +/* Create a random sequence per cache */ +int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count, + gfp_t gfp) +{ + struct rnd_state state; + + if (count < 2 || cachep->random_seq) + return 0; + + cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp); + if (!cachep->random_seq) + return -ENOMEM; + + /* Get best entropy at this stage of boot */ + prandom_seed_state(&state, get_random_long()); + + freelist_randomize(&state, cachep->random_seq, count); + return 0; +} + +/* Destroy the per-cache random freelist sequence */ +void cache_random_seq_destroy(struct kmem_cache *cachep) +{ + kfree(cachep->random_seq); + cachep->random_seq = NULL; +} +#endif /* CONFIG_SLAB_FREELIST_RANDOM */ + #ifdef CONFIG_SLABINFO #ifdef CONFIG_SLAB -- cgit v1.2.3-70-g09d2 From 210e7a43fa905bccafa9bb5966fba1d71f33eb8b Mon Sep 17 00:00:00 2001 From: Thomas Garnier Date: Tue, 26 Jul 2016 15:21:59 -0700 Subject: mm: SLUB freelist randomization Implements freelist randomization for the SLUB allocator. It was previous implemented for the SLAB allocator. Both use the same configuration option (CONFIG_SLAB_FREELIST_RANDOM). The list is randomized during initialization of a new set of pages. The order on different freelist sizes is pre-computed at boot for performance. Each kmem_cache has its own randomized freelist. This security feature reduces the predictability of the kernel SLUB allocator against heap overflows rendering attacks much less stable. For example these attacks exploit the predictability of the heap: - Linux Kernel CAN SLUB overflow (https://goo.gl/oMNWkU) - Exploiting Linux Kernel Heap corruptions (http://goo.gl/EXLn95) Performance results: slab_test impact is between 3% to 4% on average for 100000 attempts without smp. It is a very focused testing, kernbench show the overall impact on the system is way lower. Before: Single thread testing ===================== 1. 
Kmalloc: Repeatedly allocate then free test 100000 times kmalloc(8) -> 49 cycles kfree -> 77 cycles 100000 times kmalloc(16) -> 51 cycles kfree -> 79 cycles 100000 times kmalloc(32) -> 53 cycles kfree -> 83 cycles 100000 times kmalloc(64) -> 62 cycles kfree -> 90 cycles 100000 times kmalloc(128) -> 81 cycles kfree -> 97 cycles 100000 times kmalloc(256) -> 98 cycles kfree -> 121 cycles 100000 times kmalloc(512) -> 95 cycles kfree -> 122 cycles 100000 times kmalloc(1024) -> 96 cycles kfree -> 126 cycles 100000 times kmalloc(2048) -> 115 cycles kfree -> 140 cycles 100000 times kmalloc(4096) -> 149 cycles kfree -> 171 cycles 2. Kmalloc: alloc/free test 100000 times kmalloc(8)/kfree -> 70 cycles 100000 times kmalloc(16)/kfree -> 70 cycles 100000 times kmalloc(32)/kfree -> 70 cycles 100000 times kmalloc(64)/kfree -> 70 cycles 100000 times kmalloc(128)/kfree -> 70 cycles 100000 times kmalloc(256)/kfree -> 69 cycles 100000 times kmalloc(512)/kfree -> 70 cycles 100000 times kmalloc(1024)/kfree -> 73 cycles 100000 times kmalloc(2048)/kfree -> 72 cycles 100000 times kmalloc(4096)/kfree -> 71 cycles After: Single thread testing ===================== 1. Kmalloc: Repeatedly allocate then free test 100000 times kmalloc(8) -> 57 cycles kfree -> 78 cycles 100000 times kmalloc(16) -> 61 cycles kfree -> 81 cycles 100000 times kmalloc(32) -> 76 cycles kfree -> 93 cycles 100000 times kmalloc(64) -> 83 cycles kfree -> 94 cycles 100000 times kmalloc(128) -> 106 cycles kfree -> 107 cycles 100000 times kmalloc(256) -> 118 cycles kfree -> 117 cycles 100000 times kmalloc(512) -> 114 cycles kfree -> 116 cycles 100000 times kmalloc(1024) -> 115 cycles kfree -> 118 cycles 100000 times kmalloc(2048) -> 147 cycles kfree -> 131 cycles 100000 times kmalloc(4096) -> 214 cycles kfree -> 161 cycles 2. 
Kmalloc: alloc/free test 100000 times kmalloc(8)/kfree -> 66 cycles 100000 times kmalloc(16)/kfree -> 66 cycles 100000 times kmalloc(32)/kfree -> 66 cycles 100000 times kmalloc(64)/kfree -> 66 cycles 100000 times kmalloc(128)/kfree -> 65 cycles 100000 times kmalloc(256)/kfree -> 67 cycles 100000 times kmalloc(512)/kfree -> 67 cycles 100000 times kmalloc(1024)/kfree -> 64 cycles 100000 times kmalloc(2048)/kfree -> 67 cycles 100000 times kmalloc(4096)/kfree -> 67 cycles Kernbench, before: Average Optimal load -j 12 Run (std deviation): Elapsed Time 101.873 (1.16069) User Time 1045.22 (1.60447) System Time 88.969 (0.559195) Percent CPU 1112.9 (13.8279) Context Switches 189140 (2282.15) Sleeps 99008.6 (768.091) After: Average Optimal load -j 12 Run (std deviation): Elapsed Time 102.47 (0.562732) User Time 1045.3 (1.34263) System Time 88.311 (0.342554) Percent CPU 1105.8 (6.49444) Context Switches 189081 (2355.78) Sleeps 99231.5 (800.358) Link: http://lkml.kernel.org/r/1464295031-26375-3-git-send-email-thgarnie@google.com Signed-off-by: Thomas Garnier Reviewed-by: Kees Cook Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slub_def.h | 5 ++ init/Kconfig | 4 +- mm/slub.c | 133 ++++++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 133 insertions(+), 9 deletions(-) diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index d1faa019c02a..5624c1f3eb0a 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -99,6 +99,11 @@ struct kmem_cache { */ int remote_node_defrag_ratio; #endif + +#ifdef CONFIG_SLAB_FREELIST_RANDOM + unsigned int *random_seq; +#endif + struct kmem_cache_node *node[MAX_NUMNODES]; }; diff --git a/init/Kconfig b/init/Kconfig index 557bdf10cd44..504057925ee9 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1786,10 +1786,10 @@ endchoice config SLAB_FREELIST_RANDOM default n - depends on SLAB + depends on SLAB || SLUB bool "SLAB freelist randomization" help - Randomizes the freelist order used on creating new SLABs. This + Randomizes the freelist order used on creating new pages. This security feature reduces the predictability of the kernel slab allocator against heap overflows. 
diff --git a/mm/slub.c b/mm/slub.c index 825ff4505336..f5b3114b6a97 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1405,6 +1405,109 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s, return page; } +#ifdef CONFIG_SLAB_FREELIST_RANDOM +/* Pre-initialize the random sequence cache */ +static int init_cache_random_seq(struct kmem_cache *s) +{ + int err; + unsigned long i, count = oo_objects(s->oo); + + err = cache_random_seq_create(s, count, GFP_KERNEL); + if (err) { + pr_err("SLUB: Unable to initialize free list for %s\n", + s->name); + return err; + } + + /* Transform to an offset on the set of pages */ + if (s->random_seq) { + for (i = 0; i < count; i++) + s->random_seq[i] *= s->size; + } + return 0; +} + +/* Initialize each random sequence freelist per cache */ +static void __init init_freelist_randomization(void) +{ + struct kmem_cache *s; + + mutex_lock(&slab_mutex); + + list_for_each_entry(s, &slab_caches, list) + init_cache_random_seq(s); + + mutex_unlock(&slab_mutex); +} + +/* Get the next entry on the pre-computed freelist randomized */ +static void *next_freelist_entry(struct kmem_cache *s, struct page *page, + unsigned long *pos, void *start, + unsigned long page_limit, + unsigned long freelist_count) +{ + unsigned int idx; + + /* + * If the target page allocation failed, the number of objects on the + * page might be smaller than the usual size defined by the cache. + */ + do { + idx = s->random_seq[*pos]; + *pos += 1; + if (*pos >= freelist_count) + *pos = 0; + } while (unlikely(idx >= page_limit)); + + return (char *)start + idx; +} + +/* Shuffle the single linked freelist based on a random pre-computed sequence */ +static bool shuffle_freelist(struct kmem_cache *s, struct page *page) +{ + void *start; + void *cur; + void *next; + unsigned long idx, pos, page_limit, freelist_count; + + if (page->objects < 2 || !s->random_seq) + return false; + + freelist_count = oo_objects(s->oo); + pos = get_random_int() % freelist_count; + + page_limit = page->objects * s->size; + start = fixup_red_left(s, page_address(page)); + + /* First entry is used as the base of the freelist */ + cur = next_freelist_entry(s, page, &pos, start, page_limit, + freelist_count); + page->freelist = cur; + + for (idx = 1; idx < page->objects; idx++) { + setup_object(s, page, cur); + next = next_freelist_entry(s, page, &pos, start, page_limit, + freelist_count); + set_freepointer(s, cur, next); + cur = next; + } + setup_object(s, page, cur); + set_freepointer(s, cur, NULL); + + return true; +} +#else +static inline int init_cache_random_seq(struct kmem_cache *s) +{ + return 0; +} +static inline void init_freelist_randomization(void) { } +static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page) +{ + return false; +} +#endif /* CONFIG_SLAB_FREELIST_RANDOM */ + static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) { struct page *page; @@ -1412,6 +1515,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) gfp_t alloc_gfp; void *start, *p; int idx, order; + bool shuffle; flags &= gfp_allowed_mask; @@ -1473,15 +1577,19 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) kasan_poison_slab(page); - for_each_object_idx(p, idx, s, start, page->objects) { - setup_object(s, page, p); - if (likely(idx < page->objects)) - set_freepointer(s, p, p + s->size); - else - set_freepointer(s, p, NULL); + shuffle = shuffle_freelist(s, page); + + if (!shuffle) { + for_each_object_idx(p, idx, s, start, page->objects) { + 
setup_object(s, page, p); + if (likely(idx < page->objects)) + set_freepointer(s, p, p + s->size); + else + set_freepointer(s, p, NULL); + } + page->freelist = fixup_red_left(s, start); } - page->freelist = fixup_red_left(s, start); page->inuse = page->objects; page->frozen = 1; @@ -3207,6 +3315,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s) void __kmem_cache_release(struct kmem_cache *s) { + cache_random_seq_destroy(s); free_percpu(s->cpu_slab); free_kmem_cache_nodes(s); } @@ -3431,6 +3540,13 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags) #ifdef CONFIG_NUMA s->remote_node_defrag_ratio = 1000; #endif + + /* Initialize the pre-computed randomized freelist if slab is up */ + if (slab_state >= UP) { + if (init_cache_random_seq(s)) + goto error; + } + if (!init_kmem_cache_nodes(s)) goto error; @@ -3947,6 +4063,9 @@ void __init kmem_cache_init(void) setup_kmalloc_cache_index_table(); create_kmalloc_caches(0); + /* Setup random freelists for each cache */ + init_freelist_randomization(); + #ifdef CONFIG_SMP register_cpu_notifier(&slab_notifier); #endif -- cgit v1.2.3-70-g09d2 From bacdcb346093794f292c2c9c67ae350895e8b7ef Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 26 Jul 2016 15:22:02 -0700 Subject: slab: make GFP_SLAB_BUG_MASK information more human readable printk offers %pGg for quite some time so let's use it to get a human readable list of invalid flags. The original output would be [ 429.191962] gfp: 2 after the change [ 429.191962] Unexpected gfp: 0x2 (__GFP_HIGHMEM) Link: http://lkml.kernel.org/r/1465548200-11384-1-git-send-email-mhocko@kernel.org Signed-off-by: Michal Hocko Cc: Sergey Senozhatsky Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 3 ++- mm/slub.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/mm/slab.c b/mm/slab.c index 763096a247f6..03fb724d6e48 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2686,7 +2686,8 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep, * critical path in kmem_cache_alloc(). */ if (unlikely(flags & GFP_SLAB_BUG_MASK)) { - pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK); + gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; + pr_emerg("Unexpected gfp: %#x (%pGg)\n", invalid_mask, &invalid_mask); BUG(); } local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); diff --git a/mm/slub.c b/mm/slub.c index f5b3114b6a97..a2fe4edc3599 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1612,7 +1612,8 @@ out: static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) { if (unlikely(flags & GFP_SLAB_BUG_MASK)) { - pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK); + gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; + pr_emerg("Unexpected gfp: %#x (%pGg)\n", invalid_mask, &invalid_mask); BUG(); } -- cgit v1.2.3-70-g09d2 From 72baeef0c2710a9ac99670e59d4865b24ffd2d18 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 26 Jul 2016 15:22:05 -0700 Subject: slab: do not panic on invalid gfp_mask Both SLAB and SLUB BUG() when a caller provides an invalid gfp_mask. This is a rather harsh way to announce a non-critical issue. Allocator is free to ignore invalid flags. Let's simply replace BUG() by dump_stack to tell the offender and fixup the mask to move on with the allocation request. This is an example for kmalloc(GFP_KERNEL|__GFP_HIGHMEM) from a test module: Unexpected gfp: 0x2 (__GFP_HIGHMEM). Fixing up to gfp: 0x24000c0 (GFP_KERNEL). Fix your code! 
CPU: 0 PID: 2916 Comm: insmod Tainted: G O 4.6.0-slabgfp2-00002-g4cdfc2ef4892-dirty #936 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Debian-1.8.2-1 04/01/2014 Call Trace: dump_stack+0x67/0x90 cache_alloc_refill+0x201/0x617 kmem_cache_alloc_trace+0xa7/0x24a ? 0xffffffffa0005000 mymodule_init+0x20/0x1000 [test_slab] do_one_initcall+0xe7/0x16c ? rcu_read_lock_sched_held+0x61/0x69 ? kmem_cache_alloc_trace+0x197/0x24a do_init_module+0x5f/0x1d9 load_module+0x1a3d/0x1f21 ? retint_kernel+0x2d/0x2d SyS_init_module+0xe8/0x10e ? SyS_init_module+0xe8/0x10e do_syscall_64+0x68/0x13f entry_SYSCALL64_slow_path+0x25/0x25 Link: http://lkml.kernel.org/r/1465548200-11384-2-git-send-email-mhocko@kernel.org Signed-off-by: Michal Hocko Cc: Sergey Senozhatsky Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 6 ++++-- mm/slub.c | 5 +++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/mm/slab.c b/mm/slab.c index 03fb724d6e48..fc9496bdd038 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2687,8 +2687,10 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep, */ if (unlikely(flags & GFP_SLAB_BUG_MASK)) { gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; - pr_emerg("Unexpected gfp: %#x (%pGg)\n", invalid_mask, &invalid_mask); - BUG(); + flags &= ~GFP_SLAB_BUG_MASK; + pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n", + invalid_mask, &invalid_mask, flags, &flags); + dump_stack(); } local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK); diff --git a/mm/slub.c b/mm/slub.c index a2fe4edc3599..c0cfa2722539 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1613,8 +1613,9 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) { if (unlikely(flags & GFP_SLAB_BUG_MASK)) { gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK; - pr_emerg("Unexpected gfp: %#x (%pGg)\n", invalid_mask, &invalid_mask); - BUG(); + flags &= ~GFP_SLAB_BUG_MASK; + pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n", + invalid_mask, &invalid_mask, flags, &flags); } return allocate_slab(s, -- cgit v1.2.3-70-g09d2 From 91c6a05f72a996bee5133e76374ab3ad7d3b9b72 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Tue, 26 Jul 2016 15:22:08 -0700 Subject: mm: faster kmalloc_array(), kcalloc() When both arguments to kmalloc_array() or kcalloc() are known at compile time then their product is known at compile time but search for kmalloc cache happens at runtime not at compile time. 
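The change below routes constant-size requests to kmalloc(), which can resolve its cache at compile time, and keeps __kmalloc() for the runtime case. A user-space sketch of the same dispatch idea follows; the function names are made up, and the real helper also keeps its overflow check before multiplying.

  #include <stdio.h>
  #include <stdlib.h>

  /* stand-in for __kmalloc(): generic path, size only known at runtime */
  static void *alloc_runtime(size_t size)
  {
          printf("runtime path, size=%zu\n", size);
          return malloc(size);
  }

  /* stand-in for kmalloc() with a constant size: resolvable at compile time */
  static void *alloc_constant(size_t size)
  {
          printf("constant-folded path, size=%zu\n", size);
          return malloc(size);
  }

  #define alloc_array(n, size)                                    \
          (__builtin_constant_p(n) && __builtin_constant_p(size)  \
                   ? alloc_constant((n) * (size))                 \
                   : alloc_runtime((n) * (size)))

  int main(void)
  {
          size_t runtime_n = (size_t)rand() % 8 + 1;

          free(alloc_array(16, sizeof(long)));        /* both constant */
          free(alloc_array(runtime_n, sizeof(long))); /* falls back    */
          return 0;
  }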
Link: http://lkml.kernel.org/r/20160627213454.GA2440@p183.telecom.by Signed-off-by: Alexey Dobriyan Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/slab.h b/include/linux/slab.h index aeb3e6d00a66..1a4ea551aae5 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -565,6 +565,8 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) { if (size != 0 && n > SIZE_MAX / size) return NULL; + if (__builtin_constant_p(n) && __builtin_constant_p(size)) + return kmalloc(n * size, flags); return __kmalloc(n * size, flags); } -- cgit v1.2.3-70-g09d2 From de24baecd7628aa19e8b53530bb33f8ffbaf5220 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 26 Jul 2016 15:22:11 -0700 Subject: mm/slab: use list_move instead of list_del/list_add Using list_move() instead of list_del() + list_add() to avoid needlessly poisoning the next and prev values. Link: http://lkml.kernel.org/r/1468929772-9174-1-git-send-email-weiyj_lk@163.com Signed-off-by: Wei Yongjun Acked-by: David Rientjes Acked-by: Christoph Lameter Cc: Pekka Enberg Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/slab.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/mm/slab.c b/mm/slab.c index fc9496bdd038..09771ed3e693 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3452,8 +3452,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, n->free_objects -= cachep->num; page = list_last_entry(&n->slabs_free, struct page, lru); - list_del(&page->lru); - list_add(&page->lru, list); + list_move(&page->lru, list); } } -- cgit v1.2.3-70-g09d2 From 48406ef897f48d1a189d73f8c8cd2ece0dc19e5e Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Tue, 26 Jul 2016 15:22:14 -0700 Subject: mm/memcontrol.c: remove the useless parameter for mc_handle_swap_pte It seems like this parameter has never been used since being introduced by 90254a65833b ("memcg: clean up move charge"). Not a big deal because I assume the function would get inlined into the caller anyway but why not get rid of it. 
[mhocko@suse.com: wrote changelog] Link: http://lkml.kernel.org/r/20160525151831.GJ20132@dhcp22.suse.cz Link: http://lkml.kernel.org/r/1464145026-26693-1-git-send-email-roy.qing.li@gmail.com Signed-off-by: Li RongQing Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 5339c89dff63..3a755212448e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4409,7 +4409,7 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma, #ifdef CONFIG_SWAP static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, - unsigned long addr, pte_t ptent, swp_entry_t *entry) + pte_t ptent, swp_entry_t *entry) { struct page *page = NULL; swp_entry_t ent = pte_to_swp_entry(ptent); @@ -4428,7 +4428,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, } #else static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, - unsigned long addr, pte_t ptent, swp_entry_t *entry) + pte_t ptent, swp_entry_t *entry) { return NULL; } @@ -4593,7 +4593,7 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, if (pte_present(ptent)) page = mc_handle_present_pte(vma, addr, ptent); else if (is_swap_pte(ptent)) - page = mc_handle_swap_pte(vma, addr, ptent, &ent); + page = mc_handle_swap_pte(vma, ptent, &ent); else if (pte_none(ptent)) page = mc_handle_file_pte(vma, addr, ptent, &ent); -- cgit v1.2.3-70-g09d2 From 90cae1fe1c3540f791d5b8e025985fa5e699b2bb Mon Sep 17 00:00:00 2001 From: Oliver O'Halloran Date: Tue, 26 Jul 2016 15:22:17 -0700 Subject: mm/init: fix zone boundary creation As a part of memory initialisation the architecture passes an array to free_area_init_nodes() which specifies the max PFN of each memory zone. This array is not necessarily monotonic (due to unused zones) so this array is parsed to build monotonic lists of the min and max PFN for each zone. ZONE_MOVABLE is special cased here as its limits are managed by the mm subsystem rather than the architecture. Unfortunately, this special casing is broken when ZONE_MOVABLE is the not the last zone in the zone list. The core of the issue is: if (i == ZONE_MOVABLE) continue; arch_zone_lowest_possible_pfn[i] = arch_zone_highest_possible_pfn[i-1]; As ZONE_MOVABLE is skipped the lowest_possible_pfn of the next zone will be set to zero. This patch fixes this bug by adding explicitly tracking where the next zone should start rather than relying on the contents arch_zone_highest_possible_pfn[]. Thie is low priority. To get bitten by this you need to enable a zone that appears after ZONE_MOVABLE in the zone_type enum. As far as I can tell this means running a kernel with ZONE_DEVICE or ZONE_CMA enabled, so I can't see this affecting too many people. I only noticed this because I've been fiddling with ZONE_DEVICE on powerpc and 4.6 broke my test kernel. This bug, in conjunction with the changes in Taku Izumi's kernelcore=mirror patch (d91749c1dda71) and powerpc being the odd architecture which initialises max_zone_pfn[] to ~0ul instead of 0 caused all of system memory to be placed into ZONE_DEVICE at boot, followed a panic since device memory cannot be used for kernel allocations. I've already submitted a patch to fix the powerpc specific bits, but I figured this should be fixed too. 
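A toy user-space model of the fixed loop may make the failure mode clearer. Zone names and PFN values below are invented, with a ZONE_DEVICE-like zone placed after ZONE_MOVABLE the way the report describes.

  #include <stdio.h>

  enum { ZONE_DMA, ZONE_NORMAL, ZONE_MOVABLE, ZONE_DEVICE, MAX_NR_ZONES };

  int main(void)
  {
          unsigned long max_zone_pfn[MAX_NR_ZONES] = { 4096, 1048576, 0, 2097152 };
          unsigned long lo[MAX_NR_ZONES] = { 0 }, hi[MAX_NR_ZONES] = { 0 };
          unsigned long start_pfn = 1;    /* find_min_pfn_with_active_regions() stand-in */
          int i;

          /* fixed logic: carry the next zone start explicitly across the skip */
          for (i = 0; i < MAX_NR_ZONES; i++) {
                  unsigned long end_pfn;

                  if (i == ZONE_MOVABLE)
                          continue;

                  end_pfn = max_zone_pfn[i] > start_pfn ? max_zone_pfn[i] : start_pfn;
                  lo[i] = start_pfn;
                  hi[i] = end_pfn;
                  start_pfn = end_pfn;
          }

          /*
           * With the old code, lo[ZONE_DEVICE] would have been
           * hi[ZONE_MOVABLE] == 0; here it correctly follows ZONE_NORMAL.
           */
          for (i = 0; i < MAX_NR_ZONES; i++)
                  printf("zone %d: [%lu, %lu)\n", i, lo[i], hi[i]);
          return 0;
  }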
Link: http://lkml.kernel.org/r/1462435033-15601-1-git-send-email-oohall@gmail.com Signed-off-by: Oliver O'Halloran Cc: Anton Blanchard Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8b3e1341b754..8129922a1504 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6467,15 +6467,18 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) sizeof(arch_zone_lowest_possible_pfn)); memset(arch_zone_highest_possible_pfn, 0, sizeof(arch_zone_highest_possible_pfn)); - arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); - arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; - for (i = 1; i < MAX_NR_ZONES; i++) { + + start_pfn = find_min_pfn_with_active_regions(); + + for (i = 0; i < MAX_NR_ZONES; i++) { if (i == ZONE_MOVABLE) continue; - arch_zone_lowest_possible_pfn[i] = - arch_zone_highest_possible_pfn[i-1]; - arch_zone_highest_possible_pfn[i] = - max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); + + end_pfn = max(max_zone_pfn[i], start_pfn); + arch_zone_lowest_possible_pfn[i] = start_pfn; + arch_zone_highest_possible_pfn[i] = end_pfn; + + start_pfn = end_pfn; } arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; -- cgit v1.2.3-70-g09d2 From e51e6c8f80731d723ada126c029301cee2827fac Mon Sep 17 00:00:00 2001 From: Reza Arbab Date: Tue, 26 Jul 2016 15:22:20 -0700 Subject: memory-hotplug: add move_pfn_range() Add move_pfn_range(), a wrapper to call move_pfn_range_left() or move_pfn_range_right(). No functional change. This will be utilized by a later patch. Link: http://lkml.kernel.org/r/1462816419-4479-2-git-send-email-arbab@linux.vnet.ibm.com Signed-off-by: Reza Arbab Reviewed-by: Yasuaki Ishimatsu Cc: Greg Kroah-Hartman Cc: Daniel Kiper Cc: Dan Williams Cc: Vlastimil Babka Cc: Tang Chen Cc: Joonsoo Kim Cc: David Vrabel Cc: Vitaly Kuznetsov Cc: David Rientjes Cc: Andrew Banman Cc: Chen Yucong Cc: Yasunori Goto Cc: Zhang Zhen Cc: Shaohua Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory_hotplug.c | 38 ++++++++++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index e3cbdcaff2a5..a86a66cbef77 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -449,6 +449,25 @@ out_fail: return -1; } +static struct zone * __meminit move_pfn_range(int zone_shift, + unsigned long start_pfn, unsigned long end_pfn) +{ + struct zone *zone = page_zone(pfn_to_page(start_pfn)); + int ret = 0; + + if (zone_shift < 0) + ret = move_pfn_range_left(zone + zone_shift, zone, + start_pfn, end_pfn); + else if (zone_shift) + ret = move_pfn_range_right(zone, zone + zone_shift, + start_pfn, end_pfn); + + if (ret) + return NULL; + + return zone + zone_shift; +} + static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn, unsigned long end_pfn) { @@ -1039,6 +1058,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ int nid; int ret; struct memory_notify arg; + int zone_shift = 0; /* * This doesn't need a lock to do pfn_to_page(). 
@@ -1053,18 +1073,16 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ return -EINVAL; if (online_type == MMOP_ONLINE_KERNEL && - zone_idx(zone) == ZONE_MOVABLE) { - if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) - return -EINVAL; - } + zone_idx(zone) == ZONE_MOVABLE) + zone_shift = -1; + if (online_type == MMOP_ONLINE_MOVABLE && - zone_idx(zone) == ZONE_MOVABLE - 1) { - if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) - return -EINVAL; - } + zone_idx(zone) == ZONE_MOVABLE - 1) + zone_shift = 1; - /* Previous code may changed the zone of the pfn range */ - zone = page_zone(pfn_to_page(pfn)); + zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages); + if (!zone) + return -EINVAL; arg.start_pfn = pfn; arg.nr_pages = nr_pages; -- cgit v1.2.3-70-g09d2 From df429ac039360005299d56247647ca77098d660e Mon Sep 17 00:00:00 2001 From: Reza Arbab Date: Tue, 26 Jul 2016 15:22:23 -0700 Subject: memory-hotplug: more general validation of zone during online When memory is onlined, we are only able to rezone from ZONE_MOVABLE to ZONE_KERNEL, or from (ZONE_MOVABLE - 1) to ZONE_MOVABLE. To be more flexible, use the following criteria instead; to online memory from zone X into zone Y, * Any zones between X and Y must be unused. * If X is lower than Y, the onlined memory must lie at the end of X. * If X is higher than Y, the onlined memory must lie at the start of X. Add zone_can_shift() to make this determination. Link: http://lkml.kernel.org/r/1462816419-4479-3-git-send-email-arbab@linux.vnet.ibm.com Signed-off-by: Reza Arbab Reviewd-by: Yasuaki Ishimatsu Cc: Greg Kroah-Hartman Cc: Daniel Kiper Cc: Dan Williams Cc: Vlastimil Babka Cc: Tang Chen Cc: Joonsoo Kim Cc: David Vrabel Cc: Vitaly Kuznetsov Cc: David Rientjes Cc: Andrew Banman Cc: Chen Yucong Cc: Yasunori Goto Cc: Zhang Zhen Cc: Shaohua Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memory_hotplug.h | 2 ++ mm/memory_hotplug.c | 42 +++++++++++++++++++++++++++++++++++------- 2 files changed, 37 insertions(+), 7 deletions(-) diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 5145620ba48a..01033fadea47 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -284,5 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, unsigned long map_offset); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); +extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages, + enum zone_type target); #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a86a66cbef77..82d0b98d27f8 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1047,6 +1047,37 @@ static void node_states_set_node(int node, struct memory_notify *arg) node_set_state(node, N_MEMORY); } +int zone_can_shift(unsigned long pfn, unsigned long nr_pages, + enum zone_type target) +{ + struct zone *zone = page_zone(pfn_to_page(pfn)); + enum zone_type idx = zone_idx(zone); + int i; + + if (idx < target) { + /* pages must be at end of current zone */ + if (pfn + nr_pages != zone_end_pfn(zone)) + return 0; + + /* no zones in use between current zone and target */ + for (i = idx + 1; i < target; i++) + if (zone_is_initialized(zone - idx + i)) + return 0; + } + + if (target < idx) { + /* pages must be at beginning of current zone */ + if (pfn != zone->zone_start_pfn) + return 0; + + /* no zones in use between current zone and target 
*/ + for (i = target + 1; i < idx; i++) + if (zone_is_initialized(zone - idx + i)) + return 0; + } + + return target - idx; +} /* Must be protected by mem_hotplug_begin() */ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type) @@ -1072,13 +1103,10 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ !can_online_high_movable(zone)) return -EINVAL; - if (online_type == MMOP_ONLINE_KERNEL && - zone_idx(zone) == ZONE_MOVABLE) - zone_shift = -1; - - if (online_type == MMOP_ONLINE_MOVABLE && - zone_idx(zone) == ZONE_MOVABLE - 1) - zone_shift = 1; + if (online_type == MMOP_ONLINE_KERNEL) + zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL); + else if (online_type == MMOP_ONLINE_MOVABLE) + zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE); zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages); if (!zone) -- cgit v1.2.3-70-g09d2 From a371d9f1cc49f58c8be3d28c88aceaef86cb59d0 Mon Sep 17 00:00:00 2001 From: Reza Arbab Date: Tue, 26 Jul 2016 15:22:27 -0700 Subject: memory-hotplug: use zone_can_shift() for sysfs valid_zones attribute Since zone_can_shift() is being used to validate the target zone during onlining, it should also be used to determine the content of valid_zones. Link: http://lkml.kernel.org/r/1462816419-4479-4-git-send-email-arbab@linux.vnet.ibm.com Signed-off-by: Reza Arbab Reviewd-by: Yasuaki Ishimatsu Cc: Greg Kroah-Hartman Cc: Daniel Kiper Cc: Dan Williams Cc: Vlastimil Babka Cc: Tang Chen Cc: Joonsoo Kim Cc: David Vrabel Cc: Vitaly Kuznetsov Cc: David Rientjes Cc: Andrew Banman Cc: Chen Yucong Cc: Yasunori Goto Cc: Zhang Zhen Cc: Shaohua Li Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/memory.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/drivers/base/memory.c b/drivers/base/memory.c index f46dba8b7092..dc75de9059cd 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -391,6 +391,7 @@ static ssize_t show_valid_zones(struct device *dev, unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; struct page *first_page; struct zone *zone; + int zone_shift = 0; start_pfn = section_nr_to_pfn(mem->start_section_nr); end_pfn = start_pfn + nr_pages; @@ -402,21 +403,26 @@ static ssize_t show_valid_zones(struct device *dev, zone = page_zone(first_page); - if (zone_idx(zone) == ZONE_MOVABLE - 1) { - /*The mem block is the last memoryblock of this zone.*/ - if (end_pfn == zone_end_pfn(zone)) - return sprintf(buf, "%s %s\n", - zone->name, (zone + 1)->name); + /* MMOP_ONLINE_KEEP */ + sprintf(buf, "%s", zone->name); + + /* MMOP_ONLINE_KERNEL */ + zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL); + if (zone_shift) { + strcat(buf, " "); + strcat(buf, (zone + zone_shift)->name); } - if (zone_idx(zone) == ZONE_MOVABLE) { - /*The mem block is the first memoryblock of ZONE_MOVABLE.*/ - if (start_pfn == zone->zone_start_pfn) - return sprintf(buf, "%s %s\n", - zone->name, (zone - 1)->name); + /* MMOP_ONLINE_MOVABLE */ + zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE); + if (zone_shift) { + strcat(buf, " "); + strcat(buf, (zone + zone_shift)->name); } - return sprintf(buf, "%s\n", zone->name); + strcat(buf, "\n"); + + return strlen(buf); } static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL); #endif -- cgit v1.2.3-70-g09d2 From 798fd756952c4b6cb7dfe6f6437e9f02da79a5bc Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Tue, 26 Jul 2016 15:22:30 -0700 Subject: mm: zap ZONE_OOM_LOCKED Not used 
since oom_lock was instroduced. Link: http://lkml.kernel.org/r/1464358093-22663-1-git-send-email-vdavydov@virtuozzo.com Signed-off-by: Vladimir Davydov Acked-by: Michal Hocko Acked-by: Johannes Weiner Cc: Tetsuo Handa Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 1 - mm/oom_kill.c | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 02069c23486d..3388ccbab7d6 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -524,7 +524,6 @@ struct zone { enum zone_flags { ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ - ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ ZONE_CONGESTED, /* zone has many dirty pages backed by * a congested BDI */ diff --git a/mm/oom_kill.c b/mm/oom_kill.c index ddf74487f848..398c245a484a 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -988,8 +988,8 @@ bool out_of_memory(struct oom_control *oc) /* * The pagefault handler calls here because it is out of memory, so kill a - * memory-hogging task. If any populated zone has ZONE_OOM_LOCKED set, a - * parallel oom killing is already in progress so do nothing. + * memory-hogging task. If oom_lock is held by somebody else, a parallel oom + * killing is already in progress so do nothing. */ void pagefault_out_of_memory(void) { -- cgit v1.2.3-70-g09d2 From 2a966b77ae3ede207e787e7538b87d1011c4364e Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Tue, 26 Jul 2016 15:22:33 -0700 Subject: mm: oom: add memcg to oom_control It's a part of oom context just like allocation order and nodemask, so let's move it to oom_control instead of passing it in the argument list. Link: http://lkml.kernel.org/r/40e03fd7aaf1f55c75d787128d6d17c5a71226c2.1464358556.git.vdavydov@virtuozzo.com Signed-off-by: Vladimir Davydov Acked-by: Michal Hocko Acked-by: Johannes Weiner Cc: Tetsuo Handa Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/tty/sysrq.c | 1 + include/linux/oom.h | 8 +++++--- mm/memcontrol.c | 5 +++-- mm/oom_kill.c | 32 +++++++++++++++----------------- mm/page_alloc.c | 1 + 5 files changed, 25 insertions(+), 22 deletions(-) diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index e5139402e7f8..52bbd27e93ae 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -363,6 +363,7 @@ static void moom_callback(struct work_struct *ignored) struct oom_control oc = { .zonelist = node_zonelist(first_memory_node, gfp_mask), .nodemask = NULL, + .memcg = NULL, .gfp_mask = gfp_mask, .order = -1, }; diff --git a/include/linux/oom.h b/include/linux/oom.h index 83469522690a..cbc24a5fe28d 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -23,6 +23,9 @@ struct oom_control { /* Used to determine mempolicy */ nodemask_t *nodemask; + /* Memory cgroup in which oom is invoked, or NULL for global oom */ + struct mem_cgroup *memcg; + /* Used to determine cpuset and node locality requirement */ const gfp_t gfp_mask; @@ -83,11 +86,10 @@ extern unsigned long oom_badness(struct task_struct *p, extern void oom_kill_process(struct oom_control *oc, struct task_struct *p, unsigned int points, unsigned long totalpages, - struct mem_cgroup *memcg, const char *message); + const char *message); extern void check_panic_on_oom(struct oom_control *oc, - enum oom_constraint constraint, - struct mem_cgroup *memcg); + enum oom_constraint constraint); extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, struct task_struct *task, unsigned long totalpages); diff --git 
a/mm/memcontrol.c b/mm/memcontrol.c index 3a755212448e..caea25a21c70 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1259,6 +1259,7 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, struct oom_control oc = { .zonelist = NULL, .nodemask = NULL, + .memcg = memcg, .gfp_mask = gfp_mask, .order = order, }; @@ -1281,7 +1282,7 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, goto unlock; } - check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg); + check_panic_on_oom(&oc, CONSTRAINT_MEMCG); totalpages = mem_cgroup_get_limit(memcg) ? : 1; for_each_mem_cgroup_tree(iter, memcg) { struct css_task_iter it; @@ -1329,7 +1330,7 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, if (chosen) { points = chosen_points * 1000 / totalpages; - oom_kill_process(&oc, chosen, points, totalpages, memcg, + oom_kill_process(&oc, chosen, points, totalpages, "Memory cgroup out of memory"); } unlock: diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 398c245a484a..a376f1ebdad5 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -383,8 +383,7 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) rcu_read_unlock(); } -static void dump_header(struct oom_control *oc, struct task_struct *p, - struct mem_cgroup *memcg) +static void dump_header(struct oom_control *oc, struct task_struct *p) { pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n", current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order, @@ -392,12 +391,12 @@ static void dump_header(struct oom_control *oc, struct task_struct *p, cpuset_print_current_mems_allowed(); dump_stack(); - if (memcg) - mem_cgroup_print_oom_info(memcg, p); + if (oc->memcg) + mem_cgroup_print_oom_info(oc->memcg, p); else show_mem(SHOW_MEM_FILTER_NODES); if (sysctl_oom_dump_tasks) - dump_tasks(memcg, oc->nodemask); + dump_tasks(oc->memcg, oc->nodemask); } /* @@ -739,7 +738,7 @@ void oom_killer_enable(void) */ void oom_kill_process(struct oom_control *oc, struct task_struct *p, unsigned int points, unsigned long totalpages, - struct mem_cgroup *memcg, const char *message) + const char *message) { struct task_struct *victim = p; struct task_struct *child; @@ -765,7 +764,7 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p, task_unlock(p); if (__ratelimit(&oom_rs)) - dump_header(oc, p, memcg); + dump_header(oc, p); pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n", message, task_pid_nr(p), p->comm, points); @@ -786,8 +785,8 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p, /* * oom_badness() returns 0 if the thread is unkillable */ - child_points = oom_badness(child, memcg, oc->nodemask, - totalpages); + child_points = oom_badness(child, + oc->memcg, oc->nodemask, totalpages); if (child_points > victim_points) { put_task_struct(victim); victim = child; @@ -865,8 +864,7 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p, /* * Determines whether the kernel must panic because of the panic_on_oom sysctl. 
*/ -void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint, - struct mem_cgroup *memcg) +void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint) { if (likely(!sysctl_panic_on_oom)) return; @@ -882,7 +880,7 @@ void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint, /* Do not panic for oom kills triggered by sysrq */ if (is_sysrq_oom(oc)) return; - dump_header(oc, NULL, memcg); + dump_header(oc, NULL); panic("Out of memory: %s panic_on_oom is enabled\n", sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); } @@ -957,13 +955,13 @@ bool out_of_memory(struct oom_control *oc) constraint = constrained_alloc(oc, &totalpages); if (constraint != CONSTRAINT_MEMORY_POLICY) oc->nodemask = NULL; - check_panic_on_oom(oc, constraint, NULL); + check_panic_on_oom(oc, constraint); if (sysctl_oom_kill_allocating_task && current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) && current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) { get_task_struct(current); - oom_kill_process(oc, current, 0, totalpages, NULL, + oom_kill_process(oc, current, 0, totalpages, "Out of memory (oom_kill_allocating_task)"); return true; } @@ -971,12 +969,11 @@ bool out_of_memory(struct oom_control *oc) p = select_bad_process(oc, &points, totalpages); /* Found nothing?!?! Either we hang forever, or we panic. */ if (!p && !is_sysrq_oom(oc)) { - dump_header(oc, NULL, NULL); + dump_header(oc, NULL); panic("Out of memory and no killable processes...\n"); } if (p && p != (void *)-1UL) { - oom_kill_process(oc, p, points, totalpages, NULL, - "Out of memory"); + oom_kill_process(oc, p, points, totalpages, "Out of memory"); /* * Give the killed process a good chance to exit before trying * to allocate memory again. @@ -996,6 +993,7 @@ void pagefault_out_of_memory(void) struct oom_control oc = { .zonelist = NULL, .nodemask = NULL, + .memcg = NULL, .gfp_mask = 0, .order = 0, }; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8129922a1504..f7bb1aef54f2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3105,6 +3105,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, struct oom_control oc = { .zonelist = ac->zonelist, .nodemask = ac->nodemask, + .memcg = NULL, .gfp_mask = gfp_mask, .order = order, }; -- cgit v1.2.3-70-g09d2 From a54f9aebaa9f0ea2ce6b01f12b65062fb2e74e6c Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 26 Jul 2016 15:22:36 -0700 Subject: include/linux/mmdebug.h: add VM_WARN which maps to WARN() This enables us to do VM_WARN(condition, "warn message"); Link: http://lkml.kernel.org/r/1464692688-6612-1-git-send-email-aneesh.kumar@linux.vnet.ibm.com Signed-off-by: Aneesh Kumar K.V Cc: Michael Ellerman Cc: Benjamin Herrenschmidt Reviewed-by: Anshuman Khandual Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmdebug.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index de7be78c6f0e..451a811f48f2 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -39,6 +39,7 @@ void dump_mm(const struct mm_struct *mm); #define VM_WARN_ON(cond) WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) #define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) +#define VM_WARN(cond, format...) 
WARN(cond, format) #else #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) @@ -47,6 +48,7 @@ void dump_mm(const struct mm_struct *mm); #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond) #endif #ifdef CONFIG_DEBUG_VIRTUAL -- cgit v1.2.3-70-g09d2 From 9af3f56ba19ef377170ab3614b9388cc7b7f3d74 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 26 Jul 2016 15:22:39 -0700 Subject: powerpc/mm: check for irq disabled() only if DEBUG_VM is enabled We don't need to check this always. The idea here is to capture the wrong usage of find_linux_pte_or_hugepte and we can do that by occasionally running with DEBUG_VM enabled. Link: http://lkml.kernel.org/r/1464692688-6612-2-git-send-email-aneesh.kumar@linux.vnet.ibm.com Signed-off-by: Aneesh Kumar K.V Cc: Michael Ellerman Cc: Benjamin Herrenschmidt Reviewed-by: Anshuman Khandual Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/pgtable.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index ee09e99097f0..9bd87f269d6d 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -71,10 +71,8 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, bool *is_thp, unsigned *shift) { - if (!arch_irqs_disabled()) { - pr_info("%s called with irq enabled\n", __func__); - dump_stack(); - } + VM_WARN(!arch_irqs_disabled(), + "%s called with irq enabled\n", __func__); return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift); } -- cgit v1.2.3-70-g09d2 From 2aea8493d326bdf15446768333e1d2c91b040b5c Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 26 Jul 2016 15:22:42 -0700 Subject: zram: rename zstrm find-release functions This has started as a 'add zlib support' work, but after some thinking I saw no blockers for a bigger change -- a switch to crypto API. We don't have an idle zstreams list anymore and our write path now works absolutely differently, preventing preemption during compression. This removes possibilities of read paths preempting writes at wrong places and opens the door for a move from custom LZO/LZ4 compression backends implementation to a more generic one, using crypto compress API. This patch set also eliminates the need of a new context-less crypto API interface, which was quite hard to sell, so we can move along faster. benchmarks: (x86_64, 4GB, zram-perf script) perf reported run-time fio (max jobs=3). I performed fio test with the increasing number of parallel jobs (max to 3) on a 3G zram device, using `static' data and the following crypto comp algorithms: 842, deflate, lz4, lz4hc, lzo the output was: - test running time (which can tell us what algorithms performs faster) and - zram mm_stat (which tells the compressed memory size, max used memory, etc). It's just for information. for example, LZ4HC has twice the running time of LZO, but the compressed memory size is: 23592960 vs 34603008 bytes. 
test-fio-zram-842 197.907655282 seconds time elapsed 201.623142884 seconds time elapsed 226.854291345 seconds time elapsed test-fio-zram-DEFLATE 253.259516155 seconds time elapsed 258.148563401 seconds time elapsed 290.251909365 seconds time elapsed test-fio-zram-LZ4 27.022598717 seconds time elapsed 29.580522717 seconds time elapsed 33.293463430 seconds time elapsed test-fio-zram-LZ4HC 56.393954615 seconds time elapsed 74.904659747 seconds time elapsed 101.940998564 seconds time elapsed test-fio-zram-LZO 28.155948075 seconds time elapsed 30.390036330 seconds time elapsed 34.455773159 seconds time elapsed zram mm_stat-s (max fio jobs=3) test-fio-zram-842 mm_stat (jobs1): 3221225472 673185792 690266112 0 690266112 0 0 mm_stat (jobs2): 3221225472 673185792 690266112 0 690266112 0 0 mm_stat (jobs3): 3221225472 673185792 690266112 0 690266112 0 0 test-fio-zram-DEFLATE mm_stat (jobs1): 3221225472 24379392 37761024 0 37761024 0 0 mm_stat (jobs2): 3221225472 24379392 37761024 0 37761024 0 0 mm_stat (jobs3): 3221225472 24379392 37761024 0 37761024 0 0 test-fio-zram-LZ4 mm_stat (jobs1): 3221225472 23592960 37761024 0 37761024 0 0 mm_stat (jobs2): 3221225472 23592960 37761024 0 37761024 0 0 mm_stat (jobs3): 3221225472 23592960 37761024 0 37761024 0 0 test-fio-zram-LZ4HC mm_stat (jobs1): 3221225472 23592960 37761024 0 37761024 0 0 mm_stat (jobs2): 3221225472 23592960 37761024 0 37761024 0 0 mm_stat (jobs3): 3221225472 23592960 37761024 0 37761024 0 0 test-fio-zram-LZO mm_stat (jobs1): 3221225472 34603008 50335744 0 50335744 0 0 mm_stat (jobs2): 3221225472 34603008 50335744 0 50335744 0 0 mm_stat (jobs3): 3221225472 34603008 50335744 0 50339840 0 0 This patch (of 8): We don't perform any zstream idle list lookup anymore, so zcomp_strm_find()/zcomp_strm_release() names are not representative. Rename to zcomp_stream_get()/zcomp_stream_put(). 
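The new names follow the usual get/put bracket: zcomp_stream_get() pins the caller to its per-CPU stream (get_cpu_ptr() disables preemption) and zcomp_stream_put() releases it, as the zcomp.c hunk below shows. A rough user-space analogue of that bracket, with a busy flag standing in for "preemption disabled" and all names invented:

  #include <assert.h>
  #include <stdio.h>

  struct stream {
          int busy;               /* stands in for "preemption disabled" */
          char workbuf[256];      /* scratch space a compressor would use */
  };

  static struct stream per_cpu_stream;    /* the real code has one per CPU */

  static struct stream *stream_get(void)
  {
          struct stream *s = &per_cpu_stream;

          assert(!s->busy);       /* nothing else may run on "our CPU" now */
          s->busy = 1;
          return s;
  }

  static void stream_put(struct stream *s)
  {
          s->busy = 0;
  }

  int main(void)
  {
          struct stream *s = stream_get();

          snprintf(s->workbuf, sizeof(s->workbuf), "compress here");
          puts(s->workbuf);
          stream_put(s);
          return 0;
  }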
Link: http://lkml.kernel.org/r/20160531122017.2878-2-sergey.senozhatsky@gmail.com Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/zram/zcomp.c | 4 ++-- drivers/block/zram/zcomp.h | 4 ++-- drivers/block/zram/zram_drv.c | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index b51a816d766b..400f8267337e 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -95,12 +95,12 @@ bool zcomp_available_algorithm(const char *comp) return find_backend(comp) != NULL; } -struct zcomp_strm *zcomp_strm_find(struct zcomp *comp) +struct zcomp_strm *zcomp_stream_get(struct zcomp *comp) { return *get_cpu_ptr(comp->stream); } -void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm) +void zcomp_stream_put(struct zcomp *comp) { put_cpu_ptr(comp->stream); } diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h index ffd88cb747fe..944b8e60dd82 100644 --- a/drivers/block/zram/zcomp.h +++ b/drivers/block/zram/zcomp.h @@ -48,8 +48,8 @@ bool zcomp_available_algorithm(const char *comp); struct zcomp *zcomp_create(const char *comp); void zcomp_destroy(struct zcomp *comp); -struct zcomp_strm *zcomp_strm_find(struct zcomp *comp); -void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm); +struct zcomp_strm *zcomp_stream_get(struct zcomp *comp); +void zcomp_stream_put(struct zcomp *comp); int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, const unsigned char *src, size_t *dst_len); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 8fcad8b761f1..9361a5db7de8 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -695,7 +695,7 @@ compress_again: goto out; } - zstrm = zcomp_strm_find(zram->comp); + zstrm = zcomp_stream_get(zram->comp); ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen); if (!is_partial_io(bvec)) { kunmap_atomic(user_mem); @@ -734,7 +734,7 @@ compress_again: __GFP_NOWARN | __GFP_HIGHMEM); if (!handle) { - zcomp_strm_release(zram->comp, zstrm); + zcomp_stream_put(zram->comp); zstrm = NULL; atomic64_inc(&zram->stats.writestall); @@ -769,7 +769,7 @@ compress_again: memcpy(cmem, src, clen); } - zcomp_strm_release(zram->comp, zstrm); + zcomp_stream_put(zram->comp); zstrm = NULL; zs_unmap_object(meta->mem_pool, handle); @@ -789,7 +789,7 @@ compress_again: atomic64_inc(&zram->stats.pages_stored); out: if (zstrm) - zcomp_strm_release(zram->comp, zstrm); + zcomp_stream_put(zram->comp); if (is_partial_io(bvec)) kfree(uncmem); return ret; -- cgit v1.2.3-70-g09d2 From ebaf9ab56d9d5f350969bd1ea8f47234623c9684 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 26 Jul 2016 15:22:45 -0700 Subject: zram: switch to crypto compress API We don't have an idle zstreams list anymore and our write path now works absolutely differently, preventing preemption during compression. This removes possibilities of read paths preempting writes at wrong places (which could badly affect the performance of both paths) and at the same time opens the door for a move from custom LZO/LZ4 compression backends implementation to a more generic one, using crypto compress API. Joonsoo Kim [1] attempted to do this a while ago, but faced with the need of introducing a new crypto API interface. 
The root cause was the fact that crypto API compression algorithms require a compression stream structure (in zram terminology) for both compression and decompression ops, while in reality only several of compression algorithms really need it. This resulted in a concept of context-less crypto API compression backends [2]. Both write and read paths, though, would have been executed with the preemption enabled, which in the worst case could have resulted in a decreased worst-case performance, e.g. consider the following case: CPU0 zram_write() spin_lock() take the last idle stream spin_unlock() << preempted >> zram_read() spin_lock() no idle streams spin_unlock() schedule() resuming zram_write compression() but it took me some time to realize that, and it took even longer to evolve zram and to make it ready for crypto API. The key turned out to be -- drop the idle streams list entirely. Without the idle streams list we are free to use compression algorithms that require compression stream for decompression (read), because streams are now placed in per-cpu data and each write path has to disable preemption for compression op, almost completely eliminating the aforementioned case (technically, we still have a small chance, because write path has a fast and a slow paths and the slow path is executed with the preemption enabled; but the frequency of failed fast path is too low). TEST ==== - 4 CPUs, x86_64 system - 3G zram, lzo - fio tests: read, randread, write, randwrite, rw, randrw test script [3] command: ZRAM_SIZE=3G LOG_SUFFIX=XXXX FIO_LOOPS=5 ./zram-fio-test.sh BASE PATCHED jobs1 READ: 2527.2MB/s 2482.7MB/s READ: 2102.7MB/s 2045.0MB/s WRITE: 1284.3MB/s 1324.3MB/s WRITE: 1080.7MB/s 1101.9MB/s READ: 430125KB/s 437498KB/s WRITE: 430538KB/s 437919KB/s READ: 399593KB/s 403987KB/s WRITE: 399910KB/s 404308KB/s jobs2 READ: 8133.5MB/s 7854.8MB/s READ: 7086.6MB/s 6912.8MB/s WRITE: 3177.2MB/s 3298.3MB/s WRITE: 2810.2MB/s 2871.4MB/s READ: 1017.6MB/s 1023.4MB/s WRITE: 1018.2MB/s 1023.1MB/s READ: 977836KB/s 984205KB/s WRITE: 979435KB/s 985814KB/s jobs3 READ: 13557MB/s 13391MB/s READ: 11876MB/s 11752MB/s WRITE: 4641.5MB/s 4682.1MB/s WRITE: 4164.9MB/s 4179.3MB/s READ: 1453.8MB/s 1455.1MB/s WRITE: 1455.1MB/s 1458.2MB/s READ: 1387.7MB/s 1395.7MB/s WRITE: 1386.1MB/s 1394.9MB/s jobs4 READ: 20271MB/s 20078MB/s READ: 18033MB/s 17928MB/s WRITE: 6176.8MB/s 6180.5MB/s WRITE: 5686.3MB/s 5705.3MB/s READ: 2009.4MB/s 2006.7MB/s WRITE: 2007.5MB/s 2004.9MB/s READ: 1929.7MB/s 1935.6MB/s WRITE: 1926.8MB/s 1932.6MB/s jobs5 READ: 18823MB/s 19024MB/s READ: 18968MB/s 19071MB/s WRITE: 6191.6MB/s 6372.1MB/s WRITE: 5818.7MB/s 5787.1MB/s READ: 2011.7MB/s 1981.3MB/s WRITE: 2011.4MB/s 1980.1MB/s READ: 1949.3MB/s 1935.7MB/s WRITE: 1940.4MB/s 1926.1MB/s jobs6 READ: 21870MB/s 21715MB/s READ: 19957MB/s 19879MB/s WRITE: 6528.4MB/s 6537.6MB/s WRITE: 6098.9MB/s 6073.6MB/s READ: 2048.6MB/s 2049.9MB/s WRITE: 2041.7MB/s 2042.9MB/s READ: 2013.4MB/s 1990.4MB/s WRITE: 2009.4MB/s 1986.5MB/s jobs7 READ: 21359MB/s 21124MB/s READ: 19746MB/s 19293MB/s WRITE: 6660.4MB/s 6518.8MB/s WRITE: 6211.6MB/s 6193.1MB/s READ: 2089.7MB/s 2080.6MB/s WRITE: 2085.8MB/s 2076.5MB/s READ: 2041.2MB/s 2052.5MB/s WRITE: 2037.5MB/s 2048.8MB/s jobs8 READ: 20477MB/s 19974MB/s READ: 18922MB/s 18576MB/s WRITE: 6851.9MB/s 6788.3MB/s WRITE: 6407.7MB/s 6347.5MB/s READ: 2134.8MB/s 2136.1MB/s WRITE: 2132.8MB/s 2134.4MB/s READ: 2074.2MB/s 2069.6MB/s WRITE: 2087.3MB/s 2082.4MB/s jobs9 READ: 19797MB/s 19994MB/s READ: 18806MB/s 18581MB/s WRITE: 6878.7MB/s 6822.7MB/s WRITE: 6456.8MB/s 
6447.2MB/s READ: 2141.1MB/s 2154.7MB/s WRITE: 2144.4MB/s 2157.3MB/s READ: 2084.1MB/s 2085.1MB/s WRITE: 2091.5MB/s 2092.5MB/s jobs10 READ: 19794MB/s 19784MB/s READ: 18794MB/s 18745MB/s WRITE: 6984.4MB/s 6676.3MB/s WRITE: 6532.3MB/s 6342.7MB/s READ: 2150.6MB/s 2155.4MB/s WRITE: 2156.8MB/s 2161.5MB/s READ: 2106.4MB/s 2095.6MB/s WRITE: 2109.7MB/s 2098.4MB/s BASE PATCHED jobs1 perfstat stalled-cycles-frontend 102,480,595,419 ( 41.53%) 114,508,864,804 ( 46.92%) stalled-cycles-backend 51,941,417,832 ( 21.05%) 46,836,112,388 ( 19.19%) instructions 283,612,054,215 ( 1.15) 283,918,134,959 ( 1.16) branches 56,372,560,385 ( 724.923) 56,449,814,753 ( 733.766) branch-misses 374,826,000 ( 0.66%) 326,935,859 ( 0.58%) jobs2 perfstat stalled-cycles-frontend 155,142,745,777 ( 40.99%) 164,170,979,198 ( 43.82%) stalled-cycles-backend 70,813,866,387 ( 18.71%) 66,456,858,165 ( 17.74%) instructions 463,436,648,173 ( 1.22) 464,221,890,191 ( 1.24) branches 91,088,733,902 ( 760.088) 91,278,144,546 ( 769.133) branch-misses 504,460,363 ( 0.55%) 394,033,842 ( 0.43%) jobs3 perfstat stalled-cycles-frontend 201,300,397,212 ( 39.84%) 223,969,902,257 ( 44.44%) stalled-cycles-backend 87,712,593,974 ( 17.36%) 81,618,888,712 ( 16.19%) instructions 642,869,545,023 ( 1.27) 644,677,354,132 ( 1.28) branches 125,724,560,594 ( 690.682) 126,133,159,521 ( 694.542) branch-misses 527,941,798 ( 0.42%) 444,782,220 ( 0.35%) jobs4 perfstat stalled-cycles-frontend 246,701,197,429 ( 38.12%) 280,076,030,886 ( 43.29%) stalled-cycles-backend 119,050,341,112 ( 18.40%) 110,955,641,671 ( 17.15%) instructions 822,716,962,127 ( 1.27) 825,536,969,320 ( 1.28) branches 160,590,028,545 ( 688.614) 161,152,996,915 ( 691.068) branch-misses 650,295,287 ( 0.40%) 550,229,113 ( 0.34%) jobs5 perfstat stalled-cycles-frontend 298,958,462,516 ( 38.30%) 344,852,200,358 ( 44.16%) stalled-cycles-backend 137,558,742,122 ( 17.62%) 129,465,067,102 ( 16.58%) instructions 1,005,714,688,752 ( 1.29) 1,007,657,999,432 ( 1.29) branches 195,988,773,962 ( 697.730) 196,446,873,984 ( 700.319) branch-misses 695,818,940 ( 0.36%) 624,823,263 ( 0.32%) jobs6 perfstat stalled-cycles-frontend 334,497,602,856 ( 36.71%) 387,590,419,779 ( 42.38%) stalled-cycles-backend 163,539,365,335 ( 17.95%) 152,640,193,639 ( 16.69%) instructions 1,184,738,177,851 ( 1.30) 1,187,396,281,677 ( 1.30) branches 230,592,915,640 ( 702.902) 231,253,802,882 ( 702.356) branch-misses 747,934,786 ( 0.32%) 643,902,424 ( 0.28%) jobs7 perfstat stalled-cycles-frontend 396,724,684,187 ( 37.71%) 460,705,858,952 ( 43.84%) stalled-cycles-backend 188,096,616,496 ( 17.88%) 175,785,787,036 ( 16.73%) instructions 1,364,041,136,608 ( 1.30) 1,366,689,075,112 ( 1.30) branches 265,253,096,936 ( 700.078) 265,890,524,883 ( 702.839) branch-misses 784,991,589 ( 0.30%) 729,196,689 ( 0.27%) jobs8 perfstat stalled-cycles-frontend 440,248,299,870 ( 36.92%) 509,554,793,816 ( 42.46%) stalled-cycles-backend 222,575,930,616 ( 18.67%) 213,401,248,432 ( 17.78%) instructions 1,542,262,045,114 ( 1.29) 1,545,233,932,257 ( 1.29) branches 299,775,178,439 ( 697.666) 300,528,458,505 ( 694.769) branch-misses 847,496,084 ( 0.28%) 748,794,308 ( 0.25%) jobs9 perfstat stalled-cycles-frontend 506,269,882,480 ( 37.86%) 592,798,032,820 ( 44.43%) stalled-cycles-backend 253,192,498,861 ( 18.93%) 233,727,666,185 ( 17.52%) instructions 1,721,985,080,913 ( 1.29) 1,724,666,236,005 ( 1.29) branches 334,517,360,255 ( 694.134) 335,199,758,164 ( 697.131) branch-misses 873,496,730 ( 0.26%) 815,379,236 ( 0.24%) jobs10 perfstat stalled-cycles-frontend 549,063,363,749 ( 
37.18%) 651,302,376,662 ( 43.61%) stalled-cycles-backend 281,680,986,810 ( 19.07%) 277,005,235,582 ( 18.55%) instructions 1,901,859,271,180 ( 1.29) 1,906,311,064,230 ( 1.28) branches 369,398,536,153 ( 694.004) 370,527,696,358 ( 688.409) branch-misses 967,929,335 ( 0.26%) 890,125,056 ( 0.24%) BASE PATCHED seconds elapsed 79.421641008 78.735285546 seconds elapsed 61.471246133 60.869085949 seconds elapsed 62.317058173 62.224188495 seconds elapsed 60.030739363 60.081102518 seconds elapsed 74.070398362 74.317582865 seconds elapsed 84.985953007 85.414364176 seconds elapsed 97.724553255 98.173311344 seconds elapsed 109.488066758 110.268399318 seconds elapsed 122.768189405 122.967164498 seconds elapsed 135.130035105 136.934770801 On my other system (8 x86_64 CPUs, short version of test results): BASE PATCHED seconds elapsed 19.518065994 19.806320662 seconds elapsed 15.172772749 15.594718291 seconds elapsed 13.820925970 13.821708564 seconds elapsed 13.293097816 14.585206405 seconds elapsed 16.207284118 16.064431606 seconds elapsed 17.958376158 17.771825767 seconds elapsed 19.478009164 19.602961508 seconds elapsed 21.347152811 21.352318709 seconds elapsed 24.478121126 24.171088735 seconds elapsed 26.865057442 26.767327618 So performance-wise the numbers are quite similar. Also update zcomp interface to be more aligned with the crypto API. [1] http://marc.info/?l=linux-kernel&m=144480832108927&w=2 [2] http://marc.info/?l=linux-kernel&m=145379613507518&w=2 [3] https://github.com/sergey-senozhatsky/zram-perf-test Link: http://lkml.kernel.org/r/20160531122017.2878-3-sergey.senozhatsky@gmail.com Signed-off-by: Sergey Senozhatsky Suggested-by: Minchan Kim Suggested-by: Joonsoo Kim Acked-by: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/zram/Kconfig | 10 +++--- drivers/block/zram/zcomp.c | 76 ++++++++++++++++++++++++++----------------- drivers/block/zram/zcomp.h | 17 ++++------ drivers/block/zram/zram_drv.c | 18 ++++++---- 4 files changed, 69 insertions(+), 52 deletions(-) diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig index 386ba3d1a6ee..2252cd7d0e89 100644 --- a/drivers/block/zram/Kconfig +++ b/drivers/block/zram/Kconfig @@ -1,8 +1,7 @@ config ZRAM tristate "Compressed RAM block device support" - depends on BLOCK && SYSFS && ZSMALLOC - select LZO_COMPRESS - select LZO_DECOMPRESS + depends on BLOCK && SYSFS && ZSMALLOC && CRYPTO + select CRYPTO_LZO default n help Creates virtual block devices called /dev/zramX (X = 0, 1, ...). @@ -18,9 +17,8 @@ config ZRAM config ZRAM_LZ4_COMPRESS bool "Enable LZ4 algorithm support" depends on ZRAM - select LZ4_COMPRESS - select LZ4_DECOMPRESS + select CRYPTO_LZ4 default n help This option enables LZ4 compression algorithm support. Compression - algorithm can be changed using `comp_algorithm' device attribute. \ No newline at end of file + algorithm can be changed using `comp_algorithm' device attribute. 
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index 400f8267337e..f35726860a1b 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -14,42 +14,39 @@ #include #include #include +#include #include "zcomp.h" -#include "zcomp_lzo.h" -#ifdef CONFIG_ZRAM_LZ4_COMPRESS -#include "zcomp_lz4.h" -#endif -static struct zcomp_backend *backends[] = { - &zcomp_lzo, +static const char * const backends[] = { + "lzo", #ifdef CONFIG_ZRAM_LZ4_COMPRESS - &zcomp_lz4, + "lz4", #endif NULL }; -static struct zcomp_backend *find_backend(const char *compress) +static const char *find_backend(const char *compress) { int i = 0; while (backends[i]) { - if (sysfs_streq(compress, backends[i]->name)) + if (sysfs_streq(compress, backends[i])) break; i++; } return backends[i]; } -static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm) +static void zcomp_strm_free(struct zcomp_strm *zstrm) { - if (zstrm->private) - comp->backend->destroy(zstrm->private); + if (!IS_ERR_OR_NULL(zstrm->tfm)) + crypto_free_comp(zstrm->tfm); free_pages((unsigned long)zstrm->buffer, 1); kfree(zstrm); } /* - * allocate new zcomp_strm structure with ->private initialized by + * allocate new zcomp_strm structure with ->tfm initialized by * backend, return NULL on error */ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp, gfp_t flags) @@ -58,14 +55,14 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp, gfp_t flags) if (!zstrm) return NULL; - zstrm->private = comp->backend->create(flags); + zstrm->tfm = crypto_alloc_comp(comp->name, 0, 0); /* * allocate 2 pages. 1 for compressed data, plus 1 extra for the * case when compressed size is larger than the original one */ zstrm->buffer = (void *)__get_free_pages(flags | __GFP_ZERO, 1); - if (!zstrm->private || !zstrm->buffer) { - zcomp_strm_free(comp, zstrm); + if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) { + zcomp_strm_free(zstrm); zstrm = NULL; } return zstrm; @@ -78,12 +75,12 @@ ssize_t zcomp_available_show(const char *comp, char *buf) int i = 0; while (backends[i]) { - if (!strcmp(comp, backends[i]->name)) + if (!strcmp(comp, backends[i])) sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, - "[%s] ", backends[i]->name); + "[%s] ", backends[i]); else sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, - "%s ", backends[i]->name); + "%s ", backends[i]); i++; } sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n"); @@ -105,17 +102,38 @@ void zcomp_stream_put(struct zcomp *comp) put_cpu_ptr(comp->stream); } -int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, - const unsigned char *src, size_t *dst_len) +int zcomp_compress(struct zcomp_strm *zstrm, + const void *src, unsigned int *dst_len) { - return comp->backend->compress(src, zstrm->buffer, dst_len, - zstrm->private); + /* + * Our dst memory (zstrm->buffer) is always `2 * PAGE_SIZE' sized + * because sometimes we can endup having a bigger compressed data + * due to various reasons: for example compression algorithms tend + * to add some padding to the compressed buffer. Speaking of padding, + * comp algorithm `842' pads the compressed length to multiple of 8 + * and returns -ENOSP when the dst memory is not big enough, which + * is not something that ZRAM wants to see. We can handle the + * `compressed_size > PAGE_SIZE' case easily in ZRAM, but when we + * receive -ERRNO from the compressing backend we can't help it + * anymore. 
To make `842' happy we need to tell the exact size of + * the dst buffer, zram_drv will take care of the fact that + * compressed buffer is too big. + */ + *dst_len = PAGE_SIZE * 2; + + return crypto_comp_compress(zstrm->tfm, + src, PAGE_SIZE, + zstrm->buffer, dst_len); } -int zcomp_decompress(struct zcomp *comp, const unsigned char *src, - size_t src_len, unsigned char *dst) +int zcomp_decompress(struct zcomp_strm *zstrm, + const void *src, unsigned int src_len, void *dst) { - return comp->backend->decompress(src, src_len, dst); + unsigned int dst_len = PAGE_SIZE; + + return crypto_comp_decompress(zstrm->tfm, + src, src_len, + dst, &dst_len); } static int __zcomp_cpu_notifier(struct zcomp *comp, @@ -138,7 +156,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp, case CPU_UP_CANCELED: zstrm = *per_cpu_ptr(comp->stream, cpu); if (!IS_ERR_OR_NULL(zstrm)) - zcomp_strm_free(comp, zstrm); + zcomp_strm_free(zstrm); *per_cpu_ptr(comp->stream, cpu) = NULL; break; default: @@ -209,7 +227,7 @@ void zcomp_destroy(struct zcomp *comp) struct zcomp *zcomp_create(const char *compress) { struct zcomp *comp; - struct zcomp_backend *backend; + const char *backend; int error; backend = find_backend(compress); @@ -220,7 +238,7 @@ struct zcomp *zcomp_create(const char *compress) if (!comp) return ERR_PTR(-ENOMEM); - comp->backend = backend; + comp->name = backend; error = zcomp_init(comp); if (error) { kfree(comp); diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h index 944b8e60dd82..c914ab7972ef 100644 --- a/drivers/block/zram/zcomp.h +++ b/drivers/block/zram/zcomp.h @@ -13,12 +13,7 @@ struct zcomp_strm { /* compression/decompression buffer */ void *buffer; - /* - * The private data of the compression stream, only compression - * stream backend can touch this (e.g. 
compression algorithm - * working memory) - */ - void *private; + struct crypto_comp *tfm; }; /* static compression backend */ @@ -40,6 +35,8 @@ struct zcomp { struct zcomp_strm * __percpu *stream; struct zcomp_backend *backend; struct notifier_block notifier; + + const char *name; }; ssize_t zcomp_available_show(const char *comp, char *buf); @@ -51,11 +48,11 @@ void zcomp_destroy(struct zcomp *comp); struct zcomp_strm *zcomp_stream_get(struct zcomp *comp); void zcomp_stream_put(struct zcomp *comp); -int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm, - const unsigned char *src, size_t *dst_len); +int zcomp_compress(struct zcomp_strm *zstrm, + const void *src, unsigned int *dst_len); -int zcomp_decompress(struct zcomp *comp, const unsigned char *src, - size_t src_len, unsigned char *dst); +int zcomp_decompress(struct zcomp_strm *zstrm, + const void *src, unsigned int src_len, void *dst); bool zcomp_set_max_streams(struct zcomp *comp, int num_strm); #endif /* _ZCOMP_H_ */ diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 9361a5db7de8..65d140336289 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -563,7 +563,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) unsigned char *cmem; struct zram_meta *meta = zram->meta; unsigned long handle; - size_t size; + unsigned int size; bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); handle = meta->table[index].handle; @@ -576,10 +576,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) } cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); - if (size == PAGE_SIZE) + if (size == PAGE_SIZE) { copy_page(mem, cmem); - else - ret = zcomp_decompress(zram->comp, cmem, size, mem); + } else { + struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); + + ret = zcomp_decompress(zstrm, cmem, size, mem); + zcomp_stream_put(zram->comp); + } zs_unmap_object(meta->mem_pool, handle); bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); @@ -646,7 +650,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, int offset) { int ret = 0; - size_t clen; + unsigned int clen; unsigned long handle = 0; struct page *page; unsigned char *user_mem, *cmem, *src, *uncmem = NULL; @@ -696,7 +700,7 @@ compress_again: } zstrm = zcomp_stream_get(zram->comp); - ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen); + ret = zcomp_compress(zstrm, uncmem, &clen); if (!is_partial_io(bvec)) { kunmap_atomic(user_mem); user_mem = NULL; @@ -744,7 +748,7 @@ compress_again: if (handle) goto compress_again; - pr_err("Error allocating memory for compressed page: %u, size=%zu\n", + pr_err("Error allocating memory for compressed page: %u, size=%u\n", index, clen); ret = -ENOMEM; goto out; -- cgit v1.2.3-70-g09d2 From 415403be37e204632b17bdb6857890fe5a220cea Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 26 Jul 2016 15:22:48 -0700 Subject: zram: use crypto api to check alg availability There is no way to get a string with all the crypto comp algorithms supported by the crypto comp engine, so we need to maintain our own backends list. At the same time we additionally need to use crypto_has_comp() to make sure that the user has requested a compression algorithm that is recognized by the crypto comp engine. Relying on /proc/crypto is not an options here, because it does not show not-yet-inserted compression modules. 
Example: modprobe zram cat /proc/crypto | grep -i lz4 modprobe lz4 cat /proc/crypto | grep -i lz4 name : lz4 driver : lz4-generic module : lz4 So the user can't tell exactly if the lz4 is really supported from /proc/crypto output, unless someone or something has loaded it. This patch also adds crypto_has_comp() to zcomp_available_show(). We store all the compression algorithms names in zcomp's `backends' array, regardless the CONFIG_CRYPTO_FOO configuration, but show only those that are also supported by crypto engine. This helps user to know the exact list of compression algorithms that can be used. Example: module lz4 is not loaded yet, but is supported by the crypto engine. /proc/crypto has no information on this module, while zram's `comp_algorithm' lists it: cat /proc/crypto | grep -i lz4 cat /sys/block/zram0/comp_algorithm [lzo] lz4 deflate lz4hc 842 We still use the `backends' array to determine if the requested compression backend is known to crypto api. This array, however, may not contain some entries, therefore as the last step we call crypto_has_comp() function which attempts to insmod the requested compression algorithm to determine if crypto api supports it. The advantage of this method is that now we permit the usage of out-of-tree crypto compression modules (implementing S/W or H/W compression). [sergey.senozhatsky@gmail.com: zram-use-crypto-api-to-check-alg-availability-v3] Link: http://lkml.kernel.org/r/20160604024902.11778-4-sergey.senozhatsky@gmail.com Link: http://lkml.kernel.org/r/20160531122017.2878-5-sergey.senozhatsky@gmail.com Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Cc: Joonsoo Kim Signed-off-by: Sergey Senozhatsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/blockdev/zram.txt | 11 ++++++++ drivers/block/zram/zcomp.c | 61 +++++++++++++++++++++++++---------------- drivers/block/zram/zram_drv.c | 16 ++++++----- drivers/block/zram/zram_drv.h | 5 ++-- 4 files changed, 60 insertions(+), 33 deletions(-) diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt index 13100fb3c26d..7c05357360a7 100644 --- a/Documentation/blockdev/zram.txt +++ b/Documentation/blockdev/zram.txt @@ -83,6 +83,17 @@ pre-created. Default: 1. #select lzo compression algorithm echo lzo > /sys/block/zram0/comp_algorithm + For the time being, the `comp_algorithm' content does not necessarily + show every compression algorithm supported by the kernel. We keep this + list primarily to simplify device configuration and one can configure + a new device with a compression algorithm that is not listed in + `comp_algorithm'. The thing is that, internally, ZRAM uses Crypto API + and, if some of the algorithms were built as modules, it's impossible + to list all of them using, for instance, /proc/crypto or any other + method. This, however, has an advantage of permitting the usage of + custom crypto compression modules (implementing S/W or H/W + compression). + 4) Set Disksize Set disk size by writing the value to sysfs node 'disksize'. The value can be either in bytes or you can use mem suffixes. 
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index f35726860a1b..a2b4eb85b41d 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -26,17 +26,6 @@ static const char * const backends[] = { NULL }; -static const char *find_backend(const char *compress) -{ - int i = 0; - while (backends[i]) { - if (sysfs_streq(compress, backends[i])) - break; - i++; - } - return backends[i]; -} - static void zcomp_strm_free(struct zcomp_strm *zstrm) { if (!IS_ERR_OR_NULL(zstrm->tfm)) @@ -68,30 +57,56 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp, gfp_t flags) return zstrm; } +bool zcomp_available_algorithm(const char *comp) +{ + int i = 0; + + while (backends[i]) { + if (sysfs_streq(comp, backends[i])) + return true; + i++; + } + + /* + * Crypto does not ignore a trailing new line symbol, + * so make sure you don't supply a string containing + * one. + * This also means that we permit zcomp initialisation + * with any compressing algorithm known to crypto api. + */ + return crypto_has_comp(comp, 0, 0) == 1; +} + /* show available compressors */ ssize_t zcomp_available_show(const char *comp, char *buf) { + bool known_algorithm = false; ssize_t sz = 0; int i = 0; - while (backends[i]) { - if (!strcmp(comp, backends[i])) + for (; backends[i]; i++) { + if (!strcmp(comp, backends[i])) { + known_algorithm = true; sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "[%s] ", backends[i]); - else + } else { sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "%s ", backends[i]); - i++; + } } + + /* + * Out-of-tree module known to crypto api or a missing + * entry in `backends'. + */ + if (!known_algorithm && crypto_has_comp(comp, 0, 0) == 1) + sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, + "[%s] ", comp); + sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n"); return sz; } -bool zcomp_available_algorithm(const char *comp) -{ - return find_backend(comp) != NULL; -} - struct zcomp_strm *zcomp_stream_get(struct zcomp *comp) { return *get_cpu_ptr(comp->stream); @@ -227,18 +242,16 @@ void zcomp_destroy(struct zcomp *comp) struct zcomp *zcomp_create(const char *compress) { struct zcomp *comp; - const char *backend; int error; - backend = find_backend(compress); - if (!backend) + if (!zcomp_available_algorithm(compress)) return ERR_PTR(-EINVAL); comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL); if (!comp) return ERR_PTR(-ENOMEM); - comp->name = backend; + comp->name = compress; error = zcomp_init(comp); if (error) { kfree(comp); diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 65d140336289..c2a1d7dbaec9 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -342,9 +342,16 @@ static ssize_t comp_algorithm_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct zram *zram = dev_to_zram(dev); + char compressor[CRYPTO_MAX_ALG_NAME]; size_t sz; - if (!zcomp_available_algorithm(buf)) + strlcpy(compressor, buf, sizeof(compressor)); + /* ignore trailing newline */ + sz = strlen(compressor); + if (sz > 0 && compressor[sz - 1] == '\n') + compressor[sz - 1] = 0x00; + + if (!zcomp_available_algorithm(compressor)) return -EINVAL; down_write(&zram->init_lock); @@ -353,13 +360,8 @@ static ssize_t comp_algorithm_store(struct device *dev, pr_info("Can't change algorithm for initialized device\n"); return -EBUSY; } - strlcpy(zram->compressor, buf, sizeof(zram->compressor)); - - /* ignore trailing newline */ - sz = strlen(zram->compressor); - if (sz > 0 && zram->compressor[sz - 1] == '\n') - 
zram->compressor[sz - 1] = 0x00; + strlcpy(zram->compressor, compressor, sizeof(compressor)); up_write(&zram->init_lock); return len; } diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 3f5bf66a27e4..74fcf10da374 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -15,8 +15,9 @@ #ifndef _ZRAM_DRV_H_ #define _ZRAM_DRV_H_ -#include +#include #include +#include #include "zcomp.h" @@ -113,7 +114,7 @@ struct zram { * we can store in a disk. */ u64 disksize; /* bytes */ - char compressor[10]; + char compressor[CRYPTO_MAX_ALG_NAME]; /* * zram is claimed so open request will be failed */ -- cgit v1.2.3-70-g09d2 From 69a30a8d2ac17c8080cf6ebfc91149fd6c2648b3 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 26 Jul 2016 15:22:51 -0700 Subject: zram: cosmetic: cleanup documentation zram documentation is a mix of different styles: spaces, tabs, tabs + spaces, etc. Clean it up. Link: http://lkml.kernel.org/r/20160531122017.2878-6-sergey.senozhatsky@gmail.com Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Cc: Joonsoo Kim Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/blockdev/zram.txt | 91 ++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 46 deletions(-) diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt index 7c05357360a7..0535ae1f73e5 100644 --- a/Documentation/blockdev/zram.txt +++ b/Documentation/blockdev/zram.txt @@ -59,23 +59,23 @@ num_devices parameter is optional and tells zram how many devices should be pre-created. Default: 1. 2) Set max number of compression streams - Regardless the value passed to this attribute, ZRAM will always - allocate multiple compression streams - one per online CPUs - thus - allowing several concurrent compression operations. The number of - allocated compression streams goes down when some of the CPUs - become offline. There is no single-compression-stream mode anymore, - unless you are running a UP system or has only 1 CPU online. - - To find out how many streams are currently available: +Regardless the value passed to this attribute, ZRAM will always +allocate multiple compression streams - one per online CPUs - thus +allowing several concurrent compression operations. The number of +allocated compression streams goes down when some of the CPUs +become offline. There is no single-compression-stream mode anymore, +unless you are running a UP system or has only 1 CPU online. + +To find out how many streams are currently available: cat /sys/block/zram0/max_comp_streams 3) Select compression algorithm - Using comp_algorithm device attribute one can see available and - currently selected (shown in square brackets) compression algorithms, - change selected compression algorithm (once the device is initialised - there is no way to change compression algorithm). +Using comp_algorithm device attribute one can see available and +currently selected (shown in square brackets) compression algorithms, +change selected compression algorithm (once the device is initialised +there is no way to change compression algorithm). - Examples: +Examples: #show supported compression algorithms cat /sys/block/zram0/comp_algorithm lzo [lz4] @@ -83,28 +83,27 @@ pre-created. Default: 1. #select lzo compression algorithm echo lzo > /sys/block/zram0/comp_algorithm - For the time being, the `comp_algorithm' content does not necessarily - show every compression algorithm supported by the kernel. 
We keep this - list primarily to simplify device configuration and one can configure - a new device with a compression algorithm that is not listed in - `comp_algorithm'. The thing is that, internally, ZRAM uses Crypto API - and, if some of the algorithms were built as modules, it's impossible - to list all of them using, for instance, /proc/crypto or any other - method. This, however, has an advantage of permitting the usage of - custom crypto compression modules (implementing S/W or H/W - compression). +For the time being, the `comp_algorithm' content does not necessarily +show every compression algorithm supported by the kernel. We keep this +list primarily to simplify device configuration and one can configure +a new device with a compression algorithm that is not listed in +`comp_algorithm'. The thing is that, internally, ZRAM uses Crypto API +and, if some of the algorithms were built as modules, it's impossible +to list all of them using, for instance, /proc/crypto or any other +method. This, however, has an advantage of permitting the usage of +custom crypto compression modules (implementing S/W or H/W compression). 4) Set Disksize - Set disk size by writing the value to sysfs node 'disksize'. - The value can be either in bytes or you can use mem suffixes. - Examples: - # Initialize /dev/zram0 with 50MB disksize - echo $((50*1024*1024)) > /sys/block/zram0/disksize +Set disk size by writing the value to sysfs node 'disksize'. +The value can be either in bytes or you can use mem suffixes. +Examples: + # Initialize /dev/zram0 with 50MB disksize + echo $((50*1024*1024)) > /sys/block/zram0/disksize - # Using mem suffixes - echo 256K > /sys/block/zram0/disksize - echo 512M > /sys/block/zram0/disksize - echo 1G > /sys/block/zram0/disksize + # Using mem suffixes + echo 256K > /sys/block/zram0/disksize + echo 512M > /sys/block/zram0/disksize + echo 1G > /sys/block/zram0/disksize Note: There is little point creating a zram of greater than twice the size of memory @@ -112,20 +111,20 @@ since we expect a 2:1 compression ratio. Note that zram uses about 0.1% of the size of the disk when not in use so a huge zram is wasteful. 5) Set memory limit: Optional - Set memory limit by writing the value to sysfs node 'mem_limit'. - The value can be either in bytes or you can use mem suffixes. - In addition, you could change the value in runtime. - Examples: - # limit /dev/zram0 with 50MB memory - echo $((50*1024*1024)) > /sys/block/zram0/mem_limit - - # Using mem suffixes - echo 256K > /sys/block/zram0/mem_limit - echo 512M > /sys/block/zram0/mem_limit - echo 1G > /sys/block/zram0/mem_limit - - # To disable memory limit - echo 0 > /sys/block/zram0/mem_limit +Set memory limit by writing the value to sysfs node 'mem_limit'. +The value can be either in bytes or you can use mem suffixes. +In addition, you could change the value in runtime. +Examples: + # limit /dev/zram0 with 50MB memory + echo $((50*1024*1024)) > /sys/block/zram0/mem_limit + + # Using mem suffixes + echo 256K > /sys/block/zram0/mem_limit + echo 512M > /sys/block/zram0/mem_limit + echo 1G > /sys/block/zram0/mem_limit + + # To disable memory limit + echo 0 > /sys/block/zram0/mem_limit 6) Activate: mkswap /dev/zram0 -- cgit v1.2.3-70-g09d2 From ce1ed9f98e888aa220fb09da2e2bcfcfba218a27 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 26 Jul 2016 15:22:54 -0700 Subject: zram: delete custom lzo/lz4 Remove lzo/lz4 backends, we use crypto API now. 
[sergey.senozhatsky@gmail.com: zram-delete-custom-lzo-lz4-v3] Link: http://lkml.kernel.org/r/20160604024902.11778-6-sergey.senozhatsky@gmail.com Link: http://lkml.kernel.org/r/20160531122017.2878-7-sergey.senozhatsky@gmail.com Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/zram/Kconfig | 9 ------- drivers/block/zram/Makefile | 4 +-- drivers/block/zram/zcomp.c | 2 +- drivers/block/zram/zcomp.h | 15 ----------- drivers/block/zram/zcomp_lz4.c | 56 ------------------------------------------ drivers/block/zram/zcomp_lz4.h | 17 ------------- drivers/block/zram/zcomp_lzo.c | 56 ------------------------------------------ drivers/block/zram/zcomp_lzo.h | 17 ------------- 8 files changed, 2 insertions(+), 174 deletions(-) delete mode 100644 drivers/block/zram/zcomp_lz4.c delete mode 100644 drivers/block/zram/zcomp_lz4.h delete mode 100644 drivers/block/zram/zcomp_lzo.c delete mode 100644 drivers/block/zram/zcomp_lzo.h diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig index 2252cd7d0e89..b8ecba6dcd3b 100644 --- a/drivers/block/zram/Kconfig +++ b/drivers/block/zram/Kconfig @@ -13,12 +13,3 @@ config ZRAM disks and maybe many more. See zram.txt for more information. - -config ZRAM_LZ4_COMPRESS - bool "Enable LZ4 algorithm support" - depends on ZRAM - select CRYPTO_LZ4 - default n - help - This option enables LZ4 compression algorithm support. Compression - algorithm can be changed using `comp_algorithm' device attribute. diff --git a/drivers/block/zram/Makefile b/drivers/block/zram/Makefile index be0763ff57a2..9e2b79e9a990 100644 --- a/drivers/block/zram/Makefile +++ b/drivers/block/zram/Makefile @@ -1,5 +1,3 @@ -zram-y := zcomp_lzo.o zcomp.o zram_drv.o - -zram-$(CONFIG_ZRAM_LZ4_COMPRESS) += zcomp_lz4.o +zram-y := zcomp.o zram_drv.o obj-$(CONFIG_ZRAM) += zram.o diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index a2b4eb85b41d..9ab45d41624b 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -20,7 +20,7 @@ static const char * const backends[] = { "lzo", -#ifdef CONFIG_ZRAM_LZ4_COMPRESS +#if IS_ENABLED(CONFIG_CRYPTO_LZ4) "lz4", #endif NULL diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h index c914ab7972ef..478cac2ed465 100644 --- a/drivers/block/zram/zcomp.h +++ b/drivers/block/zram/zcomp.h @@ -16,24 +16,9 @@ struct zcomp_strm { struct crypto_comp *tfm; }; -/* static compression backend */ -struct zcomp_backend { - int (*compress)(const unsigned char *src, unsigned char *dst, - size_t *dst_len, void *private); - - int (*decompress)(const unsigned char *src, size_t src_len, - unsigned char *dst); - - void *(*create)(gfp_t flags); - void (*destroy)(void *private); - - const char *name; -}; - /* dynamic per-device compression frontend */ struct zcomp { struct zcomp_strm * __percpu *stream; - struct zcomp_backend *backend; struct notifier_block notifier; const char *name; diff --git a/drivers/block/zram/zcomp_lz4.c b/drivers/block/zram/zcomp_lz4.c deleted file mode 100644 index 0110086accba..000000000000 --- a/drivers/block/zram/zcomp_lz4.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2014 Sergey Senozhatsky. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include - -#include "zcomp_lz4.h" - -static void *zcomp_lz4_create(gfp_t flags) -{ - void *ret; - - ret = kmalloc(LZ4_MEM_COMPRESS, flags); - if (!ret) - ret = __vmalloc(LZ4_MEM_COMPRESS, - flags | __GFP_HIGHMEM, - PAGE_KERNEL); - return ret; -} - -static void zcomp_lz4_destroy(void *private) -{ - kvfree(private); -} - -static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst, - size_t *dst_len, void *private) -{ - /* return : Success if return 0 */ - return lz4_compress(src, PAGE_SIZE, dst, dst_len, private); -} - -static int zcomp_lz4_decompress(const unsigned char *src, size_t src_len, - unsigned char *dst) -{ - size_t dst_len = PAGE_SIZE; - /* return : Success if return 0 */ - return lz4_decompress_unknownoutputsize(src, src_len, dst, &dst_len); -} - -struct zcomp_backend zcomp_lz4 = { - .compress = zcomp_lz4_compress, - .decompress = zcomp_lz4_decompress, - .create = zcomp_lz4_create, - .destroy = zcomp_lz4_destroy, - .name = "lz4", -}; diff --git a/drivers/block/zram/zcomp_lz4.h b/drivers/block/zram/zcomp_lz4.h deleted file mode 100644 index 60613fb29dd8..000000000000 --- a/drivers/block/zram/zcomp_lz4.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (C) 2014 Sergey Senozhatsky. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifndef _ZCOMP_LZ4_H_ -#define _ZCOMP_LZ4_H_ - -#include "zcomp.h" - -extern struct zcomp_backend zcomp_lz4; - -#endif /* _ZCOMP_LZ4_H_ */ diff --git a/drivers/block/zram/zcomp_lzo.c b/drivers/block/zram/zcomp_lzo.c deleted file mode 100644 index ed7a1f0549ec..000000000000 --- a/drivers/block/zram/zcomp_lzo.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2014 Sergey Senozhatsky. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include - -#include "zcomp_lzo.h" - -static void *lzo_create(gfp_t flags) -{ - void *ret; - - ret = kmalloc(LZO1X_MEM_COMPRESS, flags); - if (!ret) - ret = __vmalloc(LZO1X_MEM_COMPRESS, - flags | __GFP_HIGHMEM, - PAGE_KERNEL); - return ret; -} - -static void lzo_destroy(void *private) -{ - kvfree(private); -} - -static int lzo_compress(const unsigned char *src, unsigned char *dst, - size_t *dst_len, void *private) -{ - int ret = lzo1x_1_compress(src, PAGE_SIZE, dst, dst_len, private); - return ret == LZO_E_OK ? 0 : ret; -} - -static int lzo_decompress(const unsigned char *src, size_t src_len, - unsigned char *dst) -{ - size_t dst_len = PAGE_SIZE; - int ret = lzo1x_decompress_safe(src, src_len, dst, &dst_len); - return ret == LZO_E_OK ? 0 : ret; -} - -struct zcomp_backend zcomp_lzo = { - .compress = lzo_compress, - .decompress = lzo_decompress, - .create = lzo_create, - .destroy = lzo_destroy, - .name = "lzo", -}; diff --git a/drivers/block/zram/zcomp_lzo.h b/drivers/block/zram/zcomp_lzo.h deleted file mode 100644 index 128c5807fa14..000000000000 --- a/drivers/block/zram/zcomp_lzo.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright (C) 2014 Sergey Senozhatsky. 
- * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifndef _ZCOMP_LZO_H_ -#define _ZCOMP_LZO_H_ - -#include "zcomp.h" - -extern struct zcomp_backend zcomp_lzo; - -#endif /* _ZCOMP_LZO_H_ */ -- cgit v1.2.3-70-g09d2 From eb9f56d82547db407779967a2251ea28969245b0 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 26 Jul 2016 15:22:56 -0700 Subject: zram: add more compression algorithms Add "deflate", "lz4hc", "842" algorithms to the list of known compression backends. The real availability of those algorithms, however, depends on the corresponding CONFIG_CRYPTO_FOO config options. [sergey.senozhatsky@gmail.com: zram-add-more-compression-algorithms-v3] Link: http://lkml.kernel.org/r/20160604024902.11778-7-sergey.senozhatsky@gmail.com Link: http://lkml.kernel.org/r/20160531122017.2878-8-sergey.senozhatsky@gmail.com Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/zram/zcomp.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index 9ab45d41624b..32e521a2b8c8 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -22,6 +22,15 @@ static const char * const backends[] = { "lzo", #if IS_ENABLED(CONFIG_CRYPTO_LZ4) "lz4", +#endif +#if IS_ENABLED(CONFIG_CRYPTO_DEFLATE) + "deflate", +#endif +#if IS_ENABLED(CONFIG_CRYPTO_LZ4HC) + "lz4hc", +#endif +#if IS_ENABLED(CONFIG_CRYPTO_842) + "842", #endif NULL }; -- cgit v1.2.3-70-g09d2 From 16d37725a042cc66f9ee95889dd40e734264508e Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Tue, 26 Jul 2016 15:22:59 -0700 Subject: zram: drop gfp_t from zcomp_strm_alloc() We now allocate streams from CPU_UP hot-plug path, there are no context-dependent stream allocations anymore and we can schedule from zcomp_strm_alloc(). Use GFP_KERNEL directly and drop a gfp_t parameter. Link: http://lkml.kernel.org/r/20160531122017.2878-9-sergey.senozhatsky@gmail.com Signed-off-by: Sergey Senozhatsky Acked-by: Minchan Kim Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/zram/zcomp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index 32e521a2b8c8..4b5cd3a7b2b6 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -47,9 +47,9 @@ static void zcomp_strm_free(struct zcomp_strm *zstrm) * allocate new zcomp_strm structure with ->tfm initialized by * backend, return NULL on error */ -static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp, gfp_t flags) +static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp) { - struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), flags); + struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL); if (!zstrm) return NULL; @@ -58,7 +58,7 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp, gfp_t flags) * allocate 2 pages. 
1 for compressed data, plus 1 extra for the * case when compressed size is larger than the original one */ - zstrm->buffer = (void *)__get_free_pages(flags | __GFP_ZERO, 1); + zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); if (IS_ERR_OR_NULL(zstrm->tfm) || !zstrm->buffer) { zcomp_strm_free(zstrm); zstrm = NULL; @@ -169,7 +169,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp, case CPU_UP_PREPARE: if (WARN_ON(*per_cpu_ptr(comp->stream, cpu))) break; - zstrm = zcomp_strm_alloc(comp, GFP_KERNEL); + zstrm = zcomp_strm_alloc(comp); if (IS_ERR_OR_NULL(zstrm)) { pr_err("Can't allocate a compression stream\n"); return NOTIFY_BAD; -- cgit v1.2.3-70-g09d2

From c6c919eb90e021fbcfcbfa9dd3d55930cdbb67f9 Mon Sep 17 00:00:00 2001
From: Minchan Kim
Date: Tue, 26 Jul 2016 15:23:02 -0700
Subject: mm: use put_page() to free page instead of putback_lru_page()

Recently, I got many reports about performance degradation in embedded systems (Android mobile phones, webOS TVs and so on) and easy fork failures. The problem was fragmentation caused mainly by zram and GPU drivers. Under memory pressure, their pages were spread out over all pageblocks and could not be migrated by the current compaction algorithm, which supports only LRU pages. In the end, compaction cannot work well, so the reclaimer shrinks all of the working set pages. That made the system very slow and even made fork, which requires order-2 or order-3 allocations, fail easily.

Another pain point is that these pages cannot use the CMA memory space, so when an OOM kill happens I can see many free pages in the CMA area, which is not memory efficient. In our product, which has a big CMA area, zones are reclaimed far too excessively to allocate GPU and zram pages although there is lots of free space in CMA, so the system easily becomes very slow.

To solve these problems, this patch series adds a facility to migrate non-lru pages by introducing new functions and page flags to help migration.

struct address_space_operations {
	..
	..
	bool (*isolate_page)(struct page *, isolate_mode_t);
	void (*putback_page)(struct page *);
	..
}

new page flags

	PG_movable
	PG_isolated

For details, please read the description in "mm: migrate: support non-lru movable page migration".

Originally, Gioh Kim had tried to support this feature but he moved on, so I took over the work. I took much of the code from his work and changed it a little, and Konstantin Khlebnikov helped Gioh a lot, so he deserves a lot of credit, too. And I should mention Chulmin, who tested this patchset heavily so that I could find many bugs. :) Thanks, Gioh, Konstantin and Chulmin!

This patchset consists of five parts.

1. clean up migration
   mm: use put_page to free page instead of putback_lru_page

2. add non-lru page migration feature
   mm: migrate: support non-lru movable page migration

3. rework KVM memory-ballooning
   mm: balloon: use general non-lru movable page feature

4. zsmalloc refactoring for preparing page migration
   zsmalloc: keep max_object in size_class
   zsmalloc: use bit_spin_lock
   zsmalloc: use accessor
   zsmalloc: factor page chain functionality out
   zsmalloc: introduce zspage structure
   zsmalloc: separate free_zspage from putback_zspage
   zsmalloc: use freeobj for index

5. zsmalloc page migration
   zsmalloc: page migration support
   zram: use __GFP_MOVABLE for memory allocation

This patch (of 12):

The procedure of page migration is as follows: first of all, it should isolate a page from the LRU and try to migrate the page. If migration is successful, it releases the page for freeing. Otherwise, it should put the page back on the LRU list.
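In rough pseudo-C, that procedure ends up looking like the sketch below (a simplified illustration loosely based on the reworked unmap_and_move() shown in the diff further down, not the literal kernel code):

	rc = __unmap_and_move(page, newpage, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS) {
		/*
		 * Migration succeeded: just drop the reference taken at
		 * isolation time; put_page() removes the page from the LRU
		 * if this was the last refcount.
		 */
		put_page(page);
	} else if (rc != -EAGAIN) {
		/* Migration failed for good: restore the page to the LRU. */
		putback_lru_page(page);
	}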
For LRU pages, we have used putback_lru_page for both freeing and putback to LRU list. It's okay because put_page is aware of LRU list so if it releases last refcount of the page, it removes the page from LRU list. However, It makes unnecessary operations (e.g., lru_cache_add, pagevec and flags operations. It would be not significant but no worth to do) and harder to support new non-lru page migration because put_page isn't aware of non-lru page's data structure. To solve the problem, we can add new hook in put_page with PageMovable flags check but it can increase overhead in hot path and needs new locking scheme to stabilize the flag check with put_page. So, this patch cleans it up to divide two semantic(ie, put and putback). If migration is successful, use put_page instead of putback_lru_page and use putback_lru_page only on failure. That makes code more readable and doesn't add overhead in put_page. Comment from Vlastimil "Yeah, and compaction (perhaps also other migration users) has to drain the lru pvec... Getting rid of this stuff is worth even by itself." Link: http://lkml.kernel.org/r/1464736881-24886-2-git-send-email-minchan@kernel.org Signed-off-by: Minchan Kim Acked-by: Vlastimil Babka Cc: Rik van Riel Cc: Mel Gorman Cc: Hugh Dickins Cc: Naoya Horiguchi Cc: Sergey Senozhatsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/migrate.c | 64 +++++++++++++++++++++++++++++++++++++----------------------- 1 file changed, 40 insertions(+), 24 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index bd3fdc202e8b..c74412b381ff 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -915,6 +915,19 @@ out_unlock: put_anon_vma(anon_vma); unlock_page(page); out: + /* + * If migration is successful, decrease refcount of the newpage + * which will not free the page because new page owner increased + * refcounter. As well, if it is LRU page, add the page to LRU + * list in here. + */ + if (rc == MIGRATEPAGE_SUCCESS) { + if (unlikely(__is_movable_balloon_page(newpage))) + put_page(newpage); + else + putback_lru_page(newpage); + } + return rc; } @@ -948,6 +961,12 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page, if (page_count(page) == 1) { /* page was freed from under us. So we are done. */ + ClearPageActive(page); + ClearPageUnevictable(page); + if (put_new_page) + put_new_page(newpage, private); + else + put_page(newpage); goto out; } @@ -960,10 +979,8 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page, } rc = __unmap_and_move(page, newpage, force, mode); - if (rc == MIGRATEPAGE_SUCCESS) { - put_new_page = NULL; + if (rc == MIGRATEPAGE_SUCCESS) set_page_owner_migrate_reason(newpage, reason); - } out: if (rc != -EAGAIN) { @@ -976,34 +993,33 @@ out: list_del(&page->lru); dec_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); - /* Soft-offlined page shouldn't go through lru cache list */ - if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) { + } + + /* + * If migration is successful, releases reference grabbed during + * isolation. Otherwise, restore the page to right list unless + * we want to retry. + */ + if (rc == MIGRATEPAGE_SUCCESS) { + put_page(page); + if (reason == MR_MEMORY_FAILURE) { /* - * With this release, we free successfully migrated - * page and set PG_HWPoison on just freed page - * intentionally. Although it's rather weird, it's how - * HWPoison flag works at the moment. + * Set PG_HWPoison on just freed page + * intentionally. Although it's rather weird, + * it's how HWPoison flag works at the moment. 
*/ - put_page(page); if (!test_set_page_hwpoison(page)) num_poisoned_pages_inc(); - } else + } + } else { + if (rc != -EAGAIN) putback_lru_page(page); + if (put_new_page) + put_new_page(newpage, private); + else + put_page(newpage); } - /* - * If migration was not successful and there's a freeing callback, use - * it. Otherwise, putback_lru_page() will drop the reference grabbed - * during isolation. - */ - if (put_new_page) - put_new_page(newpage, private); - else if (unlikely(__is_movable_balloon_page(newpage))) { - /* drop our reference, page already in the balloon */ - put_page(newpage); - } else - putback_lru_page(newpage); - if (result) { if (rc) *result = rc; -- cgit v1.2.3-70-g09d2 From bda807d4445414e8e77da704f116bb0880fe0c76 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 26 Jul 2016 15:23:05 -0700 Subject: mm: migrate: support non-lru movable page migration We have allowed migration for only LRU pages until now and it was enough to make high-order pages. But recently, embedded system(e.g., webOS, android) uses lots of non-movable pages(e.g., zram, GPU memory) so we have seen several reports about troubles of small high-order allocation. For fixing the problem, there were several efforts (e,g,. enhance compaction algorithm, SLUB fallback to 0-order page, reserved memory, vmalloc and so on) but if there are lots of non-movable pages in system, their solutions are void in the long run. So, this patch is to support facility to change non-movable pages with movable. For the feature, this patch introduces functions related to migration to address_space_operations as well as some page flags. If a driver want to make own pages movable, it should define three functions which are function pointers of struct address_space_operations. 1. bool (*isolate_page) (struct page *page, isolate_mode_t mode); What VM expects on isolate_page function of driver is to return *true* if driver isolates page successfully. On returing true, VM marks the page as PG_isolated so concurrent isolation in several CPUs skip the page for isolation. If a driver cannot isolate the page, it should return *false*. Once page is successfully isolated, VM uses page.lru fields so driver shouldn't expect to preserve values in that fields. 2. int (*migratepage) (struct address_space *mapping, struct page *newpage, struct page *oldpage, enum migrate_mode); After isolation, VM calls migratepage of driver with isolated page. The function of migratepage is to move content of the old page to new page and set up fields of struct page newpage. Keep in mind that you should indicate to the VM the oldpage is no longer movable via __ClearPageMovable() under page_lock if you migrated the oldpage successfully and returns 0. If driver cannot migrate the page at the moment, driver can return -EAGAIN. On -EAGAIN, VM will retry page migration in a short time because VM interprets -EAGAIN as "temporal migration failure". On returning any error except -EAGAIN, VM will give up the page migration without retrying in this time. Driver shouldn't touch page.lru field VM using in the functions. 3. void (*putback_page)(struct page *); If migration fails on isolated page, VM should return the isolated page to the driver so VM calls driver's putback_page with migration failed page. In this function, driver should put the isolated page back to the own data structure. 4. non-lru movable page flags There are two page flags for supporting non-lru movable page. * PG_movable Driver should use the below function to make page movable under page_lock. 
void __SetPageMovable(struct page *page, struct address_space *mapping) It needs argument of address_space for registering migration family functions which will be called by VM. Exactly speaking, PG_movable is not a real flag of struct page. Rather than, VM reuses page->mapping's lower bits to represent it. #define PAGE_MAPPING_MOVABLE 0x2 page->mapping = page->mapping | PAGE_MAPPING_MOVABLE; so driver shouldn't access page->mapping directly. Instead, driver should use page_mapping which mask off the low two bits of page->mapping so it can get right struct address_space. For testing of non-lru movable page, VM supports __PageMovable function. However, it doesn't guarantee to identify non-lru movable page because page->mapping field is unified with other variables in struct page. As well, if driver releases the page after isolation by VM, page->mapping doesn't have stable value although it has PAGE_MAPPING_MOVABLE (Look at __ClearPageMovable). But __PageMovable is cheap to catch whether page is LRU or non-lru movable once the page has been isolated. Because LRU pages never can have PAGE_MAPPING_MOVABLE in page->mapping. It is also good for just peeking to test non-lru movable pages before more expensive checking with lock_page in pfn scanning to select victim. For guaranteeing non-lru movable page, VM provides PageMovable function. Unlike __PageMovable, PageMovable functions validates page->mapping and mapping->a_ops->isolate_page under lock_page. The lock_page prevents sudden destroying of page->mapping. Driver using __SetPageMovable should clear the flag via __ClearMovablePage under page_lock before the releasing the page. * PG_isolated To prevent concurrent isolation among several CPUs, VM marks isolated page as PG_isolated under lock_page. So if a CPU encounters PG_isolated non-lru movable page, it can skip it. Driver doesn't need to manipulate the flag because VM will set/clear it automatically. Keep in mind that if driver sees PG_isolated page, it means the page have been isolated by VM so it shouldn't touch page.lru field. PG_isolated is alias with PG_reclaim flag so driver shouldn't use the flag for own purpose. 
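For illustration, a minimal sketch of a driver wiring up these callbacks; `mydrv' and its pin/copy helpers are hypothetical placeholders, not part of this patch, and all error handling is omitted:

	static bool mydrv_isolate_page(struct page *page, isolate_mode_t mode)
	{
		/* pin the page in the driver's own structures; true == isolated */
		return mydrv_try_pin(page);
	}

	static int mydrv_migratepage(struct address_space *mapping,
			struct page *newpage, struct page *oldpage,
			enum migrate_mode mode)
	{
		mydrv_copy_contents(newpage, oldpage);	/* move the data across */
		__ClearPageMovable(oldpage);		/* oldpage is no longer movable */
		return MIGRATEPAGE_SUCCESS;		/* or -EAGAIN to ask the VM to retry */
	}

	static void mydrv_putback_page(struct page *page)
	{
		/* migration failed: return the page to the driver's own list */
		mydrv_unpin(page);
	}

	static const struct address_space_operations mydrv_aops = {
		.isolate_page	= mydrv_isolate_page,
		.migratepage	= mydrv_migratepage,
		.putback_page	= mydrv_putback_page,
	};

	/* at allocation time, under page_lock, mark the page movable */
	static void mydrv_mark_movable(struct page *page, struct address_space *mapping)
	{
		lock_page(page);
		__SetPageMovable(page, mapping);	/* mapping->a_ops == &mydrv_aops */
		unlock_page(page);
	}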
[opensource.ganesh@gmail.com: mm/compaction: remove local variable is_lru] Link: http://lkml.kernel.org/r/20160618014841.GA7422@leo-test Link: http://lkml.kernel.org/r/1464736881-24886-3-git-send-email-minchan@kernel.org Signed-off-by: Gioh Kim Signed-off-by: Minchan Kim Signed-off-by: Ganesh Mahendran Acked-by: Vlastimil Babka Cc: Sergey Senozhatsky Cc: Rik van Riel Cc: Joonsoo Kim Cc: Mel Gorman Cc: Hugh Dickins Cc: Rafael Aquini Cc: Jonathan Corbet Cc: John Einar Reitan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/Locking | 4 + Documentation/filesystems/vfs.txt | 11 +++ Documentation/vm/page_migration | 107 ++++++++++++++++++++- include/linux/compaction.h | 17 ++++ include/linux/fs.h | 2 + include/linux/ksm.h | 3 +- include/linux/migrate.h | 2 + include/linux/mm.h | 1 + include/linux/page-flags.h | 33 +++++-- mm/compaction.c | 85 +++++++++++++---- mm/ksm.c | 4 +- mm/migrate.c | 192 ++++++++++++++++++++++++++++++++++---- mm/page_alloc.c | 2 +- mm/util.c | 6 +- 14 files changed, 416 insertions(+), 53 deletions(-) diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 75eea7ce3d7c..dda6e3f8e203 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -195,7 +195,9 @@ prototypes: int (*releasepage) (struct page *, int); void (*freepage)(struct page *); int (*direct_IO)(struct kiocb *, struct iov_iter *iter); + bool (*isolate_page) (struct page *, isolate_mode_t); int (*migratepage)(struct address_space *, struct page *, struct page *); + void (*putback_page) (struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); int (*error_remove_page)(struct address_space *, struct page *); @@ -219,7 +221,9 @@ invalidatepage: yes releasepage: yes freepage: yes direct_IO: +isolate_page: yes migratepage: yes (both) +putback_page: yes launder_page: yes is_partially_uptodate: yes error_remove_page: yes diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index c61a223ef3ff..900360cbcdae 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -592,9 +592,14 @@ struct address_space_operations { int (*releasepage) (struct page *, int); void (*freepage)(struct page *); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); + /* isolate a page for migration */ + bool (*isolate_page) (struct page *, isolate_mode_t); /* migrate the contents of a page to the specified target */ int (*migratepage) (struct page *, struct page *); + /* put migration-failed page back to right list */ + void (*putback_page) (struct page *); int (*launder_page) (struct page *); + int (*is_partially_uptodate) (struct page *, unsigned long, unsigned long); void (*is_dirty_writeback) (struct page *, bool *, bool *); @@ -747,6 +752,10 @@ struct address_space_operations { and transfer data directly between the storage and the application's address space. + isolate_page: Called by the VM when isolating a movable non-lru page. + If page is successfully isolated, VM marks the page as PG_isolated + via __SetPageIsolated. + migrate_page: This is used to compact the physical memory usage. If the VM wants to relocate a page (maybe off a memory card that is signalling imminent failure) it will pass a new page @@ -754,6 +763,8 @@ struct address_space_operations { transfer any private data across and update any references that it has to the page. 
+ putback_page: Called by the VM when isolated page's migration fails. + launder_page: Called before freeing a page - it writes back the dirty page. To prevent redirtying the page, it is kept locked during the whole operation. diff --git a/Documentation/vm/page_migration b/Documentation/vm/page_migration index fea5c0864170..18d37c7ac50b 100644 --- a/Documentation/vm/page_migration +++ b/Documentation/vm/page_migration @@ -142,5 +142,110 @@ Steps: 20. The new page is moved to the LRU and can be scanned by the swapper etc again. -Christoph Lameter, May 8, 2006. +C. Non-LRU page migration +------------------------- + +Although original migration aimed for reducing the latency of memory access +for NUMA, compaction who want to create high-order page is also main customer. + +Current problem of the implementation is that it is designed to migrate only +*LRU* pages. However, there are potential non-lru pages which can be migrated +in drivers, for example, zsmalloc, virtio-balloon pages. + +For virtio-balloon pages, some parts of migration code path have been hooked +up and added virtio-balloon specific functions to intercept migration logics. +It's too specific to a driver so other drivers who want to make their pages +movable would have to add own specific hooks in migration path. + +To overclome the problem, VM supports non-LRU page migration which provides +generic functions for non-LRU movable pages without driver specific hooks +migration path. + +If a driver want to make own pages movable, it should define three functions +which are function pointers of struct address_space_operations. + +1. bool (*isolate_page) (struct page *page, isolate_mode_t mode); + +What VM expects on isolate_page function of driver is to return *true* +if driver isolates page successfully. On returing true, VM marks the page +as PG_isolated so concurrent isolation in several CPUs skip the page +for isolation. If a driver cannot isolate the page, it should return *false*. + +Once page is successfully isolated, VM uses page.lru fields so driver +shouldn't expect to preserve values in that fields. + +2. int (*migratepage) (struct address_space *mapping, + struct page *newpage, struct page *oldpage, enum migrate_mode); + +After isolation, VM calls migratepage of driver with isolated page. +The function of migratepage is to move content of the old page to new page +and set up fields of struct page newpage. Keep in mind that you should +indicate to the VM the oldpage is no longer movable via __ClearPageMovable() +under page_lock if you migrated the oldpage successfully and returns 0. +If driver cannot migrate the page at the moment, driver can return -EAGAIN. +On -EAGAIN, VM will retry page migration in a short time because VM interprets +-EAGAIN as "temporal migration failure". On returning any error except -EAGAIN, +VM will give up the page migration without retrying in this time. + +Driver shouldn't touch page.lru field VM using in the functions. + +3. void (*putback_page)(struct page *); + +If migration fails on isolated page, VM should return the isolated page +to the driver so VM calls driver's putback_page with migration failed page. +In this function, driver should put the isolated page back to the own data +structure. +4. non-lru movable page flags + +There are two page flags for supporting non-lru movable page. + +* PG_movable + +Driver should use the below function to make page movable under page_lock. 
+ + void __SetPageMovable(struct page *page, struct address_space *mapping) + +It needs argument of address_space for registering migration family functions +which will be called by VM. Exactly speaking, PG_movable is not a real flag of +struct page. Rather than, VM reuses page->mapping's lower bits to represent it. + + #define PAGE_MAPPING_MOVABLE 0x2 + page->mapping = page->mapping | PAGE_MAPPING_MOVABLE; + +so driver shouldn't access page->mapping directly. Instead, driver should +use page_mapping which mask off the low two bits of page->mapping under +page lock so it can get right struct address_space. + +For testing of non-lru movable page, VM supports __PageMovable function. +However, it doesn't guarantee to identify non-lru movable page because +page->mapping field is unified with other variables in struct page. +As well, if driver releases the page after isolation by VM, page->mapping +doesn't have stable value although it has PAGE_MAPPING_MOVABLE +(Look at __ClearPageMovable). But __PageMovable is cheap to catch whether +page is LRU or non-lru movable once the page has been isolated. Because +LRU pages never can have PAGE_MAPPING_MOVABLE in page->mapping. It is also +good for just peeking to test non-lru movable pages before more expensive +checking with lock_page in pfn scanning to select victim. + +For guaranteeing non-lru movable page, VM provides PageMovable function. +Unlike __PageMovable, PageMovable functions validates page->mapping and +mapping->a_ops->isolate_page under lock_page. The lock_page prevents sudden +destroying of page->mapping. + +Driver using __SetPageMovable should clear the flag via __ClearMovablePage +under page_lock before the releasing the page. + +* PG_isolated + +To prevent concurrent isolation among several CPUs, VM marks isolated page +as PG_isolated under lock_page. So if a CPU encounters PG_isolated non-lru +movable page, it can skip it. Driver doesn't need to manipulate the flag +because VM will set/clear it automatically. Keep in mind that if driver +sees PG_isolated page, it means the page have been isolated by VM so it +shouldn't touch page.lru field. +PG_isolated is alias with PG_reclaim flag so driver shouldn't use the flag +for own purpose. + +Christoph Lameter, May 8, 2006. +Minchan Kim, Mar 28, 2016. 
diff --git a/include/linux/compaction.h b/include/linux/compaction.h index a58c852a268f..c6b47c861cea 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -54,6 +54,9 @@ enum compact_result { struct alloc_context; /* in mm/internal.h */ #ifdef CONFIG_COMPACTION +extern int PageMovable(struct page *page); +extern void __SetPageMovable(struct page *page, struct address_space *mapping); +extern void __ClearPageMovable(struct page *page); extern int sysctl_compact_memory; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); @@ -151,6 +154,19 @@ extern void kcompactd_stop(int nid); extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); #else +static inline int PageMovable(struct page *page) +{ + return 0; +} +static inline void __SetPageMovable(struct page *page, + struct address_space *mapping) +{ +} + +static inline void __ClearPageMovable(struct page *page) +{ +} + static inline enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, int alloc_flags, const struct alloc_context *ac, @@ -212,6 +228,7 @@ static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_i #endif /* CONFIG_COMPACTION */ #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) +struct node; extern int compaction_register_node(struct node *node); extern void compaction_unregister_node(struct node *node); diff --git a/include/linux/fs.h b/include/linux/fs.h index 0c9ebf530d9e..97fe08d17d89 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -402,6 +402,8 @@ struct address_space_operations { */ int (*migratepage) (struct address_space *, struct page *, struct page *, enum migrate_mode); + bool (*isolate_page)(struct page *, isolate_mode_t); + void (*putback_page)(struct page *); int (*launder_page) (struct page *); int (*is_partially_uptodate) (struct page *, unsigned long, unsigned long); diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 7ae216a39c9e..481c8c4627ca 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -43,8 +43,7 @@ static inline struct stable_node *page_stable_node(struct page *page) static inline void set_page_stable_node(struct page *page, struct stable_node *stable_node) { - page->mapping = (void *)stable_node + - (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); + page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); } /* diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 9b50325e4ddf..404fbfefeb33 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -37,6 +37,8 @@ extern int migrate_page(struct address_space *, struct page *, struct page *, enum migrate_mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason); +extern bool isolate_movable_page(struct page *page, isolate_mode_t mode); +extern void putback_movable_page(struct page *page); extern int migrate_prep(void); extern int migrate_prep_local(void); diff --git a/include/linux/mm.h b/include/linux/mm.h index ece042dfe23c..3e22335a435c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1035,6 +1035,7 @@ static inline pgoff_t page_file_index(struct page *page) } bool page_mapped(struct page *page); +struct address_space *page_mapping(struct page *page); /* * Return true only if the page has been allocated with diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 
e5a32445f930..f36dbb3a3060 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -129,6 +129,9 @@ enum pageflags { /* Compound pages. Stored in first tail page's flags */ PG_double_map = PG_private_2, + + /* non-lru isolated movable page */ + PG_isolated = PG_reclaim, }; #ifndef __GENERATING_BOUNDS_H @@ -357,29 +360,37 @@ PAGEFLAG(Idle, idle, PF_ANY) * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. * * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, - * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit; - * and then page->mapping points, not to an anon_vma, but to a private + * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON + * bit; and then page->mapping points, not to an anon_vma, but to a private * structure which KSM associates with that merged page. See ksm.h. * - * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used. + * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable + * page and then page->mapping points a struct address_space. * * Please note that, confusingly, "page_mapping" refers to the inode * address_space which maps the page from disk; whereas "page_mapped" * refers to user virtual address space into which the page is mapped. */ -#define PAGE_MAPPING_ANON 1 -#define PAGE_MAPPING_KSM 2 -#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) +#define PAGE_MAPPING_ANON 0x1 +#define PAGE_MAPPING_MOVABLE 0x2 +#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) +#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) -static __always_inline int PageAnonHead(struct page *page) +static __always_inline int PageMappingFlags(struct page *page) { - return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; + return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0; } static __always_inline int PageAnon(struct page *page) { page = compound_head(page); - return PageAnonHead(page); + return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; +} + +static __always_inline int __PageMovable(struct page *page) +{ + return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == + PAGE_MAPPING_MOVABLE; } #ifdef CONFIG_KSM @@ -393,7 +404,7 @@ static __always_inline int PageKsm(struct page *page) { page = compound_head(page); return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == - (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); + PAGE_MAPPING_KSM; } #else TESTPAGEFLAG_FALSE(Ksm) @@ -641,6 +652,8 @@ static inline void __ClearPageBalloon(struct page *page) atomic_set(&page->_mapcount, -1); } +__PAGEFLAG(Isolated, isolated, PF_ANY); + /* * If network-based swap is enabled, sl*b must keep track of whether pages * were allocated from pfmemalloc reserves. 
diff --git a/mm/compaction.c b/mm/compaction.c index 7bc04778f84d..fe95d8d021c3 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -81,6 +81,44 @@ static inline bool migrate_async_suitable(int migratetype) #ifdef CONFIG_COMPACTION +int PageMovable(struct page *page) +{ + struct address_space *mapping; + + VM_BUG_ON_PAGE(!PageLocked(page), page); + if (!__PageMovable(page)) + return 0; + + mapping = page_mapping(page); + if (mapping && mapping->a_ops && mapping->a_ops->isolate_page) + return 1; + + return 0; +} +EXPORT_SYMBOL(PageMovable); + +void __SetPageMovable(struct page *page, struct address_space *mapping) +{ + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page); + page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE); +} +EXPORT_SYMBOL(__SetPageMovable); + +void __ClearPageMovable(struct page *page) +{ + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(!PageMovable(page), page); + /* + * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE + * flag so that VM can catch up released page by driver after isolation. + * With it, VM migration doesn't try to put it back. + */ + page->mapping = (void *)((unsigned long)page->mapping & + PAGE_MAPPING_MOVABLE); +} +EXPORT_SYMBOL(__ClearPageMovable); + /* Do not skip compaction more than 64 times */ #define COMPACT_MAX_DEFER_SHIFT 6 @@ -670,7 +708,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, /* Time to isolate some pages for migration */ for (; low_pfn < end_pfn; low_pfn++) { - bool is_lru; if (skip_on_failure && low_pfn >= next_skip_pfn) { /* @@ -732,21 +769,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, continue; } - /* - * Check may be lockless but that's ok as we recheck later. - * It's possible to migrate LRU pages and balloon pages - * Skip any other type of page - */ - is_lru = PageLRU(page); - if (!is_lru) { - if (unlikely(balloon_page_movable(page))) { - if (balloon_page_isolate(page)) { - /* Successfully isolated */ - goto isolate_success; - } - } - } - /* * Regardless of being on LRU, compound pages such as THP and * hugetlbfs are not to be compacted. We can potentially save @@ -763,8 +785,37 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, goto isolate_fail; } - if (!is_lru) + /* + * Check may be lockless but that's ok as we recheck later. + * It's possible to migrate LRU and non-lru movable pages. + * Skip any other type of page + */ + if (!PageLRU(page)) { + if (unlikely(balloon_page_movable(page))) { + if (balloon_page_isolate(page)) { + /* Successfully isolated */ + goto isolate_success; + } + } + + /* + * __PageMovable can return false positive so we need + * to verify it under page_lock. 
+ */ + if (unlikely(__PageMovable(page)) && + !PageIsolated(page)) { + if (locked) { + spin_unlock_irqrestore(&zone->lru_lock, + flags); + locked = false; + } + + if (isolate_movable_page(page, isolate_mode)) + goto isolate_success; + } + goto isolate_fail; + } /* * Migration will fail if an anonymous page is pinned in memory, diff --git a/mm/ksm.c b/mm/ksm.c index 4786b4150f62..35b8aef867a9 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -532,8 +532,8 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it) void *expected_mapping; unsigned long kpfn; - expected_mapping = (void *)stable_node + - (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); + expected_mapping = (void *)((unsigned long)stable_node | + PAGE_MAPPING_KSM); again: kpfn = READ_ONCE(stable_node->kpfn); page = pfn_to_page(kpfn); diff --git a/mm/migrate.c b/mm/migrate.c index c74412b381ff..8119fdc563f8 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -73,6 +74,81 @@ int migrate_prep_local(void) return 0; } +bool isolate_movable_page(struct page *page, isolate_mode_t mode) +{ + struct address_space *mapping; + + /* + * Avoid burning cycles with pages that are yet under __free_pages(), + * or just got freed under us. + * + * In case we 'win' a race for a movable page being freed under us and + * raise its refcount preventing __free_pages() from doing its job + * the put_page() at the end of this block will take care of + * release this page, thus avoiding a nasty leakage. + */ + if (unlikely(!get_page_unless_zero(page))) + goto out; + + /* + * Check PageMovable before holding a PG_lock because page's owner + * assumes anybody doesn't touch PG_lock of newly allocated page + * so unconditionally grapping the lock ruins page's owner side. + */ + if (unlikely(!__PageMovable(page))) + goto out_putpage; + /* + * As movable pages are not isolated from LRU lists, concurrent + * compaction threads can race against page migration functions + * as well as race against the releasing a page. + * + * In order to avoid having an already isolated movable page + * being (wrongly) re-isolated while it is under migration, + * or to avoid attempting to isolate pages being released, + * lets be sure we have the page lock + * before proceeding with the movable page isolation steps. + */ + if (unlikely(!trylock_page(page))) + goto out_putpage; + + if (!PageMovable(page) || PageIsolated(page)) + goto out_no_isolated; + + mapping = page_mapping(page); + VM_BUG_ON_PAGE(!mapping, page); + + if (!mapping->a_ops->isolate_page(page, mode)) + goto out_no_isolated; + + /* Driver shouldn't use PG_isolated bit of page->flags */ + WARN_ON_ONCE(PageIsolated(page)); + __SetPageIsolated(page); + unlock_page(page); + + return true; + +out_no_isolated: + unlock_page(page); +out_putpage: + put_page(page); +out: + return false; +} + +/* It should be called on page which is PG_movable */ +void putback_movable_page(struct page *page) +{ + struct address_space *mapping; + + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(!PageMovable(page), page); + VM_BUG_ON_PAGE(!PageIsolated(page), page); + + mapping = page_mapping(page); + mapping->a_ops->putback_page(page); + __ClearPageIsolated(page); +} + /* * Put previously isolated pages back onto the appropriate lists * from where they were once taken off for compaction/migration. 
@@ -94,10 +170,25 @@ void putback_movable_pages(struct list_head *l) list_del(&page->lru); dec_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); - if (unlikely(isolated_balloon_page(page))) + if (unlikely(isolated_balloon_page(page))) { balloon_page_putback(page); - else + /* + * We isolated non-lru movable page so here we can use + * __PageMovable because LRU page's mapping cannot have + * PAGE_MAPPING_MOVABLE. + */ + } else if (unlikely(__PageMovable(page))) { + VM_BUG_ON_PAGE(!PageIsolated(page), page); + lock_page(page); + if (PageMovable(page)) + putback_movable_page(page); + else + __ClearPageIsolated(page); + unlock_page(page); + put_page(page); + } else { putback_lru_page(page); + } } } @@ -594,7 +685,7 @@ EXPORT_SYMBOL(migrate_page_copy); ***********************************************************/ /* - * Common logic to directly migrate a single page suitable for + * Common logic to directly migrate a single LRU page suitable for * pages that do not use PagePrivate/PagePrivate2. * * Pages are locked upon entry and exit. @@ -757,33 +848,72 @@ static int move_to_new_page(struct page *newpage, struct page *page, enum migrate_mode mode) { struct address_space *mapping; - int rc; + int rc = -EAGAIN; + bool is_lru = !__PageMovable(page); VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); mapping = page_mapping(page); - if (!mapping) - rc = migrate_page(mapping, newpage, page, mode); - else if (mapping->a_ops->migratepage) + + if (likely(is_lru)) { + if (!mapping) + rc = migrate_page(mapping, newpage, page, mode); + else if (mapping->a_ops->migratepage) + /* + * Most pages have a mapping and most filesystems + * provide a migratepage callback. Anonymous pages + * are part of swap space which also has its own + * migratepage callback. This is the most common path + * for page migration. + */ + rc = mapping->a_ops->migratepage(mapping, newpage, + page, mode); + else + rc = fallback_migrate_page(mapping, newpage, + page, mode); + } else { /* - * Most pages have a mapping and most filesystems provide a - * migratepage callback. Anonymous pages are part of swap - * space which also has its own migratepage callback. This - * is the most common path for page migration. + * In case of non-lru page, it could be released after + * isolation step. In that case, we shouldn't try migration. */ - rc = mapping->a_ops->migratepage(mapping, newpage, page, mode); - else - rc = fallback_migrate_page(mapping, newpage, page, mode); + VM_BUG_ON_PAGE(!PageIsolated(page), page); + if (!PageMovable(page)) { + rc = MIGRATEPAGE_SUCCESS; + __ClearPageIsolated(page); + goto out; + } + + rc = mapping->a_ops->migratepage(mapping, newpage, + page, mode); + WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS && + !PageIsolated(page)); + } /* * When successful, old pagecache page->mapping must be cleared before * page is freed; but stats require that PageAnon be left as PageAnon. */ if (rc == MIGRATEPAGE_SUCCESS) { - if (!PageAnon(page)) + if (__PageMovable(page)) { + VM_BUG_ON_PAGE(!PageIsolated(page), page); + + /* + * We clear PG_movable under page_lock so any compactor + * cannot try to migrate this page. + */ + __ClearPageIsolated(page); + } + + /* + * Anonymous and movable page->mapping will be cleard by + * free_pages_prepare so don't reset it here for keeping + * the type to work PageAnon, for example. 
+ */ + if (!PageMappingFlags(page)) page->mapping = NULL; } +out: return rc; } @@ -793,6 +923,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, int rc = -EAGAIN; int page_was_mapped = 0; struct anon_vma *anon_vma = NULL; + bool is_lru = !__PageMovable(page); if (!trylock_page(page)) { if (!force || mode == MIGRATE_ASYNC) @@ -873,6 +1004,11 @@ static int __unmap_and_move(struct page *page, struct page *newpage, goto out_unlock_both; } + if (unlikely(!is_lru)) { + rc = move_to_new_page(newpage, page, mode); + goto out_unlock_both; + } + /* * Corner case handling: * 1. When a new swap-cache page is read into, it is added to the LRU @@ -922,7 +1058,8 @@ out: * list in here. */ if (rc == MIGRATEPAGE_SUCCESS) { - if (unlikely(__is_movable_balloon_page(newpage))) + if (unlikely(__is_movable_balloon_page(newpage) || + __PageMovable(newpage))) put_page(newpage); else putback_lru_page(newpage); @@ -963,6 +1100,12 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page, /* page was freed from under us. So we are done. */ ClearPageActive(page); ClearPageUnevictable(page); + if (unlikely(__PageMovable(page))) { + lock_page(page); + if (!PageMovable(page)) + __ClearPageIsolated(page); + unlock_page(page); + } if (put_new_page) put_new_page(newpage, private); else @@ -1012,8 +1155,21 @@ out: num_poisoned_pages_inc(); } } else { - if (rc != -EAGAIN) - putback_lru_page(page); + if (rc != -EAGAIN) { + if (likely(!__PageMovable(page))) { + putback_lru_page(page); + goto put_new; + } + + lock_page(page); + if (PageMovable(page)) + putback_movable_page(page); + else + __ClearPageIsolated(page); + unlock_page(page); + put_page(page); + } +put_new: if (put_new_page) put_new_page(newpage, private); else diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f7bb1aef54f2..8b2623683431 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1016,7 +1016,7 @@ static __always_inline bool free_pages_prepare(struct page *page, (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; } } - if (PageAnonHead(page)) + if (PageMappingFlags(page)) page->mapping = NULL; if (check_free) bad += free_pages_check(page); diff --git a/mm/util.c b/mm/util.c index 917e0e3d0f8e..b756ee36f7f0 100644 --- a/mm/util.c +++ b/mm/util.c @@ -399,10 +399,12 @@ struct address_space *page_mapping(struct page *page) } mapping = page->mapping; - if ((unsigned long)mapping & PAGE_MAPPING_FLAGS) + if ((unsigned long)mapping & PAGE_MAPPING_ANON) return NULL; - return mapping; + + return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS); } +EXPORT_SYMBOL(page_mapping); /* Slow path of page_mapcount() for compound pages */ int __page_mapcount(struct page *page) -- cgit v1.2.3-70-g09d2 From b1123ea6d3b3da25af5c8a9d843bd07ab63213f4 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 26 Jul 2016 15:23:09 -0700 Subject: mm: balloon: use general non-lru movable page feature Now, VM has a feature to migrate non-lru movable pages so balloon doesn't need custom migration hooks in migrate.c and compaction.c. Instead, this patch implements the page->mapping->a_ops-> {isolate|migrate|putback} functions. With that, we could remove hooks for ballooning in general migration functions and make balloon compaction simple. 
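In outline, and with error handling omitted, the wiring this patch adds
looks like the sketch below (all names appear in the diff that follows;
this is only a condensed summary, not additional code):

	static const struct address_space_operations balloon_aops = {
		.migratepage	= balloon_page_migrate,
		.isolate_page	= balloon_page_isolate,
		.putback_page	= balloon_page_putback,
	};

	/* at probe time: create an anonymous inode to carry the ops */
	balloon_mnt = kern_mount(&balloon_fs);
	vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
	vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;

	/* balloon_page_insert() then marks each enqueued page movable,
	 * under the page lock: */
	__SetPageBalloon(page);
	__SetPageMovable(page, balloon->inode->i_mapping);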
[akpm@linux-foundation.org: compaction.h requires that the includer first include node.h] Link: http://lkml.kernel.org/r/1464736881-24886-4-git-send-email-minchan@kernel.org Signed-off-by: Gioh Kim Signed-off-by: Minchan Kim Acked-by: Vlastimil Babka Cc: Rafael Aquini Cc: Konstantin Khlebnikov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/virtio/virtio_balloon.c | 54 +++++++++++++++++++--- include/linux/balloon_compaction.h | 54 +++++++--------------- include/uapi/linux/magic.h | 1 + mm/balloon_compaction.c | 94 +++++++------------------------------- mm/compaction.c | 7 --- mm/migrate.c | 19 +------- mm/vmscan.c | 2 +- 7 files changed, 86 insertions(+), 145 deletions(-) diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 476c0e3a7150..88d5609375de 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -30,6 +30,7 @@ #include #include #include +#include /* * Balloon device works in 4K page units. So each page is pointed to by @@ -45,6 +46,10 @@ static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES; module_param(oom_pages, int, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(oom_pages, "pages to free on OOM"); +#ifdef CONFIG_BALLOON_COMPACTION +static struct vfsmount *balloon_mnt; +#endif + struct virtio_balloon { struct virtio_device *vdev; struct virtqueue *inflate_vq, *deflate_vq, *stats_vq; @@ -488,8 +493,26 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, put_page(page); /* balloon reference */ - return MIGRATEPAGE_SUCCESS; + return 0; } + +static struct dentry *balloon_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data) +{ + static const struct dentry_operations ops = { + .d_dname = simple_dname, + }; + + return mount_pseudo(fs_type, "balloon-kvm:", NULL, &ops, + BALLOON_KVM_MAGIC); +} + +static struct file_system_type balloon_fs = { + .name = "balloon-kvm", + .mount = balloon_mount, + .kill_sb = kill_anon_super, +}; + #endif /* CONFIG_BALLOON_COMPACTION */ static int virtballoon_probe(struct virtio_device *vdev) @@ -519,9 +542,6 @@ static int virtballoon_probe(struct virtio_device *vdev) vb->vdev = vdev; balloon_devinfo_init(&vb->vb_dev_info); -#ifdef CONFIG_BALLOON_COMPACTION - vb->vb_dev_info.migratepage = virtballoon_migratepage; -#endif err = init_vqs(vb); if (err) @@ -531,13 +551,33 @@ static int virtballoon_probe(struct virtio_device *vdev) vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY; err = register_oom_notifier(&vb->nb); if (err < 0) - goto out_oom_notify; + goto out_del_vqs; + +#ifdef CONFIG_BALLOON_COMPACTION + balloon_mnt = kern_mount(&balloon_fs); + if (IS_ERR(balloon_mnt)) { + err = PTR_ERR(balloon_mnt); + unregister_oom_notifier(&vb->nb); + goto out_del_vqs; + } + + vb->vb_dev_info.migratepage = virtballoon_migratepage; + vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb); + if (IS_ERR(vb->vb_dev_info.inode)) { + err = PTR_ERR(vb->vb_dev_info.inode); + kern_unmount(balloon_mnt); + unregister_oom_notifier(&vb->nb); + vb->vb_dev_info.inode = NULL; + goto out_del_vqs; + } + vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops; +#endif virtio_device_ready(vdev); return 0; -out_oom_notify: +out_del_vqs: vdev->config->del_vqs(vdev); out_free_vb: kfree(vb); @@ -571,6 +611,8 @@ static void virtballoon_remove(struct virtio_device *vdev) cancel_work_sync(&vb->update_balloon_stats_work); remove_common(vb); + if (vb->vb_dev_info.inode) + iput(vb->vb_dev_info.inode); kfree(vb); } diff --git a/include/linux/balloon_compaction.h 
b/include/linux/balloon_compaction.h index 9b0a15d06a4f..504bd724e6ab 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -45,9 +45,11 @@ #define _LINUX_BALLOON_COMPACTION_H #include #include -#include +#include +#include #include #include +#include /* * Balloon device information descriptor. @@ -62,6 +64,7 @@ struct balloon_dev_info { struct list_head pages; /* Pages enqueued & handled to Host */ int (*migratepage)(struct balloon_dev_info *, struct page *newpage, struct page *page, enum migrate_mode mode); + struct inode *inode; }; extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info); @@ -73,44 +76,18 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) spin_lock_init(&balloon->pages_lock); INIT_LIST_HEAD(&balloon->pages); balloon->migratepage = NULL; + balloon->inode = NULL; } #ifdef CONFIG_BALLOON_COMPACTION -extern bool balloon_page_isolate(struct page *page); +extern const struct address_space_operations balloon_aops; +extern bool balloon_page_isolate(struct page *page, + isolate_mode_t mode); extern void balloon_page_putback(struct page *page); -extern int balloon_page_migrate(struct page *newpage, +extern int balloon_page_migrate(struct address_space *mapping, + struct page *newpage, struct page *page, enum migrate_mode mode); -/* - * __is_movable_balloon_page - helper to perform @page PageBalloon tests - */ -static inline bool __is_movable_balloon_page(struct page *page) -{ - return PageBalloon(page); -} - -/* - * balloon_page_movable - test PageBalloon to identify balloon pages - * and PagePrivate to check that the page is not - * isolated and can be moved by compaction/migration. - * - * As we might return false positives in the case of a balloon page being just - * released under us, this need to be re-tested later, under the page lock. - */ -static inline bool balloon_page_movable(struct page *page) -{ - return PageBalloon(page) && PagePrivate(page); -} - -/* - * isolated_balloon_page - identify an isolated balloon page on private - * compaction/migration page lists. - */ -static inline bool isolated_balloon_page(struct page *page) -{ - return PageBalloon(page); -} - /* * balloon_page_insert - insert a page into the balloon's page list and make * the page->private assignment accordingly. @@ -124,7 +101,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon, struct page *page) { __SetPageBalloon(page); - SetPagePrivate(page); + __SetPageMovable(page, balloon->inode->i_mapping); set_page_private(page, (unsigned long)balloon); list_add(&page->lru, &balloon->pages); } @@ -140,11 +117,14 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon, static inline void balloon_page_delete(struct page *page) { __ClearPageBalloon(page); + __ClearPageMovable(page); set_page_private(page, 0); - if (PagePrivate(page)) { - ClearPagePrivate(page); + /* + * No touch page.lru field once @page has been isolated + * because VM is using the field. + */ + if (!PageIsolated(page)) list_del(&page->lru); - } } /* diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 546b38886e11..d829ce63529d 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -80,5 +80,6 @@ #define BPF_FS_MAGIC 0xcafe4a11 /* Since UDF 2.01 is ISO 13346 based... 
*/ #define UDF_SUPER_MAGIC 0x15013346 +#define BALLOON_KVM_MAGIC 0x13661366 #endif /* __LINUX_MAGIC_H__ */ diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 57b3e9bd6bc5..da91df50ba31 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -70,7 +70,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) */ if (trylock_page(page)) { #ifdef CONFIG_BALLOON_COMPACTION - if (!PagePrivate(page)) { + if (PageIsolated(page)) { /* raced with isolation */ unlock_page(page); continue; @@ -106,110 +106,50 @@ EXPORT_SYMBOL_GPL(balloon_page_dequeue); #ifdef CONFIG_BALLOON_COMPACTION -static inline void __isolate_balloon_page(struct page *page) +bool balloon_page_isolate(struct page *page, isolate_mode_t mode) + { struct balloon_dev_info *b_dev_info = balloon_page_device(page); unsigned long flags; spin_lock_irqsave(&b_dev_info->pages_lock, flags); - ClearPagePrivate(page); list_del(&page->lru); b_dev_info->isolated_pages++; spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); + + return true; } -static inline void __putback_balloon_page(struct page *page) +void balloon_page_putback(struct page *page) { struct balloon_dev_info *b_dev_info = balloon_page_device(page); unsigned long flags; spin_lock_irqsave(&b_dev_info->pages_lock, flags); - SetPagePrivate(page); list_add(&page->lru, &b_dev_info->pages); b_dev_info->isolated_pages--; spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); } -/* __isolate_lru_page() counterpart for a ballooned page */ -bool balloon_page_isolate(struct page *page) -{ - /* - * Avoid burning cycles with pages that are yet under __free_pages(), - * or just got freed under us. - * - * In case we 'win' a race for a balloon page being freed under us and - * raise its refcount preventing __free_pages() from doing its job - * the put_page() at the end of this block will take care of - * release this page, thus avoiding a nasty leakage. - */ - if (likely(get_page_unless_zero(page))) { - /* - * As balloon pages are not isolated from LRU lists, concurrent - * compaction threads can race against page migration functions - * as well as race against the balloon driver releasing a page. - * - * In order to avoid having an already isolated balloon page - * being (wrongly) re-isolated while it is under migration, - * or to avoid attempting to isolate pages being released by - * the balloon driver, lets be sure we have the page lock - * before proceeding with the balloon page isolation steps. - */ - if (likely(trylock_page(page))) { - /* - * A ballooned page, by default, has PagePrivate set. - * Prevent concurrent compaction threads from isolating - * an already isolated balloon page by clearing it. - */ - if (balloon_page_movable(page)) { - __isolate_balloon_page(page); - unlock_page(page); - return true; - } - unlock_page(page); - } - put_page(page); - } - return false; -} - -/* putback_lru_page() counterpart for a ballooned page */ -void balloon_page_putback(struct page *page) -{ - /* - * 'lock_page()' stabilizes the page and prevents races against - * concurrent isolation threads attempting to re-isolate it. 
- */ - lock_page(page); - - if (__is_movable_balloon_page(page)) { - __putback_balloon_page(page); - /* drop the extra ref count taken for page isolation */ - put_page(page); - } else { - WARN_ON(1); - dump_page(page, "not movable balloon page"); - } - unlock_page(page); -} /* move_to_new_page() counterpart for a ballooned page */ -int balloon_page_migrate(struct page *newpage, - struct page *page, enum migrate_mode mode) +int balloon_page_migrate(struct address_space *mapping, + struct page *newpage, struct page *page, + enum migrate_mode mode) { struct balloon_dev_info *balloon = balloon_page_device(page); - int rc = -EAGAIN; VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); - if (WARN_ON(!__is_movable_balloon_page(page))) { - dump_page(page, "not movable balloon page"); - return rc; - } + return balloon->migratepage(balloon, newpage, page, mode); +} - if (balloon && balloon->migratepage) - rc = balloon->migratepage(balloon, newpage, page, mode); +const struct address_space_operations balloon_aops = { + .migratepage = balloon_page_migrate, + .isolate_page = balloon_page_isolate, + .putback_page = balloon_page_putback, +}; +EXPORT_SYMBOL_GPL(balloon_aops); - return rc; -} #endif /* CONFIG_BALLOON_COMPACTION */ diff --git a/mm/compaction.c b/mm/compaction.c index fe95d8d021c3..d85520647d1d 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -791,13 +791,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * Skip any other type of page */ if (!PageLRU(page)) { - if (unlikely(balloon_page_movable(page))) { - if (balloon_page_isolate(page)) { - /* Successfully isolated */ - goto isolate_success; - } - } - /* * __PageMovable can return false positive so we need * to verify it under page_lock. diff --git a/mm/migrate.c b/mm/migrate.c index 8119fdc563f8..f278005f609c 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -170,14 +170,12 @@ void putback_movable_pages(struct list_head *l) list_del(&page->lru); dec_zone_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page)); - if (unlikely(isolated_balloon_page(page))) { - balloon_page_putback(page); /* * We isolated non-lru movable page so here we can use * __PageMovable because LRU page's mapping cannot have * PAGE_MAPPING_MOVABLE. */ - } else if (unlikely(__PageMovable(page))) { + if (unlikely(__PageMovable(page))) { VM_BUG_ON_PAGE(!PageIsolated(page), page); lock_page(page); if (PageMovable(page)) @@ -992,18 +990,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage, if (unlikely(!trylock_page(newpage))) goto out_unlock; - if (unlikely(isolated_balloon_page(page))) { - /* - * A ballooned page does not need any special attention from - * physical to virtual reverse mapping procedures. - * Skip any attempt to unmap PTEs or to remap swap cache, - * in order to avoid burning cycles at rmap level, and perform - * the page migration right away (proteced by page lock). - */ - rc = balloon_page_migrate(newpage, page, mode); - goto out_unlock_both; - } - if (unlikely(!is_lru)) { rc = move_to_new_page(newpage, page, mode); goto out_unlock_both; @@ -1058,8 +1044,7 @@ out: * list in here. 
*/ if (rc == MIGRATEPAGE_SUCCESS) { - if (unlikely(__is_movable_balloon_page(newpage) || - __PageMovable(newpage))) + if (unlikely(__PageMovable(newpage))) put_page(newpage); else putback_lru_page(newpage); diff --git a/mm/vmscan.c b/mm/vmscan.c index c4a2f4512fca..93ba33789ac6 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1254,7 +1254,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone, list_for_each_entry_safe(page, next, page_list, lru) { if (page_is_file_cache(page) && !PageDirty(page) && - !isolated_balloon_page(page)) { + !__PageMovable(page)) { ClearPageActive(page); list_move(&page->lru, &clean_pages); } -- cgit v1.2.3-70-g09d2 From 1fc6e27d7b8613afe6e5c1b8cdf94339a1bce640 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 26 Jul 2016 15:23:11 -0700 Subject: zsmalloc: keep max_object in size_class Every zspage in a size_class has same number of max objects so we could move it to a size_class. Link: http://lkml.kernel.org/r/1464736881-24886-5-git-send-email-minchan@kernel.org Signed-off-by: Minchan Kim Reviewed-by: Sergey Senozhatsky Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index b6d4f258cb53..79295c73dc9f 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -32,8 +32,6 @@ * page->freelist: points to the first free object in zspage. * Free objects are linked together using in-place * metadata. - * page->objects: maximum number of objects we can store in this - * zspage (class->zspage_order * PAGE_SIZE / class->size) * page->lru: links together first pages of various zspages. * Basically forming list of zspages in a fullness group. * page->mapping: class index and fullness group of the zspage @@ -213,6 +211,7 @@ struct size_class { * of ZS_ALIGN. */ int size; + int objs_per_zspage; unsigned int index; struct zs_size_stat stats; @@ -631,21 +630,22 @@ static inline void zs_pool_stat_destroy(struct zs_pool *pool) * the pool (not yet implemented). This function returns fullness * status of the given page. 
*/ -static enum fullness_group get_fullness_group(struct page *first_page) +static enum fullness_group get_fullness_group(struct size_class *class, + struct page *first_page) { - int inuse, max_objects; + int inuse, objs_per_zspage; enum fullness_group fg; VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); inuse = first_page->inuse; - max_objects = first_page->objects; + objs_per_zspage = class->objs_per_zspage; if (inuse == 0) fg = ZS_EMPTY; - else if (inuse == max_objects) + else if (inuse == objs_per_zspage) fg = ZS_FULL; - else if (inuse <= 3 * max_objects / fullness_threshold_frac) + else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac) fg = ZS_ALMOST_EMPTY; else fg = ZS_ALMOST_FULL; @@ -732,7 +732,7 @@ static enum fullness_group fix_fullness_group(struct size_class *class, enum fullness_group currfg, newfg; get_zspage_mapping(first_page, &class_idx, &currfg); - newfg = get_fullness_group(first_page); + newfg = get_fullness_group(class, first_page); if (newfg == currfg) goto out; @@ -1012,9 +1012,6 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags) init_zspage(class, first_page); first_page->freelist = location_to_obj(first_page, 0); - /* Maximum number of objects we can store in this zspage */ - first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size; - error = 0; /* Success */ cleanup: @@ -1242,11 +1239,11 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage) return true; } -static bool zspage_full(struct page *first_page) +static bool zspage_full(struct size_class *class, struct page *first_page) { VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); - return first_page->inuse == first_page->objects; + return first_page->inuse == class->objs_per_zspage; } unsigned long zs_get_total_pages(struct zs_pool *pool) @@ -1632,7 +1629,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class, } /* Stop if there is no more space */ - if (zspage_full(d_page)) { + if (zspage_full(class, d_page)) { unpin_tag(handle); ret = -ENOMEM; break; @@ -1691,7 +1688,7 @@ static enum fullness_group putback_zspage(struct zs_pool *pool, { enum fullness_group fullness; - fullness = get_fullness_group(first_page); + fullness = get_fullness_group(class, first_page); insert_zspage(class, fullness, first_page); set_zspage_mapping(first_page, class->index, fullness); @@ -1943,8 +1940,9 @@ struct zs_pool *zs_create_pool(const char *name) class->size = size; class->index = i; class->pages_per_zspage = pages_per_zspage; - if (pages_per_zspage == 1 && - get_maxobj_per_zspage(size, pages_per_zspage) == 1) + class->objs_per_zspage = class->pages_per_zspage * + PAGE_SIZE / class->size; + if (pages_per_zspage == 1 && class->objs_per_zspage == 1) class->huge = true; spin_lock_init(&class->lock); pool->size_class[i] = class; -- cgit v1.2.3-70-g09d2 From 1b8320b620d6caa5879380f83f3884908ceedd4a Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 26 Jul 2016 15:23:14 -0700 Subject: zsmalloc: use bit_spin_lock Use kernel standard bit spin-lock instead of custom mess. Even, it has a bug which doesn't disable preemption. The reason we don't have any problem is that we have used it during preemption disable section by class->lock spinlock. So no need to go to stable. 
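For reference, a minimal sketch of the pattern being adopted (the obj_*
names are hypothetical; the bit_spin_* helpers are the standard API from
<linux/bit_spinlock.h>): one bit of a word the object already owns is
used as its lock, and preemption is disabled while the lock is held,
which the open-coded test_and_set_bit_lock() loop did not do.

	#include <linux/bit_spinlock.h>

	#define OBJ_LOCK_BIT	0

	static void obj_lock(unsigned long *word)
	{
		/* spins until the bit is clear; disables preemption while held */
		bit_spin_lock(OBJ_LOCK_BIT, word);
	}

	static int obj_trylock(unsigned long *word)
	{
		/* one attempt; nonzero on success */
		return bit_spin_trylock(OBJ_LOCK_BIT, word);
	}

	static void obj_unlock(unsigned long *word)
	{
		/* clears the bit and re-enables preemption */
		bit_spin_unlock(OBJ_LOCK_BIT, word);
	}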
Link: http://lkml.kernel.org/r/1464736881-24886-6-git-send-email-minchan@kernel.org Signed-off-by: Minchan Kim Reviewed-by: Sergey Senozhatsky Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 79295c73dc9f..39f29aedd5d6 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -868,21 +868,17 @@ static unsigned long obj_idx_to_offset(struct page *page, static inline int trypin_tag(unsigned long handle) { - unsigned long *ptr = (unsigned long *)handle; - - return !test_and_set_bit_lock(HANDLE_PIN_BIT, ptr); + return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle); } static void pin_tag(unsigned long handle) { - while (!trypin_tag(handle)); + bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle); } static void unpin_tag(unsigned long handle) { - unsigned long *ptr = (unsigned long *)handle; - - clear_bit_unlock(HANDLE_PIN_BIT, ptr); + bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle); } static void reset_page(struct page *page) -- cgit v1.2.3-70-g09d2 From 4f42047bbde059823fe70381387257a9e3bd229c Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 26 Jul 2016 15:23:17 -0700 Subject: zsmalloc: use accessor Upcoming patch will change how to encode zspage meta so for easy review, this patch wraps code to access metadata as accessor. Link: http://lkml.kernel.org/r/1464736881-24886-7-git-send-email-minchan@kernel.org Signed-off-by: Minchan Kim Reviewed-by: Sergey Senozhatsky Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 82 +++++++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 60 insertions(+), 22 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 39f29aedd5d6..5da80961ff3e 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -268,10 +268,14 @@ struct zs_pool { * A zspage's class index and fullness group * are encoded in its (first)page->mapping */ -#define CLASS_IDX_BITS 28 #define FULLNESS_BITS 4 -#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1) -#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1) +#define CLASS_BITS 28 + +#define FULLNESS_SHIFT 0 +#define CLASS_SHIFT (FULLNESS_SHIFT + FULLNESS_BITS) + +#define FULLNESS_MASK ((1UL << FULLNESS_BITS) - 1) +#define CLASS_MASK ((1UL << CLASS_BITS) - 1) struct mapping_area { #ifdef CONFIG_PGTABLE_MAPPING @@ -418,6 +422,41 @@ static int is_last_page(struct page *page) return PagePrivate2(page); } +static inline int get_zspage_inuse(struct page *first_page) +{ + return first_page->inuse; +} + +static inline void set_zspage_inuse(struct page *first_page, int val) +{ + first_page->inuse = val; +} + +static inline void mod_zspage_inuse(struct page *first_page, int val) +{ + first_page->inuse += val; +} + +static inline int get_first_obj_offset(struct page *page) +{ + return page->index; +} + +static inline void set_first_obj_offset(struct page *page, int offset) +{ + page->index = offset; +} + +static inline unsigned long get_freeobj(struct page *first_page) +{ + return (unsigned long)first_page->freelist; +} + +static inline void set_freeobj(struct page *first_page, unsigned long obj) +{ + first_page->freelist = (void *)obj; +} + static void get_zspage_mapping(struct page *first_page, unsigned int *class_idx, enum fullness_group *fullness) @@ -426,8 +465,8 @@ static void get_zspage_mapping(struct page *first_page, VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); m = (unsigned 
long)first_page->mapping; - *fullness = m & FULLNESS_MASK; - *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK; + *fullness = (m >> FULLNESS_SHIFT) & FULLNESS_MASK; + *class_idx = (m >> CLASS_SHIFT) & CLASS_MASK; } static void set_zspage_mapping(struct page *first_page, @@ -437,8 +476,7 @@ static void set_zspage_mapping(struct page *first_page, unsigned long m; VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); - m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) | - (fullness & FULLNESS_MASK); + m = (class_idx << CLASS_SHIFT) | (fullness << FULLNESS_SHIFT); first_page->mapping = (struct address_space *)m; } @@ -638,7 +676,7 @@ static enum fullness_group get_fullness_group(struct size_class *class, VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); - inuse = first_page->inuse; + inuse = get_zspage_inuse(first_page); objs_per_zspage = class->objs_per_zspage; if (inuse == 0) @@ -684,7 +722,7 @@ static void insert_zspage(struct size_class *class, * empty/full. Put pages with higher ->inuse first. */ list_add_tail(&first_page->lru, &(*head)->lru); - if (first_page->inuse >= (*head)->inuse) + if (get_zspage_inuse(first_page) >= get_zspage_inuse(*head)) *head = first_page; } @@ -861,7 +899,7 @@ static unsigned long obj_idx_to_offset(struct page *page, unsigned long off = 0; if (!is_first_page(page)) - off = page->index; + off = get_first_obj_offset(page); return off + obj_idx * class_size; } @@ -896,7 +934,7 @@ static void free_zspage(struct page *first_page) struct page *nextp, *tmp, *head_extra; VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); - VM_BUG_ON_PAGE(first_page->inuse, first_page); + VM_BUG_ON_PAGE(get_zspage_inuse(first_page), first_page); head_extra = (struct page *)page_private(first_page); @@ -937,7 +975,7 @@ static void init_zspage(struct size_class *class, struct page *first_page) * head of corresponding zspage's freelist. 
*/ if (page != first_page) - page->index = off; + set_first_obj_offset(page, off); vaddr = kmap_atomic(page); link = (struct link_free *)vaddr + off / sizeof(*link); @@ -992,7 +1030,7 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags) SetPagePrivate(page); set_page_private(page, 0); first_page = page; - first_page->inuse = 0; + set_zspage_inuse(first_page, 0); } if (i == 1) set_page_private(first_page, (unsigned long)page); @@ -1007,7 +1045,7 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags) init_zspage(class, first_page); - first_page->freelist = location_to_obj(first_page, 0); + set_freeobj(first_page, (unsigned long)location_to_obj(first_page, 0)); error = 0; /* Success */ cleanup: @@ -1239,7 +1277,7 @@ static bool zspage_full(struct size_class *class, struct page *first_page) { VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); - return first_page->inuse == class->objs_per_zspage; + return get_zspage_inuse(first_page) == class->objs_per_zspage; } unsigned long zs_get_total_pages(struct zs_pool *pool) @@ -1358,13 +1396,13 @@ static unsigned long obj_malloc(struct size_class *class, void *vaddr; handle |= OBJ_ALLOCATED_TAG; - obj = (unsigned long)first_page->freelist; + obj = get_freeobj(first_page); obj_to_location(obj, &m_page, &m_objidx); m_offset = obj_idx_to_offset(m_page, m_objidx, class->size); vaddr = kmap_atomic(m_page); link = (struct link_free *)vaddr + m_offset / sizeof(*link); - first_page->freelist = link->next; + set_freeobj(first_page, (unsigned long)link->next); if (!class->huge) /* record handle in the header of allocated chunk */ link->handle = handle; @@ -1372,7 +1410,7 @@ static unsigned long obj_malloc(struct size_class *class, /* record handle in first_page->private */ set_page_private(first_page, handle); kunmap_atomic(vaddr); - first_page->inuse++; + mod_zspage_inuse(first_page, 1); zs_stat_inc(class, OBJ_USED, 1); return obj; @@ -1452,12 +1490,12 @@ static void obj_free(struct size_class *class, unsigned long obj) /* Insert this object in containing zspage's freelist */ link = (struct link_free *)(vaddr + f_offset); - link->next = first_page->freelist; + link->next = (void *)get_freeobj(first_page); if (class->huge) set_page_private(first_page, 0); kunmap_atomic(vaddr); - first_page->freelist = (void *)obj; - first_page->inuse--; + set_freeobj(first_page, obj); + mod_zspage_inuse(first_page, -1); zs_stat_dec(class, OBJ_USED, 1); } @@ -1573,7 +1611,7 @@ static unsigned long find_alloced_obj(struct size_class *class, void *addr = kmap_atomic(page); if (!is_first_page(page)) - offset = page->index; + offset = get_first_obj_offset(page); offset += class->size * index; while (offset < PAGE_SIZE) { -- cgit v1.2.3-70-g09d2 From bdb0af7ca8f0e9f4c03a9169a744b22890641b64 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 26 Jul 2016 15:23:20 -0700 Subject: zsmalloc: factor page chain functionality out For page migration, we need to create page chain of zspage dynamically so this patch factors it out from alloc_zspage. 
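For instance (an illustrative sketch only, not code from this series), a
later migration path could rebuild a zspage around a replacement page by
reusing the same helper; replace_sub_page() here is hypothetical, while
create_page_chain() and class->pages_per_zspage come from this series:

	/* swap @oldpage for @newpage in an already-collected array of the
	 * zspage's component pages, then relink the chain */
	static void replace_sub_page(struct size_class *class, struct page *pages[],
				     struct page *newpage, struct page *oldpage)
	{
		int i;

		for (i = 0; i < class->pages_per_zspage; i++)
			if (pages[i] == oldpage)
				pages[i] = newpage;

		create_page_chain(pages, class->pages_per_zspage);
	}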
Link: http://lkml.kernel.org/r/1464736881-24886-8-git-send-email-minchan@kernel.org Signed-off-by: Minchan Kim Reviewed-by: Sergey Senozhatsky Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 59 +++++++++++++++++++++++++++++++++++------------------------ 1 file changed, 35 insertions(+), 24 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 5da80961ff3e..07485a2e5b96 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -960,7 +960,8 @@ static void init_zspage(struct size_class *class, struct page *first_page) unsigned long off = 0; struct page *page = first_page; - VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); + first_page->freelist = NULL; + set_zspage_inuse(first_page, 0); while (page) { struct page *next_page; @@ -996,15 +997,16 @@ static void init_zspage(struct size_class *class, struct page *first_page) page = next_page; off %= PAGE_SIZE; } + + set_freeobj(first_page, (unsigned long)location_to_obj(first_page, 0)); } -/* - * Allocate a zspage for the given size class - */ -static struct page *alloc_zspage(struct size_class *class, gfp_t flags) +static void create_page_chain(struct page *pages[], int nr_pages) { - int i, error; - struct page *first_page = NULL, *uninitialized_var(prev_page); + int i; + struct page *page; + struct page *prev_page = NULL; + struct page *first_page = NULL; /* * Allocate individual pages and link them together as: @@ -1017,20 +1019,14 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags) * (i.e. no other sub-page has this flag set) and PG_private_2 to * identify the last page. */ - error = -ENOMEM; - for (i = 0; i < class->pages_per_zspage; i++) { - struct page *page; - - page = alloc_page(flags); - if (!page) - goto cleanup; + for (i = 0; i < nr_pages; i++) { + page = pages[i]; INIT_LIST_HEAD(&page->lru); - if (i == 0) { /* first page */ + if (i == 0) { SetPagePrivate(page); set_page_private(page, 0); first_page = page; - set_zspage_inuse(first_page, 0); } if (i == 1) set_page_private(first_page, (unsigned long)page); @@ -1038,22 +1034,37 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags) set_page_private(page, (unsigned long)first_page); if (i >= 2) list_add(&page->lru, &prev_page->lru); - if (i == class->pages_per_zspage - 1) /* last page */ + if (i == nr_pages - 1) SetPagePrivate2(page); prev_page = page; } +} - init_zspage(class, first_page); +/* + * Allocate a zspage for the given size class + */ +static struct page *alloc_zspage(struct size_class *class, gfp_t flags) +{ + int i; + struct page *first_page = NULL; + struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE]; - set_freeobj(first_page, (unsigned long)location_to_obj(first_page, 0)); - error = 0; /* Success */ + for (i = 0; i < class->pages_per_zspage; i++) { + struct page *page; -cleanup: - if (unlikely(error) && first_page) { - free_zspage(first_page); - first_page = NULL; + page = alloc_page(flags); + if (!page) { + while (--i >= 0) + __free_page(pages[i]); + return NULL; + } + pages[i] = page; } + create_page_chain(pages, class->pages_per_zspage); + first_page = pages[0]; + init_zspage(class, first_page); + return first_page; } -- cgit v1.2.3-70-g09d2 From 3783689a1aa82ef27a6418b043dd7a077b8330c5 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 26 Jul 2016 15:23:23 -0700 Subject: zsmalloc: introduce zspage structure We have squeezed meta data of zspage into first page's descriptor. So, to get meta data from subpage, we should get first page first of all. 
But it makes trouble to implment page migration feature of zsmalloc because any place where to get first page from subpage can be raced with first page migration. IOW, first page it got could be stale. For preventing it, I have tried several approahces but it made code complicated so finally, I concluded to separate metadata from first page. Of course, it consumes more memory. IOW, 16bytes per zspage on 32bit at the moment. It means we lost 1% at *worst case*(40B/4096B) which is not bad I think at the cost of maintenance. Link: http://lkml.kernel.org/r/1464736881-24886-9-git-send-email-minchan@kernel.org Signed-off-by: Minchan Kim Cc: Sergey Senozhatsky Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 1 - mm/zsmalloc.c | 531 ++++++++++++++++++++++++++------------------------------ 2 files changed, 242 insertions(+), 290 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index d85520647d1d..6095055bd70f 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 07485a2e5b96..c6d2cbe0f19f 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -16,26 +16,11 @@ * struct page(s) to form a zspage. * * Usage of struct page fields: - * page->private: points to the first component (0-order) page - * page->index (union with page->freelist): offset of the first object - * starting in this page. For the first page, this is - * always 0, so we use this field (aka freelist) to point - * to the first free object in zspage. - * page->lru: links together all component pages (except the first page) - * of a zspage - * - * For _first_ page only: - * - * page->private: refers to the component page after the first page - * If the page is first_page for huge object, it stores handle. - * Look at size_class->huge. - * page->freelist: points to the first free object in zspage. - * Free objects are linked together using in-place - * metadata. - * page->lru: links together first pages of various zspages. - * Basically forming list of zspages in a fullness group. - * page->mapping: class index and fullness group of the zspage - * page->inuse: the number of objects that are used in this zspage + * page->private: points to zspage + * page->index: offset of the first object starting in this page. + * For the first page, this is always 0, so we use this field + * to store handle for huge object. + * page->next: links together all component pages of a zspage * * Usage of struct page flags: * PG_private: identifies the first component page @@ -147,7 +132,7 @@ * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN * (reason above) */ -#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> 8) +#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> CLASS_BITS) /* * We do not maintain any list for completely empty or full pages @@ -155,8 +140,6 @@ enum fullness_group { ZS_ALMOST_FULL, ZS_ALMOST_EMPTY, - _ZS_NR_FULLNESS_GROUPS, - ZS_EMPTY, ZS_FULL }; @@ -205,7 +188,7 @@ static const int fullness_threshold_frac = 4; struct size_class { spinlock_t lock; - struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS]; + struct list_head fullness_list[2]; /* * Size of objects stored in this class. Must be multiple * of ZS_ALIGN. @@ -224,7 +207,7 @@ struct size_class { /* * Placed within free objects to form a singly linked list. - * For every zspage, first_page->freelist gives head of this list. + * For every zspage, zspage->freeobj gives head of this list. 
* * This must be power of 2 and less than or equal to ZS_ALIGN */ @@ -247,6 +230,7 @@ struct zs_pool { struct size_class **size_class; struct kmem_cache *handle_cachep; + struct kmem_cache *zspage_cachep; atomic_long_t pages_allocated; @@ -268,14 +252,19 @@ struct zs_pool { * A zspage's class index and fullness group * are encoded in its (first)page->mapping */ -#define FULLNESS_BITS 4 -#define CLASS_BITS 28 +#define FULLNESS_BITS 2 +#define CLASS_BITS 8 -#define FULLNESS_SHIFT 0 -#define CLASS_SHIFT (FULLNESS_SHIFT + FULLNESS_BITS) - -#define FULLNESS_MASK ((1UL << FULLNESS_BITS) - 1) -#define CLASS_MASK ((1UL << CLASS_BITS) - 1) +struct zspage { + struct { + unsigned int fullness:FULLNESS_BITS; + unsigned int class:CLASS_BITS; + }; + unsigned int inuse; + void *freeobj; + struct page *first_page; + struct list_head list; /* fullness list */ +}; struct mapping_area { #ifdef CONFIG_PGTABLE_MAPPING @@ -287,29 +276,51 @@ struct mapping_area { enum zs_mapmode vm_mm; /* mapping mode */ }; -static int create_handle_cache(struct zs_pool *pool) +static int create_cache(struct zs_pool *pool) { pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, 0, 0, NULL); - return pool->handle_cachep ? 0 : 1; + if (!pool->handle_cachep) + return 1; + + pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage), + 0, 0, NULL); + if (!pool->zspage_cachep) { + kmem_cache_destroy(pool->handle_cachep); + pool->handle_cachep = NULL; + return 1; + } + + return 0; } -static void destroy_handle_cache(struct zs_pool *pool) +static void destroy_cache(struct zs_pool *pool) { kmem_cache_destroy(pool->handle_cachep); + kmem_cache_destroy(pool->zspage_cachep); } -static unsigned long alloc_handle(struct zs_pool *pool, gfp_t gfp) +static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) { return (unsigned long)kmem_cache_alloc(pool->handle_cachep, gfp & ~__GFP_HIGHMEM); } -static void free_handle(struct zs_pool *pool, unsigned long handle) +static void cache_free_handle(struct zs_pool *pool, unsigned long handle) { kmem_cache_free(pool->handle_cachep, (void *)handle); } +static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags) +{ + return kmem_cache_alloc(pool->zspage_cachep, flags & ~__GFP_HIGHMEM); +}; + +static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) +{ + kmem_cache_free(pool->zspage_cachep, zspage); +} + static void record_obj(unsigned long handle, unsigned long obj) { /* @@ -417,67 +428,61 @@ static int is_first_page(struct page *page) return PagePrivate(page); } -static int is_last_page(struct page *page) -{ - return PagePrivate2(page); -} - -static inline int get_zspage_inuse(struct page *first_page) +static inline int get_zspage_inuse(struct zspage *zspage) { - return first_page->inuse; + return zspage->inuse; } -static inline void set_zspage_inuse(struct page *first_page, int val) +static inline void set_zspage_inuse(struct zspage *zspage, int val) { - first_page->inuse = val; + zspage->inuse = val; } -static inline void mod_zspage_inuse(struct page *first_page, int val) +static inline void mod_zspage_inuse(struct zspage *zspage, int val) { - first_page->inuse += val; + zspage->inuse += val; } static inline int get_first_obj_offset(struct page *page) { + if (is_first_page(page)) + return 0; + return page->index; } static inline void set_first_obj_offset(struct page *page, int offset) { + if (is_first_page(page)) + return; + page->index = offset; } -static inline unsigned long get_freeobj(struct page *first_page) +static inline 
unsigned long get_freeobj(struct zspage *zspage) { - return (unsigned long)first_page->freelist; + return (unsigned long)zspage->freeobj; } -static inline void set_freeobj(struct page *first_page, unsigned long obj) +static inline void set_freeobj(struct zspage *zspage, unsigned long obj) { - first_page->freelist = (void *)obj; + zspage->freeobj = (void *)obj; } -static void get_zspage_mapping(struct page *first_page, +static void get_zspage_mapping(struct zspage *zspage, unsigned int *class_idx, enum fullness_group *fullness) { - unsigned long m; - VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); - - m = (unsigned long)first_page->mapping; - *fullness = (m >> FULLNESS_SHIFT) & FULLNESS_MASK; - *class_idx = (m >> CLASS_SHIFT) & CLASS_MASK; + *fullness = zspage->fullness; + *class_idx = zspage->class; } -static void set_zspage_mapping(struct page *first_page, +static void set_zspage_mapping(struct zspage *zspage, unsigned int class_idx, enum fullness_group fullness) { - unsigned long m; - VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); - - m = (class_idx << CLASS_SHIFT) | (fullness << FULLNESS_SHIFT); - first_page->mapping = (struct address_space *)m; + zspage->class = class_idx; + zspage->fullness = fullness; } /* @@ -669,14 +674,12 @@ static inline void zs_pool_stat_destroy(struct zs_pool *pool) * status of the given page. */ static enum fullness_group get_fullness_group(struct size_class *class, - struct page *first_page) + struct zspage *zspage) { int inuse, objs_per_zspage; enum fullness_group fg; - VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); - - inuse = get_zspage_inuse(first_page); + inuse = get_zspage_inuse(zspage); objs_per_zspage = class->objs_per_zspage; if (inuse == 0) @@ -698,32 +701,31 @@ static enum fullness_group get_fullness_group(struct size_class *class, * identified by . */ static void insert_zspage(struct size_class *class, - enum fullness_group fullness, - struct page *first_page) + struct zspage *zspage, + enum fullness_group fullness) { - struct page **head; - - VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); + struct zspage *head; - if (fullness >= _ZS_NR_FULLNESS_GROUPS) + if (fullness >= ZS_EMPTY) return; + head = list_first_entry_or_null(&class->fullness_list[fullness], + struct zspage, list); + zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ? CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1); - head = &class->fullness_list[fullness]; - if (!*head) { - *head = first_page; - return; - } - /* - * We want to see more ZS_FULL pages and less almost - * empty/full. Put pages with higher ->inuse first. + * We want to see more ZS_FULL pages and less almost empty/full. + * Put pages with higher ->inuse first. */ - list_add_tail(&first_page->lru, &(*head)->lru); - if (get_zspage_inuse(first_page) >= get_zspage_inuse(*head)) - *head = first_page; + if (head) { + if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) { + list_add(&zspage->list, &head->list); + return; + } + } + list_add(&zspage->list, &class->fullness_list[fullness]); } /* @@ -731,25 +733,15 @@ static void insert_zspage(struct size_class *class, * by . 
*/ static void remove_zspage(struct size_class *class, - enum fullness_group fullness, - struct page *first_page) + struct zspage *zspage, + enum fullness_group fullness) { - struct page **head; - - VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); - - if (fullness >= _ZS_NR_FULLNESS_GROUPS) + if (fullness >= ZS_EMPTY) return; - head = &class->fullness_list[fullness]; - VM_BUG_ON_PAGE(!*head, first_page); - if (list_empty(&(*head)->lru)) - *head = NULL; - else if (*head == first_page) - *head = (struct page *)list_entry((*head)->lru.next, - struct page, lru); + VM_BUG_ON(list_empty(&class->fullness_list[fullness])); - list_del_init(&first_page->lru); + list_del_init(&zspage->list); zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ? CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1); } @@ -764,19 +756,19 @@ static void remove_zspage(struct size_class *class, * fullness group. */ static enum fullness_group fix_fullness_group(struct size_class *class, - struct page *first_page) + struct zspage *zspage) { int class_idx; enum fullness_group currfg, newfg; - get_zspage_mapping(first_page, &class_idx, &currfg); - newfg = get_fullness_group(class, first_page); + get_zspage_mapping(zspage, &class_idx, &currfg); + newfg = get_fullness_group(class, zspage); if (newfg == currfg) goto out; - remove_zspage(class, currfg, first_page); - insert_zspage(class, newfg, first_page); - set_zspage_mapping(first_page, class_idx, newfg); + remove_zspage(class, zspage, currfg); + insert_zspage(class, zspage, newfg); + set_zspage_mapping(zspage, class_idx, newfg); out: return newfg; @@ -818,31 +810,15 @@ static int get_pages_per_zspage(int class_size) return max_usedpc_order; } -/* - * A single 'zspage' is composed of many system pages which are - * linked together using fields in struct page. This function finds - * the first/head page, given any component page of a zspage. 
- */ -static struct page *get_first_page(struct page *page) + +static struct zspage *get_zspage(struct page *page) { - if (is_first_page(page)) - return page; - else - return (struct page *)page_private(page); + return (struct zspage *)page->private; } static struct page *get_next_page(struct page *page) { - struct page *next; - - if (is_last_page(page)) - next = NULL; - else if (is_first_page(page)) - next = (struct page *)page_private(page); - else - next = list_entry(page->lru.next, struct page, lru); - - return next; + return page->next; } /* @@ -888,7 +864,7 @@ static unsigned long obj_to_head(struct size_class *class, struct page *page, { if (class->huge) { VM_BUG_ON_PAGE(!is_first_page(page), page); - return page_private(page); + return page->index; } else return *(unsigned long *)obj; } @@ -896,10 +872,9 @@ static unsigned long obj_to_head(struct size_class *class, struct page *page, static unsigned long obj_idx_to_offset(struct page *page, unsigned long obj_idx, int class_size) { - unsigned long off = 0; + unsigned long off; - if (!is_first_page(page)) - off = get_first_obj_offset(page); + off = get_first_obj_offset(page); return off + obj_idx * class_size; } @@ -924,44 +899,31 @@ static void reset_page(struct page *page) clear_bit(PG_private, &page->flags); clear_bit(PG_private_2, &page->flags); set_page_private(page, 0); - page->mapping = NULL; - page->freelist = NULL; - page_mapcount_reset(page); + page->index = 0; } -static void free_zspage(struct page *first_page) +static void free_zspage(struct zs_pool *pool, struct zspage *zspage) { - struct page *nextp, *tmp, *head_extra; + struct page *page, *next; - VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); - VM_BUG_ON_PAGE(get_zspage_inuse(first_page), first_page); + VM_BUG_ON(get_zspage_inuse(zspage)); - head_extra = (struct page *)page_private(first_page); + next = page = zspage->first_page; + do { + next = page->next; + reset_page(page); + put_page(page); + page = next; + } while (page != NULL); - reset_page(first_page); - __free_page(first_page); - - /* zspage with only 1 system page */ - if (!head_extra) - return; - - list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) { - list_del(&nextp->lru); - reset_page(nextp); - __free_page(nextp); - } - reset_page(head_extra); - __free_page(head_extra); + cache_free_zspage(pool, zspage); } /* Initialize a newly allocated zspage */ -static void init_zspage(struct size_class *class, struct page *first_page) +static void init_zspage(struct size_class *class, struct zspage *zspage) { unsigned long off = 0; - struct page *page = first_page; - - first_page->freelist = NULL; - set_zspage_inuse(first_page, 0); + struct page *page = zspage->first_page; while (page) { struct page *next_page; @@ -969,14 +931,7 @@ static void init_zspage(struct size_class *class, struct page *first_page) unsigned int i = 1; void *vaddr; - /* - * page->index stores offset of first object starting - * in the page. For the first page, this is always 0, - * so we use first_page->index (aka ->freelist) to store - * head of corresponding zspage's freelist. 
- */ - if (page != first_page) - set_first_obj_offset(page, off); + set_first_obj_offset(page, off); vaddr = kmap_atomic(page); link = (struct link_free *)vaddr + off / sizeof(*link); @@ -998,44 +953,38 @@ static void init_zspage(struct size_class *class, struct page *first_page) off %= PAGE_SIZE; } - set_freeobj(first_page, (unsigned long)location_to_obj(first_page, 0)); + set_freeobj(zspage, + (unsigned long)location_to_obj(zspage->first_page, 0)); } -static void create_page_chain(struct page *pages[], int nr_pages) +static void create_page_chain(struct zspage *zspage, struct page *pages[], + int nr_pages) { int i; struct page *page; struct page *prev_page = NULL; - struct page *first_page = NULL; /* * Allocate individual pages and link them together as: - * 1. first page->private = first sub-page - * 2. all sub-pages are linked together using page->lru - * 3. each sub-page is linked to the first page using page->private + * 1. all pages are linked together using page->next + * 2. each sub-page point to zspage using page->private * - * For each size class, First/Head pages are linked together using - * page->lru. Also, we set PG_private to identify the first page - * (i.e. no other sub-page has this flag set) and PG_private_2 to - * identify the last page. + * we set PG_private to identify the first page (i.e. no other sub-page + * has this flag set) and PG_private_2 to identify the last page. */ for (i = 0; i < nr_pages; i++) { page = pages[i]; - - INIT_LIST_HEAD(&page->lru); + set_page_private(page, (unsigned long)zspage); if (i == 0) { + zspage->first_page = page; SetPagePrivate(page); - set_page_private(page, 0); - first_page = page; + } else { + prev_page->next = page; } - if (i == 1) - set_page_private(first_page, (unsigned long)page); - if (i >= 1) - set_page_private(page, (unsigned long)first_page); - if (i >= 2) - list_add(&page->lru, &prev_page->lru); - if (i == nr_pages - 1) + if (i == nr_pages - 1) { SetPagePrivate2(page); + page->next = NULL; + } prev_page = page; } } @@ -1043,43 +992,51 @@ static void create_page_chain(struct page *pages[], int nr_pages) /* * Allocate a zspage for the given size class */ -static struct page *alloc_zspage(struct size_class *class, gfp_t flags) +static struct zspage *alloc_zspage(struct zs_pool *pool, + struct size_class *class, + gfp_t gfp) { int i; - struct page *first_page = NULL; struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE]; + struct zspage *zspage = cache_alloc_zspage(pool, gfp); + + if (!zspage) + return NULL; + + memset(zspage, 0, sizeof(struct zspage)); for (i = 0; i < class->pages_per_zspage; i++) { struct page *page; - page = alloc_page(flags); + page = alloc_page(gfp); if (!page) { while (--i >= 0) __free_page(pages[i]); + cache_free_zspage(pool, zspage); return NULL; } pages[i] = page; } - create_page_chain(pages, class->pages_per_zspage); - first_page = pages[0]; - init_zspage(class, first_page); + create_page_chain(zspage, pages, class->pages_per_zspage); + init_zspage(class, zspage); - return first_page; + return zspage; } -static struct page *find_get_zspage(struct size_class *class) +static struct zspage *find_get_zspage(struct size_class *class) { int i; - struct page *page; + struct zspage *zspage; - for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) { - page = class->fullness_list[i]; - if (page) + for (i = ZS_ALMOST_FULL; i <= ZS_ALMOST_EMPTY; i++) { + zspage = list_first_entry_or_null(&class->fullness_list[i], + struct zspage, list); + if (zspage) break; } - return page; + return zspage; } #ifdef CONFIG_PGTABLE_MAPPING @@ 
-1284,11 +1241,9 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage) return true; } -static bool zspage_full(struct size_class *class, struct page *first_page) +static bool zspage_full(struct size_class *class, struct zspage *zspage) { - VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); - - return get_zspage_inuse(first_page) == class->objs_per_zspage; + return get_zspage_inuse(zspage) == class->objs_per_zspage; } unsigned long zs_get_total_pages(struct zs_pool *pool) @@ -1314,6 +1269,7 @@ EXPORT_SYMBOL_GPL(zs_get_total_pages); void *zs_map_object(struct zs_pool *pool, unsigned long handle, enum zs_mapmode mm) { + struct zspage *zspage; struct page *page; unsigned long obj, obj_idx, off; @@ -1336,7 +1292,8 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, obj = handle_to_obj(handle); obj_to_location(obj, &page, &obj_idx); - get_zspage_mapping(get_first_page(page), &class_idx, &fg); + zspage = get_zspage(page); + get_zspage_mapping(zspage, &class_idx, &fg); class = pool->size_class[class_idx]; off = obj_idx_to_offset(page, obj_idx, class->size); @@ -1365,6 +1322,7 @@ EXPORT_SYMBOL_GPL(zs_map_object); void zs_unmap_object(struct zs_pool *pool, unsigned long handle) { + struct zspage *zspage; struct page *page; unsigned long obj, obj_idx, off; @@ -1375,7 +1333,8 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) obj = handle_to_obj(handle); obj_to_location(obj, &page, &obj_idx); - get_zspage_mapping(get_first_page(page), &class_idx, &fg); + zspage = get_zspage(page); + get_zspage_mapping(zspage, &class_idx, &fg); class = pool->size_class[class_idx]; off = obj_idx_to_offset(page, obj_idx, class->size); @@ -1397,7 +1356,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) EXPORT_SYMBOL_GPL(zs_unmap_object); static unsigned long obj_malloc(struct size_class *class, - struct page *first_page, unsigned long handle) + struct zspage *zspage, unsigned long handle) { unsigned long obj; struct link_free *link; @@ -1407,21 +1366,22 @@ static unsigned long obj_malloc(struct size_class *class, void *vaddr; handle |= OBJ_ALLOCATED_TAG; - obj = get_freeobj(first_page); + obj = get_freeobj(zspage); obj_to_location(obj, &m_page, &m_objidx); m_offset = obj_idx_to_offset(m_page, m_objidx, class->size); vaddr = kmap_atomic(m_page); link = (struct link_free *)vaddr + m_offset / sizeof(*link); - set_freeobj(first_page, (unsigned long)link->next); + set_freeobj(zspage, (unsigned long)link->next); if (!class->huge) /* record handle in the header of allocated chunk */ link->handle = handle; else - /* record handle in first_page->private */ - set_page_private(first_page, handle); + /* record handle to page->index */ + zspage->first_page->index = handle; + kunmap_atomic(vaddr); - mod_zspage_inuse(first_page, 1); + mod_zspage_inuse(zspage, 1); zs_stat_inc(class, OBJ_USED, 1); return obj; @@ -1441,12 +1401,12 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) { unsigned long handle, obj; struct size_class *class; - struct page *first_page; + struct zspage *zspage; if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE)) return 0; - handle = alloc_handle(pool, gfp); + handle = cache_alloc_handle(pool, gfp); if (!handle) return 0; @@ -1455,17 +1415,17 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) class = pool->size_class[get_size_class_index(size)]; spin_lock(&class->lock); - first_page = find_get_zspage(class); + zspage = find_get_zspage(class); - if (!first_page) { + if (!zspage) { 
spin_unlock(&class->lock); - first_page = alloc_zspage(class, gfp); - if (unlikely(!first_page)) { - free_handle(pool, handle); + zspage = alloc_zspage(pool, class, gfp); + if (unlikely(!zspage)) { + cache_free_handle(pool, handle); return 0; } - set_zspage_mapping(first_page, class->index, ZS_EMPTY); + set_zspage_mapping(zspage, class->index, ZS_EMPTY); atomic_long_add(class->pages_per_zspage, &pool->pages_allocated); @@ -1474,9 +1434,9 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) class->size, class->pages_per_zspage)); } - obj = obj_malloc(class, first_page, handle); + obj = obj_malloc(class, zspage, handle); /* Now move the zspage to another fullness group, if required */ - fix_fullness_group(class, first_page); + fix_fullness_group(class, zspage); record_obj(handle, obj); spin_unlock(&class->lock); @@ -1487,13 +1447,14 @@ EXPORT_SYMBOL_GPL(zs_malloc); static void obj_free(struct size_class *class, unsigned long obj) { struct link_free *link; - struct page *first_page, *f_page; + struct zspage *zspage; + struct page *f_page; unsigned long f_objidx, f_offset; void *vaddr; obj &= ~OBJ_ALLOCATED_TAG; obj_to_location(obj, &f_page, &f_objidx); - first_page = get_first_page(f_page); + zspage = get_zspage(f_page); f_offset = obj_idx_to_offset(f_page, f_objidx, class->size); @@ -1501,18 +1462,17 @@ static void obj_free(struct size_class *class, unsigned long obj) /* Insert this object in containing zspage's freelist */ link = (struct link_free *)(vaddr + f_offset); - link->next = (void *)get_freeobj(first_page); - if (class->huge) - set_page_private(first_page, 0); + link->next = (void *)get_freeobj(zspage); kunmap_atomic(vaddr); - set_freeobj(first_page, obj); - mod_zspage_inuse(first_page, -1); + set_freeobj(zspage, obj); + mod_zspage_inuse(zspage, -1); zs_stat_dec(class, OBJ_USED, 1); } void zs_free(struct zs_pool *pool, unsigned long handle) { - struct page *first_page, *f_page; + struct zspage *zspage; + struct page *f_page; unsigned long obj, f_objidx; int class_idx; struct size_class *class; @@ -1524,25 +1484,25 @@ void zs_free(struct zs_pool *pool, unsigned long handle) pin_tag(handle); obj = handle_to_obj(handle); obj_to_location(obj, &f_page, &f_objidx); - first_page = get_first_page(f_page); + zspage = get_zspage(f_page); - get_zspage_mapping(first_page, &class_idx, &fullness); + get_zspage_mapping(zspage, &class_idx, &fullness); class = pool->size_class[class_idx]; spin_lock(&class->lock); obj_free(class, obj); - fullness = fix_fullness_group(class, first_page); + fullness = fix_fullness_group(class, zspage); if (fullness == ZS_EMPTY) { zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage( class->size, class->pages_per_zspage)); atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated); - free_zspage(first_page); + free_zspage(pool, zspage); } spin_unlock(&class->lock); unpin_tag(handle); - free_handle(pool, handle); + cache_free_handle(pool, handle); } EXPORT_SYMBOL_GPL(zs_free); @@ -1621,8 +1581,7 @@ static unsigned long find_alloced_obj(struct size_class *class, unsigned long handle = 0; void *addr = kmap_atomic(page); - if (!is_first_page(page)) - offset = get_first_obj_offset(page); + offset = get_first_obj_offset(page); offset += class->size * index; while (offset < PAGE_SIZE) { @@ -1643,7 +1602,7 @@ static unsigned long find_alloced_obj(struct size_class *class, } struct zs_compact_control { - /* Source page for migration which could be a subpage of zspage. 
*/ + /* Source spage for migration which could be a subpage of zspage */ struct page *s_page; /* Destination page for migration which should be a first page * of zspage. */ @@ -1674,14 +1633,14 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class, } /* Stop if there is no more space */ - if (zspage_full(class, d_page)) { + if (zspage_full(class, get_zspage(d_page))) { unpin_tag(handle); ret = -ENOMEM; break; } used_obj = handle_to_obj(handle); - free_obj = obj_malloc(class, d_page, handle); + free_obj = obj_malloc(class, get_zspage(d_page), handle); zs_object_copy(class, free_obj, used_obj); index++; /* @@ -1703,39 +1662,46 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class, return ret; } -static struct page *isolate_target_page(struct size_class *class) +static struct zspage *isolate_zspage(struct size_class *class, bool source) { int i; - struct page *page; + struct zspage *zspage; + enum fullness_group fg[2] = {ZS_ALMOST_EMPTY, ZS_ALMOST_FULL}; - for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) { - page = class->fullness_list[i]; - if (page) { - remove_zspage(class, i, page); - break; + if (!source) { + fg[0] = ZS_ALMOST_FULL; + fg[1] = ZS_ALMOST_EMPTY; + } + + for (i = 0; i < 2; i++) { + zspage = list_first_entry_or_null(&class->fullness_list[fg[i]], + struct zspage, list); + if (zspage) { + remove_zspage(class, zspage, fg[i]); + return zspage; } } - return page; + return zspage; } /* - * putback_zspage - add @first_page into right class's fullness list + * putback_zspage - add @zspage into right class's fullness list * @pool: target pool * @class: destination class - * @first_page: target page + * @zspage: target page * - * Return @fist_page's fullness_group + * Return @zspage's fullness_group */ static enum fullness_group putback_zspage(struct zs_pool *pool, struct size_class *class, - struct page *first_page) + struct zspage *zspage) { enum fullness_group fullness; - fullness = get_fullness_group(class, first_page); - insert_zspage(class, fullness, first_page); - set_zspage_mapping(first_page, class->index, fullness); + fullness = get_fullness_group(class, zspage); + insert_zspage(class, zspage, fullness); + set_zspage_mapping(zspage, class->index, fullness); if (fullness == ZS_EMPTY) { zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage( @@ -1743,29 +1709,12 @@ static enum fullness_group putback_zspage(struct zs_pool *pool, atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated); - free_zspage(first_page); + free_zspage(pool, zspage); } return fullness; } -static struct page *isolate_source_page(struct size_class *class) -{ - int i; - struct page *page = NULL; - - for (i = ZS_ALMOST_EMPTY; i >= ZS_ALMOST_FULL; i--) { - page = class->fullness_list[i]; - if (!page) - continue; - - remove_zspage(class, i, page); - break; - } - - return page; -} - /* * * Based on the number of unused allocated objects calculate @@ -1790,20 +1739,20 @@ static unsigned long zs_can_compact(struct size_class *class) static void __zs_compact(struct zs_pool *pool, struct size_class *class) { struct zs_compact_control cc; - struct page *src_page; - struct page *dst_page = NULL; + struct zspage *src_zspage; + struct zspage *dst_zspage = NULL; spin_lock(&class->lock); - while ((src_page = isolate_source_page(class))) { + while ((src_zspage = isolate_zspage(class, true))) { if (!zs_can_compact(class)) break; cc.index = 0; - cc.s_page = src_page; + cc.s_page = src_zspage->first_page; - while ((dst_page = isolate_target_page(class))) { - cc.d_page = 
dst_page; + while ((dst_zspage = isolate_zspage(class, false))) { + cc.d_page = dst_zspage->first_page; /* * If there is no more space in dst_page, resched * and see if anyone had allocated another zspage. @@ -1811,23 +1760,23 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) if (!migrate_zspage(pool, class, &cc)) break; - putback_zspage(pool, class, dst_page); + putback_zspage(pool, class, dst_zspage); } /* Stop if we couldn't find slot */ - if (dst_page == NULL) + if (dst_zspage == NULL) break; - putback_zspage(pool, class, dst_page); - if (putback_zspage(pool, class, src_page) == ZS_EMPTY) + putback_zspage(pool, class, dst_zspage); + if (putback_zspage(pool, class, src_zspage) == ZS_EMPTY) pool->stats.pages_compacted += class->pages_per_zspage; spin_unlock(&class->lock); cond_resched(); spin_lock(&class->lock); } - if (src_page) - putback_zspage(pool, class, src_page); + if (src_zspage) + putback_zspage(pool, class, src_zspage); spin_unlock(&class->lock); } @@ -1945,7 +1894,7 @@ struct zs_pool *zs_create_pool(const char *name) if (!pool->name) goto err; - if (create_handle_cache(pool)) + if (create_cache(pool)) goto err; /* @@ -1956,6 +1905,7 @@ struct zs_pool *zs_create_pool(const char *name) int size; int pages_per_zspage; struct size_class *class; + int fullness = 0; size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA; if (size > ZS_MAX_ALLOC_SIZE) @@ -1991,6 +1941,9 @@ struct zs_pool *zs_create_pool(const char *name) class->huge = true; spin_lock_init(&class->lock); pool->size_class[i] = class; + for (fullness = ZS_ALMOST_FULL; fullness <= ZS_ALMOST_EMPTY; + fullness++) + INIT_LIST_HEAD(&class->fullness_list[fullness]); prev_class = class; } @@ -2029,8 +1982,8 @@ void zs_destroy_pool(struct zs_pool *pool) if (class->index != i) continue; - for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) { - if (class->fullness_list[fg]) { + for (fg = ZS_ALMOST_FULL; fg <= ZS_ALMOST_EMPTY; fg++) { + if (!list_empty(&class->fullness_list[fg])) { pr_info("Freeing non-empty class with size %db, fullness group %d\n", class->size, fg); } @@ -2038,7 +1991,7 @@ void zs_destroy_pool(struct zs_pool *pool) kfree(class); } - destroy_handle_cache(pool); + destroy_cache(pool); kfree(pool->size_class); kfree(pool->name); kfree(pool); -- cgit v1.2.3-70-g09d2 From 4aa409cab7c39c90f4b725ff22f52bbf5d2fc4e0 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 26 Jul 2016 15:23:26 -0700 Subject: zsmalloc: separate free_zspage from putback_zspage Currently, putback_zspage does free zspage under class->lock if fullness become ZS_EMPTY but it makes trouble to implement locking scheme for new zspage migration. So, this patch is to separate free_zspage from putback_zspage and free zspage out of class->lock which is preparation for zspage migration. 
Link: http://lkml.kernel.org/r/1464736881-24886-10-git-send-email-minchan@kernel.org Signed-off-by: Minchan Kim Reviewed-by: Sergey Senozhatsky Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index c6d2cbe0f19f..dd3708611f65 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1687,14 +1687,12 @@ static struct zspage *isolate_zspage(struct size_class *class, bool source) /* * putback_zspage - add @zspage into right class's fullness list - * @pool: target pool * @class: destination class * @zspage: target page * * Return @zspage's fullness_group */ -static enum fullness_group putback_zspage(struct zs_pool *pool, - struct size_class *class, +static enum fullness_group putback_zspage(struct size_class *class, struct zspage *zspage) { enum fullness_group fullness; @@ -1703,15 +1701,6 @@ static enum fullness_group putback_zspage(struct zs_pool *pool, insert_zspage(class, zspage, fullness); set_zspage_mapping(zspage, class->index, fullness); - if (fullness == ZS_EMPTY) { - zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage( - class->size, class->pages_per_zspage)); - atomic_long_sub(class->pages_per_zspage, - &pool->pages_allocated); - - free_zspage(pool, zspage); - } - return fullness; } @@ -1760,23 +1749,29 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) if (!migrate_zspage(pool, class, &cc)) break; - putback_zspage(pool, class, dst_zspage); + putback_zspage(class, dst_zspage); } /* Stop if we couldn't find slot */ if (dst_zspage == NULL) break; - putback_zspage(pool, class, dst_zspage); - if (putback_zspage(pool, class, src_zspage) == ZS_EMPTY) + putback_zspage(class, dst_zspage); + if (putback_zspage(class, src_zspage) == ZS_EMPTY) { + zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage( + class->size, class->pages_per_zspage)); + atomic_long_sub(class->pages_per_zspage, + &pool->pages_allocated); + free_zspage(pool, src_zspage); pool->stats.pages_compacted += class->pages_per_zspage; + } spin_unlock(&class->lock); cond_resched(); spin_lock(&class->lock); } if (src_zspage) - putback_zspage(pool, class, src_zspage); + putback_zspage(class, src_zspage); spin_unlock(&class->lock); } -- cgit v1.2.3-70-g09d2 From bfd093f5e7f09c1e41c43e7605893069975cd734 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 26 Jul 2016 15:23:28 -0700 Subject: zsmalloc: use freeobj for index Zsmalloc stores first free object's position into freeobj in each zspage. If we change it with index from first_page instead of position, it makes page migration simple because we don't need to correct other entries for linked list if a page is migrated out. Link: http://lkml.kernel.org/r/1464736881-24886-11-git-send-email-minchan@kernel.org Signed-off-by: Minchan Kim Cc: Sergey Senozhatsky Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 139 ++++++++++++++++++++++++++++++---------------------------- 1 file changed, 73 insertions(+), 66 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index dd3708611f65..c6fb543cfb98 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -71,9 +71,7 @@ * Object location (, ) is encoded as * as single (unsigned long) handle value. * - * Note that object index is relative to system - * page it is stored in, so for each sub-page belonging - * to a zspage, obj_idx starts with 0. + * Note that object index starts from 0. 
* * This is made more complicated by various memory models and PAE. */ @@ -214,10 +212,10 @@ struct size_class { struct link_free { union { /* - * Position of next free chunk (encodes ) + * Free object index; * It's valid for non-allocated object */ - void *next; + unsigned long next; /* * Handle of allocated object. */ @@ -261,7 +259,7 @@ struct zspage { unsigned int class:CLASS_BITS; }; unsigned int inuse; - void *freeobj; + unsigned int freeobj; struct page *first_page; struct list_head list; /* fullness list */ }; @@ -459,14 +457,14 @@ static inline void set_first_obj_offset(struct page *page, int offset) page->index = offset; } -static inline unsigned long get_freeobj(struct zspage *zspage) +static inline unsigned int get_freeobj(struct zspage *zspage) { - return (unsigned long)zspage->freeobj; + return zspage->freeobj; } -static inline void set_freeobj(struct zspage *zspage, unsigned long obj) +static inline void set_freeobj(struct zspage *zspage, unsigned int obj) { - zspage->freeobj = (void *)obj; + zspage->freeobj = obj; } static void get_zspage_mapping(struct zspage *zspage, @@ -810,6 +808,10 @@ static int get_pages_per_zspage(int class_size) return max_usedpc_order; } +static struct page *get_first_page(struct zspage *zspage) +{ + return zspage->first_page; +} static struct zspage *get_zspage(struct page *page) { @@ -821,37 +823,33 @@ static struct page *get_next_page(struct page *page) return page->next; } -/* - * Encode as a single handle value. - * We use the least bit of handle for tagging. +/** + * obj_to_location - get (, ) from encoded object value + * @page: page object resides in zspage + * @obj_idx: object index */ -static void *location_to_obj(struct page *page, unsigned long obj_idx) +static void obj_to_location(unsigned long obj, struct page **page, + unsigned int *obj_idx) { - unsigned long obj; + obj >>= OBJ_TAG_BITS; + *page = pfn_to_page(obj >> OBJ_INDEX_BITS); + *obj_idx = (obj & OBJ_INDEX_MASK); +} - if (!page) { - VM_BUG_ON(obj_idx); - return NULL; - } +/** + * location_to_obj - get obj value encoded from (, ) + * @page: page object resides in zspage + * @obj_idx: object index + */ +static unsigned long location_to_obj(struct page *page, unsigned int obj_idx) +{ + unsigned long obj; obj = page_to_pfn(page) << OBJ_INDEX_BITS; - obj |= ((obj_idx) & OBJ_INDEX_MASK); + obj |= obj_idx & OBJ_INDEX_MASK; obj <<= OBJ_TAG_BITS; - return (void *)obj; -} - -/* - * Decode pair from the given object handle. We adjust the - * decoded obj_idx back to its original value since it was adjusted in - * location_to_obj(). 
- */ -static void obj_to_location(unsigned long obj, struct page **page, - unsigned long *obj_idx) -{ - obj >>= OBJ_TAG_BITS; - *page = pfn_to_page(obj >> OBJ_INDEX_BITS); - *obj_idx = (obj & OBJ_INDEX_MASK); + return obj; } static unsigned long handle_to_obj(unsigned long handle) @@ -869,16 +867,6 @@ static unsigned long obj_to_head(struct size_class *class, struct page *page, return *(unsigned long *)obj; } -static unsigned long obj_idx_to_offset(struct page *page, - unsigned long obj_idx, int class_size) -{ - unsigned long off; - - off = get_first_obj_offset(page); - - return off + obj_idx * class_size; -} - static inline int trypin_tag(unsigned long handle) { return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle); @@ -922,13 +910,13 @@ static void free_zspage(struct zs_pool *pool, struct zspage *zspage) /* Initialize a newly allocated zspage */ static void init_zspage(struct size_class *class, struct zspage *zspage) { + unsigned int freeobj = 1; unsigned long off = 0; struct page *page = zspage->first_page; while (page) { struct page *next_page; struct link_free *link; - unsigned int i = 1; void *vaddr; set_first_obj_offset(page, off); @@ -937,7 +925,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) link = (struct link_free *)vaddr + off / sizeof(*link); while ((off += class->size) < PAGE_SIZE) { - link->next = location_to_obj(page, i++); + link->next = freeobj++ << OBJ_ALLOCATED_TAG; link += class->size / sizeof(*link); } @@ -947,14 +935,21 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) * page (if present) */ next_page = get_next_page(page); - link->next = location_to_obj(next_page, 0); + if (next_page) { + link->next = freeobj++ << OBJ_ALLOCATED_TAG; + } else { + /* + * Reset OBJ_ALLOCATED_TAG bit to last link to tell + * whether it's allocated object or not. 
+ */ + link->next = -1 << OBJ_ALLOCATED_TAG; + } kunmap_atomic(vaddr); page = next_page; off %= PAGE_SIZE; } - set_freeobj(zspage, - (unsigned long)location_to_obj(zspage->first_page, 0)); + set_freeobj(zspage, 0); } static void create_page_chain(struct zspage *zspage, struct page *pages[], @@ -1271,7 +1266,8 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, { struct zspage *zspage; struct page *page; - unsigned long obj, obj_idx, off; + unsigned long obj, off; + unsigned int obj_idx; unsigned int class_idx; enum fullness_group fg; @@ -1295,7 +1291,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, zspage = get_zspage(page); get_zspage_mapping(zspage, &class_idx, &fg); class = pool->size_class[class_idx]; - off = obj_idx_to_offset(page, obj_idx, class->size); + off = (class->size * obj_idx) & ~PAGE_MASK; area = &get_cpu_var(zs_map_area); area->vm_mm = mm; @@ -1324,7 +1320,8 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) { struct zspage *zspage; struct page *page; - unsigned long obj, obj_idx, off; + unsigned long obj, off; + unsigned int obj_idx; unsigned int class_idx; enum fullness_group fg; @@ -1336,7 +1333,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) zspage = get_zspage(page); get_zspage_mapping(zspage, &class_idx, &fg); class = pool->size_class[class_idx]; - off = obj_idx_to_offset(page, obj_idx, class->size); + off = (class->size * obj_idx) & ~PAGE_MASK; area = this_cpu_ptr(&zs_map_area); if (off + class->size <= PAGE_SIZE) @@ -1358,21 +1355,28 @@ EXPORT_SYMBOL_GPL(zs_unmap_object); static unsigned long obj_malloc(struct size_class *class, struct zspage *zspage, unsigned long handle) { + int i, nr_page, offset; unsigned long obj; struct link_free *link; struct page *m_page; - unsigned long m_objidx, m_offset; + unsigned long m_offset; void *vaddr; handle |= OBJ_ALLOCATED_TAG; obj = get_freeobj(zspage); - obj_to_location(obj, &m_page, &m_objidx); - m_offset = obj_idx_to_offset(m_page, m_objidx, class->size); + + offset = obj * class->size; + nr_page = offset >> PAGE_SHIFT; + m_offset = offset & ~PAGE_MASK; + m_page = get_first_page(zspage); + + for (i = 0; i < nr_page; i++) + m_page = get_next_page(m_page); vaddr = kmap_atomic(m_page); link = (struct link_free *)vaddr + m_offset / sizeof(*link); - set_freeobj(zspage, (unsigned long)link->next); + set_freeobj(zspage, link->next >> OBJ_ALLOCATED_TAG); if (!class->huge) /* record handle in the header of allocated chunk */ link->handle = handle; @@ -1384,6 +1388,8 @@ static unsigned long obj_malloc(struct size_class *class, mod_zspage_inuse(zspage, 1); zs_stat_inc(class, OBJ_USED, 1); + obj = location_to_obj(m_page, obj); + return obj; } @@ -1449,22 +1455,22 @@ static void obj_free(struct size_class *class, unsigned long obj) struct link_free *link; struct zspage *zspage; struct page *f_page; - unsigned long f_objidx, f_offset; + unsigned long f_offset; + unsigned int f_objidx; void *vaddr; obj &= ~OBJ_ALLOCATED_TAG; obj_to_location(obj, &f_page, &f_objidx); + f_offset = (class->size * f_objidx) & ~PAGE_MASK; zspage = get_zspage(f_page); - f_offset = obj_idx_to_offset(f_page, f_objidx, class->size); - vaddr = kmap_atomic(f_page); /* Insert this object in containing zspage's freelist */ link = (struct link_free *)(vaddr + f_offset); - link->next = (void *)get_freeobj(zspage); + link->next = get_freeobj(zspage) << OBJ_ALLOCATED_TAG; kunmap_atomic(vaddr); - set_freeobj(zspage, obj); + set_freeobj(zspage, f_objidx); mod_zspage_inuse(zspage, -1); 
zs_stat_dec(class, OBJ_USED, 1); } @@ -1473,7 +1479,8 @@ void zs_free(struct zs_pool *pool, unsigned long handle) { struct zspage *zspage; struct page *f_page; - unsigned long obj, f_objidx; + unsigned long obj; + unsigned int f_objidx; int class_idx; struct size_class *class; enum fullness_group fullness; @@ -1510,7 +1517,7 @@ static void zs_object_copy(struct size_class *class, unsigned long dst, unsigned long src) { struct page *s_page, *d_page; - unsigned long s_objidx, d_objidx; + unsigned int s_objidx, d_objidx; unsigned long s_off, d_off; void *s_addr, *d_addr; int s_size, d_size, size; @@ -1521,8 +1528,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst, obj_to_location(src, &s_page, &s_objidx); obj_to_location(dst, &d_page, &d_objidx); - s_off = obj_idx_to_offset(s_page, s_objidx, class->size); - d_off = obj_idx_to_offset(d_page, d_objidx, class->size); + s_off = (class->size * s_objidx) & ~PAGE_MASK; + d_off = (class->size * d_objidx) & ~PAGE_MASK; if (s_off + class->size > PAGE_SIZE) s_size = PAGE_SIZE - s_off; -- cgit v1.2.3-70-g09d2 From 48b4800a1c6af2cdda344ea4e2c843dcc1f6afc9 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 26 Jul 2016 15:23:31 -0700 Subject: zsmalloc: page migration support

This patch introduces a run-time migration feature for zspage.

For migration, the VM uses the page.lru field, so it is better not to use the page.next field, which is unified with page.lru, for our own purpose. To that end, we can compute the first object offset of a page at run time instead of keeping it in page.index, which lets page.index serve as the link for page chaining in place of page.next. In the case of a huge object, page.index stores the handle rather than the next link, because a huge object does not need page chaining; get_next_page() therefore has to recognize huge objects and return NULL for them, and for that this patch uses the PG_owner_priv_1 page flag.

For migration, it supports three functions:

* zs_page_isolate

It isolates the zspage that contains the subpage the VM wants to migrate, removing it from its class so nobody can allocate new objects from that zspage. The VM may attempt isolation once per subpage, and the later attempts for the other subpages of the same zspage should not fail. For that, we introduce a zspage.isolated count: with it, zs_page_isolate knows whether the zspage is already isolated for migration, and if so the subsequent attempts succeed without doing any further isolation work.

* zs_page_migrate

First of all, it holds the write side of zspage->lock to prevent migration of other subpages in the zspage. Then it locks all objects in the page the VM wants to migrate. The reason we must lock all objects in that page is the race between zs_map_object and zs_page_migrate:

    zs_map_object                             zs_page_migrate
    pin_tag(handle)
    obj = handle_to_obj(handle)
    obj_to_location(obj, &page, &obj_idx);
                                              write_lock(&zspage->lock)
                                              if (!trypin_tag(handle))
                                                      goto unpin_object
    zspage = get_zspage(page);
    read_lock(&zspage->lock);

If zs_page_migrate did not do the trypin_tag, the page zs_map_object looked up could become stale due to migration and it would crash. If zs_page_migrate locks all of the objects successfully, it copies the content from the old page to the new one and finally creates a new zspage chain with the new page. And if that was the last isolated subpage of the zspage, it puts the zspage back into its class.

* zs_page_putback

It returns an isolated zspage to the right fullness_group list if migration of a page fails. If it finds the zspage is ZS_EMPTY, it queues the zspage freeing to a workqueue. See below about async zspage freeing.
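The pin/lock ordering described for zs_page_migrate can be modeled in userspace as below. This is only a sketch under assumed names (zs_obj_model, map_object, migrate_object); an atomic_bool stands in for the handle's pin bit and a pthread rwlock for zspage->lock, so it is not the kernel implementation. The key property it shows: the mapper pins first and then takes the lock for read, while the migrator takes the lock for write and only *tries* to pin, backing off instead of waiting, so the two sides cannot deadlock and a mapper never dereferences a page that migration has already replaced.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct zs_obj_model {
	atomic_bool pinned;		/* stands in for the handle pin bit */
	pthread_rwlock_t zspage_lock;	/* stands in for zspage->lock */
	int payload;			/* stands in for the object contents */
};
/* initialize with: pinned = false, pthread_rwlock_init(&zspage_lock, NULL) */

static void pin(struct zs_obj_model *o)		/* pin_tag() analogue */
{
	bool expected = false;

	while (!atomic_compare_exchange_weak(&o->pinned, &expected, true))
		expected = false;		/* spin until unpinned */
}

static bool trypin(struct zs_obj_model *o)	/* trypin_tag() analogue */
{
	bool expected = false;

	return atomic_compare_exchange_strong(&o->pinned, &expected, true);
}

static void unpin(struct zs_obj_model *o)
{
	atomic_store(&o->pinned, false);
}

static int map_object(struct zs_obj_model *o)	/* zs_map_object() side */
{
	int value;

	pin(o);					/* pin first ... */
	pthread_rwlock_rdlock(&o->zspage_lock);	/* ... then exclude migration */
	value = o->payload;
	pthread_rwlock_unlock(&o->zspage_lock);
	unpin(o);
	return value;
}

static bool migrate_object(struct zs_obj_model *o, int new_payload)
{
	pthread_rwlock_wrlock(&o->zspage_lock);	/* block new mappers */
	if (!trypin(o)) {			/* a mapper already holds the pin */
		pthread_rwlock_unlock(&o->zspage_lock);
		return false;			/* caller retries later */
	}
	o->payload = new_payload;		/* "copy into the new page" */
	unpin(o);
	pthread_rwlock_unlock(&o->zspage_lock);
	return true;
}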
This patch also introduces asynchronous zspage freeing. The reason we need it is that clearing PG_movable requires the page lock but, unfortunately, the zs_free path must be atomic, so the approach is to try to grab the page locks: if zs_free gets the page lock of every component page, it can free the zspage immediately; otherwise it queues a free request and frees the zspage via a workqueue in process context. If zs_free finds the zspage is isolated when it tries to free it, it delays the freeing until zs_page_putback finds it, which will finally free the zspage. In this patch, we expand fullness_list from ZS_EMPTY to ZS_FULL: the ZS_EMPTY list is used for the delayed freeing, and the added ZS_FULL list makes it possible to tell whether a zspage is isolated or not via a list_empty(&zspage->list) test. [minchan@kernel.org: zsmalloc: keep first object offset in struct page] Link: http://lkml.kernel.org/r/1465788015-23195-1-git-send-email-minchan@kernel.org [minchan@kernel.org: zsmalloc: zspage sanity check] Link: http://lkml.kernel.org/r/20160603010129.GC3304@bbox Link: http://lkml.kernel.org/r/1464736881-24886-12-git-send-email-minchan@kernel.org Signed-off-by: Minchan Kim Cc: Sergey Senozhatsky Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/uapi/linux/magic.h | 1 + mm/zsmalloc.c | 769 ++++++++++++++++++++++++++++++++++++++------- 2 files changed, 654 insertions(+), 116 deletions(-) diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index d829ce63529d..e398beac67b8 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -81,5 +81,6 @@ /* Since UDF 2.01 is ISO 13346 based... */ #define UDF_SUPER_MAGIC 0x15013346 #define BALLOON_KVM_MAGIC 0x13661366 +#define ZSMALLOC_MAGIC 0x58295829 #endif /* __LINUX_MAGIC_H__ */ diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index c6fb543cfb98..04a4f063b4fd 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -17,14 +17,14 @@ * * Usage of struct page fields: * page->private: points to zspage - * page->index: offset of the first object starting in this page. - * For the first page, this is always 0, so we use this field - * to store handle for huge object. - * page->next: links together all component pages of a zspage + * page->freelist(index): links together all component pages of a zspage + * For the huge page, this is always 0, so we use this field + * to store handle. * * Usage of struct page flags: * PG_private: identifies the first component page * PG_private2: identifies the last component page + * PG_owner_priv_1: indentifies the huge component page * */ @@ -49,6 +49,11 @@ #include #include #include +#include +#include +#include + +#define ZSPAGE_MAGIC 0x58 /* * This must be power of 2 and greater than of equal to sizeof(link_free).
@@ -136,25 +141,23 @@ * We do not maintain any list for completely empty or full pages */ enum fullness_group { - ZS_ALMOST_FULL, - ZS_ALMOST_EMPTY, ZS_EMPTY, - ZS_FULL + ZS_ALMOST_EMPTY, + ZS_ALMOST_FULL, + ZS_FULL, + NR_ZS_FULLNESS, }; enum zs_stat_type { + CLASS_EMPTY, + CLASS_ALMOST_EMPTY, + CLASS_ALMOST_FULL, + CLASS_FULL, OBJ_ALLOCATED, OBJ_USED, - CLASS_ALMOST_FULL, - CLASS_ALMOST_EMPTY, + NR_ZS_STAT_TYPE, }; -#ifdef CONFIG_ZSMALLOC_STAT -#define NR_ZS_STAT_TYPE (CLASS_ALMOST_EMPTY + 1) -#else -#define NR_ZS_STAT_TYPE (OBJ_USED + 1) -#endif - struct zs_size_stat { unsigned long objs[NR_ZS_STAT_TYPE]; }; @@ -163,6 +166,10 @@ struct zs_size_stat { static struct dentry *zs_stat_root; #endif +#ifdef CONFIG_COMPACTION +static struct vfsmount *zsmalloc_mnt; +#endif + /* * number of size_classes */ @@ -186,23 +193,36 @@ static const int fullness_threshold_frac = 4; struct size_class { spinlock_t lock; - struct list_head fullness_list[2]; + struct list_head fullness_list[NR_ZS_FULLNESS]; /* * Size of objects stored in this class. Must be multiple * of ZS_ALIGN. */ int size; int objs_per_zspage; - unsigned int index; - - struct zs_size_stat stats; - /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */ int pages_per_zspage; - /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */ - bool huge; + + unsigned int index; + struct zs_size_stat stats; }; +/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */ +static void SetPageHugeObject(struct page *page) +{ + SetPageOwnerPriv1(page); +} + +static void ClearPageHugeObject(struct page *page) +{ + ClearPageOwnerPriv1(page); +} + +static int PageHugeObject(struct page *page) +{ + return PageOwnerPriv1(page); +} + /* * Placed within free objects to form a singly linked list. * For every zspage, zspage->freeobj gives head of this list. 
@@ -244,6 +264,10 @@ struct zs_pool { #ifdef CONFIG_ZSMALLOC_STAT struct dentry *stat_dentry; #endif +#ifdef CONFIG_COMPACTION + struct inode *inode; + struct work_struct free_work; +#endif }; /* @@ -252,16 +276,23 @@ struct zs_pool { */ #define FULLNESS_BITS 2 #define CLASS_BITS 8 +#define ISOLATED_BITS 3 +#define MAGIC_VAL_BITS 8 struct zspage { struct { unsigned int fullness:FULLNESS_BITS; unsigned int class:CLASS_BITS; + unsigned int isolated:ISOLATED_BITS; + unsigned int magic:MAGIC_VAL_BITS; }; unsigned int inuse; unsigned int freeobj; struct page *first_page; struct list_head list; /* fullness list */ +#ifdef CONFIG_COMPACTION + rwlock_t lock; +#endif }; struct mapping_area { @@ -274,6 +305,28 @@ struct mapping_area { enum zs_mapmode vm_mm; /* mapping mode */ }; +#ifdef CONFIG_COMPACTION +static int zs_register_migration(struct zs_pool *pool); +static void zs_unregister_migration(struct zs_pool *pool); +static void migrate_lock_init(struct zspage *zspage); +static void migrate_read_lock(struct zspage *zspage); +static void migrate_read_unlock(struct zspage *zspage); +static void kick_deferred_free(struct zs_pool *pool); +static void init_deferred_free(struct zs_pool *pool); +static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage); +#else +static int zsmalloc_mount(void) { return 0; } +static void zsmalloc_unmount(void) {} +static int zs_register_migration(struct zs_pool *pool) { return 0; } +static void zs_unregister_migration(struct zs_pool *pool) {} +static void migrate_lock_init(struct zspage *zspage) {} +static void migrate_read_lock(struct zspage *zspage) {} +static void migrate_read_unlock(struct zspage *zspage) {} +static void kick_deferred_free(struct zs_pool *pool) {} +static void init_deferred_free(struct zs_pool *pool) {} +static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {} +#endif + static int create_cache(struct zs_pool *pool) { pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, @@ -301,7 +354,7 @@ static void destroy_cache(struct zs_pool *pool) static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) { return (unsigned long)kmem_cache_alloc(pool->handle_cachep, - gfp & ~__GFP_HIGHMEM); + gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); } static void cache_free_handle(struct zs_pool *pool, unsigned long handle) @@ -311,7 +364,8 @@ static void cache_free_handle(struct zs_pool *pool, unsigned long handle) static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags) { - return kmem_cache_alloc(pool->zspage_cachep, flags & ~__GFP_HIGHMEM); + return kmem_cache_alloc(pool->zspage_cachep, + flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); }; static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) @@ -421,11 +475,17 @@ static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage) /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ static DEFINE_PER_CPU(struct mapping_area, zs_map_area); +static bool is_zspage_isolated(struct zspage *zspage) +{ + return zspage->isolated; +} + static int is_first_page(struct page *page) { return PagePrivate(page); } +/* Protected by class->lock */ static inline int get_zspage_inuse(struct zspage *zspage) { return zspage->inuse; @@ -441,20 +501,22 @@ static inline void mod_zspage_inuse(struct zspage *zspage, int val) zspage->inuse += val; } -static inline int get_first_obj_offset(struct page *page) +static inline struct page *get_first_page(struct zspage *zspage) { - if (is_first_page(page)) - return 0; + struct page 
*first_page = zspage->first_page; - return page->index; + VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); + return first_page; } -static inline void set_first_obj_offset(struct page *page, int offset) +static inline int get_first_obj_offset(struct page *page) { - if (is_first_page(page)) - return; + return page->units; +} - page->index = offset; +static inline void set_first_obj_offset(struct page *page, int offset) +{ + page->units = offset; } static inline unsigned int get_freeobj(struct zspage *zspage) @@ -471,6 +533,8 @@ static void get_zspage_mapping(struct zspage *zspage, unsigned int *class_idx, enum fullness_group *fullness) { + BUG_ON(zspage->magic != ZSPAGE_MAGIC); + *fullness = zspage->fullness; *class_idx = zspage->class; } @@ -504,23 +568,19 @@ static int get_size_class_index(int size) static inline void zs_stat_inc(struct size_class *class, enum zs_stat_type type, unsigned long cnt) { - if (type < NR_ZS_STAT_TYPE) - class->stats.objs[type] += cnt; + class->stats.objs[type] += cnt; } static inline void zs_stat_dec(struct size_class *class, enum zs_stat_type type, unsigned long cnt) { - if (type < NR_ZS_STAT_TYPE) - class->stats.objs[type] -= cnt; + class->stats.objs[type] -= cnt; } static inline unsigned long zs_stat_get(struct size_class *class, enum zs_stat_type type) { - if (type < NR_ZS_STAT_TYPE) - return class->stats.objs[type]; - return 0; + return class->stats.objs[type]; } #ifdef CONFIG_ZSMALLOC_STAT @@ -664,6 +724,7 @@ static inline void zs_pool_stat_destroy(struct zs_pool *pool) } #endif + /* * For each size class, zspages are divided into different groups * depending on how "full" they are. This was done so that we could @@ -704,15 +765,9 @@ static void insert_zspage(struct size_class *class, { struct zspage *head; - if (fullness >= ZS_EMPTY) - return; - + zs_stat_inc(class, fullness, 1); head = list_first_entry_or_null(&class->fullness_list[fullness], struct zspage, list); - - zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ? - CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1); - /* * We want to see more ZS_FULL pages and less almost empty/full. * Put pages with higher ->inuse first. @@ -734,14 +789,11 @@ static void remove_zspage(struct size_class *class, struct zspage *zspage, enum fullness_group fullness) { - if (fullness >= ZS_EMPTY) - return; - VM_BUG_ON(list_empty(&class->fullness_list[fullness])); + VM_BUG_ON(is_zspage_isolated(zspage)); list_del_init(&zspage->list); - zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ? 
- CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1); + zs_stat_dec(class, fullness, 1); } /* @@ -764,8 +816,11 @@ static enum fullness_group fix_fullness_group(struct size_class *class, if (newfg == currfg) goto out; - remove_zspage(class, zspage, currfg); - insert_zspage(class, zspage, newfg); + if (!is_zspage_isolated(zspage)) { + remove_zspage(class, zspage, currfg); + insert_zspage(class, zspage, newfg); + } + set_zspage_mapping(zspage, class_idx, newfg); out: @@ -808,19 +863,20 @@ static int get_pages_per_zspage(int class_size) return max_usedpc_order; } -static struct page *get_first_page(struct zspage *zspage) -{ - return zspage->first_page; -} - static struct zspage *get_zspage(struct page *page) { - return (struct zspage *)page->private; + struct zspage *zspage = (struct zspage *)page->private; + + BUG_ON(zspage->magic != ZSPAGE_MAGIC); + return zspage; } static struct page *get_next_page(struct page *page) { - return page->next; + if (unlikely(PageHugeObject(page))) + return NULL; + + return page->freelist; } /** @@ -857,16 +913,20 @@ static unsigned long handle_to_obj(unsigned long handle) return *(unsigned long *)handle; } -static unsigned long obj_to_head(struct size_class *class, struct page *page, - void *obj) +static unsigned long obj_to_head(struct page *page, void *obj) { - if (class->huge) { + if (unlikely(PageHugeObject(page))) { VM_BUG_ON_PAGE(!is_first_page(page), page); return page->index; } else return *(unsigned long *)obj; } +static inline int testpin_tag(unsigned long handle) +{ + return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle); +} + static inline int trypin_tag(unsigned long handle) { return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle); @@ -884,27 +944,94 @@ static void unpin_tag(unsigned long handle) static void reset_page(struct page *page) { + __ClearPageMovable(page); clear_bit(PG_private, &page->flags); clear_bit(PG_private_2, &page->flags); set_page_private(page, 0); - page->index = 0; + page_mapcount_reset(page); + ClearPageHugeObject(page); + page->freelist = NULL; +} + +/* + * To prevent zspage destroy during migration, zspage freeing should + * hold locks of all pages in the zspage. 
+ */ +void lock_zspage(struct zspage *zspage) +{ + struct page *page = get_first_page(zspage); + + do { + lock_page(page); + } while ((page = get_next_page(page)) != NULL); +} + +int trylock_zspage(struct zspage *zspage) +{ + struct page *cursor, *fail; + + for (cursor = get_first_page(zspage); cursor != NULL; cursor = + get_next_page(cursor)) { + if (!trylock_page(cursor)) { + fail = cursor; + goto unlock; + } + } + + return 1; +unlock: + for (cursor = get_first_page(zspage); cursor != fail; cursor = + get_next_page(cursor)) + unlock_page(cursor); + + return 0; } -static void free_zspage(struct zs_pool *pool, struct zspage *zspage) +static void __free_zspage(struct zs_pool *pool, struct size_class *class, + struct zspage *zspage) { struct page *page, *next; + enum fullness_group fg; + unsigned int class_idx; + + get_zspage_mapping(zspage, &class_idx, &fg); + + assert_spin_locked(&class->lock); VM_BUG_ON(get_zspage_inuse(zspage)); + VM_BUG_ON(fg != ZS_EMPTY); - next = page = zspage->first_page; + next = page = get_first_page(zspage); do { - next = page->next; + VM_BUG_ON_PAGE(!PageLocked(page), page); + next = get_next_page(page); reset_page(page); + unlock_page(page); put_page(page); page = next; } while (page != NULL); cache_free_zspage(pool, zspage); + + zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage( + class->size, class->pages_per_zspage)); + atomic_long_sub(class->pages_per_zspage, + &pool->pages_allocated); +} + +static void free_zspage(struct zs_pool *pool, struct size_class *class, + struct zspage *zspage) +{ + VM_BUG_ON(get_zspage_inuse(zspage)); + VM_BUG_ON(list_empty(&zspage->list)); + + if (!trylock_zspage(zspage)) { + kick_deferred_free(pool); + return; + } + + remove_zspage(class, zspage, ZS_EMPTY); + __free_zspage(pool, class, zspage); } /* Initialize a newly allocated zspage */ @@ -912,7 +1039,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) { unsigned int freeobj = 1; unsigned long off = 0; - struct page *page = zspage->first_page; + struct page *page = get_first_page(zspage); while (page) { struct page *next_page; @@ -952,16 +1079,17 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) set_freeobj(zspage, 0); } -static void create_page_chain(struct zspage *zspage, struct page *pages[], - int nr_pages) +static void create_page_chain(struct size_class *class, struct zspage *zspage, + struct page *pages[]) { int i; struct page *page; struct page *prev_page = NULL; + int nr_pages = class->pages_per_zspage; /* * Allocate individual pages and link them together as: - * 1. all pages are linked together using page->next + * 1. all pages are linked together using page->freelist * 2. each sub-page point to zspage using page->private * * we set PG_private to identify the first page (i.e. 
no other sub-page @@ -970,16 +1098,18 @@ static void create_page_chain(struct zspage *zspage, struct page *pages[], for (i = 0; i < nr_pages; i++) { page = pages[i]; set_page_private(page, (unsigned long)zspage); + page->freelist = NULL; if (i == 0) { zspage->first_page = page; SetPagePrivate(page); + if (unlikely(class->objs_per_zspage == 1 && + class->pages_per_zspage == 1)) + SetPageHugeObject(page); } else { - prev_page->next = page; + prev_page->freelist = page; } - if (i == nr_pages - 1) { + if (i == nr_pages - 1) SetPagePrivate2(page); - page->next = NULL; - } prev_page = page; } } @@ -999,6 +1129,8 @@ static struct zspage *alloc_zspage(struct zs_pool *pool, return NULL; memset(zspage, 0, sizeof(struct zspage)); + zspage->magic = ZSPAGE_MAGIC; + migrate_lock_init(zspage); for (i = 0; i < class->pages_per_zspage; i++) { struct page *page; @@ -1013,7 +1145,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool, pages[i] = page; } - create_page_chain(zspage, pages, class->pages_per_zspage); + create_page_chain(class, zspage, pages); init_zspage(class, zspage); return zspage; @@ -1024,7 +1156,7 @@ static struct zspage *find_get_zspage(struct size_class *class) int i; struct zspage *zspage; - for (i = ZS_ALMOST_FULL; i <= ZS_ALMOST_EMPTY; i++) { + for (i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--) { zspage = list_first_entry_or_null(&class->fullness_list[i], struct zspage, list); if (zspage) @@ -1289,6 +1421,10 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, obj = handle_to_obj(handle); obj_to_location(obj, &page, &obj_idx); zspage = get_zspage(page); + + /* migration cannot move any subpage in this zspage */ + migrate_read_lock(zspage); + get_zspage_mapping(zspage, &class_idx, &fg); class = pool->size_class[class_idx]; off = (class->size * obj_idx) & ~PAGE_MASK; @@ -1309,7 +1445,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, ret = __zs_map_object(area, pages, off, class->size); out: - if (!class->huge) + if (likely(!PageHugeObject(page))) ret += ZS_HANDLE_SIZE; return ret; @@ -1348,6 +1484,8 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) __zs_unmap_object(area, pages, off, class->size); } put_cpu_var(zs_map_area); + + migrate_read_unlock(zspage); unpin_tag(handle); } EXPORT_SYMBOL_GPL(zs_unmap_object); @@ -1377,7 +1515,7 @@ static unsigned long obj_malloc(struct size_class *class, vaddr = kmap_atomic(m_page); link = (struct link_free *)vaddr + m_offset / sizeof(*link); set_freeobj(zspage, link->next >> OBJ_ALLOCATED_TAG); - if (!class->huge) + if (likely(!PageHugeObject(m_page))) /* record handle in the header of allocated chunk */ link->handle = handle; else @@ -1407,6 +1545,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) { unsigned long handle, obj; struct size_class *class; + enum fullness_group newfg; struct zspage *zspage; if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE)) @@ -1422,28 +1561,37 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) spin_lock(&class->lock); zspage = find_get_zspage(class); - - if (!zspage) { + if (likely(zspage)) { + obj = obj_malloc(class, zspage, handle); + /* Now move the zspage to another fullness group, if required */ + fix_fullness_group(class, zspage); + record_obj(handle, obj); spin_unlock(&class->lock); - zspage = alloc_zspage(pool, class, gfp); - if (unlikely(!zspage)) { - cache_free_handle(pool, handle); - return 0; - } - set_zspage_mapping(zspage, class->index, ZS_EMPTY); - atomic_long_add(class->pages_per_zspage, - 
&pool->pages_allocated); + return handle; + } - spin_lock(&class->lock); - zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage( - class->size, class->pages_per_zspage)); + spin_unlock(&class->lock); + + zspage = alloc_zspage(pool, class, gfp); + if (!zspage) { + cache_free_handle(pool, handle); + return 0; } + spin_lock(&class->lock); obj = obj_malloc(class, zspage, handle); - /* Now move the zspage to another fullness group, if required */ - fix_fullness_group(class, zspage); + newfg = get_fullness_group(class, zspage); + insert_zspage(class, zspage, newfg); + set_zspage_mapping(zspage, class->index, newfg); record_obj(handle, obj); + atomic_long_add(class->pages_per_zspage, + &pool->pages_allocated); + zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage( + class->size, class->pages_per_zspage)); + + /* We completely set up zspage so mark them as movable */ + SetZsPageMovable(pool, zspage); spin_unlock(&class->lock); return handle; @@ -1484,6 +1632,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle) int class_idx; struct size_class *class; enum fullness_group fullness; + bool isolated; if (unlikely(!handle)) return; @@ -1493,22 +1642,28 @@ void zs_free(struct zs_pool *pool, unsigned long handle) obj_to_location(obj, &f_page, &f_objidx); zspage = get_zspage(f_page); + migrate_read_lock(zspage); + get_zspage_mapping(zspage, &class_idx, &fullness); class = pool->size_class[class_idx]; spin_lock(&class->lock); obj_free(class, obj); fullness = fix_fullness_group(class, zspage); - if (fullness == ZS_EMPTY) { - zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage( - class->size, class->pages_per_zspage)); - atomic_long_sub(class->pages_per_zspage, - &pool->pages_allocated); - free_zspage(pool, zspage); + if (fullness != ZS_EMPTY) { + migrate_read_unlock(zspage); + goto out; } + + isolated = is_zspage_isolated(zspage); + migrate_read_unlock(zspage); + /* If zspage is isolated, zs_page_putback will free the zspage */ + if (likely(!isolated)) + free_zspage(pool, class, zspage); +out: + spin_unlock(&class->lock); unpin_tag(handle); - cache_free_handle(pool, handle); } EXPORT_SYMBOL_GPL(zs_free); @@ -1592,7 +1747,7 @@ static unsigned long find_alloced_obj(struct size_class *class, offset += class->size * index; while (offset < PAGE_SIZE) { - head = obj_to_head(class, page, addr + offset); + head = obj_to_head(page, addr + offset); if (head & OBJ_ALLOCATED_TAG) { handle = head & ~OBJ_ALLOCATED_TAG; if (trypin_tag(handle)) @@ -1684,6 +1839,7 @@ static struct zspage *isolate_zspage(struct size_class *class, bool source) zspage = list_first_entry_or_null(&class->fullness_list[fg[i]], struct zspage, list); if (zspage) { + VM_BUG_ON(is_zspage_isolated(zspage)); remove_zspage(class, zspage, fg[i]); return zspage; } @@ -1704,6 +1860,8 @@ static enum fullness_group putback_zspage(struct size_class *class, { enum fullness_group fullness; + VM_BUG_ON(is_zspage_isolated(zspage)); + fullness = get_fullness_group(class, zspage); insert_zspage(class, zspage, fullness); set_zspage_mapping(zspage, class->index, fullness); @@ -1711,6 +1869,378 @@ static enum fullness_group putback_zspage(struct size_class *class, return fullness; } +#ifdef CONFIG_COMPACTION +static struct dentry *zs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data) +{ + static const struct dentry_operations ops = { + .d_dname = simple_dname, + }; + + return mount_pseudo(fs_type, "zsmalloc:", NULL, &ops, ZSMALLOC_MAGIC); +} + +static struct file_system_type zsmalloc_fs = { + .name = "zsmalloc", 
+ .mount = zs_mount, + .kill_sb = kill_anon_super, +}; + +static int zsmalloc_mount(void) +{ + int ret = 0; + + zsmalloc_mnt = kern_mount(&zsmalloc_fs); + if (IS_ERR(zsmalloc_mnt)) + ret = PTR_ERR(zsmalloc_mnt); + + return ret; +} + +static void zsmalloc_unmount(void) +{ + kern_unmount(zsmalloc_mnt); +} + +static void migrate_lock_init(struct zspage *zspage) +{ + rwlock_init(&zspage->lock); +} + +static void migrate_read_lock(struct zspage *zspage) +{ + read_lock(&zspage->lock); +} + +static void migrate_read_unlock(struct zspage *zspage) +{ + read_unlock(&zspage->lock); +} + +static void migrate_write_lock(struct zspage *zspage) +{ + write_lock(&zspage->lock); +} + +static void migrate_write_unlock(struct zspage *zspage) +{ + write_unlock(&zspage->lock); +} + +/* Number of isolated subpage for *page migration* in this zspage */ +static void inc_zspage_isolation(struct zspage *zspage) +{ + zspage->isolated++; +} + +static void dec_zspage_isolation(struct zspage *zspage) +{ + zspage->isolated--; +} + +static void replace_sub_page(struct size_class *class, struct zspage *zspage, + struct page *newpage, struct page *oldpage) +{ + struct page *page; + struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, }; + int idx = 0; + + page = get_first_page(zspage); + do { + if (page == oldpage) + pages[idx] = newpage; + else + pages[idx] = page; + idx++; + } while ((page = get_next_page(page)) != NULL); + + create_page_chain(class, zspage, pages); + set_first_obj_offset(newpage, get_first_obj_offset(oldpage)); + if (unlikely(PageHugeObject(oldpage))) + newpage->index = oldpage->index; + __SetPageMovable(newpage, page_mapping(oldpage)); +} + +bool zs_page_isolate(struct page *page, isolate_mode_t mode) +{ + struct zs_pool *pool; + struct size_class *class; + int class_idx; + enum fullness_group fullness; + struct zspage *zspage; + struct address_space *mapping; + + /* + * Page is locked so zspage couldn't be destroyed. For detail, look at + * lock_zspage in free_zspage. + */ + VM_BUG_ON_PAGE(!PageMovable(page), page); + VM_BUG_ON_PAGE(PageIsolated(page), page); + + zspage = get_zspage(page); + + /* + * Without class lock, fullness could be stale while class_idx is okay + * because class_idx is constant unless page is freed so we should get + * fullness again under class lock. + */ + get_zspage_mapping(zspage, &class_idx, &fullness); + mapping = page_mapping(page); + pool = mapping->private_data; + class = pool->size_class[class_idx]; + + spin_lock(&class->lock); + if (get_zspage_inuse(zspage) == 0) { + spin_unlock(&class->lock); + return false; + } + + /* zspage is isolated for object migration */ + if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) { + spin_unlock(&class->lock); + return false; + } + + /* + * If this is first time isolation for the zspage, isolate zspage from + * size_class to prevent further object allocation from the zspage. 
+ */ + if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) { + get_zspage_mapping(zspage, &class_idx, &fullness); + remove_zspage(class, zspage, fullness); + } + + inc_zspage_isolation(zspage); + spin_unlock(&class->lock); + + return true; +} + +int zs_page_migrate(struct address_space *mapping, struct page *newpage, + struct page *page, enum migrate_mode mode) +{ + struct zs_pool *pool; + struct size_class *class; + int class_idx; + enum fullness_group fullness; + struct zspage *zspage; + struct page *dummy; + void *s_addr, *d_addr, *addr; + int offset, pos; + unsigned long handle, head; + unsigned long old_obj, new_obj; + unsigned int obj_idx; + int ret = -EAGAIN; + + VM_BUG_ON_PAGE(!PageMovable(page), page); + VM_BUG_ON_PAGE(!PageIsolated(page), page); + + zspage = get_zspage(page); + + /* Concurrent compactor cannot migrate any subpage in zspage */ + migrate_write_lock(zspage); + get_zspage_mapping(zspage, &class_idx, &fullness); + pool = mapping->private_data; + class = pool->size_class[class_idx]; + offset = get_first_obj_offset(page); + + spin_lock(&class->lock); + if (!get_zspage_inuse(zspage)) { + ret = -EBUSY; + goto unlock_class; + } + + pos = offset; + s_addr = kmap_atomic(page); + while (pos < PAGE_SIZE) { + head = obj_to_head(page, s_addr + pos); + if (head & OBJ_ALLOCATED_TAG) { + handle = head & ~OBJ_ALLOCATED_TAG; + if (!trypin_tag(handle)) + goto unpin_objects; + } + pos += class->size; + } + + /* + * Here, any user cannot access all objects in the zspage so let's move. + */ + d_addr = kmap_atomic(newpage); + memcpy(d_addr, s_addr, PAGE_SIZE); + kunmap_atomic(d_addr); + + for (addr = s_addr + offset; addr < s_addr + pos; + addr += class->size) { + head = obj_to_head(page, addr); + if (head & OBJ_ALLOCATED_TAG) { + handle = head & ~OBJ_ALLOCATED_TAG; + if (!testpin_tag(handle)) + BUG(); + + old_obj = handle_to_obj(handle); + obj_to_location(old_obj, &dummy, &obj_idx); + new_obj = (unsigned long)location_to_obj(newpage, + obj_idx); + new_obj |= BIT(HANDLE_PIN_BIT); + record_obj(handle, new_obj); + } + } + + replace_sub_page(class, zspage, newpage, page); + get_page(newpage); + + dec_zspage_isolation(zspage); + + /* + * Page migration is done so let's putback isolated zspage to + * the list if @page is final isolated subpage in the zspage. + */ + if (!is_zspage_isolated(zspage)) + putback_zspage(class, zspage); + + reset_page(page); + put_page(page); + page = newpage; + + ret = 0; +unpin_objects: + for (addr = s_addr + offset; addr < s_addr + pos; + addr += class->size) { + head = obj_to_head(page, addr); + if (head & OBJ_ALLOCATED_TAG) { + handle = head & ~OBJ_ALLOCATED_TAG; + if (!testpin_tag(handle)) + BUG(); + unpin_tag(handle); + } + } + kunmap_atomic(s_addr); +unlock_class: + spin_unlock(&class->lock); + migrate_write_unlock(zspage); + + return ret; +} + +void zs_page_putback(struct page *page) +{ + struct zs_pool *pool; + struct size_class *class; + int class_idx; + enum fullness_group fg; + struct address_space *mapping; + struct zspage *zspage; + + VM_BUG_ON_PAGE(!PageMovable(page), page); + VM_BUG_ON_PAGE(!PageIsolated(page), page); + + zspage = get_zspage(page); + get_zspage_mapping(zspage, &class_idx, &fg); + mapping = page_mapping(page); + pool = mapping->private_data; + class = pool->size_class[class_idx]; + + spin_lock(&class->lock); + dec_zspage_isolation(zspage); + if (!is_zspage_isolated(zspage)) { + fg = putback_zspage(class, zspage); + /* + * Due to page_lock, we cannot free zspage immediately + * so let's defer. 
+ */ + if (fg == ZS_EMPTY) + schedule_work(&pool->free_work); + } + spin_unlock(&class->lock); +} + +const struct address_space_operations zsmalloc_aops = { + .isolate_page = zs_page_isolate, + .migratepage = zs_page_migrate, + .putback_page = zs_page_putback, +}; + +static int zs_register_migration(struct zs_pool *pool) +{ + pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb); + if (IS_ERR(pool->inode)) { + pool->inode = NULL; + return 1; + } + + pool->inode->i_mapping->private_data = pool; + pool->inode->i_mapping->a_ops = &zsmalloc_aops; + return 0; +} + +static void zs_unregister_migration(struct zs_pool *pool) +{ + flush_work(&pool->free_work); + if (pool->inode) + iput(pool->inode); +} + +/* + * Caller should hold page_lock of all pages in the zspage + * In here, we cannot use zspage meta data. + */ +static void async_free_zspage(struct work_struct *work) +{ + int i; + struct size_class *class; + unsigned int class_idx; + enum fullness_group fullness; + struct zspage *zspage, *tmp; + LIST_HEAD(free_pages); + struct zs_pool *pool = container_of(work, struct zs_pool, + free_work); + + for (i = 0; i < zs_size_classes; i++) { + class = pool->size_class[i]; + if (class->index != i) + continue; + + spin_lock(&class->lock); + list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages); + spin_unlock(&class->lock); + } + + + list_for_each_entry_safe(zspage, tmp, &free_pages, list) { + list_del(&zspage->list); + lock_zspage(zspage); + + get_zspage_mapping(zspage, &class_idx, &fullness); + VM_BUG_ON(fullness != ZS_EMPTY); + class = pool->size_class[class_idx]; + spin_lock(&class->lock); + __free_zspage(pool, pool->size_class[class_idx], zspage); + spin_unlock(&class->lock); + } +}; + +static void kick_deferred_free(struct zs_pool *pool) +{ + schedule_work(&pool->free_work); +} + +static void init_deferred_free(struct zs_pool *pool) +{ + INIT_WORK(&pool->free_work, async_free_zspage); +} + +static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) +{ + struct page *page = get_first_page(zspage); + + do { + WARN_ON(!trylock_page(page)); + __SetPageMovable(page, pool->inode->i_mapping); + unlock_page(page); + } while ((page = get_next_page(page)) != NULL); +} +#endif + /* * * Based on the number of unused allocated objects calculate @@ -1745,10 +2275,10 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) break; cc.index = 0; - cc.s_page = src_zspage->first_page; + cc.s_page = get_first_page(src_zspage); while ((dst_zspage = isolate_zspage(class, false))) { - cc.d_page = dst_zspage->first_page; + cc.d_page = get_first_page(dst_zspage); /* * If there is no more space in dst_page, resched * and see if anyone had allocated another zspage. 
@@ -1765,11 +2295,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class) putback_zspage(class, dst_zspage); if (putback_zspage(class, src_zspage) == ZS_EMPTY) { - zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage( - class->size, class->pages_per_zspage)); - atomic_long_sub(class->pages_per_zspage, - &pool->pages_allocated); - free_zspage(pool, src_zspage); + free_zspage(pool, class, src_zspage); pool->stats.pages_compacted += class->pages_per_zspage; } spin_unlock(&class->lock); @@ -1885,6 +2411,7 @@ struct zs_pool *zs_create_pool(const char *name) if (!pool) return NULL; + init_deferred_free(pool); pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *), GFP_KERNEL); if (!pool->size_class) { @@ -1939,12 +2466,10 @@ struct zs_pool *zs_create_pool(const char *name) class->pages_per_zspage = pages_per_zspage; class->objs_per_zspage = class->pages_per_zspage * PAGE_SIZE / class->size; - if (pages_per_zspage == 1 && class->objs_per_zspage == 1) - class->huge = true; spin_lock_init(&class->lock); pool->size_class[i] = class; - for (fullness = ZS_ALMOST_FULL; fullness <= ZS_ALMOST_EMPTY; - fullness++) + for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS; + fullness++) INIT_LIST_HEAD(&class->fullness_list[fullness]); prev_class = class; @@ -1953,6 +2478,9 @@ struct zs_pool *zs_create_pool(const char *name) /* debug only, don't abort if it fails */ zs_pool_stat_create(pool, name); + if (zs_register_migration(pool)) + goto err; + /* * Not critical, we still can use the pool * and user can trigger compaction manually. @@ -1972,6 +2500,7 @@ void zs_destroy_pool(struct zs_pool *pool) int i; zs_unregister_shrinker(pool); + zs_unregister_migration(pool); zs_pool_stat_destroy(pool); for (i = 0; i < zs_size_classes; i++) { @@ -1984,7 +2513,7 @@ void zs_destroy_pool(struct zs_pool *pool) if (class->index != i) continue; - for (fg = ZS_ALMOST_FULL; fg <= ZS_ALMOST_EMPTY; fg++) { + for (fg = ZS_EMPTY; fg < NR_ZS_FULLNESS; fg++) { if (!list_empty(&class->fullness_list[fg])) { pr_info("Freeing non-empty class with size %db, fullness group %d\n", class->size, fg); @@ -2002,7 +2531,13 @@ EXPORT_SYMBOL_GPL(zs_destroy_pool); static int __init zs_init(void) { - int ret = zs_register_cpu_notifier(); + int ret; + + ret = zsmalloc_mount(); + if (ret) + goto out; + + ret = zs_register_cpu_notifier(); if (ret) goto notifier_fail; @@ -2019,7 +2554,8 @@ static int __init zs_init(void) notifier_fail: zs_unregister_cpu_notifier(); - + zsmalloc_unmount(); +out: return ret; } @@ -2028,6 +2564,7 @@ static void __exit zs_exit(void) #ifdef CONFIG_ZPOOL zpool_unregister_driver(&zs_zpool_driver); #endif + zsmalloc_unmount(); zs_unregister_cpu_notifier(); zs_stat_exit(); -- cgit v1.2.3-70-g09d2 From 9bc482d3460501ac809457af26b46b72cd7dc212 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 26 Jul 2016 15:23:34 -0700 Subject: zram: use __GFP_MOVABLE for memory allocation Zsmalloc is ready for page migration so zram can use __GFP_MOVABLE from now on. I did test to see how it helps to make higher order pages. Test scenario is as follows. 
KVM guest, 1G memory, ext4 formated zram block device, for i in `seq 1 8`; do dd if=/dev/vda1 of=mnt/test$i.txt bs=128M count=1 & done wait `pidof dd` for i in `seq 1 2 8`; do rm -rf mnt/test$i.txt done fstrim -v mnt echo "init" cat /proc/buddyinfo echo "compaction" echo 1 > /proc/sys/vm/compact_memory cat /proc/buddyinfo old: init Node 0, zone DMA 208 120 51 41 11 0 0 0 0 0 0 Node 0, zone DMA32 16380 13777 9184 3805 789 54 3 0 0 0 0 compaction Node 0, zone DMA 132 82 40 39 16 2 1 0 0 0 0 Node 0, zone DMA32 5219 5526 4969 3455 1831 677 139 15 0 0 0 new: init Node 0, zone DMA 379 115 97 19 2 0 0 0 0 0 0 Node 0, zone DMA32 18891 16774 10862 3947 637 21 0 0 0 0 0 compaction Node 0, zone DMA 214 66 87 29 10 3 0 0 0 0 0 Node 0, zone DMA32 1612 3139 3154 2469 1745 990 384 94 7 0 0 As you can see, compaction made so many high-order pages. Yay! Link: http://lkml.kernel.org/r/1464736881-24886-13-git-send-email-minchan@kernel.org Signed-off-by: Minchan Kim Reviewed-by: Sergey Senozhatsky Cc: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/zram/zram_drv.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index c2a1d7dbaec9..9e2a83c3f19f 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -738,7 +738,8 @@ compress_again: handle = zs_malloc(meta->mem_pool, clen, __GFP_KSWAPD_RECLAIM | __GFP_NOWARN | - __GFP_HIGHMEM); + __GFP_HIGHMEM | + __GFP_MOVABLE); if (!handle) { zcomp_stream_put(zram->comp); zstrm = NULL; @@ -746,7 +747,8 @@ compress_again: atomic64_inc(&zram->stats.writestall); handle = zs_malloc(meta->mem_pool, clen, - GFP_NOIO | __GFP_HIGHMEM); + GFP_NOIO | __GFP_HIGHMEM | + __GFP_MOVABLE); if (handle) goto compress_again; -- cgit v1.2.3-70-g09d2 From 3b1d9ca65a80ced8ae737ffb11ae939334a882ca Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 26 Jul 2016 15:23:37 -0700 Subject: zsmalloc: use OBJ_TAG_BIT for bit shifter Static check warns using tag as bit shifter. It doesn't break current working but not good for redability. Let's use OBJ_TAG_BIT as bit shifter instead of OBJ_ALLOCATED_TAG. Link: http://lkml.kernel.org/r/20160607045146.GF26230@bbox Signed-off-by: Minchan Kim Reported-by: Dan Carpenter Cc: Sergey Senozhatsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/zsmalloc.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 04a4f063b4fd..6b6986a02aa0 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1052,7 +1052,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) link = (struct link_free *)vaddr + off / sizeof(*link); while ((off += class->size) < PAGE_SIZE) { - link->next = freeobj++ << OBJ_ALLOCATED_TAG; + link->next = freeobj++ << OBJ_TAG_BITS; link += class->size / sizeof(*link); } @@ -1063,13 +1063,13 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) */ next_page = get_next_page(page); if (next_page) { - link->next = freeobj++ << OBJ_ALLOCATED_TAG; + link->next = freeobj++ << OBJ_TAG_BITS; } else { /* - * Reset OBJ_ALLOCATED_TAG bit to last link to tell + * Reset OBJ_TAG_BITS bit to last link to tell * whether it's allocated object or not. 
*/ - link->next = -1 << OBJ_ALLOCATED_TAG; + link->next = -1 << OBJ_TAG_BITS; } kunmap_atomic(vaddr); page = next_page; @@ -1514,7 +1514,7 @@ static unsigned long obj_malloc(struct size_class *class, vaddr = kmap_atomic(m_page); link = (struct link_free *)vaddr + m_offset / sizeof(*link); - set_freeobj(zspage, link->next >> OBJ_ALLOCATED_TAG); + set_freeobj(zspage, link->next >> OBJ_TAG_BITS); if (likely(!PageHugeObject(m_page))) /* record handle in the header of allocated chunk */ link->handle = handle; @@ -1616,7 +1616,7 @@ static void obj_free(struct size_class *class, unsigned long obj) /* Insert this object in containing zspage's freelist */ link = (struct link_free *)(vaddr + f_offset); - link->next = get_freeobj(zspage) << OBJ_ALLOCATED_TAG; + link->next = get_freeobj(zspage) << OBJ_TAG_BITS; kunmap_atomic(vaddr); set_freeobj(zspage, f_objidx); mod_zspage_inuse(zspage, -1); -- cgit v1.2.3-70-g09d2 From 66c64223ad4e7a4a9161fcd9606426d9f57227ca Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 26 Jul 2016 15:23:40 -0700 Subject: mm/compaction: split freepages without holding the zone lock We don't need to split freepages with holding the zone lock. It will cause more contention on zone lock so not desirable. [rientjes@google.com: if __isolate_free_page() fails, avoid adding to freelist so we don't call map_pages() with it] Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1606211447001.43430@chino.kir.corp.google.com Link: http://lkml.kernel.org/r/1464230275-25791-1-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Joonsoo Kim Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Minchan Kim Cc: Alexander Potapenko Cc: Hugh Dickins Cc: Michal Hocko Signed-off-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 - mm/compaction.c | 47 +++++++++++++++++++++++++++++++++-------------- mm/page_alloc.c | 27 --------------------------- 3 files changed, 33 insertions(+), 42 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 3e22335a435c..6c9a394b2979 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -537,7 +537,6 @@ void __put_page(struct page *page); void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); -int split_free_page(struct page *page); /* * Compound pages have a destructor function. 
Provide a diff --git a/mm/compaction.c b/mm/compaction.c index 6095055bd70f..3cda95451d93 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -64,13 +64,31 @@ static unsigned long release_freepages(struct list_head *freelist) static void map_pages(struct list_head *list) { - struct page *page; + unsigned int i, order, nr_pages; + struct page *page, *next; + LIST_HEAD(tmp_list); + + list_for_each_entry_safe(page, next, list, lru) { + list_del(&page->lru); - list_for_each_entry(page, list, lru) { - arch_alloc_page(page, 0); - kernel_map_pages(page, 1, 1); - kasan_alloc_pages(page, 0); + order = page_private(page); + nr_pages = 1 << order; + set_page_private(page, 0); + set_page_refcounted(page); + + arch_alloc_page(page, order); + kernel_map_pages(page, nr_pages, 1); + kasan_alloc_pages(page, order); + if (order) + split_page(page, order); + + for (i = 0; i < nr_pages; i++) { + list_add(&page->lru, &tmp_list); + page++; + } } + + list_splice(&tmp_list, list); } static inline bool migrate_async_suitable(int migratetype) @@ -405,12 +423,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, unsigned long flags = 0; bool locked = false; unsigned long blockpfn = *start_pfn; + unsigned int order; cursor = pfn_to_page(blockpfn); /* Isolate free pages. */ for (; blockpfn < end_pfn; blockpfn++, cursor++) { - int isolated, i; + int isolated; struct page *page = cursor; /* @@ -476,17 +495,17 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, goto isolate_fail; } - /* Found a free page, break it into order-0 pages */ - isolated = split_free_page(page); + /* Found a free page, will break it into order-0 pages */ + order = page_order(page); + isolated = __isolate_free_page(page, order); if (!isolated) break; + set_page_private(page, order); total_isolated += isolated; cc->nr_freepages += isolated; - for (i = 0; i < isolated; i++) { - list_add(&page->lru, freelist); - page++; - } + list_add_tail(&page->lru, freelist); + if (!strict && cc->nr_migratepages <= cc->nr_freepages) { blockpfn += isolated; break; @@ -605,7 +624,7 @@ isolate_freepages_range(struct compact_control *cc, */ } - /* split_free_page does not map the pages */ + /* __isolate_free_page() does not map the pages */ map_pages(&freelist); if (pfn < end_pfn) { @@ -1102,7 +1121,7 @@ static void isolate_freepages(struct compact_control *cc) } } - /* split_free_page does not map the pages */ + /* __isolate_free_page() does not map the pages */ map_pages(freelist); /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8b2623683431..44cee1e1d65b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2526,33 +2526,6 @@ int __isolate_free_page(struct page *page, unsigned int order) return 1UL << order; } -/* - * Similar to split_page except the page is already free. As this is only - * being used for migration, the migratetype of the block also changes. - * As this is called with interrupts disabled, the caller is responsible - * for calling arch_alloc_page() and kernel_map_page() after interrupts - * are enabled. - * - * Note: this is probably too low level an operation for use in drivers. - * Please consult with lkml before using this in your driver. 
- */ -int split_free_page(struct page *page) -{ - unsigned int order; - int nr_pages; - - order = page_order(page); - - nr_pages = __isolate_free_page(page, order); - if (!nr_pages) - return 0; - - /* Split into individual pages */ - set_page_refcounted(page); - split_page(page, order); - return nr_pages; -} - /* * Update NUMA hit/miss statistics * -- cgit v1.2.3-70-g09d2 From 83358ece26b70f20c0ba2e0e00dc84b0ee24fe6d Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 26 Jul 2016 15:23:43 -0700 Subject: mm/page_owner: initialize page owner without holding the zone lock It's not necessary to initialized page_owner with holding the zone lock. It would cause more contention on the zone lock although it's not a big problem since it is just debug feature. But, it is better than before so do it. This is also preparation step to use stackdepot in page owner feature. Stackdepot allocates new pages when there is no reserved space and holding the zone lock in this case will cause deadlock. Link: http://lkml.kernel.org/r/1464230275-25791-2-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Joonsoo Kim Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Minchan Kim Cc: Alexander Potapenko Cc: Hugh Dickins Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 3 +++ mm/page_alloc.c | 2 -- mm/page_isolation.c | 9 ++++++--- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index 3cda95451d93..4ae1294068a8 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "internal.h" #ifdef CONFIG_COMPACTION @@ -79,6 +80,8 @@ static void map_pages(struct list_head *list) arch_alloc_page(page, order); kernel_map_pages(page, nr_pages, 1); kasan_alloc_pages(page, order); + + set_page_owner(page, order, __GFP_MOVABLE); if (order) split_page(page, order); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 44cee1e1d65b..f07552fc43e1 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2509,8 +2509,6 @@ int __isolate_free_page(struct page *page, unsigned int order) zone->free_area[order].nr_free--; rmv_page_order(page); - set_page_owner(page, order, __GFP_MOVABLE); - /* Set the pageblock if the isolated page is at least a pageblock */ if (order >= pageblock_order - 1) { struct page *endpage = page + (1 << order) - 1; diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 612122bf6a42..927f5ee24c87 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "internal.h" #define CREATE_TRACE_POINTS @@ -108,8 +109,6 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) if (pfn_valid_within(page_to_pfn(buddy)) && !is_migrate_isolate_page(buddy)) { __isolate_free_page(page, order); - kernel_map_pages(page, (1 << order), 1); - set_page_refcounted(page); isolated_page = page; } } @@ -128,8 +127,12 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) zone->nr_isolate_pageblock--; out: spin_unlock_irqrestore(&zone->lock, flags); - if (isolated_page) + if (isolated_page) { + kernel_map_pages(page, (1 << order), 1); + set_page_refcounted(page); + set_page_owner(page, order, __GFP_MOVABLE); __free_pages(isolated_page, order); + } } static inline struct page * -- cgit v1.2.3-70-g09d2 From a8efe1c982a22c95884dee1ddf2e721567d1f483 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 26 Jul 2016 15:23:46 -0700 Subject: mm/page_owner: copy last_migrate_reason in 
copy_page_owner() Currently, copy_page_owner() doesn't copy all the owner information. It skips last_migrate_reason because copy_page_owner() is used for migration and it will be properly set soon. But, following patch will use copy_page_owner() and this skip will cause the problem that allocated page has uninitialied last_migrate_reason. To prevent it, this patch also copy last_migrate_reason in copy_page_owner(). Link: http://lkml.kernel.org/r/1464230275-25791-3-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Joonsoo Kim Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Minchan Kim Cc: Alexander Potapenko Cc: Hugh Dickins Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_owner.c | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/page_owner.c b/mm/page_owner.c index fedeba88c9cb..437877f5b774 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -118,6 +118,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage) new_ext->order = old_ext->order; new_ext->gfp_mask = old_ext->gfp_mask; + new_ext->last_migrate_reason = old_ext->last_migrate_reason; new_ext->nr_entries = old_ext->nr_entries; for (i = 0; i < ARRAY_SIZE(new_ext->trace_entries); i++) -- cgit v1.2.3-70-g09d2 From a9627bc5e34e79ae80a33241b8a1501cc498e191 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 26 Jul 2016 15:23:49 -0700 Subject: mm/page_owner: introduce split_page_owner and replace manual handling split_page() calls set_page_owner() to set up page_owner to each pages. But, it has a drawback that head page and the others have different stacktrace because callsite of set_page_owner() is slightly differnt. To avoid this problem, this patch copies head page's page_owner to the others. It needs to introduce new function, split_page_owner() but it also remove the other function, get_page_owner_gfp() so looks good to do. 
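The idea in isolation, as a minimal C sketch with hypothetical names rather than the kernel's page_ext API: the owner record is written once at allocation time, and a split simply replicates the head page's record into every sub-page, so all of them keep reporting the original allocation site instead of the split_page() call site.

#include <string.h>

/* Hypothetical stand-in for the page_owner record kept in page_ext. */
struct owner_rec {
	unsigned int order;
	unsigned int gfp_mask;
	char site[32];		/* stand-in for the saved stacktrace */
};

/*
 * On split, every sub-page inherits the head page's record, so a later
 * dump keeps pointing at the original allocation site rather than at
 * whoever called split_page().
 */
static void split_owner_sketch(struct owner_rec *owners, unsigned int order)
{
	unsigned int i;

	owners[0].order = 0;	/* the head is now an order-0 page */
	for (i = 1; i < (1u << order); i++)
		memcpy(&owners[i], &owners[0], sizeof(owners[0]));
}
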
Link: http://lkml.kernel.org/r/1464230275-25791-4-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Joonsoo Kim Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Minchan Kim Cc: Alexander Potapenko Cc: Hugh Dickins Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page_owner.h | 12 +++++------- mm/page_alloc.c | 8 ++------ mm/page_owner.c | 14 +++++++------- 3 files changed, 14 insertions(+), 20 deletions(-) diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index 46f1b939948c..30583ab0ffb1 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -10,7 +10,7 @@ extern struct page_ext_operations page_owner_ops; extern void __reset_page_owner(struct page *page, unsigned int order); extern void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask); -extern gfp_t __get_page_owner_gfp(struct page *page); +extern void __split_page_owner(struct page *page, unsigned int order); extern void __copy_page_owner(struct page *oldpage, struct page *newpage); extern void __set_page_owner_migrate_reason(struct page *page, int reason); extern void __dump_page_owner(struct page *page); @@ -28,12 +28,10 @@ static inline void set_page_owner(struct page *page, __set_page_owner(page, order, gfp_mask); } -static inline gfp_t get_page_owner_gfp(struct page *page) +static inline void split_page_owner(struct page *page, unsigned int order) { if (static_branch_unlikely(&page_owner_inited)) - return __get_page_owner_gfp(page); - else - return 0; + __split_page_owner(page, order); } static inline void copy_page_owner(struct page *oldpage, struct page *newpage) { @@ -58,9 +56,9 @@ static inline void set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) { } -static inline gfp_t get_page_owner_gfp(struct page *page) +static inline void split_page_owner(struct page *page, + unsigned int order) { - return 0; } static inline void copy_page_owner(struct page *oldpage, struct page *newpage) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f07552fc43e1..a82b303c19b1 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2461,7 +2461,6 @@ void free_hot_cold_page_list(struct list_head *list, bool cold) void split_page(struct page *page, unsigned int order) { int i; - gfp_t gfp_mask; VM_BUG_ON_PAGE(PageCompound(page), page); VM_BUG_ON_PAGE(!page_count(page), page); @@ -2475,12 +2474,9 @@ void split_page(struct page *page, unsigned int order) split_page(virt_to_page(page[0].shadow), order); #endif - gfp_mask = get_page_owner_gfp(page); - set_page_owner(page, 0, gfp_mask); - for (i = 1; i < (1 << order); i++) { + for (i = 1; i < (1 << order); i++) set_page_refcounted(page + i); - set_page_owner(page + i, 0, gfp_mask); - } + split_page_owner(page, order); } EXPORT_SYMBOL_GPL(split_page); diff --git a/mm/page_owner.c b/mm/page_owner.c index 437877f5b774..31b69437a3d6 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -94,17 +94,17 @@ void __set_page_owner_migrate_reason(struct page *page, int reason) page_ext->last_migrate_reason = reason; } -gfp_t __get_page_owner_gfp(struct page *page) +void __split_page_owner(struct page *page, unsigned int order) { + int i; struct page_ext *page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) - /* - * The caller just returns 0 if no valid gfp - * So return 0 here too. 
- */ - return 0; + return; - return page_ext->gfp_mask; + page_ext->order = 0; + for (i = 1; i < (1 << order); i++) + __copy_page_owner(page, page + i); } void __copy_page_owner(struct page *oldpage, struct page *newpage) -- cgit v1.2.3-70-g09d2 From 371376750fce0abb09b1aa3fd8ae7025813a3488 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 26 Jul 2016 15:23:52 -0700 Subject: tools/vm/page_owner: increase temporary buffer size Page owner will be changed to store more deep stacktrace so current temporary buffer size isn't enough. Increase it. Link: http://lkml.kernel.org/r/1464230275-25791-5-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Joonsoo Kim Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Minchan Kim Cc: Alexander Potapenko Cc: Hugh Dickins Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- tools/vm/page_owner_sort.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c index 77147b42d598..f1c055f3c243 100644 --- a/tools/vm/page_owner_sort.c +++ b/tools/vm/page_owner_sort.c @@ -79,12 +79,12 @@ static void add_list(char *buf, int len) } } -#define BUF_SIZE 1024 +#define BUF_SIZE (128 * 1024) int main(int argc, char **argv) { FILE *fin, *fout; - char buf[BUF_SIZE]; + char *buf; int ret, i, count; struct block_list *list2; struct stat st; @@ -107,6 +107,11 @@ int main(int argc, char **argv) max_size = st.st_size / 100; /* hack ... */ list = malloc(max_size * sizeof(*list)); + buf = malloc(BUF_SIZE); + if (!list || !buf) { + printf("Out of memory\n"); + exit(1); + } for ( ; ; ) { ret = read_block(buf, BUF_SIZE, fin); -- cgit v1.2.3-70-g09d2 From f2ca0b55710752588ccff5224a11e6aea43a996a Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 26 Jul 2016 15:23:55 -0700 Subject: mm/page_owner: use stackdepot to store stacktrace Currently, we store each page's allocation stacktrace on corresponding page_ext structure and it requires a lot of memory. This causes the problem that memory tight system doesn't work well if page_owner is enabled. Moreover, even with this large memory consumption, we cannot get full stacktrace because we allocate memory at boot time and just maintain 8 stacktrace slots to balance memory consumption. We could increase it to more but it would make system unusable or change system behaviour. To solve the problem, this patch uses stackdepot to store stacktrace. It obviously provides memory saving but there is a drawback that stackdepot could fail. stackdepot allocates memory at runtime so it could fail if system has not enough memory. But, most of allocation stack are generated at very early time and there are much memory at this time. So, failure would not happen easily. And, one failure means that we miss just one page's allocation stacktrace so it would not be a big problem. In this patch, when memory allocation failure happens, we store special stracktrace handle to the page that is failed to save stacktrace. With it, user can guess memory usage properly even if failure happens. Memory saving looks as following. (4GB memory system with page_owner) (before the patch -> after the patch) static allocation: 92274688 bytes -> 25165824 bytes dynamic allocation after boot + kernel build: 0 bytes -> 327680 bytes total: 92274688 bytes -> 25493504 bytes 72% reduction in total. Note that implementation looks complex than someone would imagine because there is recursion issue. stackdepot uses page allocator and page_owner is called at page allocation. 
Using stackdepot in page_owner could re-call page allcator and then page_owner. That is a recursion. To detect and avoid it, whenever we obtain stacktrace, recursion is checked and page_owner is set to dummy information if found. Dummy information means that this page is allocated for page_owner feature itself (such as stackdepot) and it's understandable behavior for user. [iamjoonsoo.kim@lge.com: mm-page_owner-use-stackdepot-to-store-stacktrace-v3] Link: http://lkml.kernel.org/r/1464230275-25791-6-git-send-email-iamjoonsoo.kim@lge.com Link: http://lkml.kernel.org/r/1466150259-27727-7-git-send-email-iamjoonsoo.kim@lge.com Link: http://lkml.kernel.org/r/1464230275-25791-6-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Joonsoo Kim Acked-by: Vlastimil Babka Acked-by: Michal Hocko Cc: Mel Gorman Cc: Minchan Kim Cc: Alexander Potapenko Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page_ext.h | 4 +- lib/Kconfig.debug | 1 + mm/page_owner.c | 142 ++++++++++++++++++++++++++++++++++++++++------- 3 files changed, 126 insertions(+), 21 deletions(-) diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index e1fe7cf5bddf..03f2a3e7d76d 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -3,6 +3,7 @@ #include #include +#include struct pglist_data; struct page_ext_operations { @@ -44,9 +45,8 @@ struct page_ext { #ifdef CONFIG_PAGE_OWNER unsigned int order; gfp_t gfp_mask; - unsigned int nr_entries; int last_migrate_reason; - unsigned long trace_entries[8]; + depot_stack_handle_t handle; #endif }; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 805b7048a1bd..f07842e2d69f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -244,6 +244,7 @@ config PAGE_OWNER depends on DEBUG_KERNEL && STACKTRACE_SUPPORT select DEBUG_FS select STACKTRACE + select STACKDEPOT select PAGE_EXTENSION help This keeps track of what call chain is the owner of a page, may diff --git a/mm/page_owner.c b/mm/page_owner.c index 31b69437a3d6..ec6dc1886f71 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -7,11 +7,22 @@ #include #include #include +#include + #include "internal.h" +/* + * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack) + * to use off stack temporal storage + */ +#define PAGE_OWNER_STACK_DEPTH (16) + static bool page_owner_disabled = true; DEFINE_STATIC_KEY_FALSE(page_owner_inited); +static depot_stack_handle_t dummy_handle; +static depot_stack_handle_t failure_handle; + static void init_early_allocated_pages(void); static int early_page_owner_param(char *buf) @@ -34,11 +45,41 @@ static bool need_page_owner(void) return true; } +static noinline void register_dummy_stack(void) +{ + unsigned long entries[4]; + struct stack_trace dummy; + + dummy.nr_entries = 0; + dummy.max_entries = ARRAY_SIZE(entries); + dummy.entries = &entries[0]; + dummy.skip = 0; + + save_stack_trace(&dummy); + dummy_handle = depot_save_stack(&dummy, GFP_KERNEL); +} + +static noinline void register_failure_stack(void) +{ + unsigned long entries[4]; + struct stack_trace failure; + + failure.nr_entries = 0; + failure.max_entries = ARRAY_SIZE(entries); + failure.entries = &entries[0]; + failure.skip = 0; + + save_stack_trace(&failure); + failure_handle = depot_save_stack(&failure, GFP_KERNEL); +} + static void init_page_owner(void) { if (page_owner_disabled) return; + register_dummy_stack(); + register_failure_stack(); static_branch_enable(&page_owner_inited); init_early_allocated_pages(); } @@ -61,25 +102,66 @@ void 
__reset_page_owner(struct page *page, unsigned int order) } } -void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) +static inline bool check_recursive_alloc(struct stack_trace *trace, + unsigned long ip) { - struct page_ext *page_ext = lookup_page_ext(page); + int i, count; + + if (!trace->nr_entries) + return false; + + for (i = 0, count = 0; i < trace->nr_entries; i++) { + if (trace->entries[i] == ip && ++count == 2) + return true; + } + return false; +} + +static noinline depot_stack_handle_t save_stack(gfp_t flags) +{ + unsigned long entries[PAGE_OWNER_STACK_DEPTH]; struct stack_trace trace = { .nr_entries = 0, - .max_entries = ARRAY_SIZE(page_ext->trace_entries), - .entries = &page_ext->trace_entries[0], - .skip = 3, + .entries = entries, + .max_entries = PAGE_OWNER_STACK_DEPTH, + .skip = 0 }; + depot_stack_handle_t handle; + + save_stack_trace(&trace); + if (trace.nr_entries != 0 && + trace.entries[trace.nr_entries-1] == ULONG_MAX) + trace.nr_entries--; + + /* + * We need to check recursion here because our request to stackdepot + * could trigger memory allocation to save new entry. New memory + * allocation would reach here and call depot_save_stack() again + * if we don't catch it. There is still not enough memory in stackdepot + * so it would try to allocate memory again and loop forever. + */ + if (check_recursive_alloc(&trace, _RET_IP_)) + return dummy_handle; + + handle = depot_save_stack(&trace, flags); + if (!handle) + handle = failure_handle; + + return handle; +} + +noinline void __set_page_owner(struct page *page, unsigned int order, + gfp_t gfp_mask) +{ + struct page_ext *page_ext = lookup_page_ext(page); if (unlikely(!page_ext)) return; - save_stack_trace(&trace); - + page_ext->handle = save_stack(gfp_mask); page_ext->order = order; page_ext->gfp_mask = gfp_mask; - page_ext->nr_entries = trace.nr_entries; page_ext->last_migrate_reason = -1; __set_bit(PAGE_EXT_OWNER, &page_ext->flags); @@ -111,7 +193,6 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage) { struct page_ext *old_ext = lookup_page_ext(oldpage); struct page_ext *new_ext = lookup_page_ext(newpage); - int i; if (unlikely(!old_ext || !new_ext)) return; @@ -119,10 +200,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage) new_ext->order = old_ext->order; new_ext->gfp_mask = old_ext->gfp_mask; new_ext->last_migrate_reason = old_ext->last_migrate_reason; - new_ext->nr_entries = old_ext->nr_entries; - - for (i = 0; i < ARRAY_SIZE(new_ext->trace_entries); i++) - new_ext->trace_entries[i] = old_ext->trace_entries[i]; + new_ext->handle = old_ext->handle; /* * We don't clear the bit on the oldpage as it's going to be freed @@ -138,14 +216,18 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage) static ssize_t print_page_owner(char __user *buf, size_t count, unsigned long pfn, - struct page *page, struct page_ext *page_ext) + struct page *page, struct page_ext *page_ext, + depot_stack_handle_t handle) { int ret; int pageblock_mt, page_mt; char *kbuf; + unsigned long entries[PAGE_OWNER_STACK_DEPTH]; struct stack_trace trace = { - .nr_entries = page_ext->nr_entries, - .entries = &page_ext->trace_entries[0], + .nr_entries = 0, + .entries = entries, + .max_entries = PAGE_OWNER_STACK_DEPTH, + .skip = 0 }; kbuf = kmalloc(count, GFP_KERNEL); @@ -174,6 +256,7 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn, if (ret >= count) goto err; + depot_fetch_stack(handle, &trace); ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 
0); if (ret >= count) goto err; @@ -204,10 +287,14 @@ err: void __dump_page_owner(struct page *page) { struct page_ext *page_ext = lookup_page_ext(page); + unsigned long entries[PAGE_OWNER_STACK_DEPTH]; struct stack_trace trace = { - .nr_entries = page_ext->nr_entries, - .entries = &page_ext->trace_entries[0], + .nr_entries = 0, + .entries = entries, + .max_entries = PAGE_OWNER_STACK_DEPTH, + .skip = 0 }; + depot_stack_handle_t handle; gfp_t gfp_mask; int mt; @@ -223,6 +310,13 @@ void __dump_page_owner(struct page *page) return; } + handle = READ_ONCE(page_ext->handle); + if (!handle) { + pr_alert("page_owner info is not active (free page?)\n"); + return; + } + + depot_fetch_stack(handle, &trace); pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n", page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask); print_stack_trace(&trace, 0); @@ -238,6 +332,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) unsigned long pfn; struct page *page; struct page_ext *page_ext; + depot_stack_handle_t handle; if (!static_branch_unlikely(&page_owner_inited)) return -EINVAL; @@ -286,10 +381,19 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) continue; + /* + * Access to page_ext->handle isn't synchronous so we should + * be careful to access it. + */ + handle = READ_ONCE(page_ext->handle); + if (!handle) + continue; + /* Record the next PFN to read in the file offset */ *ppos = (pfn - min_low_pfn) + 1; - return print_page_owner(buf, count, pfn, page, page_ext); + return print_page_owner(buf, count, pfn, page, + page_ext, handle); } return 0; -- cgit v1.2.3-70-g09d2 From 46f24fd857b37bb86ddd5d0ac3d194e984dfdf1c Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 26 Jul 2016 15:23:58 -0700 Subject: mm/page_alloc: introduce post allocation processing on page allocator This patch is motivated from Hugh and Vlastimil's concern [1]. There are two ways to get freepage from the allocator. One is using normal memory allocation API and the other is __isolate_free_page() which is internally used for compaction and pageblock isolation. Later usage is rather tricky since it doesn't do whole post allocation processing done by normal API. One problematic thing I already know is that poisoned page would not be checked if it is allocated by __isolate_free_page(). Perhaps, there would be more. We could add more debug logic for allocated page in the future and this separation would cause more problem. I'd like to fix this situation at this time. Solution is simple. This patch commonize some logic for newly allocated page and uses it on all sites. This will solve the problem. 
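The shape of the change, as a standalone C sketch with hypothetical names standing in for the real helpers: every step a freshly handed-out page needs goes through one shared hook, so the isolation path can no longer drift out of sync with the normal allocation path.

struct toy_page {
	int refcount;
	int mapped;
	int poison_checked;
	int owner_recorded;
};

/*
 * One hook for everything a freshly allocated page must get; both
 * paths below call it, so a debug step added here cannot be missed
 * by either of them.
 */
static void post_alloc_hook_sketch(struct toy_page *p, unsigned int order)
{
	(void)order;
	p->refcount = 1;	/* set_page_refcounted() */
	p->mapped = 1;		/* kernel_map_pages()    */
	p->poison_checked = 1;	/* kernel_poison_pages() */
	p->owner_recorded = 1;	/* set_page_owner()      */
}

/* Normal allocator path: zeroing, compound setup etc. stay separate. */
static void prep_new_page_sketch(struct toy_page *p, unsigned int order)
{
	post_alloc_hook_sketch(p, order);
}

/* Compaction/isolation path that used to open-code only a subset. */
static void isolate_free_page_sketch(struct toy_page *p, unsigned int order)
{
	post_alloc_hook_sketch(p, order);
}
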
[1] http://marc.info/?i=alpine.LSU.2.11.1604270029350.7066%40eggly.anvils%3E [iamjoonsoo.kim@lge.com: mm-page_alloc-introduce-post-allocation-processing-on-page-allocator-v3] Link: http://lkml.kernel.org/r/1464230275-25791-7-git-send-email-iamjoonsoo.kim@lge.com Link: http://lkml.kernel.org/r/1466150259-27727-9-git-send-email-iamjoonsoo.kim@lge.com Link: http://lkml.kernel.org/r/1464230275-25791-7-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Joonsoo Kim Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Minchan Kim Cc: Alexander Potapenko Cc: Hugh Dickins Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 8 +------- mm/internal.h | 2 ++ mm/page_alloc.c | 23 ++++++++++++++--------- mm/page_isolation.c | 4 +--- 4 files changed, 18 insertions(+), 19 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index 4ae1294068a8..64df5fe052db 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -74,14 +74,8 @@ static void map_pages(struct list_head *list) order = page_private(page); nr_pages = 1 << order; - set_page_private(page, 0); - set_page_refcounted(page); - arch_alloc_page(page, order); - kernel_map_pages(page, nr_pages, 1); - kasan_alloc_pages(page, order); - - set_page_owner(page, order, __GFP_MOVABLE); + post_alloc_hook(page, order, __GFP_MOVABLE); if (order) split_page(page, order); diff --git a/mm/internal.h b/mm/internal.h index 2524ec880e24..fbfba0cc2c35 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -150,6 +150,8 @@ extern int __isolate_free_page(struct page *page, unsigned int order); extern void __free_pages_bootmem(struct page *page, unsigned long pfn, unsigned int order); extern void prep_compound_page(struct page *page, unsigned int order); +extern void post_alloc_hook(struct page *page, unsigned int order, + gfp_t gfp_flags); extern int user_min_free_kbytes; #if defined CONFIG_COMPACTION || defined CONFIG_CMA diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a82b303c19b1..13cf4c665321 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1724,6 +1724,19 @@ static bool check_new_pages(struct page *page, unsigned int order) return false; } +inline void post_alloc_hook(struct page *page, unsigned int order, + gfp_t gfp_flags) +{ + set_page_private(page, 0); + set_page_refcounted(page); + + arch_alloc_page(page, order); + kernel_map_pages(page, 1 << order, 1); + kernel_poison_pages(page, 1 << order, 1); + kasan_alloc_pages(page, order); + set_page_owner(page, order, gfp_flags); +} + static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, unsigned int alloc_flags) { @@ -1736,13 +1749,7 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags poisoned &= page_is_poisoned(p); } - set_page_private(page, 0); - set_page_refcounted(page); - - arch_alloc_page(page, order); - kernel_map_pages(page, 1 << order, 1); - kernel_poison_pages(page, 1 << order, 1); - kasan_alloc_pages(page, order); + post_alloc_hook(page, order, gfp_flags); if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO)) for (i = 0; i < (1 << order); i++) @@ -1751,8 +1758,6 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags if (order && (gfp_flags & __GFP_COMP)) prep_compound_page(page, order); - set_page_owner(page, order, gfp_flags); - /* * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to * allocate the page. 
The expectation is that the caller is taking diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 927f5ee24c87..4639163b78f9 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -128,9 +128,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) out: spin_unlock_irqrestore(&zone->lock, flags); if (isolated_page) { - kernel_map_pages(page, (1 << order), 1); - set_page_refcounted(page); - set_page_owner(page, order, __GFP_MOVABLE); + post_alloc_hook(page, order, __GFP_MOVABLE); __free_pages(isolated_page, order); } } -- cgit v1.2.3-70-g09d2 From e3a2713c3cfa9c41fac57447533079d8da23319b Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Tue, 26 Jul 2016 15:24:01 -0700 Subject: mm/page_isolation: clean up confused code When there is an isolated_page, post_alloc_hook() is called with page but __free_pages() is called with isolated_page. Since they are the same so no problem but it's very confusing. To reduce it, this patch changes isolated_page to boolean type and uses page variable consistently. Link: http://lkml.kernel.org/r/1466150259-27727-10-git-send-email-iamjoonsoo.kim@lge.com Signed-off-by: Joonsoo Kim Acked-by: Vlastimil Babka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_isolation.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 4639163b78f9..064b7fb6e0b5 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -81,7 +81,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) { struct zone *zone; unsigned long flags, nr_pages; - struct page *isolated_page = NULL; + bool isolated_page = false; unsigned int order; unsigned long page_idx, buddy_idx; struct page *buddy; @@ -109,7 +109,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype) if (pfn_valid_within(page_to_pfn(buddy)) && !is_migrate_isolate_page(buddy)) { __isolate_free_page(page, order); - isolated_page = page; + isolated_page = true; } } } @@ -129,7 +129,7 @@ out: spin_unlock_irqrestore(&zone->lock, flags); if (isolated_page) { post_alloc_hook(page, order, __GFP_MOVABLE); - __free_pages(isolated_page, order); + __free_pages(page, order); } } -- cgit v1.2.3-70-g09d2 From 337d9abf1cd1a59645d91b6d0b1685a476b81978 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Tue, 26 Jul 2016 15:24:03 -0700 Subject: mm: thp: check pmd_trans_unstable() after split_huge_pmd() split_huge_pmd() doesn't guarantee that the pmd is normal pmd pointing to pte entries, which can be checked with pmd_trans_unstable(). Some callers make this assertion and some do it differently and some not, so let's do it in a unified manner. Link: http://lkml.kernel.org/r/1464741400-12143-1-git-send-email-n-horiguchi@ah.jp.nec.com Signed-off-by: Naoya Horiguchi Cc: "Kirill A. 
Shutemov" Cc: Hugh Dickins Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/gup.c | 2 ++ mm/mempolicy.c | 2 ++ mm/mprotect.c | 2 +- mm/mremap.c | 3 +-- 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/mm/gup.c b/mm/gup.c index c057784c8444..dee142e100f4 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -279,6 +279,8 @@ struct page *follow_page_mask(struct vm_area_struct *vma, spin_unlock(ptl); ret = 0; split_huge_pmd(vma, pmd, address); + if (pmd_trans_unstable(pmd)) + ret = -EBUSY; } else { get_page(page); spin_unlock(ptl); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 297d6854f849..fe90e5051012 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -512,6 +512,8 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, } } + if (pmd_trans_unstable(pmd)) + return 0; retry: pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) { diff --git a/mm/mprotect.c b/mm/mprotect.c index 5019a1ef2848..a4830f0325fe 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -163,7 +163,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { if (next - addr != HPAGE_PMD_SIZE) { split_huge_pmd(vma, pmd, addr); - if (pmd_none(*pmd)) + if (pmd_trans_unstable(pmd)) continue; } else { int nr_ptes = change_huge_pmd(vma, pmd, addr, diff --git a/mm/mremap.c b/mm/mremap.c index 1f157adfdaf9..da22ad2a5678 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -210,9 +210,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma, } } split_huge_pmd(vma, old_pmd, old_addr); - if (pmd_none(*old_pmd)) + if (pmd_trans_unstable(old_pmd)) continue; - VM_BUG_ON(pmd_trans_huge(*old_pmd)); } if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr)) break; -- cgit v1.2.3-70-g09d2 From 31d49da5ad01728e48a1bb2b43795598b23de68a Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 26 Jul 2016 15:24:06 -0700 Subject: mm/hugetlb: simplify hugetlb unmap For hugetlb like THP (and unlike regular page), we do tlb flush after dropping ptl. Because of the above, we don't need to track force_flush like we do now. Instead we can simply call tlb_remove_page() which will do the flush if needed. No functionality change in this patch. Link: http://lkml.kernel.org/r/1465049193-22197-1-git-send-email-aneesh.kumar@linux.vnet.ibm.com Signed-off-by: Aneesh Kumar K.V Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Cc: "Kirill A. 
Shutemov" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 54 +++++++++++++++++++++--------------------------------- 1 file changed, 21 insertions(+), 33 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index addfe4accc07..524c078ce67b 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3177,7 +3177,6 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) { - int force_flush = 0; struct mm_struct *mm = vma->vm_mm; unsigned long address; pte_t *ptep; @@ -3196,19 +3195,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, tlb_start_vma(tlb, vma); mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); address = start; -again: for (; address < end; address += sz) { ptep = huge_pte_offset(mm, address); if (!ptep) continue; ptl = huge_pte_lock(h, mm, ptep); - if (huge_pmd_unshare(mm, &address, ptep)) - goto unlock; + if (huge_pmd_unshare(mm, &address, ptep)) { + spin_unlock(ptl); + continue; + } pte = huge_ptep_get(ptep); - if (huge_pte_none(pte)) - goto unlock; + if (huge_pte_none(pte)) { + spin_unlock(ptl); + continue; + } /* * Migrating hugepage or HWPoisoned hugepage is already @@ -3216,7 +3218,8 @@ again: */ if (unlikely(!pte_present(pte))) { huge_pte_clear(mm, address, ptep); - goto unlock; + spin_unlock(ptl); + continue; } page = pte_page(pte); @@ -3226,9 +3229,10 @@ again: * are about to unmap is the actual page of interest. */ if (ref_page) { - if (page != ref_page) - goto unlock; - + if (page != ref_page) { + spin_unlock(ptl); + continue; + } /* * Mark the VMA as having unmapped its page so that * future faults in this VMA will fail rather than @@ -3244,30 +3248,14 @@ again: hugetlb_count_sub(pages_per_huge_page(h), mm); page_remove_rmap(page, true); - force_flush = !__tlb_remove_page(tlb, page); - if (force_flush) { - address += sz; - spin_unlock(ptl); - break; - } - /* Bail out after unmapping reference page if supplied */ - if (ref_page) { - spin_unlock(ptl); - break; - } -unlock: + spin_unlock(ptl); - } - /* - * mmu_gather ran out of room to batch pages, we break out of - * the PTE lock to avoid doing the potential expensive TLB invalidate - * and page-free while holding it. - */ - if (force_flush) { - force_flush = 0; - tlb_flush_mmu(tlb); - if (address < end && !ref_page) - goto again; + tlb_remove_page(tlb, page); + /* + * Bail out after unmapping reference page if supplied + */ + if (ref_page) + break; } mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); tlb_end_vma(tlb, vma); -- cgit v1.2.3-70-g09d2 From e9d55e157034a9efd99405c99c1565d64619d82b Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 26 Jul 2016 15:24:09 -0700 Subject: mm: change the interface for __tlb_remove_page() This updates the generic and arch specific implementation to return true if we need to do a tlb flush. That means if a __tlb_remove_page indicate a flush is needed, the page we try to remove need to be tracked and added again after the flush. We need to track it because we have already update the pte to none and we can't just loop back. This change is done to enable us to do a tlb_flush when we try to flush a range that consists of different page sizes. For architectures like ppc64, we can do a range based tlb flush and we need to track page size for that. When we try to remove a huge page, we will force a tlb flush and starts a new mmu gather. 
[aneesh.kumar@linux.vnet.ibm.com: mm-change-the-interface-for-__tlb_remove_page-v3] Link: http://lkml.kernel.org/r/1465049193-22197-2-git-send-email-aneesh.kumar@linux.vnet.ibm.com Link: http://lkml.kernel.org/r/1464860389-29019-2-git-send-email-aneesh.kumar@linux.vnet.ibm.com Signed-off-by: Aneesh Kumar K.V Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Cc: Hugh Dickins Cc: "Kirill A. Shutemov" Cc: Andrea Arcangeli Cc: Joonsoo Kim Cc: Mel Gorman Cc: David Rientjes Cc: Vlastimil Babka Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/include/asm/tlb.h | 17 +++++++++++++---- arch/ia64/include/asm/tlb.h | 19 ++++++++++++++----- arch/s390/include/asm/tlb.h | 9 +++++++-- arch/sh/include/asm/tlb.h | 8 +++++++- arch/um/include/asm/tlb.h | 8 +++++++- include/asm-generic/tlb.h | 44 +++++++++++++++++++++++++++++++++----------- mm/memory.c | 19 +++++++++++++------ 7 files changed, 94 insertions(+), 30 deletions(-) diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 3cadb726ec88..a9d2aee3826f 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -209,17 +209,26 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) tlb_flush(tlb); } -static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) +static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { + if (tlb->nr == tlb->max) + return true; tlb->pages[tlb->nr++] = page; - VM_BUG_ON(tlb->nr > tlb->max); - return tlb->max - tlb->nr; + return false; } static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) { - if (!__tlb_remove_page(tlb, page)) + if (__tlb_remove_page(tlb, page)) { tlb_flush_mmu(tlb); + __tlb_remove_page(tlb, page); + } +} + +static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, + struct page *page) +{ + return __tlb_remove_page(tlb, page); } static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index 39d64e0df1de..e7da41aa9110 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h @@ -205,17 +205,18 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) * must be delayed until after the TLB has been flushed (see comments at the beginning of * this file). */ -static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) +static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { + if (tlb->nr == tlb->max) + return true; + tlb->need_flush = 1; if (!tlb->nr && tlb->pages == tlb->local) __tlb_alloc_page(tlb); tlb->pages[tlb->nr++] = page; - VM_BUG_ON(tlb->nr > tlb->max); - - return tlb->max - tlb->nr; + return false; } static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) @@ -235,8 +236,16 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb) static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) { - if (!__tlb_remove_page(tlb, page)) + if (__tlb_remove_page(tlb, page)) { tlb_flush_mmu(tlb); + __tlb_remove_page(tlb, page); + } +} + +static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, + struct page *page) +{ + return __tlb_remove_page(tlb, page); } /* diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 7a92e69c50bc..30759b560849 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -87,10 +87,10 @@ static inline void tlb_finish_mmu(struct mmu_gather *tlb, * tlb_ptep_clear_flush. 
In both flush modes the tlb for a page cache page * has already been freed, so just do free_page_and_swap_cache. */ -static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) +static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { free_page_and_swap_cache(page); - return 1; /* avoid calling tlb_flush_mmu */ + return false; /* avoid calling tlb_flush_mmu */ } static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) @@ -98,6 +98,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) free_page_and_swap_cache(page); } +static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, + struct page *page) +{ + return __tlb_remove_page(tlb, page); +} /* * pte_free_tlb frees a pte table and clears the CRSTE for the * page table from the tlb. diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index 62f80d2a9df9..21ae8f5546b2 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h @@ -101,7 +101,7 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb) static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { free_page_and_swap_cache(page); - return 1; /* avoid calling tlb_flush_mmu */ + return false; /* avoid calling tlb_flush_mmu */ } static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) @@ -109,6 +109,12 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) __tlb_remove_page(tlb, page); } +static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, + struct page *page) +{ + return __tlb_remove_page(tlb, page); +} + #define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index 16eb63fac57d..3dc4cbb3c2c0 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h @@ -102,7 +102,7 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { tlb->need_flush = 1; free_page_and_swap_cache(page); - return 1; /* avoid calling tlb_flush_mmu */ + return false; /* avoid calling tlb_flush_mmu */ } static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) @@ -110,6 +110,12 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) __tlb_remove_page(tlb, page); } +static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, + struct page *page) +{ + return __tlb_remove_page(tlb, page); +} + /** * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. 
* diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 9dbb739cafa0..7b899a46a4cb 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -107,6 +107,11 @@ struct mmu_gather { struct mmu_gather_batch local; struct page *__pages[MMU_GATHER_BUNDLE]; unsigned int batch_count; + /* + * __tlb_adjust_range will track the new addr here, + * that that we can adjust the range after the flush + */ + unsigned long addr; }; #define HAVE_GENERIC_MMU_GATHER @@ -115,23 +120,19 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long void tlb_flush_mmu(struct mmu_gather *tlb); void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end); -int __tlb_remove_page(struct mmu_gather *tlb, struct page *page); - -/* tlb_remove_page - * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when - * required. - */ -static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) -{ - if (!__tlb_remove_page(tlb, page)) - tlb_flush_mmu(tlb); -} +bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page); static inline void __tlb_adjust_range(struct mmu_gather *tlb, unsigned long address) { tlb->start = min(tlb->start, address); tlb->end = max(tlb->end, address + PAGE_SIZE); + /* + * Track the last address with which we adjusted the range. This + * will be used later to adjust again after a mmu_flush due to + * failed __tlb_remove_page + */ + tlb->addr = address; } static inline void __tlb_reset_range(struct mmu_gather *tlb) @@ -144,6 +145,27 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) } } +/* tlb_remove_page + * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when + * required. + */ +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) +{ + if (__tlb_remove_page(tlb, page)) { + tlb_flush_mmu(tlb); + __tlb_adjust_range(tlb, tlb->addr); + __tlb_remove_page(tlb, page); + } +} + +static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page) +{ + /* active->nr should be zero when we call this */ + VM_BUG_ON_PAGE(tlb->active->nr, page); + __tlb_adjust_range(tlb, tlb->addr); + return __tlb_remove_page(tlb, page); +} + /* * In the case of tlb vma handling, we can optimise these away in the * case where we're doing a full MM flush. When we're doing a munmap, diff --git a/mm/memory.c b/mm/memory.c index 9e046819e619..12f31501c323 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -292,23 +292,24 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e * handling the additional races in SMP caused by other CPUs caching valid * mappings in their TLBs. Returns the number of free page slots left. * When out of page slots we must call tlb_flush_mmu(). + *returns true if the caller should flush. 
*/ -int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) +bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { struct mmu_gather_batch *batch; VM_BUG_ON(!tlb->end); batch = tlb->active; - batch->pages[batch->nr++] = page; if (batch->nr == batch->max) { if (!tlb_next_batch(tlb)) - return 0; + return true; batch = tlb->active; } VM_BUG_ON_PAGE(batch->nr > batch->max, page); - return batch->max - batch->nr; + batch->pages[batch->nr++] = page; + return false; } #endif /* HAVE_GENERIC_MMU_GATHER */ @@ -1109,6 +1110,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, pte_t *start_pte; pte_t *pte; swp_entry_t entry; + struct page *pending_page = NULL; again: init_rss_vec(rss); @@ -1160,8 +1162,9 @@ again: page_remove_rmap(page, false); if (unlikely(page_mapcount(page) < 0)) print_bad_pte(vma, addr, ptent, page); - if (unlikely(!__tlb_remove_page(tlb, page))) { + if (unlikely(__tlb_remove_page(tlb, page))) { force_flush = 1; + pending_page = page; addr += PAGE_SIZE; break; } @@ -1202,7 +1205,11 @@ again: if (force_flush) { force_flush = 0; tlb_flush_mmu_free(tlb); - + if (pending_page) { + /* remove the page with new size */ + __tlb_remove_pte_page(tlb, pending_page); + pending_page = NULL; + } if (addr != end) goto again; } -- cgit v1.2.3-70-g09d2 From e77b0852b551ffd8b29fa0225e1ef62c195e3160 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 26 Jul 2016 15:24:12 -0700 Subject: mm/mmu_gather: track page size with mmu gather and force flush if page size change This allows an arch which needs to do special handing with respect to different page size when flushing tlb to implement the same in mmu gather. Link: http://lkml.kernel.org/r/1465049193-22197-3-git-send-email-aneesh.kumar@linux.vnet.ibm.com Signed-off-by: Aneesh Kumar K.V Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Cc: Hugh Dickins Cc: "Kirill A. 
Shutemov" Cc: Andrea Arcangeli Cc: Joonsoo Kim Cc: Mel Gorman Cc: David Rientjes Cc: Vlastimil Babka Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/include/asm/tlb.h | 12 ++++++++++++ arch/ia64/include/asm/tlb.h | 12 ++++++++++++ arch/s390/include/asm/tlb.h | 13 +++++++++++++ arch/sh/include/asm/tlb.h | 12 ++++++++++++ arch/um/include/asm/tlb.h | 12 ++++++++++++ include/asm-generic/tlb.h | 27 +++++++++++++++++++++------ mm/huge_memory.c | 2 +- mm/hugetlb.c | 2 +- mm/memory.c | 10 +++++++++- 9 files changed, 93 insertions(+), 9 deletions(-) diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index a9d2aee3826f..1e25cd80589e 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -225,12 +225,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) } } +static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, + struct page *page, int page_size) +{ + return __tlb_remove_page(tlb, page); +} + static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page) { return __tlb_remove_page(tlb, page); } +static inline void tlb_remove_page_size(struct mmu_gather *tlb, + struct page *page, int page_size) +{ + return tlb_remove_page(tlb, page); +} + static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr) { diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index e7da41aa9110..77e541cf0e5d 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h @@ -242,12 +242,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) } } +static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, + struct page *page, int page_size) +{ + return __tlb_remove_page(tlb, page); +} + static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page) { return __tlb_remove_page(tlb, page); } +static inline void tlb_remove_page_size(struct mmu_gather *tlb, + struct page *page, int page_size) +{ + return tlb_remove_page(tlb, page); +} + /* * Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any * PTE, not just those pointing to (normal) physical memory. diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 30759b560849..15711de10403 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -98,11 +98,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) free_page_and_swap_cache(page); } +static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, + struct page *page, int page_size) +{ + return __tlb_remove_page(tlb, page); +} + static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page) { return __tlb_remove_page(tlb, page); } + +static inline void tlb_remove_page_size(struct mmu_gather *tlb, + struct page *page, int page_size) +{ + return tlb_remove_page(tlb, page); +} + /* * pte_free_tlb frees a pte table and clears the CRSTE for the * page table from the tlb. 
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index 21ae8f5546b2..025cdb1032f6 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h @@ -109,12 +109,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) __tlb_remove_page(tlb, page); } +static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, + struct page *page, int page_size) +{ + return __tlb_remove_page(tlb, page); +} + static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page) { return __tlb_remove_page(tlb, page); } +static inline void tlb_remove_page_size(struct mmu_gather *tlb, + struct page *page, int page_size) +{ + return tlb_remove_page(tlb, page); +} + #define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index 3dc4cbb3c2c0..821ff0acfe17 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h @@ -110,12 +110,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) __tlb_remove_page(tlb, page); } +static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, + struct page *page, int page_size) +{ + return __tlb_remove_page(tlb, page); +} + static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page) { return __tlb_remove_page(tlb, page); } +static inline void tlb_remove_page_size(struct mmu_gather *tlb, + struct page *page, int page_size) +{ + return tlb_remove_page(tlb, page); +} + /** * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation. * diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 7b899a46a4cb..c6d667187608 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -112,6 +112,7 @@ struct mmu_gather { * that that we can adjust the range after the flush */ unsigned long addr; + int page_size; }; #define HAVE_GENERIC_MMU_GATHER @@ -120,7 +121,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long void tlb_flush_mmu(struct mmu_gather *tlb); void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end); -bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page); +extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, + int page_size); static inline void __tlb_adjust_range(struct mmu_gather *tlb, unsigned long address) @@ -145,23 +147,36 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb) } } +static inline void tlb_remove_page_size(struct mmu_gather *tlb, + struct page *page, int page_size) +{ + if (__tlb_remove_page_size(tlb, page, page_size)) { + tlb_flush_mmu(tlb); + tlb->page_size = page_size; + __tlb_adjust_range(tlb, tlb->addr); + __tlb_remove_page_size(tlb, page, page_size); + } +} + +static bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) +{ + return __tlb_remove_page_size(tlb, page, PAGE_SIZE); +} + /* tlb_remove_page * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when * required. 
*/ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) { - if (__tlb_remove_page(tlb, page)) { - tlb_flush_mmu(tlb); - __tlb_adjust_range(tlb, tlb->addr); - __tlb_remove_page(tlb, page); - } + return tlb_remove_page_size(tlb, page, PAGE_SIZE); } static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb, struct page *page) { /* active->nr should be zero when we call this */ VM_BUG_ON_PAGE(tlb->active->nr, page); + tlb->page_size = PAGE_SIZE; __tlb_adjust_range(tlb, tlb->addr); return __tlb_remove_page(tlb, page); } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 343a2b7e57aa..23d1bf42fef1 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1689,7 +1689,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); atomic_long_dec(&tlb->mm->nr_ptes); spin_unlock(ptl); - tlb_remove_page(tlb, page); + tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); } return 1; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 524c078ce67b..a9a8c313d133 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3250,7 +3250,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, page_remove_rmap(page, true); spin_unlock(ptl); - tlb_remove_page(tlb, page); + tlb_remove_page_size(tlb, page, huge_page_size(h)); /* * Bail out after unmapping reference page if supplied */ diff --git a/mm/memory.c b/mm/memory.c index 12f31501c323..a329149e1c54 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -233,6 +233,7 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long #ifdef CONFIG_HAVE_RCU_TABLE_FREE tlb->batch = NULL; #endif + tlb->page_size = 0; __tlb_reset_range(tlb); } @@ -294,12 +295,19 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e * When out of page slots we must call tlb_flush_mmu(). *returns true if the caller should flush. */ -bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page) +bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size) { struct mmu_gather_batch *batch; VM_BUG_ON(!tlb->end); + if (!tlb->page_size) + tlb->page_size = page_size; + else { + if (page_size != tlb->page_size) + return true; + } + batch = tlb->active; if (batch->nr == batch->max) { if (!tlb_next_batch(tlb)) -- cgit v1.2.3-70-g09d2 From 99691addb42919251dcc082a70b7a11733dfcbcc Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Tue, 26 Jul 2016 15:24:16 -0700 Subject: mm: remove pointless struct in struct page definition This patchset implements per kmemcg accounting of page tables (x86-only), pipe buffers, and unix socket buffers. Patches 1-3 are just cleanups that are not supposed to introduce any functional changes. Patches 4 and 5 move charge/uncharge to generic page allocator paths for the sake of accounting pipe and unix socket buffers. Patches 5-7 make x86 page tables, pipe buffers, and unix socket buffers accountable. This patch (of 8): ... to reduce indentation level thus leaving more space for comments. 
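Because the change is meant to be purely structural, the following stand-alone check (hypothetical types, not the kernel's struct page) illustrates why dropping the wrapper struct is safe when the parent is itself a struct: only the indentation level changes, while member offsets and total size stay the same.

#include <stddef.h>
#include <stdio.h>

struct with_wrapper {		/* mimics the extra "struct { ... }" level */
	struct {
		union { long index; void *freelist; };
		union { unsigned long counters; int units; };
	};
};

struct without_wrapper {	/* mimics the flattened layout */
	union { long index; void *freelist; };
	union { unsigned long counters; int units; };
};

int main(void)
{
	printf("with:    index@%zu counters@%zu size=%zu\n",
	       offsetof(struct with_wrapper, index),
	       offsetof(struct with_wrapper, counters),
	       sizeof(struct with_wrapper));
	printf("without: index@%zu counters@%zu size=%zu\n",
	       offsetof(struct without_wrapper, index),
	       offsetof(struct without_wrapper, counters),
	       sizeof(struct without_wrapper));
	return 0;	/* both lines print identical offsets and sizes */
}
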
Link: http://lkml.kernel.org/r/f34ffe70fce2b0b9220856437f77972d67c14275.1464079537.git.vdavydov@virtuozzo.com Signed-off-by: Vladimir Davydov Cc: Johannes Weiner Cc: Michal Hocko Cc: Eric Dumazet Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_types.h | 68 +++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 36 deletions(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 917f2b6a0cde..a50ad735d518 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -60,51 +60,47 @@ struct page { }; /* Second double word */ - struct { - union { - pgoff_t index; /* Our offset within mapping. */ - void *freelist; /* sl[aou]b first free object */ - /* page_deferred_list().prev -- second tail page */ - }; + union { + pgoff_t index; /* Our offset within mapping. */ + void *freelist; /* sl[aou]b first free object */ + /* page_deferred_list().prev -- second tail page */ + }; - union { + union { #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) - /* Used for cmpxchg_double in slub */ - unsigned long counters; + /* Used for cmpxchg_double in slub */ + unsigned long counters; #else - /* - * Keep _refcount separate from slub cmpxchg_double - * data. As the rest of the double word is protected by - * slab_lock but _refcount is not. - */ - unsigned counters; + /* + * Keep _refcount separate from slub cmpxchg_double data. + * As the rest of the double word is protected by slab_lock + * but _refcount is not. + */ + unsigned counters; #endif + struct { - struct { - - union { - /* - * Count of ptes mapped in mms, to show - * when page is mapped & limit reverse - * map searches. - */ - atomic_t _mapcount; - - struct { /* SLUB */ - unsigned inuse:16; - unsigned objects:15; - unsigned frozen:1; - }; - int units; /* SLOB */ - }; + union { /* - * Usage count, *USE WRAPPER FUNCTION* - * when manual accounting. See page_ref.h + * Count of ptes mapped in mms, to show when + * page is mapped & limit reverse map searches. */ - atomic_t _refcount; + atomic_t _mapcount; + + unsigned int active; /* SLAB */ + struct { /* SLUB */ + unsigned inuse:16; + unsigned objects:15; + unsigned frozen:1; + }; + int units; /* SLOB */ }; - unsigned int active; /* SLAB */ + /* + * Usage count, *USE WRAPPER FUNCTION* when manual + * accounting. See page_ref.h + */ + atomic_t _refcount; }; }; -- cgit v1.2.3-70-g09d2 From 632c0a1affd861f81abdd136c886418571e19a51 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Tue, 26 Jul 2016 15:24:18 -0700 Subject: mm: clean up non-standard page->_mapcount users - Add a proper comment to page->_mapcount. - Introduce a macro for generating helper functions. - Place all special page->_mapcount values next to each other so that readers can see all possible values and so we don't get duplicates. 
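After this cleanup, wiring up a new special _mapcount value is a two-liner. The kmemcg patch later in this series does exactly that in page-flags.h; it is shown here out of order, purely to illustrate how the generated helpers are consumed.

/* Marker for pages charged to kmemcg (added later in this series). */
#define PAGE_KMEMCG_MAPCOUNT_VALUE (-512)
PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG)
/*
 * Expands to PageKmemcg(), __SetPageKmemcg() and __ClearPageKmemcg(), which
 * respectively compare page->_mapcount against -512, set it to -512, and
 * reset it to -1.
 */
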
Link: http://lkml.kernel.org/r/502f49000e0b63e6c62e338fac6b420bf34fb526.1464079537.git.vdavydov@virtuozzo.com Signed-off-by: Vladimir Davydov Cc: Johannes Weiner Cc: Michal Hocko Cc: Eric Dumazet Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm_types.h | 5 ++++ include/linux/page-flags.h | 73 ++++++++++++++++++++-------------------------- scripts/tags.sh | 3 ++ 3 files changed, 40 insertions(+), 41 deletions(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index a50ad735d518..79472b22d23f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -85,6 +85,11 @@ struct page { /* * Count of ptes mapped in mms, to show when * page is mapped & limit reverse map searches. + * + * Extra information about page type may be + * stored here for pages that are never mapped, + * in which case the value MUST BE <= -2. + * See page-flags.h for more details. */ atomic_t _mapcount; diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index f36dbb3a3060..96084ee74ee8 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -604,54 +604,45 @@ TESTPAGEFLAG_FALSE(DoubleMap) #endif /* - * PageBuddy() indicate that the page is free and in the buddy system - * (see mm/page_alloc.c). - * - * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to - * -2 so that an underflow of the page_mapcount() won't be mistaken - * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very - * efficiently by most CPU architectures. + * For pages that are never mapped to userspace, page->mapcount may be + * used for storing extra information about page type. Any value used + * for this purpose must be <= -2, but it's better start not too close + * to -2 so that an underflow of the page_mapcount() won't be mistaken + * for a special page. */ -#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) - -static inline int PageBuddy(struct page *page) -{ - return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; +#define PAGE_MAPCOUNT_OPS(uname, lname) \ +static __always_inline int Page##uname(struct page *page) \ +{ \ + return atomic_read(&page->_mapcount) == \ + PAGE_##lname##_MAPCOUNT_VALUE; \ +} \ +static __always_inline void __SetPage##uname(struct page *page) \ +{ \ + VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); \ + atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE); \ +} \ +static __always_inline void __ClearPage##uname(struct page *page) \ +{ \ + VM_BUG_ON_PAGE(!Page##uname(page), page); \ + atomic_set(&page->_mapcount, -1); \ } -static inline void __SetPageBuddy(struct page *page) -{ - VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); - atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); -} +/* + * PageBuddy() indicate that the page is free and in the buddy system + * (see mm/page_alloc.c). + */ +#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) +PAGE_MAPCOUNT_OPS(Buddy, BUDDY) -static inline void __ClearPageBuddy(struct page *page) -{ - VM_BUG_ON_PAGE(!PageBuddy(page), page); - atomic_set(&page->_mapcount, -1); -} +/* + * PageBalloon() is set on pages that are on the balloon page list + * (see mm/balloon_compaction.c). 
+ */ +#define PAGE_BALLOON_MAPCOUNT_VALUE (-256) +PAGE_MAPCOUNT_OPS(Balloon, BALLOON) extern bool is_free_buddy_page(struct page *page); -#define PAGE_BALLOON_MAPCOUNT_VALUE (-256) - -static inline int PageBalloon(struct page *page) -{ - return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE; -} - -static inline void __SetPageBalloon(struct page *page) -{ - VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); - atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE); -} - -static inline void __ClearPageBalloon(struct page *page) -{ - VM_BUG_ON_PAGE(!PageBalloon(page), page); - atomic_set(&page->_mapcount, -1); -} - __PAGEFLAG(Isolated, isolated, PF_ANY); /* diff --git a/scripts/tags.sh b/scripts/tags.sh index f72f48f638ae..ed7eef24ef89 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh @@ -185,6 +185,9 @@ regex_c=( '/\ Date: Tue, 26 Jul 2016 15:24:21 -0700 Subject: mm: memcontrol: cleanup kmem charge functions - Handle memcg_kmem_enabled check out to the caller. This reduces the number of function definitions making the code easier to follow. At the same time it doesn't result in code bloat, because all of these functions are used only in one or two places. - Move __GFP_ACCOUNT check to the caller as well so that one wouldn't have to dive deep into memcg implementation to see which allocations are charged and which are not. - Refresh comments. Link: http://lkml.kernel.org/r/52882a28b542c1979fd9a033b4dc8637fc347399.1464079537.git.vdavydov@virtuozzo.com Signed-off-by: Vladimir Davydov Cc: Johannes Weiner Cc: Michal Hocko Cc: Eric Dumazet Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 103 +++------------------------------------------ mm/memcontrol.c | 75 ++++++++++++++++++++++++--------- mm/page_alloc.c | 9 ++-- mm/slab.h | 16 +++++-- 4 files changed, 80 insertions(+), 123 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 56e6069d2452..71aff733a497 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -749,6 +749,13 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) } #endif +struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); +void memcg_kmem_put_cache(struct kmem_cache *cachep); +int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, + struct mem_cgroup *memcg); +int memcg_kmem_charge(struct page *page, gfp_t gfp, int order); +void memcg_kmem_uncharge(struct page *page, int order); + #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) extern struct static_key_false memcg_kmem_enabled_key; @@ -769,22 +776,6 @@ static inline bool memcg_kmem_enabled(void) return static_branch_unlikely(&memcg_kmem_enabled_key); } -/* - * In general, we'll do everything in our power to not incur in any overhead - * for non-memcg users for the kmem functions. Not even a function call, if we - * can avoid it. - * - * Therefore, we'll inline all those functions so that in the best case, we'll - * see that kmemcg is off for everybody and proceed quickly. If it is on, - * we'll still do most of the flag checking inline. We check a lot of - * conditions, but because they are pretty simple, they are expected to be - * fast. - */ -int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, - struct mem_cgroup *memcg); -int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order); -void __memcg_kmem_uncharge(struct page *page, int order); - /* * helper for accessing a memcg's index. 
It will be used as an index in the * child cache array in kmem_cache, and also to derive its name. This function @@ -795,67 +786,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return memcg ? memcg->kmemcg_id : -1; } -struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); -void __memcg_kmem_put_cache(struct kmem_cache *cachep); - -static inline bool __memcg_kmem_bypass(void) -{ - if (!memcg_kmem_enabled()) - return true; - if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) - return true; - return false; -} - -/** - * memcg_kmem_charge: charge a kmem page - * @page: page to charge - * @gfp: reclaim mode - * @order: allocation order - * - * Returns 0 on success, an error code on failure. - */ -static __always_inline int memcg_kmem_charge(struct page *page, - gfp_t gfp, int order) -{ - if (__memcg_kmem_bypass()) - return 0; - if (!(gfp & __GFP_ACCOUNT)) - return 0; - return __memcg_kmem_charge(page, gfp, order); -} - -/** - * memcg_kmem_uncharge: uncharge a kmem page - * @page: page to uncharge - * @order: allocation order - */ -static __always_inline void memcg_kmem_uncharge(struct page *page, int order) -{ - if (memcg_kmem_enabled()) - __memcg_kmem_uncharge(page, order); -} - -/** - * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation - * @cachep: the original global kmem cache - * - * All memory allocated from a per-memcg cache is charged to the owner memcg. - */ -static __always_inline struct kmem_cache * -memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) -{ - if (__memcg_kmem_bypass()) - return cachep; - return __memcg_kmem_get_cache(cachep, gfp); -} - -static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep) -{ - if (memcg_kmem_enabled()) - __memcg_kmem_put_cache(cachep); -} - /** * memcg_kmem_update_page_stat - update kmem page state statistics * @page: the page @@ -878,15 +808,6 @@ static inline bool memcg_kmem_enabled(void) return false; } -static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) -{ - return 0; -} - -static inline void memcg_kmem_uncharge(struct page *page, int order) -{ -} - static inline int memcg_cache_id(struct mem_cgroup *memcg) { return -1; @@ -900,16 +821,6 @@ static inline void memcg_put_cache_ids(void) { } -static inline struct kmem_cache * -memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) -{ - return cachep; -} - -static inline void memcg_kmem_put_cache(struct kmem_cache *cachep) -{ -} - static inline void memcg_kmem_update_page_stat(struct page *page, enum mem_cgroup_stat_index idx, int val) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index caea25a21c70..089ef3614155 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2273,20 +2273,30 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, current->memcg_kmem_skip_account = 0; } -/* +static inline bool memcg_kmem_bypass(void) +{ + if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD)) + return true; + return false; +} + +/** + * memcg_kmem_get_cache: select the correct per-memcg cache for allocation + * @cachep: the original global kmem cache + * * Return the kmem_cache we're supposed to use for a slab allocation. * We try to use the current memcg's version of the cache. * - * If the cache does not exist yet, if we are the first user of it, - * we either create it immediately, if possible, or create it asynchronously - * in a workqueue. 
- * In the latter case, we will let the current allocation go through with - * the original cache. + * If the cache does not exist yet, if we are the first user of it, we + * create it asynchronously in a workqueue and let the current allocation + * go through with the original cache. * - * Can't be called in interrupt context or from kernel threads. - * This function needs to be called with rcu_read_lock() held. + * This function takes a reference to the cache it returns to assure it + * won't get destroyed while we are working with it. Once the caller is + * done with it, memcg_kmem_put_cache() must be called to release the + * reference. */ -struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) +struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep) { struct mem_cgroup *memcg; struct kmem_cache *memcg_cachep; @@ -2294,10 +2304,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) VM_BUG_ON(!is_root_cache(cachep)); - if (cachep->flags & SLAB_ACCOUNT) - gfp |= __GFP_ACCOUNT; - - if (!(gfp & __GFP_ACCOUNT)) + if (memcg_kmem_bypass()) return cachep; if (current->memcg_kmem_skip_account) @@ -2330,14 +2337,27 @@ out: return cachep; } -void __memcg_kmem_put_cache(struct kmem_cache *cachep) +/** + * memcg_kmem_put_cache: drop reference taken by memcg_kmem_get_cache + * @cachep: the cache returned by memcg_kmem_get_cache + */ +void memcg_kmem_put_cache(struct kmem_cache *cachep) { if (!is_root_cache(cachep)) css_put(&cachep->memcg_params.memcg->css); } -int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, - struct mem_cgroup *memcg) +/** + * memcg_kmem_charge: charge a kmem page + * @page: page to charge + * @gfp: reclaim mode + * @order: allocation order + * @memcg: memory cgroup to charge + * + * Returns 0 on success, an error code on failure. + */ +int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, + struct mem_cgroup *memcg) { unsigned int nr_pages = 1 << order; struct page_counter *counter; @@ -2358,19 +2378,34 @@ int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, return 0; } -int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order) +/** + * memcg_kmem_charge: charge a kmem page to the current memory cgroup + * @page: page to charge + * @gfp: reclaim mode + * @order: allocation order + * + * Returns 0 on success, an error code on failure. 
+ */ +int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) { struct mem_cgroup *memcg; int ret = 0; + if (memcg_kmem_bypass()) + return 0; + memcg = get_mem_cgroup_from_mm(current->mm); if (!mem_cgroup_is_root(memcg)) - ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg); + ret = memcg_kmem_charge_memcg(page, gfp, order, memcg); css_put(&memcg->css); return ret; } - -void __memcg_kmem_uncharge(struct page *page, int order) +/** + * memcg_kmem_uncharge: uncharge a kmem page + * @page: page to uncharge + * @order: allocation order + */ +void memcg_kmem_uncharge(struct page *page, int order) { struct mem_cgroup *memcg = page->mem_cgroup; unsigned int nr_pages = 1 << order; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 13cf4c665321..de2491c42d4f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4009,7 +4009,8 @@ struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order) struct page *page; page = alloc_pages(gfp_mask, order); - if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) { + if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && + page && memcg_kmem_charge(page, gfp_mask, order) != 0) { __free_pages(page, order); page = NULL; } @@ -4021,7 +4022,8 @@ struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) struct page *page; page = alloc_pages_node(nid, gfp_mask, order); - if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) { + if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && + page && memcg_kmem_charge(page, gfp_mask, order) != 0) { __free_pages(page, order); page = NULL; } @@ -4034,7 +4036,8 @@ struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) */ void __free_kmem_pages(struct page *page, unsigned int order) { - memcg_kmem_uncharge(page, order); + if (memcg_kmem_enabled()) + memcg_kmem_uncharge(page, order); __free_pages(page, order); } diff --git a/mm/slab.h b/mm/slab.h index 5fa8b8f20eb1..f33980ab0406 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -254,8 +254,7 @@ static __always_inline int memcg_charge_slab(struct page *page, if (is_root_cache(s)) return 0; - ret = __memcg_kmem_charge_memcg(page, gfp, order, - s->memcg_params.memcg); + ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg); if (ret) return ret; @@ -269,6 +268,9 @@ static __always_inline int memcg_charge_slab(struct page *page, static __always_inline void memcg_uncharge_slab(struct page *page, int order, struct kmem_cache *s) { + if (!memcg_kmem_enabled()) + return; + memcg_kmem_update_page_stat(page, (s->flags & SLAB_RECLAIM_ACCOUNT) ? 
MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE, @@ -391,7 +393,11 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, if (should_failslab(s, flags)) return NULL; - return memcg_kmem_get_cache(s, flags); + if (memcg_kmem_enabled() && + ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT))) + return memcg_kmem_get_cache(s); + + return s; } static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, @@ -408,7 +414,9 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, s->flags, flags); kasan_slab_alloc(s, object, flags); } - memcg_kmem_put_cache(s); + + if (memcg_kmem_enabled()) + memcg_kmem_put_cache(s); } #ifndef CONFIG_SLOB -- cgit v1.2.3-70-g09d2 From 4949148ad433f6f11cf837978b2907092ec99f3a Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Tue, 26 Jul 2016 15:24:24 -0700 Subject: mm: charge/uncharge kmemcg from generic page allocator paths Currently, to charge a non-slab allocation to kmemcg one has to use alloc_kmem_pages helper with __GFP_ACCOUNT flag. A page allocated with this helper should finally be freed using free_kmem_pages, otherwise it won't be uncharged. This API suits its current users fine, but it turns out to be impossible to use along with page reference counting, i.e. when an allocation is supposed to be freed with put_page, as it is the case with pipe or unix socket buffers. To overcome this limitation, this patch moves charging/uncharging to generic page allocator paths, i.e. to __alloc_pages_nodemask and free_pages_prepare, and zaps alloc/free_kmem_pages helpers. This way, one can use any of the available page allocation functions to get the allocated page charged to kmemcg - it's enough to pass __GFP_ACCOUNT, just like in case of kmalloc and friends. A charged page will be automatically uncharged on free. To make it possible, we need to mark pages charged to kmemcg somehow. To avoid introducing a new page flag, we make use of page->_mapcount for marking such pages. Since pages charged to kmemcg are not supposed to be mapped to userspace, it should work just fine. There are other (ab)users of page->_mapcount - buddy and balloon pages - but we don't conflict with them. In case kmemcg is compiled out or not used at runtime, this patch introduces no overhead to generic page allocator paths. If kmemcg is used, it will be plus one gfp flags check on alloc and plus one page->_mapcount check on free, which shouldn't hurt performance, because the data accessed are hot. Link: http://lkml.kernel.org/r/a9736d856f895bcb465d9f257b54efe32eda6f99.1464079538.git.vdavydov@virtuozzo.com Signed-off-by: Vladimir Davydov Cc: Johannes Weiner Cc: Michal Hocko Cc: Eric Dumazet Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 10 +------ include/linux/page-flags.h | 7 +++++ kernel/fork.c | 6 ++--- mm/page_alloc.c | 66 +++++++++------------------------------------- mm/slab_common.c | 2 +- mm/slub.c | 6 ++--- mm/vmalloc.c | 6 ++--- 7 files changed, 31 insertions(+), 72 deletions(-) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 570383a41853..c29e9d347bc6 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -78,8 +78,7 @@ struct vm_area_struct; * __GFP_THISNODE forces the allocation to be satisified from the requested * node with no fallbacks or placement policy enforcements. * - * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg (only relevant - * to kmem allocations). + * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg. 
*/ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) @@ -486,10 +485,6 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) -extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order); -extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, - unsigned int order); - extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); @@ -513,9 +508,6 @@ extern void *__alloc_page_frag(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask); extern void __free_page_frag(void *addr); -extern void __free_kmem_pages(struct page *page, unsigned int order); -extern void free_kmem_pages(unsigned long addr, unsigned int order); - #define __free_page(page) __free_pages((page), 0) #define free_page(addr) free_pages((addr), 0) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 96084ee74ee8..7c8e82ac2eb7 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -641,6 +641,13 @@ PAGE_MAPCOUNT_OPS(Buddy, BUDDY) #define PAGE_BALLOON_MAPCOUNT_VALUE (-256) PAGE_MAPCOUNT_OPS(Balloon, BALLOON) +/* + * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on + * pages allocated with __GFP_ACCOUNT. It gets cleared on page free. + */ +#define PAGE_KMEMCG_MAPCOUNT_VALUE (-512) +PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG) + extern bool is_free_buddy_page(struct page *page); __PAGEFLAG(Isolated, isolated, PF_ANY); diff --git a/kernel/fork.c b/kernel/fork.c index 4a7ec0c6c88c..de21f25e0d2c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -162,8 +162,8 @@ void __weak arch_release_thread_stack(unsigned long *stack) static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) { - struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP, - THREAD_SIZE_ORDER); + struct page *page = alloc_pages_node(node, THREADINFO_GFP, + THREAD_SIZE_ORDER); if (page) memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK, @@ -178,7 +178,7 @@ static inline void free_thread_stack(unsigned long *stack) memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK, -(1 << THREAD_SIZE_ORDER)); - __free_kmem_pages(page, THREAD_SIZE_ORDER); + __free_pages(page, THREAD_SIZE_ORDER); } # else static struct kmem_cache *thread_stack_cache; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index de2491c42d4f..7023a31edc5c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -63,6 +63,7 @@ #include #include #include +#include #include #include @@ -1018,6 +1019,10 @@ static __always_inline bool free_pages_prepare(struct page *page, } if (PageMappingFlags(page)) page->mapping = NULL; + if (memcg_kmem_enabled() && PageKmemcg(page)) { + memcg_kmem_uncharge(page, order); + __ClearPageKmemcg(page); + } if (check_free) bad += free_pages_check(page); if (bad) @@ -3841,6 +3846,14 @@ no_zone: } out: + if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page) { + if (unlikely(memcg_kmem_charge(page, gfp_mask, order))) { + __free_pages(page, order); + page = NULL; + } else + __SetPageKmemcg(page); + } + if (kmemcheck_enabled && page) kmemcheck_pagealloc_alloc(page, order, gfp_mask); @@ -3996,59 +4009,6 @@ void __free_page_frag(void *addr) } EXPORT_SYMBOL(__free_page_frag); -/* - * alloc_kmem_pages charges newly allocated pages to the kmem resource counter - * of the current memory cgroup if __GFP_ACCOUNT is set, other than 
that it is - * equivalent to alloc_pages. - * - * It should be used when the caller would like to use kmalloc, but since the - * allocation is large, it has to fall back to the page allocator. - */ -struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order) -{ - struct page *page; - - page = alloc_pages(gfp_mask, order); - if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && - page && memcg_kmem_charge(page, gfp_mask, order) != 0) { - __free_pages(page, order); - page = NULL; - } - return page; -} - -struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) -{ - struct page *page; - - page = alloc_pages_node(nid, gfp_mask, order); - if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && - page && memcg_kmem_charge(page, gfp_mask, order) != 0) { - __free_pages(page, order); - page = NULL; - } - return page; -} - -/* - * __free_kmem_pages and free_kmem_pages will free pages allocated with - * alloc_kmem_pages. - */ -void __free_kmem_pages(struct page *page, unsigned int order) -{ - if (memcg_kmem_enabled()) - memcg_kmem_uncharge(page, order); - __free_pages(page, order); -} - -void free_kmem_pages(unsigned long addr, unsigned int order) -{ - if (addr != 0) { - VM_BUG_ON(!virt_addr_valid((void *)addr)); - __free_kmem_pages(virt_to_page((void *)addr), order); - } -} - static void *make_alloc_exact(unsigned long addr, unsigned int order, size_t size) { diff --git a/mm/slab_common.c b/mm/slab_common.c index da88c1588752..71f0b28a1bec 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1012,7 +1012,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) struct page *page; flags |= __GFP_COMP; - page = alloc_kmem_pages(flags, order); + page = alloc_pages(flags, order); ret = page ? page_address(page) : NULL; kmemleak_alloc(ret, size, 1, flags); kasan_kmalloc_large(ret, size, flags); diff --git a/mm/slub.c b/mm/slub.c index c0cfa2722539..f9da8716b8b3 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2977,7 +2977,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size, if (unlikely(!PageSlab(page))) { BUG_ON(!PageCompound(page)); kfree_hook(object); - __free_kmem_pages(page, compound_order(page)); + __free_pages(page, compound_order(page)); p[size] = NULL; /* mark object processed */ return size; } @@ -3693,7 +3693,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node) void *ptr = NULL; flags |= __GFP_COMP | __GFP_NOTRACK; - page = alloc_kmem_pages_node(node, flags, get_order(size)); + page = alloc_pages_node(node, flags, get_order(size)); if (page) ptr = page_address(page); @@ -3774,7 +3774,7 @@ void kfree(const void *x) if (unlikely(!PageSlab(page))) { BUG_ON(!PageCompound(page)); kfree_hook(x); - __free_kmem_pages(page, compound_order(page)); + __free_pages(page, compound_order(page)); return; } slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index e11475cdeb7a..91f44e78c516 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1501,7 +1501,7 @@ static void __vunmap(const void *addr, int deallocate_pages) struct page *page = area->pages[i]; BUG_ON(!page); - __free_kmem_pages(page, 0); + __free_pages(page, 0); } kvfree(area->pages); @@ -1629,9 +1629,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, struct page *page; if (node == NUMA_NO_NODE) - page = alloc_kmem_pages(alloc_mask, order); + page = alloc_pages(alloc_mask, order); else - page = alloc_kmem_pages_node(node, alloc_mask, order); + page = alloc_pages_node(node, alloc_mask, order); if 
(unlikely(!page)) { /* Successfully allocated i pages, free them in __vunmap() */ -- cgit v1.2.3-70-g09d2 From 5e8d35f849b1969b900695ae191326bfacf6bfc6 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Tue, 26 Jul 2016 15:24:27 -0700 Subject: mm: memcontrol: teach uncharge_list to deal with kmem pages Page table pages are batched-freed in release_pages on most architectures. If we want to charge them to kmemcg (this is what is done later in this series), we need to teach mem_cgroup_uncharge_list to handle kmem pages. Link: http://lkml.kernel.org/r/18d5c09e97f80074ed25b97a7d0f32b95d875717.1464079538.git.vdavydov@virtuozzo.com Signed-off-by: Vladimir Davydov Cc: Johannes Weiner Cc: Michal Hocko Cc: Eric Dumazet Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 42 ++++++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 089ef3614155..ff53e348c4bb 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5505,15 +5505,18 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, unsigned long nr_anon, unsigned long nr_file, - unsigned long nr_huge, struct page *dummy_page) + unsigned long nr_huge, unsigned long nr_kmem, + struct page *dummy_page) { - unsigned long nr_pages = nr_anon + nr_file; + unsigned long nr_pages = nr_anon + nr_file + nr_kmem; unsigned long flags; if (!mem_cgroup_is_root(memcg)) { page_counter_uncharge(&memcg->memory, nr_pages); if (do_memsw_account()) page_counter_uncharge(&memcg->memsw, nr_pages); + if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && nr_kmem) + page_counter_uncharge(&memcg->kmem, nr_kmem); memcg_oom_recover(memcg); } @@ -5536,6 +5539,7 @@ static void uncharge_list(struct list_head *page_list) unsigned long nr_anon = 0; unsigned long nr_file = 0; unsigned long nr_huge = 0; + unsigned long nr_kmem = 0; unsigned long pgpgout = 0; struct list_head *next; struct page *page; @@ -5546,8 +5550,6 @@ static void uncharge_list(struct list_head *page_list) */ next = page_list->next; do { - unsigned int nr_pages = 1; - page = list_entry(next, struct page, lru); next = page->lru.next; @@ -5566,31 +5568,35 @@ static void uncharge_list(struct list_head *page_list) if (memcg != page->mem_cgroup) { if (memcg) { uncharge_batch(memcg, pgpgout, nr_anon, nr_file, - nr_huge, page); - pgpgout = nr_anon = nr_file = nr_huge = 0; + nr_huge, nr_kmem, page); + pgpgout = nr_anon = nr_file = + nr_huge = nr_kmem = 0; } memcg = page->mem_cgroup; } - if (PageTransHuge(page)) { - nr_pages <<= compound_order(page); - VM_BUG_ON_PAGE(!PageTransHuge(page), page); - nr_huge += nr_pages; - } + if (!PageKmemcg(page)) { + unsigned int nr_pages = 1; - if (PageAnon(page)) - nr_anon += nr_pages; - else - nr_file += nr_pages; + if (PageTransHuge(page)) { + nr_pages <<= compound_order(page); + VM_BUG_ON_PAGE(!PageTransHuge(page), page); + nr_huge += nr_pages; + } + if (PageAnon(page)) + nr_anon += nr_pages; + else + nr_file += nr_pages; + pgpgout++; + } else + nr_kmem += 1 << compound_order(page); page->mem_cgroup = NULL; - - pgpgout++; } while (next != page_list); if (memcg) uncharge_batch(memcg, pgpgout, nr_anon, nr_file, - nr_huge, page); + nr_huge, nr_kmem, page); } /** -- cgit v1.2.3-70-g09d2 From 3e79ec7ddc33e5c69c20ce7f768d0e5c8b824f69 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Tue, 26 Jul 2016 15:24:30 -0700 Subject: arch: x86: charge page tables to kmemcg 
Page tables can bite a relatively big chunk off system memory and their allocations are easy to trigger from userspace, so they should be accounted to kmemcg. This patch marks page table allocations as __GFP_ACCOUNT for x86. Note we must not charge allocations of kernel page tables, because they can be shared among processes from different cgroups so accounting them to a particular one can pin other cgroups for indefinitely long. So we clear __GFP_ACCOUNT flag if a page table is allocated for the kernel. Link: http://lkml.kernel.org/r/7d5c54f6a2bcbe76f03171689440003d87e6c742.1464079538.git.vdavydov@virtuozzo.com Signed-off-by: Vladimir Davydov Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Johannes Weiner Cc: Michal Hocko Cc: Eric Dumazet Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/include/asm/pgalloc.h | 12 ++++++++++-- arch/x86/mm/pgtable.c | 10 +++++++--- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 574c23cf761a..b6d425999f99 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -81,7 +81,11 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) { struct page *page; - page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); + gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO; + + if (mm == &init_mm) + gfp &= ~__GFP_ACCOUNT; + page = alloc_pages(gfp, 0); if (!page) return NULL; if (!pgtable_pmd_page_ctor(page)) { @@ -125,7 +129,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) { - return (pud_t *)get_zeroed_page(GFP_KERNEL); + gfp_t gfp = GFP_KERNEL_ACCOUNT; + + if (mm == &init_mm) + gfp &= ~__GFP_ACCOUNT; + return (pud_t *)get_zeroed_page(gfp); } static inline void pud_free(struct mm_struct *mm, pud_t *pud) diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index aa0ff4b02a96..3feec5af4e67 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -6,7 +6,7 @@ #include #include -#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO +#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO) #ifdef CONFIG_HIGHPTE #define PGALLOC_USER_GFP __GFP_HIGHMEM @@ -18,7 +18,7 @@ gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP; pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) { - return (pte_t *)__get_free_page(PGALLOC_GFP); + return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT); } pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) @@ -207,9 +207,13 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[]) { int i; bool failed = false; + gfp_t gfp = PGALLOC_GFP; + + if (mm == &init_mm) + gfp &= ~__GFP_ACCOUNT; for(i = 0; i < PREALLOCATED_PMDS; i++) { - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP); + pmd_t *pmd = (pmd_t *)__get_free_page(gfp); if (!pmd) failed = true; if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) { -- cgit v1.2.3-70-g09d2 From d86133bd396f5e4a8d5629c1b853b574de4faf32 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Tue, 26 Jul 2016 15:24:33 -0700 Subject: pipe: account to kmemcg Pipes can consume a significant amount of system memory, hence they should be accounted to kmemcg. 
This patch marks pipe_inode_info and anonymous pipe buffer page allocations as __GFP_ACCOUNT so that they would be charged to kmemcg. Note, since a pipe buffer page can be "stolen" and get reused for other purposes, including mapping to userspace, we clear PageKmemcg thus resetting page->_mapcount and uncharge it in anon_pipe_buf_steal, which is introduced by this patch. A note regarding anon_pipe_buf_steal implementation. We allow to steal the page if its ref count equals 1. It looks racy, but it is correct for anonymous pipe buffer pages, because: - We lock out all other pipe users, because ->steal is called with pipe_lock held, so the page can't be spliced to another pipe from under us. - The page is not on LRU and it never was. - Thus a parallel thread can access it only by PFN. Although this is quite possible (e.g. see page_idle_get_page and balloon_page_isolate) this is not dangerous, because all such functions do is increase page ref count, check if the page is the one they are looking for, and decrease ref count if it isn't. Since our page is clean except for PageKmemcg mark, which doesn't conflict with other _mapcount users, the worst that can happen is we see page_count > 2 due to a transient ref, in which case we false-positively abort ->steal, which is still fine, because ->steal is not guaranteed to succeed. Link: http://lkml.kernel.org/r/20160527150313.GD26059@esperanza Signed-off-by: Vladimir Davydov Cc: Alexander Viro Cc: Johannes Weiner Cc: Michal Hocko Cc: Eric Dumazet Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/pipe.c | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/fs/pipe.c b/fs/pipe.c index 0d3f5165cb0b..4b32928f5426 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -137,6 +138,22 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe, put_page(page); } +static int anon_pipe_buf_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + struct page *page = buf->page; + + if (page_count(page) == 1) { + if (memcg_kmem_enabled()) { + memcg_kmem_uncharge(page, 0); + __ClearPageKmemcg(page); + } + __SetPageLocked(page); + return 0; + } + return 1; +} + /** * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer * @pipe: the pipe that the buffer belongs to @@ -219,7 +236,7 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = { .can_merge = 1, .confirm = generic_pipe_buf_confirm, .release = anon_pipe_buf_release, - .steal = generic_pipe_buf_steal, + .steal = anon_pipe_buf_steal, .get = generic_pipe_buf_get, }; @@ -227,7 +244,7 @@ static const struct pipe_buf_operations packet_pipe_buf_ops = { .can_merge = 0, .confirm = generic_pipe_buf_confirm, .release = anon_pipe_buf_release, - .steal = generic_pipe_buf_steal, + .steal = anon_pipe_buf_steal, .get = generic_pipe_buf_get, }; @@ -405,7 +422,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from) int copied; if (!page) { - page = alloc_page(GFP_HIGHUSER); + page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT); if (unlikely(!page)) { ret = ret ? 
: -ENOMEM; break; @@ -611,7 +628,7 @@ struct pipe_inode_info *alloc_pipe_info(void) { struct pipe_inode_info *pipe; - pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL); + pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT); if (pipe) { unsigned long pipe_bufs = PIPE_DEF_BUFFERS; struct user_struct *user = get_current_user(); @@ -619,7 +636,9 @@ struct pipe_inode_info *alloc_pipe_info(void) if (!too_many_pipe_buffers_hard(user)) { if (too_many_pipe_buffers_soft(user)) pipe_bufs = 1; - pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL); + pipe->bufs = kcalloc(pipe_bufs, + sizeof(struct pipe_buffer), + GFP_KERNEL_ACCOUNT); } if (pipe->bufs) { @@ -1010,7 +1029,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages) if (nr_pages < pipe->nrbufs) return -EBUSY; - bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN); + bufs = kcalloc(nr_pages, sizeof(*bufs), + GFP_KERNEL_ACCOUNT | __GFP_NOWARN); if (unlikely(!bufs)) return -ENOMEM; -- cgit v1.2.3-70-g09d2 From 3aa9799e13645fda605e1c68831f2d4256a38537 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Tue, 26 Jul 2016 15:24:36 -0700 Subject: af_unix: charge buffers to kmemcg Unix sockets can consume a significant amount of system memory, hence they should be accounted to kmemcg. Since unix socket buffers are always allocated from process context, all we need to do to charge them to kmemcg is set __GFP_ACCOUNT in sock->sk_allocation mask. Eric asked: > 1) What happens when a buffer, allocated from socket lands in a > different socket , maybe owned by another user/process. > > Who owns it now, in term of kmemcg accounting ? We never move memcg charges. E.g. if two processes from different cgroups are sharing a memory region, each page will be charged to the process which touched it first. Or if two processes are working with the same directory tree, inodes and dentries will be charged to the first user. The same is fair for unix socket buffers - they will be charged to the sender. > 2) Has performance impact been evaluated ? I ran netperf STREAM_STREAM with default options in a kmemcg on a 4 core x2 HT box. The results are below: # clients bandwidth (10^6bits/sec) base patched 1 67643 +- 725 64874 +- 353 - 4.0 % 4 193585 +- 2516 186715 +- 1460 - 3.5 % 8 194820 +- 377 187443 +- 1229 - 3.7 % So the accounting doesn't come for free - it takes ~4% of performance. I believe we could optimize it by using per cpu batching not only on charge, but also on uncharge in memcg core, but that's beyond the scope of this patch set - I'll take a look at this later. Anyway, if performance impact is found to be unacceptable, it is always possible to disable kmem accounting at boot time (cgroup.memory=nokmem) or not use memory cgroups at runtime at all (thanks to jump labels there'll be no overhead even if they are compiled in). Link: http://lkml.kernel.org/r/fcfe6cae27a59fbc5e40145664b3cf085a560c68.1464079538.git.vdavydov@virtuozzo.com Signed-off-by: Vladimir Davydov Cc: "David S. 
Miller" Cc: Johannes Weiner Cc: Michal Hocko Cc: Eric Dumazet Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- net/unix/af_unix.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 735362c26c8e..f1dffe84f0d5 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -769,6 +769,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern) lockdep_set_class(&sk->sk_receive_queue.lock, &af_unix_sk_receive_queue_lock_key); + sk->sk_allocation = GFP_KERNEL_ACCOUNT; sk->sk_write_space = unix_write_space; sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen; sk->sk_destruct = unix_sock_destructor; -- cgit v1.2.3-70-g09d2 From fbe84a09da746f781553051bb3dbc63f7b0a5162 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Tue, 26 Jul 2016 15:24:39 -0700 Subject: mm,oom: remove unused argument from oom_scan_process_thread(). oom_scan_process_thread() does not use totalpages argument. oom_badness() uses it. Link: http://lkml.kernel.org/r/1463796041-7889-1-git-send-email-penguin-kernel@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/oom.h | 2 +- mm/memcontrol.c | 2 +- mm/oom_kill.c | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/oom.h b/include/linux/oom.h index cbc24a5fe28d..606137b3b778 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -92,7 +92,7 @@ extern void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint); extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, - struct task_struct *task, unsigned long totalpages); + struct task_struct *task); extern bool out_of_memory(struct oom_control *oc); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ff53e348c4bb..1a1a3093a5c9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1290,7 +1290,7 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, css_task_iter_start(&iter->css, &it); while ((task = css_task_iter_next(&it))) { - switch (oom_scan_process_thread(&oc, task, totalpages)) { + switch (oom_scan_process_thread(&oc, task)) { case OOM_SCAN_SELECT: if (chosen) put_task_struct(chosen); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index a376f1ebdad5..c11f8bdd0c12 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -274,7 +274,7 @@ static enum oom_constraint constrained_alloc(struct oom_control *oc, #endif enum oom_scan_t oom_scan_process_thread(struct oom_control *oc, - struct task_struct *task, unsigned long totalpages) + struct task_struct *task) { if (oom_unkillable_task(task, NULL, oc->nodemask)) return OOM_SCAN_CONTINUE; @@ -311,7 +311,7 @@ static struct task_struct *select_bad_process(struct oom_control *oc, for_each_process(p) { unsigned int points; - switch (oom_scan_process_thread(oc, p, totalpages)) { + switch (oom_scan_process_thread(oc, p)) { case OOM_SCAN_SELECT: chosen = p; chosen_points = ULONG_MAX; -- cgit v1.2.3-70-g09d2 From 8ea1d2a1985a7ae096edf5850a31d844ad1b8e97 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Tue, 26 Jul 2016 15:24:42 -0700 Subject: mm, frontswap: convert frontswap_enabled to static key I have noticed that frontswap.h first declares "frontswap_enabled" as extern bool variable, and then overrides it with "#define frontswap_enabled (1)" for CONFIG_FRONTSWAP=Y or (0) when disabled. The bool variable isn't actually instantiated anywhere. 
This all looks like an unfinished attempt to make frontswap_enabled reflect whether a backend is instantiated. But in the current state, all frontswap hooks call unconditionally into frontswap.c just to check if frontswap_ops is non-NULL. This should at least be checked inline, but we can further eliminate the overhead when CONFIG_FRONTSWAP is enabled and no backend registered, using a static key that is initially disabled, and gets enabled only upon first backend registration. Thus, checks for "frontswap_enabled" are replaced with "frontswap_enabled()" wrapping the static key check. There are two exceptions: - xen's selfballoon_process() was testing frontswap_enabled in code guarded by #ifdef CONFIG_FRONTSWAP, which was effectively always true when reachable. The patch just removes this check. Using frontswap_enabled() does not sound correct here, as this can be true even without xen's own backend being registered. - in SYSCALL_DEFINE2(swapon), change the check to IS_ENABLED(CONFIG_FRONTSWAP) as it seems the bitmap allocation cannot currently be postponed until a backend is registered. This means that frontswap will still have some memory overhead by being configured, but without a backend. After the patch, we can expect that some functions in frontswap.c are called only when frontswap_ops is non-NULL. Change the checks there to VM_BUG_ONs. While at it, convert other BUG_ONs to VM_BUG_ONs as frontswap has been stable for some time. [akpm@linux-foundation.org: coding-style fixes] Link: http://lkml.kernel.org/r/1463152235-9717-1-git-send-email-vbabka@suse.cz Signed-off-by: Vlastimil Babka Cc: Konrad Rzeszutek Wilk Cc: Boris Ostrovsky Cc: David Vrabel Cc: Juergen Gross Cc: "Kirill A. Shutemov" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/xen/xen-selfballoon.c | 4 ++-- include/linux/frontswap.h | 34 ++++++++++++++++++++-------------- mm/frontswap.c | 35 +++++++++++++++-------------------- mm/swapfile.c | 2 +- 4 files changed, 38 insertions(+), 37 deletions(-) diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c index 53a085fca00c..66620713242a 100644 --- a/drivers/xen/xen-selfballoon.c +++ b/drivers/xen/xen-selfballoon.c @@ -195,7 +195,7 @@ static void selfballoon_process(struct work_struct *work) MB2PAGES(selfballoon_reserved_mb); #ifdef CONFIG_FRONTSWAP /* allow space for frontswap pages to be repatriated */ - if (frontswap_selfshrinking && frontswap_enabled) + if (frontswap_selfshrinking) goal_pages += frontswap_curr_pages(); #endif if (cur_pages > goal_pages) @@ -230,7 +230,7 @@ static void selfballoon_process(struct work_struct *work) reset_timer = true; } #ifdef CONFIG_FRONTSWAP - if (frontswap_selfshrinking && frontswap_enabled) { + if (frontswap_selfshrinking) { frontswap_selfshrink(); reset_timer = true; } diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index e65ef959546c..c46d2aa16d81 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h @@ -4,6 +4,7 @@ #include #include #include +#include struct frontswap_ops { void (*init)(unsigned); /* this swap type was just swapon'ed */ @@ -14,7 +15,6 @@ struct frontswap_ops { struct frontswap_ops *next; /* private pointer to next ops */ }; -extern bool frontswap_enabled; extern void frontswap_register_ops(struct frontswap_ops *ops); extern void frontswap_shrink(unsigned long); extern unsigned long frontswap_curr_pages(void); @@ -30,7 +30,12 @@ extern void __frontswap_invalidate_page(unsigned, pgoff_t); extern void __frontswap_invalidate_area(unsigned); #ifdef 
CONFIG_FRONTSWAP -#define frontswap_enabled (1) +extern struct static_key_false frontswap_enabled_key; + +static inline bool frontswap_enabled(void) +{ + return static_branch_unlikely(&frontswap_enabled_key); +} static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) { @@ -50,7 +55,10 @@ static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) #else /* all inline routines become no-ops and all externs are ignored */ -#define frontswap_enabled (0) +static inline bool frontswap_enabled(void) +{ + return false; +} static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset) { @@ -70,37 +78,35 @@ static inline unsigned long *frontswap_map_get(struct swap_info_struct *p) static inline int frontswap_store(struct page *page) { - int ret = -1; + if (frontswap_enabled()) + return __frontswap_store(page); - if (frontswap_enabled) - ret = __frontswap_store(page); - return ret; + return -1; } static inline int frontswap_load(struct page *page) { - int ret = -1; + if (frontswap_enabled()) + return __frontswap_load(page); - if (frontswap_enabled) - ret = __frontswap_load(page); - return ret; + return -1; } static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset) { - if (frontswap_enabled) + if (frontswap_enabled()) __frontswap_invalidate_page(type, offset); } static inline void frontswap_invalidate_area(unsigned type) { - if (frontswap_enabled) + if (frontswap_enabled()) __frontswap_invalidate_area(type); } static inline void frontswap_init(unsigned type, unsigned long *map) { - if (frontswap_enabled) + if (frontswap_enabled()) __frontswap_init(type, map); } diff --git a/mm/frontswap.c b/mm/frontswap.c index 27a9924caf61..fec8b5044040 100644 --- a/mm/frontswap.c +++ b/mm/frontswap.c @@ -20,6 +20,8 @@ #include #include +DEFINE_STATIC_KEY_FALSE(frontswap_enabled_key); + /* * frontswap_ops are added by frontswap_register_ops, and provide the * frontswap "backend" implementation functions. Multiple implementations @@ -139,6 +141,8 @@ void frontswap_register_ops(struct frontswap_ops *ops) ops->next = frontswap_ops; } while (cmpxchg(&frontswap_ops, ops->next, ops) != ops->next); + static_branch_inc(&frontswap_enabled_key); + spin_lock(&swap_lock); plist_for_each_entry(si, &swap_active_head, list) { if (si->frontswap_map) @@ -189,7 +193,7 @@ void __frontswap_init(unsigned type, unsigned long *map) struct swap_info_struct *sis = swap_info[type]; struct frontswap_ops *ops; - BUG_ON(sis == NULL); + VM_BUG_ON(sis == NULL); /* * p->frontswap is a bitmap that we MUST have to figure out which page @@ -248,15 +252,9 @@ int __frontswap_store(struct page *page) pgoff_t offset = swp_offset(entry); struct frontswap_ops *ops; - /* - * Return if no backend registed. - * Don't need to inc frontswap_failed_stores here. 
- */ - if (!frontswap_ops) - return -1; - - BUG_ON(!PageLocked(page)); - BUG_ON(sis == NULL); + VM_BUG_ON(!frontswap_ops); + VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON(sis == NULL); /* * If a dup, we must remove the old page first; we can't leave the @@ -303,11 +301,10 @@ int __frontswap_load(struct page *page) pgoff_t offset = swp_offset(entry); struct frontswap_ops *ops; - if (!frontswap_ops) - return -1; + VM_BUG_ON(!frontswap_ops); + VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON(sis == NULL); - BUG_ON(!PageLocked(page)); - BUG_ON(sis == NULL); if (!__frontswap_test(sis, offset)) return -1; @@ -337,10 +334,9 @@ void __frontswap_invalidate_page(unsigned type, pgoff_t offset) struct swap_info_struct *sis = swap_info[type]; struct frontswap_ops *ops; - if (!frontswap_ops) - return; + VM_BUG_ON(!frontswap_ops); + VM_BUG_ON(sis == NULL); - BUG_ON(sis == NULL); if (!__frontswap_test(sis, offset)) return; @@ -360,10 +356,9 @@ void __frontswap_invalidate_area(unsigned type) struct swap_info_struct *sis = swap_info[type]; struct frontswap_ops *ops; - if (!frontswap_ops) - return; + VM_BUG_ON(!frontswap_ops); + VM_BUG_ON(sis == NULL); - BUG_ON(sis == NULL); if (sis->frontswap_map == NULL) return; diff --git a/mm/swapfile.c b/mm/swapfile.c index 031713ab40ce..78cfa292a29a 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2493,7 +2493,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) goto bad_swap; } /* frontswap enabled? set up bit-per-page map for frontswap */ - if (frontswap_enabled) + if (IS_ENABLED(CONFIG_FRONTSWAP)) frontswap_map = vzalloc(BITS_TO_LONGS(maxpages) * sizeof(long)); if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) { -- cgit v1.2.3-70-g09d2 From 91537fee001361b1a4d485f1af65d8efa03d49b5 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 26 Jul 2016 15:24:45 -0700 Subject: mm: add NR_ZSMALLOC to vmstat zram is very popular for some of the embedded world (e.g., TV, mobile phones). On those system, zsmalloc's consumed memory size is never trivial (one of example from real product system, total memory: 800M, zsmalloc consumed: 150M), so we have used this out of tree patch to monitor system memory behavior via /proc/vmstat. With zsmalloc in vmstat, it helps in tracking down system behavior due to memory usage. 
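The accounting itself is just a pair of zone page state updates placed next to the zspage page allocation and freeing paths. A minimal sketch of the pattern (the helper names here are hypothetical; the patch open-codes the calls in alloc_zspage() and __free_zspage()):

    static struct page *zs_alloc_accounted_page(gfp_t gfp)
    {
            struct page *page = alloc_page(gfp);

            if (page)
                    inc_zone_page_state(page, NR_ZSPAGES);  /* exported as nr_zspages */
            return page;
    }

    static void zs_free_accounted_page(struct page *page)
    {
            dec_zone_page_state(page, NR_ZSPAGES);
            __free_page(page);
    }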
[minchan@kernel.org: zsmalloc: follow up zsmalloc vmstat] Link: http://lkml.kernel.org/r/20160607091737.GC23435@bbox [akpm@linux-foundation.org: fix build with CONFIG_ZSMALLOC=m] Link: http://lkml.kernel.org/r/1464919731-13255-1-git-send-email-minchan@kernel.org Signed-off-by: Minchan Kim Cc: Sangseok Lee Cc: Chanho Min Cc: Chan Gyun Jeong Cc: Sergey Senozhatsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 3 +++ mm/vmstat.c | 4 +++- mm/zsmalloc.c | 7 ++++++- 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 3388ccbab7d6..3d7ab30d4940 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -140,6 +140,9 @@ enum zone_stat_item { NR_DIRTIED, /* page dirtyings since bootup */ NR_WRITTEN, /* page writings since bootup */ NR_PAGES_SCANNED, /* pages scanned since last reclaim */ +#if IS_ENABLED(CONFIG_ZSMALLOC) + NR_ZSPAGES, /* allocated in zsmalloc */ +#endif #ifdef CONFIG_NUMA NUMA_HIT, /* allocated in intended node */ NUMA_MISS, /* allocated in non intended node */ diff --git a/mm/vmstat.c b/mm/vmstat.c index cb2a67bb4158..2a0f26bdae39 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -718,7 +718,9 @@ const char * const vmstat_text[] = { "nr_dirtied", "nr_written", "nr_pages_scanned", - +#if IS_ENABLED(CONFIG_ZSMALLOC) + "nr_zspages", +#endif #ifdef CONFIG_NUMA "numa_hit", "numa_miss", diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 6b6986a02aa0..e4e8081b160b 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1007,6 +1007,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class, next = get_next_page(page); reset_page(page); unlock_page(page); + dec_zone_page_state(page, NR_ZSPAGES); put_page(page); page = next; } while (page != NULL); @@ -1137,11 +1138,15 @@ static struct zspage *alloc_zspage(struct zs_pool *pool, page = alloc_page(gfp); if (!page) { - while (--i >= 0) + while (--i >= 0) { + dec_zone_page_state(pages[i], NR_ZSPAGES); __free_page(pages[i]); + } cache_free_zspage(pool, zspage); return NULL; } + + inc_zone_page_state(page, NR_ZSPAGES); pages[i] = page; } -- cgit v1.2.3-70-g09d2 From ba6c19fd113a3965f8cf4c183a813d528008d03e Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Tue, 26 Jul 2016 15:24:47 -0700 Subject: include/linux/memblock.h: Clean up code for several trivial details Correct the function parameters alignment, since original code already use both tabs and white spaces together for the incorrect parameters alignment functions. If one line can hold one statement within 80 columns, let it in one line (original code did not consider about the tabs/spaces for 2nd line when a statement is separated into 2 lines). Try to let '' aligned within one macro, since all related lines are short enough. Remove useless statement "idx = 0;", and always assign rgn within the 'for' statement. 
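For callers nothing changes; a typical iteration over a struct memblock_type still reads as below (illustrative only, assuming the usual local 'type' pointer and 'idx'/'rgn' variables the macro expects):

    int idx;
    struct memblock_region *rgn;

    for_each_memblock_type(type, rgn) {
            /* idx and rgn are now advanced together by the macro itself */
            pr_debug("region %d: base %pa size %pa\n",
                     idx, &rgn->base, &rgn->size);
    }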
Link: http://lkml.kernel.org/r/1464904899-1714-1-git-send-email-chengang@emindsoft.com.cn Signed-off-by: Chen Gang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memblock.h | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 3106ac1c895e..6c14b6179727 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -73,8 +73,8 @@ extern bool movable_node_enabled; if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, - phys_addr_t start, phys_addr_t end, - int nid, ulong flags); + phys_addr_t start, phys_addr_t end, + int nid, ulong flags); phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, phys_addr_t size, phys_addr_t align); phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr); @@ -110,7 +110,7 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags, phys_addr_t *out_end, int *out_nid); void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, - phys_addr_t *out_end); + phys_addr_t *out_end); /** * for_each_mem_range - iterate through memblock areas from type_a and not @@ -148,7 +148,7 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, p_start, p_end, p_nid) \ for (i = (u64)ULLONG_MAX, \ __next_mem_range_rev(&i, nid, flags, type_a, type_b,\ - p_start, p_end, p_nid); \ + p_start, p_end, p_nid); \ i != (u64)ULLONG_MAX; \ __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ p_start, p_end, p_nid)) @@ -163,8 +163,7 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, * is initialized. */ #define for_each_reserved_mem_region(i, p_start, p_end) \ - for (i = 0UL, \ - __next_reserved_mem_region(&i, p_start, p_end); \ + for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end); \ i != (u64)ULLONG_MAX; \ __next_reserved_mem_region(&i, p_start, p_end)) @@ -403,15 +402,14 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo } #define for_each_memblock(memblock_type, region) \ - for (region = memblock.memblock_type.regions; \ + for (region = memblock.memblock_type.regions; \ region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ region++) #define for_each_memblock_type(memblock_type, rgn) \ - idx = 0; \ - rgn = &memblock_type->regions[idx]; \ - for (idx = 0; idx < memblock_type->cnt; \ - idx++,rgn = &memblock_type->regions[idx]) + for (idx = 0, rgn = &memblock_type->regions[0]; \ + idx < memblock_type->cnt; \ + idx++, rgn = &memblock_type->regions[idx]) #ifdef CONFIG_MEMTEST extern void early_memtest(phys_addr_t start, phys_addr_t end); -- cgit v1.2.3-70-g09d2 From e5e3f4c4f0e95ecbad2f8d2f4f6a29bb8a90226b Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 26 Jul 2016 15:24:50 -0700 Subject: mm, oom_reaper: make sure that mmput_async is called only when memory was reaped Tetsuo is worried that mmput_async might still lead to a premature new oom victim selection due to the following race: __oom_reap_task exit_mm find_lock_task_mm atomic_inc(mm->mm_users) # = 2 task_unlock task_lock task->mm = NULL up_read(&mm->mmap_sem) < somebody write locks mmap_sem > task_unlock mmput atomic_dec_and_test # = 1 exit_oom_victim down_read_trylock # failed - no reclaim mmput_async # Takes unpredictable amount of time < new OOM situation > the final __mmput will be executed in the delayed context which might happen far in the future. 
Such a race is highly unlikely because the write holder of mmap_sem would have to be an external task (all direct holders are already killed or exiting) and it usually have to pin mm_users in order to do anything reasonable. We can, however, make sure that the mmput_async is only called when we do not back off and reap some memory. That would reduce the impact of the delayed __mmput because the real content would be already freed. Pin mm_count to keep it alive after we drop task_lock and before we try to get mmap_sem. If the mmap_sem succeeds we can try to grab mm_users reference and then go on with unmapping the address space. It is not clear whether this race is possible at all but it is better to be more robust and do not pin mm_users unless we are sure we are actually doing some real work during __oom_reap_task. Link: http://lkml.kernel.org/r/1465306987-30297-1-git-send-email-mhocko@kernel.org Signed-off-by: Michal Hocko Reported-by: Tetsuo Handa Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/oom_kill.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/mm/oom_kill.c b/mm/oom_kill.c index c11f8bdd0c12..d4a929d79470 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -452,7 +452,7 @@ static bool __oom_reap_task(struct task_struct *tsk) * We have to make sure to not race with the victim exit path * and cause premature new oom victim selection: * __oom_reap_task exit_mm - * atomic_inc_not_zero + * mmget_not_zero * mmput * atomic_dec_and_test * exit_oom_victim @@ -474,12 +474,22 @@ static bool __oom_reap_task(struct task_struct *tsk) if (!p) goto unlock_oom; mm = p->mm; - atomic_inc(&mm->mm_users); + atomic_inc(&mm->mm_count); task_unlock(p); if (!down_read_trylock(&mm->mmap_sem)) { ret = false; - goto unlock_oom; + goto mm_drop; + } + + /* + * increase mm_users only after we know we will reap something so + * that the mmput_async is called only when we have reaped something + * and delayed __mmput doesn't matter that much + */ + if (!mmget_not_zero(mm)) { + up_read(&mm->mmap_sem); + goto mm_drop; } tlb_gather_mmu(&tlb, mm, 0, -1); @@ -521,15 +531,16 @@ static bool __oom_reap_task(struct task_struct *tsk) * to release its memory. */ set_bit(MMF_OOM_REAPED, &mm->flags); -unlock_oom: - mutex_unlock(&oom_lock); /* * Drop our reference but make sure the mmput slow path is called from a * different context because we shouldn't risk we get stuck there and * put the oom_reaper out of the way. */ - if (mm) - mmput_async(mm); + mmput_async(mm); +mm_drop: + mmdrop(mm); +unlock_oom: + mutex_unlock(&oom_lock); return ret; } -- cgit v1.2.3-70-g09d2 From 8a5c743e308dd2b90ad10d1faaa7a1b09173a132 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Tue, 26 Jul 2016 15:24:53 -0700 Subject: mm, memcg: use consistent gfp flags during readahead Vladimir has noticed that we might declare memcg oom even during readahead because read_pages only uses GFP_KERNEL (with mapping_gfp restriction) while __do_page_cache_readahead uses page_cache_alloc_readahead which adds __GFP_NORETRY to prevent from OOMs. This gfp mask discrepancy is really unfortunate and easily fixable. Drop page_cache_alloc_readahead() which only has one user and outsource the gfp_mask logic into readahead_gfp_mask and propagate this mask from __do_page_cache_readahead down to read_pages. This alone would have only very limited impact as most filesystems are implementing ->readpages and the common implementation mpage_readpages does GFP_KERNEL (with mapping_gfp restriction) again. 
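For reference, the helper introduced by this patch is nothing more than the old readahead gfp logic given a name (taken from the pagemap.h hunk below):

    static inline gfp_t readahead_gfp_mask(struct address_space *x)
    {
            return mapping_gfp_mask(x) |
                    __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
    }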
We can tell it to use readahead_gfp_mask instead as this function is called only during readahead as well. The same applies to read_cache_pages. ext4 has its own ext4_mpage_readpages but the path which has pages != NULL can use the same gfp mask. Btrfs, cifs, f2fs and orangefs are doing a very similar pattern to mpage_readpages so the same can be applied to them as well. [akpm@linux-foundation.org: coding-style fixes] [mhocko@suse.com: restrict gfp mask in mpage_alloc] Link: http://lkml.kernel.org/r/20160610074223.GC32285@dhcp22.suse.cz Link: http://lkml.kernel.org/r/1465301556-26431-1-git-send-email-mhocko@kernel.org Signed-off-by: Michal Hocko Cc: Vladimir Davydov Cc: Chris Mason Cc: Steve French Cc: Theodore Ts'o Cc: Jan Kara Cc: Mike Marshall Cc: Jaegeuk Kim Cc: Changman Lee Cc: Chao Yu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/btrfs/extent_io.c | 3 ++- fs/cifs/file.c | 2 +- fs/ext4/readpage.c | 2 +- fs/f2fs/data.c | 3 ++- fs/mpage.c | 4 +++- fs/orangefs/inode.c | 2 +- include/linux/pagemap.h | 6 +++--- mm/readahead.c | 13 ++++++------- 8 files changed, 19 insertions(+), 16 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 75533adef998..e91d55837dd2 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4180,7 +4180,8 @@ int extent_readpages(struct extent_io_tree *tree, prefetchw(&page->flags); list_del(&page->lru); if (add_to_page_cache_lru(page, mapping, - page->index, GFP_NOFS)) { + page->index, + readahead_gfp_mask(mapping))) { put_page(page); continue; } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index d4890b6dc22d..579e41b350a2 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3366,7 +3366,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, struct page *page, *tpage; unsigned int expected_index; int rc; - gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL); + gfp_t gfp = readahead_gfp_mask(mapping); INIT_LIST_HEAD(tmplist); diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index dc54a4b60eba..c75b66a64982 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -166,7 +166,7 @@ int ext4_mpage_readpages(struct address_space *mapping, page = list_entry(pages->prev, struct page, lru); list_del(&page->lru); if (add_to_page_cache_lru(page, mapping, page->index, - mapping_gfp_constraint(mapping, GFP_KERNEL))) + readahead_gfp_mask(mapping))) goto next_page; } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 9a8bbc1fb1fa..c80dda4bdff8 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -996,7 +996,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping, page = list_entry(pages->prev, struct page, lru); list_del(&page->lru); if (add_to_page_cache_lru(page, mapping, - page->index, GFP_KERNEL)) + page->index, + readahead_gfp_mask(mapping))) goto next_page; } diff --git a/fs/mpage.c b/fs/mpage.c index eedc644b78d7..c8a05901a37b 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -71,6 +71,8 @@ mpage_alloc(struct block_device *bdev, { struct bio *bio; + /* Restrict the given (page cache) mask for slab allocations */ + gfp_flags &= GFP_KERNEL; bio = bio_alloc(gfp_flags, nr_vecs); if (bio == NULL && (current->flags & PF_MEMALLOC)) { @@ -362,7 +364,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, sector_t last_block_in_bio = 0; struct buffer_head map_bh; unsigned long first_logical_block = 0; - gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL); + gfp_t gfp = readahead_gfp_mask(mapping); map_bh.b_state = 0; map_bh.b_size = 0; diff --git 
a/fs/orangefs/inode.c b/fs/orangefs/inode.c index 85640e955cde..06a8da75651d 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -80,7 +80,7 @@ static int orangefs_readpages(struct file *file, if (!add_to_page_cache(page, mapping, page->index, - GFP_KERNEL)) { + readahead_gfp_mask(mapping))) { ret = read_one_page(page); gossip_debug(GOSSIP_INODE_DEBUG, "failure adding page to cache, read_one_page returned: %d\n", diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 97354102794d..81363b834900 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -209,10 +209,10 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x) return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD); } -static inline struct page *page_cache_alloc_readahead(struct address_space *x) +static inline gfp_t readahead_gfp_mask(struct address_space *x) { - return __page_cache_alloc(mapping_gfp_mask(x) | - __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN); + return mapping_gfp_mask(x) | + __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN; } typedef int filler_t(void *, struct page *); diff --git a/mm/readahead.c b/mm/readahead.c index 40be3ae0afe3..65ec288dc057 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -89,7 +89,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages, page = lru_to_page(pages); list_del(&page->lru); if (add_to_page_cache_lru(page, mapping, page->index, - mapping_gfp_constraint(mapping, GFP_KERNEL))) { + readahead_gfp_mask(mapping))) { read_cache_pages_invalidate_page(mapping, page); continue; } @@ -108,7 +108,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages, EXPORT_SYMBOL(read_cache_pages); static int read_pages(struct address_space *mapping, struct file *filp, - struct list_head *pages, unsigned nr_pages) + struct list_head *pages, unsigned int nr_pages, gfp_t gfp) { struct blk_plug plug; unsigned page_idx; @@ -126,10 +126,8 @@ static int read_pages(struct address_space *mapping, struct file *filp, for (page_idx = 0; page_idx < nr_pages; page_idx++) { struct page *page = lru_to_page(pages); list_del(&page->lru); - if (!add_to_page_cache_lru(page, mapping, page->index, - mapping_gfp_constraint(mapping, GFP_KERNEL))) { + if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) mapping->a_ops->readpage(filp, page); - } put_page(page); } ret = 0; @@ -159,6 +157,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp, int page_idx; int ret = 0; loff_t isize = i_size_read(inode); + gfp_t gfp_mask = readahead_gfp_mask(mapping); if (isize == 0) goto out; @@ -180,7 +179,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp, if (page && !radix_tree_exceptional_entry(page)) continue; - page = page_cache_alloc_readahead(mapping); + page = __page_cache_alloc(gfp_mask); if (!page) break; page->index = page_offset; @@ -196,7 +195,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp, * will then handle the error. */ if (ret) - read_pages(mapping, filp, &page_pool, ret); + read_pages(mapping, filp, &page_pool, ret, gfp_mask); BUG_ON(!list_empty(&page_pool)); out: return ret; -- cgit v1.2.3-70-g09d2 From ef3cc4db415e0cee73ea37ac6d79821d77f15f1d Mon Sep 17 00:00:00 2001 From: nimisolo Date: Tue, 26 Jul 2016 15:24:56 -0700 Subject: mm/memblock.c:memblock_add_range(): if nr_new is 0 just return If nr_new is 0 which means there's no region would be added, so just return to the caller. 
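The change is a short early return right after the first counting pass, so the resize/merge logic is skipped entirely when there is nothing to insert (as in the hunk below):

    /* in memblock_add_range(), after counting how many regions would be added */
    if (!nr_new)
            return 0;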
Signed-off-by: nimisolo Cc: Alexander Kuleshov Cc: Pekka Enberg Cc: Tony Luck Cc: Mel Gorman Cc: Tang Chen Cc: Wei Yang Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memblock.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/memblock.c b/mm/memblock.c index ac1248933b31..ca099159b45a 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -584,6 +584,9 @@ repeat: nid, flags); } + if (!nr_new) + return 0; + /* * If this was the first round, resize array and repeat for actual * insertions; otherwise, merge and return. -- cgit v1.2.3-70-g09d2 From 70652f6ec0566ae6b4147d88c6d043c68484227f Mon Sep 17 00:00:00 2001 From: Ebru Akagunduz Date: Tue, 26 Jul 2016 15:24:59 -0700 Subject: mm: make optimistic check for swapin readahead Introduce a new sysfs integer knob /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_swap which makes optimistic check for swapin readahead to increase thp collapse rate. Before getting swapped out pages to memory, checks them and allows up to a certain number. It also prints out using tracepoints amount of unmapped ptes. [vdavydov@parallels.com: fix scan not aborted on SCAN_EXCEED_SWAP_PTE] [sfr@canb.auug.org.au: build fix] Link: http://lkml.kernel.org/r/20160616154503.65806e12@canb.auug.org.au Signed-off-by: Ebru Akagunduz Acked-by: Rik van Riel Cc: Kirill A. Shutemov Cc: Naoya Horiguchi Cc: Andrea Arcangeli Cc: Joonsoo Kim Cc: Xie XiuQi Cc: Cyrill Gorcunov Cc: Mel Gorman Cc: David Rientjes Cc: Vlastimil Babka Cc: Aneesh Kumar K.V Cc: Hugh Dickins Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/huge_memory.h | 14 +++++++----- mm/huge_memory.c | 45 +++++++++++++++++++++++++++++++++++--- 2 files changed, 51 insertions(+), 8 deletions(-) diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index 551ba4acde4d..fad6539c9d68 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -28,7 +28,8 @@ EM( SCAN_SWAP_CACHE_PAGE, "page_swap_cache") \ EM( SCAN_DEL_PAGE_LRU, "could_not_delete_page_from_lru")\ EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \ - EMe( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") + EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \ + EMe( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") #undef EM #undef EMe @@ -45,9 +46,9 @@ SCAN_STATUS TRACE_EVENT(mm_khugepaged_scan_pmd, TP_PROTO(struct mm_struct *mm, struct page *page, bool writable, - bool referenced, int none_or_zero, int status), + bool referenced, int none_or_zero, int status, int unmapped), - TP_ARGS(mm, page, writable, referenced, none_or_zero, status), + TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped), TP_STRUCT__entry( __field(struct mm_struct *, mm) @@ -56,6 +57,7 @@ TRACE_EVENT(mm_khugepaged_scan_pmd, __field(bool, referenced) __field(int, none_or_zero) __field(int, status) + __field(int, unmapped) ), TP_fast_assign( @@ -65,15 +67,17 @@ TRACE_EVENT(mm_khugepaged_scan_pmd, __entry->referenced = referenced; __entry->none_or_zero = none_or_zero; __entry->status = status; + __entry->unmapped = unmapped; ), - TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s", + TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s, unmapped=%d", __entry->mm, __entry->pfn, __entry->writable, __entry->referenced, __entry->none_or_zero, - __print_symbolic(__entry->status, SCAN_STATUS)) + 
__print_symbolic(__entry->status, SCAN_STATUS), + __entry->unmapped) ); TRACE_EVENT(mm_collapse_huge_page, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 23d1bf42fef1..ed474483a620 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -57,7 +57,8 @@ enum scan_result { SCAN_SWAP_CACHE_PAGE, SCAN_DEL_PAGE_LRU, SCAN_ALLOC_HUGE_PAGE_FAIL, - SCAN_CGROUP_CHARGE_FAIL + SCAN_CGROUP_CHARGE_FAIL, + SCAN_EXCEED_SWAP_PTE }; #define CREATE_TRACE_POINTS @@ -100,6 +101,7 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait); * fault. */ static unsigned int khugepaged_max_ptes_none __read_mostly; +static unsigned int khugepaged_max_ptes_swap __read_mostly; static int khugepaged(void *none); static int khugepaged_slab_init(void); @@ -598,6 +600,33 @@ static struct kobj_attribute khugepaged_max_ptes_none_attr = __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show, khugepaged_max_ptes_none_store); +static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_max_ptes_swap); +} + +static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long max_ptes_swap; + + err = kstrtoul(buf, 10, &max_ptes_swap); + if (err || max_ptes_swap > HPAGE_PMD_NR-1) + return -EINVAL; + + khugepaged_max_ptes_swap = max_ptes_swap; + + return count; +} + +static struct kobj_attribute khugepaged_max_ptes_swap_attr = + __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show, + khugepaged_max_ptes_swap_store); + static struct attribute *khugepaged_attr[] = { &khugepaged_defrag_attr.attr, &khugepaged_max_ptes_none_attr.attr, @@ -606,6 +635,7 @@ static struct attribute *khugepaged_attr[] = { &full_scans_attr.attr, &scan_sleep_millisecs_attr.attr, &alloc_sleep_millisecs_attr.attr, + &khugepaged_max_ptes_swap_attr.attr, NULL, }; @@ -674,6 +704,7 @@ static int __init hugepage_init(void) khugepaged_pages_to_scan = HPAGE_PMD_NR * 8; khugepaged_max_ptes_none = HPAGE_PMD_NR - 1; + khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8; /* * hugepages can't be allocated by the buddy allocator */ @@ -2507,7 +2538,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, struct page *page = NULL; unsigned long _address; spinlock_t *ptl; - int node = NUMA_NO_NODE; + int node = NUMA_NO_NODE, unmapped = 0; bool writable = false, referenced = false; VM_BUG_ON(address & ~HPAGE_PMD_MASK); @@ -2523,6 +2554,14 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++, _address += PAGE_SIZE) { pte_t pteval = *_pte; + if (is_swap_pte(pteval)) { + if (++unmapped <= khugepaged_max_ptes_swap) { + continue; + } else { + result = SCAN_EXCEED_SWAP_PTE; + goto out_unmap; + } + } if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { if (!userfaultfd_armed(vma) && ++none_or_zero <= khugepaged_max_ptes_none) { @@ -2609,7 +2648,7 @@ out_unmap: } out: trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, - none_or_zero, result); + none_or_zero, result, unmapped); return ret; } -- cgit v1.2.3-70-g09d2 From 8a966ed746d63c8103d496da85973eeeec01d77f Mon Sep 17 00:00:00 2001 From: Ebru Akagunduz Date: Tue, 26 Jul 2016 15:25:03 -0700 Subject: mm: make swapin readahead to improve thp collapse rate This patch makes swapin readahead to improve thp collapse rate. When khugepaged scanned pages, there can be a few of the pages in swap area. 
With the patch THP can collapse 4kB pages into a THP when there are up to max_ptes_swap swap ptes in a 2MB range. The patch was tested with a test program that allocates 400B of memory, writes to it, and then sleeps. I force the system to swap out all. Afterwards, the test program touches the area by writing, it skips a page in each 20 pages of the area. Without the patch, system did not swap in readahead. THP rate was %65 of the program of the memory, it did not change over time. With this patch, after 10 minutes of waiting khugepaged had collapsed %99 of the program's memory. [kirill.shutemov@linux.intel.com: trivial cleanup of exit path of the function] [kirill.shutemov@linux.intel.com: __collapse_huge_page_swapin(): drop unused 'pte' parameter] [kirill.shutemov@linux.intel.com: do not hold anon_vma lock during swap in] Signed-off-by: Ebru Akagunduz Acked-by: Rik van Riel Cc: Naoya Horiguchi Cc: Andrea Arcangeli Cc: Joonsoo Kim Cc: Xie XiuQi Cc: Cyrill Gorcunov Cc: Mel Gorman Cc: David Rientjes Cc: Vlastimil Babka Cc: Aneesh Kumar K.V Cc: Hugh Dickins Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/huge_memory.h | 24 +++++++++++++++++++++ mm/huge_memory.c | 43 +++++++++++++++++++++++++++++++++++--- mm/internal.h | 4 ++++ mm/memory.c | 2 +- 4 files changed, 69 insertions(+), 4 deletions(-) diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index fad6539c9d68..bda21183eb05 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -135,5 +135,29 @@ TRACE_EVENT(mm_collapse_huge_page_isolate, __print_symbolic(__entry->status, SCAN_STATUS)) ); +TRACE_EVENT(mm_collapse_huge_page_swapin, + + TP_PROTO(struct mm_struct *mm, int swapped_in, int ret), + + TP_ARGS(mm, swapped_in, ret), + + TP_STRUCT__entry( + __field(struct mm_struct *, mm) + __field(int, swapped_in) + __field(int, ret) + ), + + TP_fast_assign( + __entry->mm = mm; + __entry->swapped_in = swapped_in; + __entry->ret = ret; + ), + + TP_printk("mm=%p, swapped_in=%d, ret=%d", + __entry->mm, + __entry->swapped_in, + __entry->ret) +); + #endif /* __HUGE_MEMORY_H */ #include diff --git a/mm/huge_memory.c b/mm/huge_memory.c index ed474483a620..b11351579e7a 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2373,6 +2373,44 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) return !(vma->vm_flags & VM_NO_THP); } +/* + * Bring missing pages in from swap, to complete THP collapse. + * Only done if khugepaged_scan_pmd believes it is worthwhile. + * + * Called and returns without pte mapped or spinlocks held, + * but with mmap_sem held to protect against vma changes. 
+ */ + +static void __collapse_huge_page_swapin(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd) +{ + unsigned long _address; + pte_t *pte, pteval; + int swapped_in = 0, ret = 0; + + pte = pte_offset_map(pmd, address); + for (_address = address; _address < address + HPAGE_PMD_NR*PAGE_SIZE; + pte++, _address += PAGE_SIZE) { + pteval = *pte; + if (!is_swap_pte(pteval)) + continue; + swapped_in++; + ret = do_swap_page(mm, vma, _address, pte, pmd, + FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_RETRY_NOWAIT, + pteval); + if (ret & VM_FAULT_ERROR) { + trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0); + return; + } + /* pte is unmapped now, we need to map it */ + pte = pte_offset_map(pmd, _address); + } + pte--; + pte_unmap(pte); + trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1); +} + static void collapse_huge_page(struct mm_struct *mm, unsigned long address, struct page **hpage, @@ -2440,6 +2478,8 @@ static void collapse_huge_page(struct mm_struct *mm, goto out; } + __collapse_huge_page_swapin(mm, vma, address, pmd); + anon_vma_lock_write(vma->anon_vma); pte = pte_offset_map(pmd, address); @@ -2516,9 +2556,6 @@ static void collapse_huge_page(struct mm_struct *mm, result = SCAN_SUCCEED; out_up_write: up_write(&mm->mmap_sem); - trace_mm_collapse_huge_page(mm, isolated, result); - return; - out_nolock: trace_mm_collapse_huge_page(mm, isolated, result); return; diff --git a/mm/internal.h b/mm/internal.h index fbfba0cc2c35..e1531758122b 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -36,6 +36,10 @@ /* Do not use these with a slab allocator */ #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) +extern int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *page_table, pmd_t *pmd, + unsigned int flags, pte_t orig_pte); + void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long floor, unsigned long ceiling); diff --git a/mm/memory.c b/mm/memory.c index a329149e1c54..5e6eadd127e7 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2522,7 +2522,7 @@ EXPORT_SYMBOL(unmap_mapping_range); * We return with the mmap_sem locked or unlocked in the same cases * as does filemap_fault(). */ -static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, +int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags, pte_t orig_pte) { -- cgit v1.2.3-70-g09d2 From 72695862529b4190b58c779809bc01ac55ea6605 Mon Sep 17 00:00:00 2001 From: Ebru Akagunduz Date: Tue, 26 Jul 2016 15:25:06 -0700 Subject: mm, thp: make swapin readahead under down_read of mmap_sem Currently khugepaged makes swapin readahead under down_write. This patch supplies to make swapin readahead under down_read instead of down_write. The patch was tested with a test program that allocates 800MB of memory, writes to it, and then sleeps. The system was forced to swap out all. Afterwards, the test program touches the area by writing, it skips a page in each 20 pages of the area. 
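A rough userspace sketch of such a test program (not the original tool, just the idea: fault in a large area, let it get swapped out, then re-touch one page in every 20):

    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    #define AREA_SIZE   (800UL << 20)          /* 800MB */
    #define PAGE_SZ     4096UL
    #define STRIDE      (20 * PAGE_SZ)         /* touch one page in each 20 */

    int main(void)
    {
            char *p = malloc(AREA_SIZE);

            if (!p)
                    return 1;
            memset(p, 1, AREA_SIZE);    /* populate the whole area */
            sleep(600);                 /* in the described test, swap-out was forced externally here */
            for (unsigned long off = 0; off < AREA_SIZE; off += STRIDE)
                    p[off] = 2;         /* sparse re-touch triggers swapin and collapse */
            pause();                    /* keep the mapping alive for inspection */
            return 0;
    }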
[akpm@linux-foundation.org: update comment to match new code] [kirill.shutemov@linux.intel.com: passing 'vma' to hugepage_vma_revlidate() is useless] Link: http://lkml.kernel.org/r/20160530095058.GA53044@black.fi.intel.com Link: http://lkml.kernel.org/r/1466021202-61880-3-git-send-email-kirill.shutemov@linux.intel.com Link: http://lkml.kernel.org/r/1464335964-6510-4-git-send-email-ebru.akagunduz@gmail.com Link: http://lkml.kernel.org/r/1466021202-61880-2-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Ebru Akagunduz Cc: Hugh Dickins Cc: Rik van Riel Cc: "Kirill A. Shutemov" Cc: Naoya Horiguchi Cc: Andrea Arcangeli Cc: Joonsoo Kim Cc: Cyrill Gorcunov Cc: Mel Gorman Cc: David Rientjes Cc: Vlastimil Babka Cc: Aneesh Kumar K.V Cc: Johannes Weiner Cc: Michal Hocko Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 91 ++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 62 insertions(+), 29 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index b11351579e7a..b54559058d30 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2373,6 +2373,34 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) return !(vma->vm_flags & VM_NO_THP); } +/* + * If mmap_sem temporarily dropped, revalidate vma + * before taking mmap_sem. + * Return 0 if succeeds, otherwise return none-zero + * value (scan code). + */ + +static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address) +{ + struct vm_area_struct *vma; + unsigned long hstart, hend; + + if (unlikely(khugepaged_test_exit(mm))) + return SCAN_ANY_PROCESS; + + vma = find_vma(mm, address); + if (!vma) + return SCAN_VMA_NULL; + + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; + hend = vma->vm_end & HPAGE_PMD_MASK; + if (address < hstart || address + HPAGE_PMD_SIZE > hend) + return SCAN_ADDRESS_RANGE; + if (!hugepage_vma_check(vma)) + return SCAN_VMA_CHECK; + return 0; +} + /* * Bring missing pages in from swap, to complete THP collapse. * Only done if khugepaged_scan_pmd believes it is worthwhile. @@ -2381,7 +2409,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) * but with mmap_sem held to protect against vma changes. 
*/ -static void __collapse_huge_page_swapin(struct mm_struct *mm, +static bool __collapse_huge_page_swapin(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd) { @@ -2397,11 +2425,18 @@ static void __collapse_huge_page_swapin(struct mm_struct *mm, continue; swapped_in++; ret = do_swap_page(mm, vma, _address, pte, pmd, - FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_RETRY_NOWAIT, + FAULT_FLAG_ALLOW_RETRY, pteval); + /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ + if (ret & VM_FAULT_RETRY) { + down_read(&mm->mmap_sem); + /* vma is no longer available, don't continue to swapin */ + if (hugepage_vma_revalidate(mm, address)) + return false; + } if (ret & VM_FAULT_ERROR) { trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0); - return; + return false; } /* pte is unmapped now, we need to map it */ pte = pte_offset_map(pmd, _address); @@ -2409,6 +2444,7 @@ static void __collapse_huge_page_swapin(struct mm_struct *mm, pte--; pte_unmap(pte); trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1); + return true; } static void collapse_huge_page(struct mm_struct *mm, @@ -2423,7 +2459,6 @@ static void collapse_huge_page(struct mm_struct *mm, struct page *new_page; spinlock_t *pmd_ptl, *pte_ptl; int isolated = 0, result = 0; - unsigned long hstart, hend; struct mem_cgroup *memcg; unsigned long mmun_start; /* For mmu_notifiers */ unsigned long mmun_end; /* For mmu_notifiers */ @@ -2446,39 +2481,37 @@ static void collapse_huge_page(struct mm_struct *mm, goto out_nolock; } - /* - * Prevent all access to pagetables with the exception of - * gup_fast later hanlded by the ptep_clear_flush and the VM - * handled by the anon_vma lock + PG_lock. - */ - down_write(&mm->mmap_sem); - if (unlikely(khugepaged_test_exit(mm))) { - result = SCAN_ANY_PROCESS; + down_read(&mm->mmap_sem); + result = hugepage_vma_revalidate(mm, address); + if (result) goto out; - } - vma = find_vma(mm, address); - if (!vma) { - result = SCAN_VMA_NULL; - goto out; - } - hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; - hend = vma->vm_end & HPAGE_PMD_MASK; - if (address < hstart || address + HPAGE_PMD_SIZE > hend) { - result = SCAN_ADDRESS_RANGE; - goto out; - } - if (!hugepage_vma_check(vma)) { - result = SCAN_VMA_CHECK; - goto out; - } pmd = mm_find_pmd(mm, address); if (!pmd) { result = SCAN_PMD_NULL; goto out; } - __collapse_huge_page_swapin(mm, vma, address, pmd); + /* + * __collapse_huge_page_swapin always returns with mmap_sem locked. + * If it fails, release mmap_sem and jump directly out. + * Continuing to collapse causes inconsistency. + */ + if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { + up_read(&mm->mmap_sem); + goto out; + } + + up_read(&mm->mmap_sem); + /* + * Prevent all access to pagetables with the exception of + * gup_fast later handled by the ptep_clear_flush and the VM + * handled by the anon_vma lock + PG_lock. + */ + down_write(&mm->mmap_sem); + result = hugepage_vma_revalidate(mm, address); + if (result) + goto out; anon_vma_lock_write(vma->anon_vma); -- cgit v1.2.3-70-g09d2 From 8024ee2a09c6135102505bab4105e1410c279832 Mon Sep 17 00:00:00 2001 From: Ebru Akagunduz Date: Tue, 26 Jul 2016 15:25:09 -0700 Subject: mm, thp: fix locking inconsistency in collapse_huge_page After creating revalidate vma function, locking inconsistency occured due to directing the code path to wrong label. This patch directs to correct label and fix the inconsistency. 
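The corrected error paths all follow the same shape while mmap_sem is held for read: undo the memcg charge, drop the semaphore, and bail out through the label that does not try to unlock again (see the hunks below):

    result = hugepage_vma_revalidate(mm, address);
    if (result) {
            mem_cgroup_cancel_charge(new_page, memcg, true);
            up_read(&mm->mmap_sem);
            goto out_nolock;
    }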
Related commit that caused inconsistency: http://git.kernel.org/cgit/linux/kernel/git/next/linux-next.git/commit/?id=da4360877094368f6dfe75bbe804b0f0a5d575b0 Link: http://lkml.kernel.org/r/1464956884-4644-1-git-send-email-ebru.akagunduz@gmail.com Link: http://lkml.kernel.org/r/1466021202-61880-4-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Ebru Akagunduz Cc: Vlastimil Babka Cc: Sergey Senozhatsky Cc: Kirill A. Shutemov Cc: Stephen Rothwell Cc: Rik van Riel Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index b54559058d30..1841e0ceac23 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2483,13 +2483,18 @@ static void collapse_huge_page(struct mm_struct *mm, down_read(&mm->mmap_sem); result = hugepage_vma_revalidate(mm, address); - if (result) - goto out; + if (result) { + mem_cgroup_cancel_charge(new_page, memcg, true); + up_read(&mm->mmap_sem); + goto out_nolock; + } pmd = mm_find_pmd(mm, address); if (!pmd) { result = SCAN_PMD_NULL; - goto out; + mem_cgroup_cancel_charge(new_page, memcg, true); + up_read(&mm->mmap_sem); + goto out_nolock; } /* @@ -2498,8 +2503,9 @@ static void collapse_huge_page(struct mm_struct *mm, * Continuing to collapse causes inconsistency. */ if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { + mem_cgroup_cancel_charge(new_page, memcg, true); up_read(&mm->mmap_sem); - goto out; + goto out_nolock; } up_read(&mm->mmap_sem); -- cgit v1.2.3-70-g09d2 From 1f52e67e5e7f48424059f8168e3a65bddc80ad97 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:12 -0700 Subject: khugepaged: recheck pmd after mmap_sem re-acquired Vlastimil noted[1] that pmd can be no longer valid after we drop mmap_sem. We need recheck it once mmap_sem taken again. [1] http://lkml.kernel.org/r/12918dcd-a695-c6f4-e06f-69141c5f357f@suse.cz Link: http://lkml.kernel.org/r/1466021202-61880-6-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1841e0ceac23..1a90f55d930f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2433,6 +2433,9 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, /* vma is no longer available, don't continue to swapin */ if (hugepage_vma_revalidate(mm, address)) return false; + /* check if the pmd is still valid */ + if (mm_find_pmd(mm, address) != pmd) + return false; } if (ret & VM_FAULT_ERROR) { trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0); @@ -2518,6 +2521,9 @@ static void collapse_huge_page(struct mm_struct *mm, result = hugepage_vma_revalidate(mm, address); if (result) goto out; + /* check if the pmd is still valid */ + if (mm_find_pmd(mm, address) != pmd) + goto out; anon_vma_lock_write(vma->anon_vma); -- cgit v1.2.3-70-g09d2 From 6fb8ddfc455ca82a3ce674f54298cd20f27ca518 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:15 -0700 Subject: thp, mlock: update unevictable-lru.txt Add description of THP handling into unevictable-lru.txt. Link: http://lkml.kernel.org/r/1466021202-61880-7-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. 
Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/vm/unevictable-lru.txt | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/Documentation/vm/unevictable-lru.txt b/Documentation/vm/unevictable-lru.txt index fa3b527086fa..0026a8d33fc0 100644 --- a/Documentation/vm/unevictable-lru.txt +++ b/Documentation/vm/unevictable-lru.txt @@ -461,6 +461,27 @@ unevictable LRU is enabled, the work of compaction is mostly handled by the page migration code and the same work flow as described in MIGRATING MLOCKED PAGES will apply. +MLOCKING TRANSPARENT HUGE PAGES +------------------------------- + +A transparent huge page is represented by a single entry on an LRU list. +Therefore, we can only make unevictable an entire compound page, not +individual subpages. + +If a user tries to mlock() part of a huge page, we want the rest of the +page to be reclaimable. + +We cannot just split the page on partial mlock() as split_huge_page() can +fail and new intermittent failure mode for the syscall is undesirable. + +We handle this by keeping PTE-mapped huge pages on normal LRU lists: the +PMD on border of VM_LOCKED VMA will be split into PTE table. + +This way the huge page is accessible for vmscan. Under memory pressure the +page will be split, subpages which belong to VM_LOCKED VMAs will be moved +to unevictable LRU and the rest can be reclaimed. + +See also comment in follow_trans_huge_pmd(). mmap(MAP_LOCKED) SYSTEM CALL HANDLING ------------------------------------- -- cgit v1.2.3-70-g09d2 From dcddffd41d3f1d3bdcc1dce3f1cd142779b6d4c1 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:18 -0700 Subject: mm: do not pass mm_struct into handle_mm_fault We always have vma->vm_mm around. Link: http://lkml.kernel.org/r/1466021202-61880-8-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/mm/fault.c | 2 +- arch/arc/mm/fault.c | 2 +- arch/arm/mm/fault.c | 2 +- arch/arm64/mm/fault.c | 2 +- arch/avr32/mm/fault.c | 2 +- arch/cris/mm/fault.c | 2 +- arch/frv/mm/fault.c | 2 +- arch/hexagon/mm/vm_fault.c | 2 +- arch/ia64/mm/fault.c | 2 +- arch/m32r/mm/fault.c | 2 +- arch/m68k/mm/fault.c | 2 +- arch/metag/mm/fault.c | 2 +- arch/microblaze/mm/fault.c | 2 +- arch/mips/mm/fault.c | 2 +- arch/mn10300/mm/fault.c | 2 +- arch/nios2/mm/fault.c | 2 +- arch/openrisc/mm/fault.c | 2 +- arch/parisc/mm/fault.c | 2 +- arch/powerpc/mm/copro_fault.c | 2 +- arch/powerpc/mm/fault.c | 2 +- arch/s390/mm/fault.c | 2 +- arch/score/mm/fault.c | 2 +- arch/sh/mm/fault.c | 2 +- arch/sparc/mm/fault_32.c | 4 ++-- arch/sparc/mm/fault_64.c | 2 +- arch/tile/mm/fault.c | 2 +- arch/um/kernel/trap.c | 2 +- arch/unicore32/mm/fault.c | 2 +- arch/x86/mm/fault.c | 2 +- arch/xtensa/mm/fault.c | 2 +- drivers/iommu/amd_iommu_v2.c | 3 +-- drivers/iommu/intel-svm.c | 2 +- include/linux/mm.h | 9 ++++----- mm/gup.c | 5 ++--- mm/ksm.c | 5 ++--- mm/memory.c | 13 +++++++------ 36 files changed, 48 insertions(+), 51 deletions(-) diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index 4a905bd667e2..83e9eee57a55 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -147,7 +147,7 @@ retry: /* If for any reason at all we couldn't handle the fault, make sure we exit gracefully rather than endlessly redo the fault. 
*/ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index af63f4a13e60..e94e5aa33985 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -137,7 +137,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */ if (unlikely(fatal_signal_pending(current))) { diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index ad5841856007..3a2e678b8d30 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -243,7 +243,7 @@ good_area: goto out; } - return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags); + return handle_mm_fault(vma, addr & PAGE_MASK, flags); check_stack: /* Don't allow expansion below FIRST_USER_ADDRESS */ diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index b1166d1e5955..031820d989a8 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -233,7 +233,7 @@ good_area: goto out; } - return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags); + return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags); check_stack: if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index c03533937a9f..a4b7edac8f10 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c @@ -134,7 +134,7 @@ good_area: * sure we exit gracefully rather than endlessly redo the * fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index 3066d40a6db1..112ef26c7f2e 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c @@ -168,7 +168,7 @@ retry: * the fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c index 61d99767fe16..614a46c413d2 100644 --- a/arch/frv/mm/fault.c +++ b/arch/frv/mm/fault.c @@ -164,7 +164,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, ear0, flags); + fault = handle_mm_fault(vma, ear0, flags); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c index 8704c9320032..bd7c251e2bce 100644 --- a/arch/hexagon/mm/vm_fault.c +++ b/arch/hexagon/mm/vm_fault.c @@ -101,7 +101,7 @@ good_area: break; } - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 70b40d1205a6..fa6ad95e992e 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -159,7 +159,7 @@ retry: * sure we exit gracefully rather than endlessly redo the * fault. 
*/ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c index 8f9875b7933d..a3785d3644c2 100644 --- a/arch/m32r/mm/fault.c +++ b/arch/m32r/mm/fault.c @@ -196,7 +196,7 @@ good_area: */ addr = (address & PAGE_MASK); set_thread_fault_code(error_code); - fault = handle_mm_fault(mm, vma, addr, flags); + fault = handle_mm_fault(vma, addr, flags); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 6a94cdd0c830..bd66a0b20c6b 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -136,7 +136,7 @@ good_area: * the fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); pr_debug("handle_mm_fault returns %d\n", fault); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c index f57edca63609..372783a67dda 100644 --- a/arch/metag/mm/fault.c +++ b/arch/metag/mm/fault.c @@ -133,7 +133,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return 0; diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index 177dfc003643..abb678ccde6f 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -216,7 +216,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 4b88fa031891..9560ad731120 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -153,7 +153,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c index 4a1d181ed32f..f23781d6bbb3 100644 --- a/arch/mn10300/mm/fault.c +++ b/arch/mn10300/mm/fault.c @@ -254,7 +254,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index b51878b0c6b8..affc4eb3f89e 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -131,7 +131,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index 230ac20ae794..e94cd225e816 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -163,7 +163,7 @@ good_area: * the fault. 
*/ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 16dbe81c97c9..163af2c31d76 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -239,7 +239,7 @@ good_area: * fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index 6527882ce05e..bb0354222b11 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c @@ -75,7 +75,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, } ret = 0; - *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0); + *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0); if (unlikely(*flt & VM_FAULT_ERROR)) { if (*flt & VM_FAULT_OOM) { ret = -ENOMEM; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index a67c6d781c52..a4db22f65021 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -429,7 +429,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { if (fault & VM_FAULT_SIGSEGV) goto bad_area; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 19288c1b36d3..6c47488745ae 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -456,7 +456,7 @@ retry: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); /* No reason to continue if interrupted by SIGKILL. */ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { fault = VM_FAULT_SIGNAL; diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c index 37a6c2e0e969..995b71e4db4b 100644 --- a/arch/score/mm/fault.c +++ b/arch/score/mm/fault.c @@ -111,7 +111,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 79d8276377d1..9bf876780cef 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -487,7 +487,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR))) if (mm_fault_error(regs, error_code, address, fault)) diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index b6c559cbd64d..4714061d6cd3 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -241,7 +241,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. 
*/ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; @@ -411,7 +411,7 @@ good_area: if (!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; } - switch (handle_mm_fault(mm, vma, address, flags)) { + switch (handle_mm_fault(vma, address, flags)) { case VM_FAULT_SIGBUS: case VM_FAULT_OOM: goto do_sigbus; diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index cb841a33da59..6c43b924a7a2 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -436,7 +436,7 @@ good_area: goto bad_area; } - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) goto exit_exception; diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 26734214818c..beba986589e5 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c @@ -434,7 +434,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return 0; diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 98783dd0fa2e..ad8f206ab5e8 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -73,7 +73,7 @@ good_area: do { int fault; - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) goto out_nosemaphore; diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c index 2ec3d3adcefc..6c7f70bcaae3 100644 --- a/arch/unicore32/mm/fault.c +++ b/arch/unicore32/mm/fault.c @@ -194,7 +194,7 @@ good_area: * If for any reason at all we couldn't handle the fault, make * sure we exit gracefully rather than endlessly redo the fault. */ - fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags); + fault = handle_mm_fault(vma, addr & PAGE_MASK, flags); return fault; check_stack: diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index d22161ab941d..dc8023060456 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1353,7 +1353,7 @@ good_area: * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked. */ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); major |= fault & VM_FAULT_MAJOR; /* diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 7f4a1fdb1502..2725e08ef353 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -110,7 +110,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. 
*/ - fault = handle_mm_fault(mm, vma, address, flags); + fault = handle_mm_fault(vma, address, flags); if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) return; diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 56999d2fac07..fbdaf81ae925 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -538,8 +538,7 @@ static void do_fault(struct work_struct *work) if (access_error(vma, fault)) goto out; - ret = handle_mm_fault(mm, vma, address, flags); - + ret = handle_mm_fault(vma, address, flags); out: up_read(&mm->mmap_sem); diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index d9939fa9b588..8ebb3530afa7 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -583,7 +583,7 @@ static irqreturn_t prq_event_thread(int irq, void *d) if (access_error(vma, req)) goto invalid; - ret = handle_mm_fault(svm->mm, vma, address, + ret = handle_mm_fault(vma, address, req->wr_req ? FAULT_FLAG_WRITE : 0); if (ret & VM_FAULT_ERROR) goto invalid; diff --git a/include/linux/mm.h b/include/linux/mm.h index 6c9a394b2979..646bc36b4d1b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1215,15 +1215,14 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page); int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU -extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, unsigned int flags); +extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, + unsigned int flags); extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked); #else -static inline int handle_mm_fault(struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long address, - unsigned int flags) +static inline int handle_mm_fault(struct vm_area_struct *vma, + unsigned long address, unsigned int flags) { /* should never happen if there's no MMU */ BUG(); diff --git a/mm/gup.c b/mm/gup.c index dee142e100f4..9671e29f8ffd 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -352,7 +352,6 @@ unmap: static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long address, unsigned int *flags, int *nonblocking) { - struct mm_struct *mm = vma->vm_mm; unsigned int fault_flags = 0; int ret; @@ -377,7 +376,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, fault_flags |= FAULT_FLAG_TRIED; } - ret = handle_mm_fault(mm, vma, address, fault_flags); + ret = handle_mm_fault(vma, address, fault_flags); if (ret & VM_FAULT_ERROR) { if (ret & VM_FAULT_OOM) return -ENOMEM; @@ -692,7 +691,7 @@ retry: if (!vma_permits_fault(vma, fault_flags)) return -EFAULT; - ret = handle_mm_fault(mm, vma, address, fault_flags); + ret = handle_mm_fault(vma, address, fault_flags); major |= ret & VM_FAULT_MAJOR; if (ret & VM_FAULT_ERROR) { if (ret & VM_FAULT_OOM) diff --git a/mm/ksm.c b/mm/ksm.c index 35b8aef867a9..73d43bafd9fb 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -376,9 +376,8 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr) if (IS_ERR_OR_NULL(page)) break; if (PageKsm(page)) - ret = handle_mm_fault(vma->vm_mm, vma, addr, - FAULT_FLAG_WRITE | - FAULT_FLAG_REMOTE); + ret = handle_mm_fault(vma, addr, + FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE); else ret = VM_FAULT_WRITE; put_page(page); diff --git a/mm/memory.c b/mm/memory.c index 5e6eadd127e7..6bf2b8564376 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3420,9 +3420,10 @@ unlock: * The 
mmap_sem may have been released depending on flags and our * return value. See filemap_fault() and __lock_page_or_retry(). */ -static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, unsigned int flags) +static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, + unsigned int flags) { + struct mm_struct *mm = vma->vm_mm; pgd_t *pgd; pud_t *pud; pmd_t *pmd; @@ -3509,15 +3510,15 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, * The mmap_sem may have been released depending on flags and our * return value. See filemap_fault() and __lock_page_or_retry(). */ -int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, unsigned int flags) +int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, + unsigned int flags) { int ret; __set_current_state(TASK_RUNNING); count_vm_event(PGFAULT); - mem_cgroup_count_vm_event(mm, PGFAULT); + mem_cgroup_count_vm_event(vma->vm_mm, PGFAULT); /* do counter updates before entering really critical section. */ check_sync_rss_stat(current); @@ -3529,7 +3530,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (flags & FAULT_FLAG_USER) mem_cgroup_oom_enable(); - ret = __handle_mm_fault(mm, vma, address, flags); + ret = __handle_mm_fault(vma, address, flags); if (flags & FAULT_FLAG_USER) { mem_cgroup_oom_disable(); -- cgit v1.2.3-70-g09d2 From bae473a423f65e480db83c85b5e92254f6dfcb28 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:20 -0700 Subject: mm: introduce fault_env The idea borrowed from Peter's patch from patchset on speculative page faults[1]: Instead of passing around the endless list of function arguments, replace the lot with a single structure so we can change context without endless function signature changes. The changes are mostly mechanical with exception of faultaround code: filemap_map_pages() got reworked a bit. This patch is preparation for the next one. [1] http://lkml.kernel.org/r/20141020222841.302891540@infradead.org Link: http://lkml.kernel.org/r/1466021202-61880-9-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Acked-by: Peter Zijlstra (Intel) Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/Locking | 10 +- fs/userfaultfd.c | 22 +- include/linux/huge_mm.h | 20 +- include/linux/mm.h | 34 ++- include/linux/userfaultfd_k.h | 8 +- mm/filemap.c | 28 +- mm/huge_memory.c | 280 +++++++++--------- mm/internal.h | 4 +- mm/memory.c | 582 ++++++++++++++++++-------------------- mm/nommu.c | 3 +- 10 files changed, 475 insertions(+), 516 deletions(-) diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index dda6e3f8e203..5a7386e38e2d 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -548,13 +548,13 @@ subsequent truncate), and then return with VM_FAULT_LOCKED, and the page locked. The VM will unlock the page. ->map_pages() is called when VM asks to map easy accessible pages. -Filesystem should find and map pages associated with offsets from "pgoff" -till "max_pgoff". ->map_pages() is called with page table locked and must +Filesystem should find and map pages associated with offsets from "start_pgoff" +till "end_pgoff". ->map_pages() is called with page table locked and must not block. If it's not possible to reach a page without blocking, filesystem should skip it. 
Filesystem should use do_set_pte() to setup -page table entry. Pointer to entry associated with offset "pgoff" is -passed in "pte" field in vm_fault structure. Pointers to entries for other -offsets should be calculated relative to "pte". +page table entry. Pointer to entry associated with the page is passed in +"pte" field in fault_env structure. Pointers to entries for other offsets +should be calculated relative to "pte". ->page_mkwrite() is called when a previously read-only pte is about to become writeable. The filesystem again must ensure that there are diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 2d97952e341a..85959d8324df 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -257,10 +257,9 @@ out: * fatal_signal_pending()s, and the mmap_sem must be released before * returning it. */ -int handle_userfault(struct vm_area_struct *vma, unsigned long address, - unsigned int flags, unsigned long reason) +int handle_userfault(struct fault_env *fe, unsigned long reason) { - struct mm_struct *mm = vma->vm_mm; + struct mm_struct *mm = fe->vma->vm_mm; struct userfaultfd_ctx *ctx; struct userfaultfd_wait_queue uwq; int ret; @@ -269,7 +268,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); ret = VM_FAULT_SIGBUS; - ctx = vma->vm_userfaultfd_ctx.ctx; + ctx = fe->vma->vm_userfaultfd_ctx.ctx; if (!ctx) goto out; @@ -302,17 +301,17 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, * without first stopping userland access to the memory. For * VM_UFFD_MISSING userfaults this is enough for now. */ - if (unlikely(!(flags & FAULT_FLAG_ALLOW_RETRY))) { + if (unlikely(!(fe->flags & FAULT_FLAG_ALLOW_RETRY))) { /* * Validate the invariant that nowait must allow retry * to be sure not to return SIGBUS erroneously on * nowait invocations. */ - BUG_ON(flags & FAULT_FLAG_RETRY_NOWAIT); + BUG_ON(fe->flags & FAULT_FLAG_RETRY_NOWAIT); #ifdef CONFIG_DEBUG_VM if (printk_ratelimit()) { printk(KERN_WARNING - "FAULT_FLAG_ALLOW_RETRY missing %x\n", flags); + "FAULT_FLAG_ALLOW_RETRY missing %x\n", fe->flags); dump_stack(); } #endif @@ -324,7 +323,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, * and wait. 
*/ ret = VM_FAULT_RETRY; - if (flags & FAULT_FLAG_RETRY_NOWAIT) + if (fe->flags & FAULT_FLAG_RETRY_NOWAIT) goto out; /* take the reference before dropping the mmap_sem */ @@ -332,10 +331,11 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function); uwq.wq.private = current; - uwq.msg = userfault_msg(address, flags, reason); + uwq.msg = userfault_msg(fe->address, fe->flags, reason); uwq.ctx = ctx; - return_to_userland = (flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == + return_to_userland = + (fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE); spin_lock(&ctx->fault_pending_wqh.lock); @@ -353,7 +353,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address, TASK_KILLABLE); spin_unlock(&ctx->fault_pending_wqh.lock); - must_wait = userfaultfd_must_wait(ctx, address, flags, reason); + must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason); up_read(&mm->mmap_sem); if (likely(must_wait && !ACCESS_ONCE(ctx->released) && diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index f0a7a0320300..9bed9249156f 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -1,20 +1,12 @@ #ifndef _LINUX_HUGE_MM_H #define _LINUX_HUGE_MM_H -extern int do_huge_pmd_anonymous_page(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, - unsigned int flags); +extern int do_huge_pmd_anonymous_page(struct fault_env *fe); extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma); -extern void huge_pmd_set_accessed(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, - pmd_t orig_pmd, int dirty); -extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, - pmd_t orig_pmd); +extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd); +extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd); extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, @@ -134,8 +126,7 @@ static inline int hpage_nr_pages(struct page *page) return 1; } -extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr, pmd_t pmd, pmd_t *pmdp); +extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd); extern struct page *huge_zero_page; @@ -196,8 +187,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, return NULL; } -static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr, pmd_t pmd, pmd_t *pmdp) +static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd) { return 0; } diff --git a/include/linux/mm.h b/include/linux/mm.h index 646bc36b4d1b..8bd74558c0e4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -309,10 +309,27 @@ struct vm_fault { * VM_FAULT_DAX_LOCKED and fill in * entry here. */ - /* for ->map_pages() only */ - pgoff_t max_pgoff; /* map pages for offset from pgoff till - * max_pgoff inclusive */ - pte_t *pte; /* pte entry associated with ->pgoff */ +}; + +/* + * Page fault context: passes though page fault handler instead of endless list + * of function arguments. 
+ */ +struct fault_env { + struct vm_area_struct *vma; /* Target VMA */ + unsigned long address; /* Faulting virtual address */ + unsigned int flags; /* FAULT_FLAG_xxx flags */ + pmd_t *pmd; /* Pointer to pmd entry matching + * the 'address' + */ + pte_t *pte; /* Pointer to pte entry matching + * the 'address'. NULL if the page + * table hasn't been allocated. + */ + spinlock_t *ptl; /* Page table lock. + * Protects pte page table if 'pte' + * is not NULL, otherwise pmd. + */ }; /* @@ -327,7 +344,8 @@ struct vm_operations_struct { int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); int (*pmd_fault)(struct vm_area_struct *, unsigned long address, pmd_t *, unsigned int flags); - void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf); + void (*map_pages)(struct fault_env *fe, + pgoff_t start_pgoff, pgoff_t end_pgoff); /* notification that a previously read-only page is about to become * writable, if an error is returned it will cause a SIGBUS */ @@ -600,8 +618,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) return pte; } -void do_set_pte(struct vm_area_struct *vma, unsigned long address, - struct page *page, pte_t *pte, bool write, bool anon); +void do_set_pte(struct fault_env *fe, struct page *page); #endif /* @@ -2062,7 +2079,8 @@ extern void truncate_inode_pages_final(struct address_space *); /* generic vm_area_ops exported for stackable file systems */ extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); -extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf); +extern void filemap_map_pages(struct fault_env *fe, + pgoff_t start_pgoff, pgoff_t end_pgoff); extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); /* mm/page-writeback.c */ diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 587480ad41b7..dd66a952e8cd 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -27,8 +27,7 @@ #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) #define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS) -extern int handle_userfault(struct vm_area_struct *vma, unsigned long address, - unsigned int flags, unsigned long reason); +extern int handle_userfault(struct fault_env *fe, unsigned long reason); extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len); @@ -56,10 +55,7 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) #else /* CONFIG_USERFAULTFD */ /* mm helpers */ -static inline int handle_userfault(struct vm_area_struct *vma, - unsigned long address, - unsigned int flags, - unsigned long reason) +static inline int handle_userfault(struct fault_env *fe, unsigned long reason) { return VM_FAULT_SIGBUS; } diff --git a/mm/filemap.c b/mm/filemap.c index 20f3b1f33f0e..54d5318f8d3f 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2128,22 +2128,27 @@ page_not_uptodate: } EXPORT_SYMBOL(filemap_fault); -void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf) +void filemap_map_pages(struct fault_env *fe, + pgoff_t start_pgoff, pgoff_t end_pgoff) { struct radix_tree_iter iter; void **slot; - struct file *file = vma->vm_file; + struct file *file = fe->vma->vm_file; struct address_space *mapping = file->f_mapping; + pgoff_t last_pgoff = start_pgoff; loff_t size; struct page *page; - unsigned long address = (unsigned long) vmf->virtual_address; - unsigned long addr; - pte_t *pte; rcu_read_lock(); - radix_tree_for_each_slot(slot, 
&mapping->page_tree, &iter, vmf->pgoff) { - if (iter.index > vmf->max_pgoff) + radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, + start_pgoff) { + if (iter.index > end_pgoff) break; + fe->pte += iter.index - last_pgoff; + fe->address += (iter.index - last_pgoff) << PAGE_SHIFT; + last_pgoff = iter.index; + if (!pte_none(*fe->pte)) + goto next; repeat: page = radix_tree_deref_slot(slot); if (unlikely(!page)) @@ -2179,14 +2184,9 @@ repeat: if (page->index >= size >> PAGE_SHIFT) goto unlock; - pte = vmf->pte + page->index - vmf->pgoff; - if (!pte_none(*pte)) - goto unlock; - if (file->f_ra.mmap_miss > 0) file->f_ra.mmap_miss--; - addr = address + (page->index - vmf->pgoff) * PAGE_SIZE; - do_set_pte(vma, addr, page, pte, false, false); + do_set_pte(fe, page); unlock_page(page); goto next; unlock: @@ -2194,7 +2194,7 @@ unlock: skip: put_page(page); next: - if (iter.index == vmf->max_pgoff) + if (iter.index == end_pgoff) break; } rcu_read_unlock(); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 1a90f55d930f..bc5abcbe376e 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -821,26 +821,23 @@ void prep_transhuge_page(struct page *page) set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR); } -static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, - struct page *page, gfp_t gfp, - unsigned int flags) +static int __do_huge_pmd_anonymous_page(struct fault_env *fe, struct page *page, + gfp_t gfp) { + struct vm_area_struct *vma = fe->vma; struct mem_cgroup *memcg; pgtable_t pgtable; - spinlock_t *ptl; - unsigned long haddr = address & HPAGE_PMD_MASK; + unsigned long haddr = fe->address & HPAGE_PMD_MASK; VM_BUG_ON_PAGE(!PageCompound(page), page); - if (mem_cgroup_try_charge(page, mm, gfp, &memcg, true)) { + if (mem_cgroup_try_charge(page, vma->vm_mm, gfp, &memcg, true)) { put_page(page); count_vm_event(THP_FAULT_FALLBACK); return VM_FAULT_FALLBACK; } - pgtable = pte_alloc_one(mm, haddr); + pgtable = pte_alloc_one(vma->vm_mm, haddr); if (unlikely(!pgtable)) { mem_cgroup_cancel_charge(page, memcg, true); put_page(page); @@ -855,12 +852,12 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, */ __SetPageUptodate(page); - ptl = pmd_lock(mm, pmd); - if (unlikely(!pmd_none(*pmd))) { - spin_unlock(ptl); + fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); + if (unlikely(!pmd_none(*fe->pmd))) { + spin_unlock(fe->ptl); mem_cgroup_cancel_charge(page, memcg, true); put_page(page); - pte_free(mm, pgtable); + pte_free(vma->vm_mm, pgtable); } else { pmd_t entry; @@ -868,12 +865,11 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, if (userfaultfd_missing(vma)) { int ret; - spin_unlock(ptl); + spin_unlock(fe->ptl); mem_cgroup_cancel_charge(page, memcg, true); put_page(page); - pte_free(mm, pgtable); - ret = handle_userfault(vma, address, flags, - VM_UFFD_MISSING); + pte_free(vma->vm_mm, pgtable); + ret = handle_userfault(fe, VM_UFFD_MISSING); VM_BUG_ON(ret & VM_FAULT_FALLBACK); return ret; } @@ -883,11 +879,11 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm, page_add_new_anon_rmap(page, vma, haddr, true); mem_cgroup_commit_charge(page, memcg, false, true); lru_cache_add_active_or_unevictable(page, vma); - pgtable_trans_huge_deposit(mm, pmd, pgtable); - set_pmd_at(mm, haddr, pmd, entry); - add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); - atomic_long_inc(&mm->nr_ptes); - spin_unlock(ptl); + pgtable_trans_huge_deposit(vma->vm_mm, fe->pmd, pgtable); + set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry); + 
add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); + atomic_long_inc(&vma->vm_mm->nr_ptes); + spin_unlock(fe->ptl); count_vm_event(THP_FAULT_ALLOC); } @@ -937,13 +933,12 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, return true; } -int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, - unsigned int flags) +int do_huge_pmd_anonymous_page(struct fault_env *fe) { + struct vm_area_struct *vma = fe->vma; gfp_t gfp; struct page *page; - unsigned long haddr = address & HPAGE_PMD_MASK; + unsigned long haddr = fe->address & HPAGE_PMD_MASK; if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) return VM_FAULT_FALLBACK; @@ -951,42 +946,40 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, return VM_FAULT_OOM; if (unlikely(khugepaged_enter(vma, vma->vm_flags))) return VM_FAULT_OOM; - if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) && + if (!(fe->flags & FAULT_FLAG_WRITE) && + !mm_forbids_zeropage(vma->vm_mm) && transparent_hugepage_use_zero_page()) { - spinlock_t *ptl; pgtable_t pgtable; struct page *zero_page; bool set; int ret; - pgtable = pte_alloc_one(mm, haddr); + pgtable = pte_alloc_one(vma->vm_mm, haddr); if (unlikely(!pgtable)) return VM_FAULT_OOM; zero_page = get_huge_zero_page(); if (unlikely(!zero_page)) { - pte_free(mm, pgtable); + pte_free(vma->vm_mm, pgtable); count_vm_event(THP_FAULT_FALLBACK); return VM_FAULT_FALLBACK; } - ptl = pmd_lock(mm, pmd); + fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); ret = 0; set = false; - if (pmd_none(*pmd)) { + if (pmd_none(*fe->pmd)) { if (userfaultfd_missing(vma)) { - spin_unlock(ptl); - ret = handle_userfault(vma, address, flags, - VM_UFFD_MISSING); + spin_unlock(fe->ptl); + ret = handle_userfault(fe, VM_UFFD_MISSING); VM_BUG_ON(ret & VM_FAULT_FALLBACK); } else { - set_huge_zero_page(pgtable, mm, vma, - haddr, pmd, - zero_page); - spin_unlock(ptl); + set_huge_zero_page(pgtable, vma->vm_mm, vma, + haddr, fe->pmd, zero_page); + spin_unlock(fe->ptl); set = true; } } else - spin_unlock(ptl); + spin_unlock(fe->ptl); if (!set) { - pte_free(mm, pgtable); + pte_free(vma->vm_mm, pgtable); put_huge_zero_page(); } return ret; @@ -998,8 +991,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, return VM_FAULT_FALLBACK; } prep_transhuge_page(page); - return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp, - flags); + return __do_huge_pmd_anonymous_page(fe, page, gfp); } static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, @@ -1172,38 +1164,31 @@ out: return ret; } -void huge_pmd_set_accessed(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long address, - pmd_t *pmd, pmd_t orig_pmd, - int dirty) +void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd) { - spinlock_t *ptl; pmd_t entry; unsigned long haddr; - ptl = pmd_lock(mm, pmd); - if (unlikely(!pmd_same(*pmd, orig_pmd))) + fe->ptl = pmd_lock(fe->vma->vm_mm, fe->pmd); + if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) goto unlock; entry = pmd_mkyoung(orig_pmd); - haddr = address & HPAGE_PMD_MASK; - if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty)) - update_mmu_cache_pmd(vma, address, pmd); + haddr = fe->address & HPAGE_PMD_MASK; + if (pmdp_set_access_flags(fe->vma, haddr, fe->pmd, entry, + fe->flags & FAULT_FLAG_WRITE)) + update_mmu_cache_pmd(fe->vma, fe->address, fe->pmd); unlock: - spin_unlock(ptl); + spin_unlock(fe->ptl); } -static int 
do_huge_pmd_wp_page_fallback(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long address, - pmd_t *pmd, pmd_t orig_pmd, - struct page *page, - unsigned long haddr) +static int do_huge_pmd_wp_page_fallback(struct fault_env *fe, pmd_t orig_pmd, + struct page *page) { + struct vm_area_struct *vma = fe->vma; + unsigned long haddr = fe->address & HPAGE_PMD_MASK; struct mem_cgroup *memcg; - spinlock_t *ptl; pgtable_t pgtable; pmd_t _pmd; int ret = 0, i; @@ -1220,11 +1205,11 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, for (i = 0; i < HPAGE_PMD_NR; i++) { pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | - __GFP_OTHER_NODE, - vma, address, page_to_nid(page)); + __GFP_OTHER_NODE, vma, + fe->address, page_to_nid(page)); if (unlikely(!pages[i] || - mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL, - &memcg, false))) { + mem_cgroup_try_charge(pages[i], vma->vm_mm, + GFP_KERNEL, &memcg, false))) { if (pages[i]) put_page(pages[i]); while (--i >= 0) { @@ -1250,41 +1235,41 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, mmun_start = haddr; mmun_end = haddr + HPAGE_PMD_SIZE; - mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); + mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); - ptl = pmd_lock(mm, pmd); - if (unlikely(!pmd_same(*pmd, orig_pmd))) + fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); + if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) goto out_free_pages; VM_BUG_ON_PAGE(!PageHead(page), page); - pmdp_huge_clear_flush_notify(vma, haddr, pmd); + pmdp_huge_clear_flush_notify(vma, haddr, fe->pmd); /* leave pmd empty until pte is filled */ - pgtable = pgtable_trans_huge_withdraw(mm, pmd); - pmd_populate(mm, &_pmd, pgtable); + pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, fe->pmd); + pmd_populate(vma->vm_mm, &_pmd, pgtable); for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { - pte_t *pte, entry; + pte_t entry; entry = mk_pte(pages[i], vma->vm_page_prot); entry = maybe_mkwrite(pte_mkdirty(entry), vma); memcg = (void *)page_private(pages[i]); set_page_private(pages[i], 0); - page_add_new_anon_rmap(pages[i], vma, haddr, false); + page_add_new_anon_rmap(pages[i], fe->vma, haddr, false); mem_cgroup_commit_charge(pages[i], memcg, false, false); lru_cache_add_active_or_unevictable(pages[i], vma); - pte = pte_offset_map(&_pmd, haddr); - VM_BUG_ON(!pte_none(*pte)); - set_pte_at(mm, haddr, pte, entry); - pte_unmap(pte); + fe->pte = pte_offset_map(&_pmd, haddr); + VM_BUG_ON(!pte_none(*fe->pte)); + set_pte_at(vma->vm_mm, haddr, fe->pte, entry); + pte_unmap(fe->pte); } kfree(pages); smp_wmb(); /* make pte visible before pmd */ - pmd_populate(mm, pmd, pgtable); + pmd_populate(vma->vm_mm, fe->pmd, pgtable); page_remove_rmap(page, true); - spin_unlock(ptl); + spin_unlock(fe->ptl); - mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); + mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); ret |= VM_FAULT_WRITE; put_page(page); @@ -1293,8 +1278,8 @@ out: return ret; out_free_pages: - spin_unlock(ptl); - mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); + spin_unlock(fe->ptl); + mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); for (i = 0; i < HPAGE_PMD_NR; i++) { memcg = (void *)page_private(pages[i]); set_page_private(pages[i], 0); @@ -1305,25 +1290,23 @@ out_free_pages: goto out; } -int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, pmd_t orig_pmd) +int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd) { - 
spinlock_t *ptl; - int ret = 0; + struct vm_area_struct *vma = fe->vma; struct page *page = NULL, *new_page; struct mem_cgroup *memcg; - unsigned long haddr; + unsigned long haddr = fe->address & HPAGE_PMD_MASK; unsigned long mmun_start; /* For mmu_notifiers */ unsigned long mmun_end; /* For mmu_notifiers */ gfp_t huge_gfp; /* for allocation and charge */ + int ret = 0; - ptl = pmd_lockptr(mm, pmd); + fe->ptl = pmd_lockptr(vma->vm_mm, fe->pmd); VM_BUG_ON_VMA(!vma->anon_vma, vma); - haddr = address & HPAGE_PMD_MASK; if (is_huge_zero_pmd(orig_pmd)) goto alloc; - spin_lock(ptl); - if (unlikely(!pmd_same(*pmd, orig_pmd))) + spin_lock(fe->ptl); + if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) goto out_unlock; page = pmd_page(orig_pmd); @@ -1336,13 +1319,13 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t entry; entry = pmd_mkyoung(orig_pmd); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); - if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) - update_mmu_cache_pmd(vma, address, pmd); + if (pmdp_set_access_flags(vma, haddr, fe->pmd, entry, 1)) + update_mmu_cache_pmd(vma, fe->address, fe->pmd); ret |= VM_FAULT_WRITE; goto out_unlock; } get_page(page); - spin_unlock(ptl); + spin_unlock(fe->ptl); alloc: if (transparent_hugepage_enabled(vma) && !transparent_hugepage_debug_cow()) { @@ -1355,13 +1338,12 @@ alloc: prep_transhuge_page(new_page); } else { if (!page) { - split_huge_pmd(vma, pmd, address); + split_huge_pmd(vma, fe->pmd, fe->address); ret |= VM_FAULT_FALLBACK; } else { - ret = do_huge_pmd_wp_page_fallback(mm, vma, address, - pmd, orig_pmd, page, haddr); + ret = do_huge_pmd_wp_page_fallback(fe, orig_pmd, page); if (ret & VM_FAULT_OOM) { - split_huge_pmd(vma, pmd, address); + split_huge_pmd(vma, fe->pmd, fe->address); ret |= VM_FAULT_FALLBACK; } put_page(page); @@ -1370,14 +1352,12 @@ alloc: goto out; } - if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg, - true))) { + if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm, + huge_gfp, &memcg, true))) { put_page(new_page); - if (page) { - split_huge_pmd(vma, pmd, address); + split_huge_pmd(vma, fe->pmd, fe->address); + if (page) put_page(page); - } else - split_huge_pmd(vma, pmd, address); ret |= VM_FAULT_FALLBACK; count_vm_event(THP_FAULT_FALLBACK); goto out; @@ -1393,13 +1373,13 @@ alloc: mmun_start = haddr; mmun_end = haddr + HPAGE_PMD_SIZE; - mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); + mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); - spin_lock(ptl); + spin_lock(fe->ptl); if (page) put_page(page); - if (unlikely(!pmd_same(*pmd, orig_pmd))) { - spin_unlock(ptl); + if (unlikely(!pmd_same(*fe->pmd, orig_pmd))) { + spin_unlock(fe->ptl); mem_cgroup_cancel_charge(new_page, memcg, true); put_page(new_page); goto out_mn; @@ -1407,14 +1387,14 @@ alloc: pmd_t entry; entry = mk_huge_pmd(new_page, vma->vm_page_prot); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); - pmdp_huge_clear_flush_notify(vma, haddr, pmd); + pmdp_huge_clear_flush_notify(vma, haddr, fe->pmd); page_add_new_anon_rmap(new_page, vma, haddr, true); mem_cgroup_commit_charge(new_page, memcg, false, true); lru_cache_add_active_or_unevictable(new_page, vma); - set_pmd_at(mm, haddr, pmd, entry); - update_mmu_cache_pmd(vma, address, pmd); + set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry); + update_mmu_cache_pmd(vma, fe->address, fe->pmd); if (!page) { - add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); + add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); put_huge_zero_page(); } else { 
VM_BUG_ON_PAGE(!PageHead(page), page); @@ -1423,13 +1403,13 @@ alloc: } ret |= VM_FAULT_WRITE; } - spin_unlock(ptl); + spin_unlock(fe->ptl); out_mn: - mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); + mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); out: return ret; out_unlock: - spin_unlock(ptl); + spin_unlock(fe->ptl); return ret; } @@ -1489,13 +1469,12 @@ out: } /* NUMA hinting page fault entry point for trans huge pmds */ -int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr, pmd_t pmd, pmd_t *pmdp) +int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) { - spinlock_t *ptl; + struct vm_area_struct *vma = fe->vma; struct anon_vma *anon_vma = NULL; struct page *page; - unsigned long haddr = addr & HPAGE_PMD_MASK; + unsigned long haddr = fe->address & HPAGE_PMD_MASK; int page_nid = -1, this_nid = numa_node_id(); int target_nid, last_cpupid = -1; bool page_locked; @@ -1506,8 +1485,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, /* A PROT_NONE fault should not end up here */ BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); - ptl = pmd_lock(mm, pmdp); - if (unlikely(!pmd_same(pmd, *pmdp))) + fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); + if (unlikely(!pmd_same(pmd, *fe->pmd))) goto out_unlock; /* @@ -1515,9 +1494,9 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, * without disrupting NUMA hinting information. Do not relock and * check_same as the page may no longer be mapped. */ - if (unlikely(pmd_trans_migrating(*pmdp))) { - page = pmd_page(*pmdp); - spin_unlock(ptl); + if (unlikely(pmd_trans_migrating(*fe->pmd))) { + page = pmd_page(*fe->pmd); + spin_unlock(fe->ptl); wait_on_page_locked(page); goto out; } @@ -1550,7 +1529,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, /* Migration could have started since the pmd_trans_migrating check */ if (!page_locked) { - spin_unlock(ptl); + spin_unlock(fe->ptl); wait_on_page_locked(page); page_nid = -1; goto out; @@ -1561,12 +1540,12 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, * to serialises splits */ get_page(page); - spin_unlock(ptl); + spin_unlock(fe->ptl); anon_vma = page_lock_anon_vma_read(page); /* Confirm the PMD did not change while page_table_lock was released */ - spin_lock(ptl); - if (unlikely(!pmd_same(pmd, *pmdp))) { + spin_lock(fe->ptl); + if (unlikely(!pmd_same(pmd, *fe->pmd))) { unlock_page(page); put_page(page); page_nid = -1; @@ -1584,9 +1563,9 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, * Migrate the THP to the requested node, returns with page unlocked * and access rights restored. 
*/ - spin_unlock(ptl); - migrated = migrate_misplaced_transhuge_page(mm, vma, - pmdp, pmd, addr, page, target_nid); + spin_unlock(fe->ptl); + migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, + fe->pmd, pmd, fe->address, page, target_nid); if (migrated) { flags |= TNF_MIGRATED; page_nid = target_nid; @@ -1601,18 +1580,18 @@ clear_pmdnuma: pmd = pmd_mkyoung(pmd); if (was_writable) pmd = pmd_mkwrite(pmd); - set_pmd_at(mm, haddr, pmdp, pmd); - update_mmu_cache_pmd(vma, addr, pmdp); + set_pmd_at(vma->vm_mm, haddr, fe->pmd, pmd); + update_mmu_cache_pmd(vma, fe->address, fe->pmd); unlock_page(page); out_unlock: - spin_unlock(ptl); + spin_unlock(fe->ptl); out: if (anon_vma) page_unlock_anon_vma_read(anon_vma); if (page_nid != -1) - task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags); + task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, fe->flags); return 0; } @@ -2413,20 +2392,23 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd) { - unsigned long _address; - pte_t *pte, pteval; + pte_t pteval; int swapped_in = 0, ret = 0; - - pte = pte_offset_map(pmd, address); - for (_address = address; _address < address + HPAGE_PMD_NR*PAGE_SIZE; - pte++, _address += PAGE_SIZE) { - pteval = *pte; + struct fault_env fe = { + .vma = vma, + .address = address, + .flags = FAULT_FLAG_ALLOW_RETRY, + .pmd = pmd, + }; + + fe.pte = pte_offset_map(pmd, address); + for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE; + fe.pte++, fe.address += PAGE_SIZE) { + pteval = *fe.pte; if (!is_swap_pte(pteval)) continue; swapped_in++; - ret = do_swap_page(mm, vma, _address, pte, pmd, - FAULT_FLAG_ALLOW_RETRY, - pteval); + ret = do_swap_page(&fe, pteval); /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ if (ret & VM_FAULT_RETRY) { down_read(&mm->mmap_sem); @@ -2442,10 +2424,10 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, return false; } /* pte is unmapped now, we need to map it */ - pte = pte_offset_map(pmd, _address); + fe.pte = pte_offset_map(pmd, fe.address); } - pte--; - pte_unmap(pte); + fe.pte--; + pte_unmap(fe.pte); trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1); return true; } diff --git a/mm/internal.h b/mm/internal.h index e1531758122b..9b6a6c43ac39 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -36,9 +36,7 @@ /* Do not use these with a slab allocator */ #define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) -extern int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pte_t *page_table, pmd_t *pmd, - unsigned int flags, pte_t orig_pte); +int do_swap_page(struct fault_env *fe, pte_t orig_pte); void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long floor, unsigned long ceiling); diff --git a/mm/memory.c b/mm/memory.c index 6bf2b8564376..72b520897339 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2070,13 +2070,11 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, * case, all we need to do here is to mark the page as writable and update * any related book-keeping. 
*/ -static inline int wp_page_reuse(struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long address, - pte_t *page_table, spinlock_t *ptl, pte_t orig_pte, - struct page *page, int page_mkwrite, - int dirty_shared) - __releases(ptl) +static inline int wp_page_reuse(struct fault_env *fe, pte_t orig_pte, + struct page *page, int page_mkwrite, int dirty_shared) + __releases(fe->ptl) { + struct vm_area_struct *vma = fe->vma; pte_t entry; /* * Clear the pages cpupid information as the existing @@ -2086,12 +2084,12 @@ static inline int wp_page_reuse(struct mm_struct *mm, if (page) page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1); - flush_cache_page(vma, address, pte_pfn(orig_pte)); + flush_cache_page(vma, fe->address, pte_pfn(orig_pte)); entry = pte_mkyoung(orig_pte); entry = maybe_mkwrite(pte_mkdirty(entry), vma); - if (ptep_set_access_flags(vma, address, page_table, entry, 1)) - update_mmu_cache(vma, address, page_table); - pte_unmap_unlock(page_table, ptl); + if (ptep_set_access_flags(vma, fe->address, fe->pte, entry, 1)) + update_mmu_cache(vma, fe->address, fe->pte); + pte_unmap_unlock(fe->pte, fe->ptl); if (dirty_shared) { struct address_space *mapping; @@ -2137,30 +2135,31 @@ static inline int wp_page_reuse(struct mm_struct *mm, * held to the old page, as well as updating the rmap. * - In any case, unlock the PTL and drop the reference we took to the old page. */ -static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pte_t *page_table, pmd_t *pmd, - pte_t orig_pte, struct page *old_page) +static int wp_page_copy(struct fault_env *fe, pte_t orig_pte, + struct page *old_page) { + struct vm_area_struct *vma = fe->vma; + struct mm_struct *mm = vma->vm_mm; struct page *new_page = NULL; - spinlock_t *ptl = NULL; pte_t entry; int page_copied = 0; - const unsigned long mmun_start = address & PAGE_MASK; /* For mmu_notifiers */ - const unsigned long mmun_end = mmun_start + PAGE_SIZE; /* For mmu_notifiers */ + const unsigned long mmun_start = fe->address & PAGE_MASK; + const unsigned long mmun_end = mmun_start + PAGE_SIZE; struct mem_cgroup *memcg; if (unlikely(anon_vma_prepare(vma))) goto oom; if (is_zero_pfn(pte_pfn(orig_pte))) { - new_page = alloc_zeroed_user_highpage_movable(vma, address); + new_page = alloc_zeroed_user_highpage_movable(vma, fe->address); if (!new_page) goto oom; } else { - new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); + new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, + fe->address); if (!new_page) goto oom; - cow_user_page(new_page, old_page, address, vma); + cow_user_page(new_page, old_page, fe->address, vma); } if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) @@ -2173,8 +2172,8 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, /* * Re-check the pte - we dropped the lock */ - page_table = pte_offset_map_lock(mm, pmd, address, &ptl); - if (likely(pte_same(*page_table, orig_pte))) { + fe->pte = pte_offset_map_lock(mm, fe->pmd, fe->address, &fe->ptl); + if (likely(pte_same(*fe->pte, orig_pte))) { if (old_page) { if (!PageAnon(old_page)) { dec_mm_counter_fast(mm, @@ -2184,7 +2183,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, } else { inc_mm_counter_fast(mm, MM_ANONPAGES); } - flush_cache_page(vma, address, pte_pfn(orig_pte)); + flush_cache_page(vma, fe->address, pte_pfn(orig_pte)); entry = mk_pte(new_page, vma->vm_page_prot); entry = maybe_mkwrite(pte_mkdirty(entry), vma); /* @@ -2193,8 +2192,8 @@ static int 
wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, * seen in the presence of one thread doing SMC and another * thread doing COW. */ - ptep_clear_flush_notify(vma, address, page_table); - page_add_new_anon_rmap(new_page, vma, address, false); + ptep_clear_flush_notify(vma, fe->address, fe->pte); + page_add_new_anon_rmap(new_page, vma, fe->address, false); mem_cgroup_commit_charge(new_page, memcg, false, false); lru_cache_add_active_or_unevictable(new_page, vma); /* @@ -2202,8 +2201,8 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, * mmu page tables (such as kvm shadow page tables), we want the * new page to be mapped directly into the secondary page table. */ - set_pte_at_notify(mm, address, page_table, entry); - update_mmu_cache(vma, address, page_table); + set_pte_at_notify(mm, fe->address, fe->pte, entry); + update_mmu_cache(vma, fe->address, fe->pte); if (old_page) { /* * Only after switching the pte to the new page may @@ -2240,7 +2239,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, if (new_page) put_page(new_page); - pte_unmap_unlock(page_table, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); if (old_page) { /* @@ -2268,44 +2267,43 @@ oom: * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED * mapping */ -static int wp_pfn_shared(struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long address, - pte_t *page_table, spinlock_t *ptl, pte_t orig_pte, - pmd_t *pmd) +static int wp_pfn_shared(struct fault_env *fe, pte_t orig_pte) { + struct vm_area_struct *vma = fe->vma; + if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { struct vm_fault vmf = { .page = NULL, - .pgoff = linear_page_index(vma, address), - .virtual_address = (void __user *)(address & PAGE_MASK), + .pgoff = linear_page_index(vma, fe->address), + .virtual_address = + (void __user *)(fe->address & PAGE_MASK), .flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE, }; int ret; - pte_unmap_unlock(page_table, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); ret = vma->vm_ops->pfn_mkwrite(vma, &vmf); if (ret & VM_FAULT_ERROR) return ret; - page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, + &fe->ptl); /* * We might have raced with another page fault while we * released the pte_offset_map_lock. 
*/ - if (!pte_same(*page_table, orig_pte)) { - pte_unmap_unlock(page_table, ptl); + if (!pte_same(*fe->pte, orig_pte)) { + pte_unmap_unlock(fe->pte, fe->ptl); return 0; } } - return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte, - NULL, 0, 0); + return wp_page_reuse(fe, orig_pte, NULL, 0, 0); } -static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pte_t *page_table, - pmd_t *pmd, spinlock_t *ptl, pte_t orig_pte, - struct page *old_page) - __releases(ptl) +static int wp_page_shared(struct fault_env *fe, pte_t orig_pte, + struct page *old_page) + __releases(fe->ptl) { + struct vm_area_struct *vma = fe->vma; int page_mkwrite = 0; get_page(old_page); @@ -2313,8 +2311,8 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, if (vma->vm_ops && vma->vm_ops->page_mkwrite) { int tmp; - pte_unmap_unlock(page_table, ptl); - tmp = do_page_mkwrite(vma, old_page, address); + pte_unmap_unlock(fe->pte, fe->ptl); + tmp = do_page_mkwrite(vma, old_page, fe->address); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { put_page(old_page); @@ -2326,19 +2324,18 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, * they did, we just return, as we can count on the * MMU to tell us if they didn't also make it writable. */ - page_table = pte_offset_map_lock(mm, pmd, address, - &ptl); - if (!pte_same(*page_table, orig_pte)) { + fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, + &fe->ptl); + if (!pte_same(*fe->pte, orig_pte)) { unlock_page(old_page); - pte_unmap_unlock(page_table, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); put_page(old_page); return 0; } page_mkwrite = 1; } - return wp_page_reuse(mm, vma, address, page_table, ptl, - orig_pte, old_page, page_mkwrite, 1); + return wp_page_reuse(fe, orig_pte, old_page, page_mkwrite, 1); } /* @@ -2359,14 +2356,13 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, * but allow concurrent faults), with pte both mapped and locked. * We return with mmap_sem still held, but pte unmapped and unlocked. 
*/ -static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pte_t *page_table, pmd_t *pmd, - spinlock_t *ptl, pte_t orig_pte) - __releases(ptl) +static int do_wp_page(struct fault_env *fe, pte_t orig_pte) + __releases(fe->ptl) { + struct vm_area_struct *vma = fe->vma; struct page *old_page; - old_page = vm_normal_page(vma, address, orig_pte); + old_page = vm_normal_page(vma, fe->address, orig_pte); if (!old_page) { /* * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a @@ -2377,12 +2373,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, */ if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) - return wp_pfn_shared(mm, vma, address, page_table, ptl, - orig_pte, pmd); + return wp_pfn_shared(fe, orig_pte); - pte_unmap_unlock(page_table, ptl); - return wp_page_copy(mm, vma, address, page_table, pmd, - orig_pte, old_page); + pte_unmap_unlock(fe->pte, fe->ptl); + return wp_page_copy(fe, orig_pte, old_page); } /* @@ -2393,13 +2387,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, int total_mapcount; if (!trylock_page(old_page)) { get_page(old_page); - pte_unmap_unlock(page_table, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); lock_page(old_page); - page_table = pte_offset_map_lock(mm, pmd, address, - &ptl); - if (!pte_same(*page_table, orig_pte)) { + fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, + fe->address, &fe->ptl); + if (!pte_same(*fe->pte, orig_pte)) { unlock_page(old_page); - pte_unmap_unlock(page_table, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); put_page(old_page); return 0; } @@ -2417,14 +2411,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, page_move_anon_rmap(old_page, vma); } unlock_page(old_page); - return wp_page_reuse(mm, vma, address, page_table, ptl, - orig_pte, old_page, 0, 0); + return wp_page_reuse(fe, orig_pte, old_page, 0, 0); } unlock_page(old_page); } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))) { - return wp_page_shared(mm, vma, address, page_table, pmd, - ptl, orig_pte, old_page); + return wp_page_shared(fe, orig_pte, old_page); } /* @@ -2432,9 +2424,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, */ get_page(old_page); - pte_unmap_unlock(page_table, ptl); - return wp_page_copy(mm, vma, address, page_table, pmd, - orig_pte, old_page); + pte_unmap_unlock(fe->pte, fe->ptl); + return wp_page_copy(fe, orig_pte, old_page); } static void unmap_mapping_range_vma(struct vm_area_struct *vma, @@ -2522,11 +2513,9 @@ EXPORT_SYMBOL(unmap_mapping_range); * We return with the mmap_sem locked or unlocked in the same cases * as does filemap_fault(). 
*/ -int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pte_t *page_table, pmd_t *pmd, - unsigned int flags, pte_t orig_pte) +int do_swap_page(struct fault_env *fe, pte_t orig_pte) { - spinlock_t *ptl; + struct vm_area_struct *vma = fe->vma; struct page *page, *swapcache; struct mem_cgroup *memcg; swp_entry_t entry; @@ -2535,17 +2524,17 @@ int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, int exclusive = 0; int ret = 0; - if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) + if (!pte_unmap_same(vma->vm_mm, fe->pmd, fe->pte, orig_pte)) goto out; entry = pte_to_swp_entry(orig_pte); if (unlikely(non_swap_entry(entry))) { if (is_migration_entry(entry)) { - migration_entry_wait(mm, pmd, address); + migration_entry_wait(vma->vm_mm, fe->pmd, fe->address); } else if (is_hwpoison_entry(entry)) { ret = VM_FAULT_HWPOISON; } else { - print_bad_pte(vma, address, orig_pte, NULL); + print_bad_pte(vma, fe->address, orig_pte, NULL); ret = VM_FAULT_SIGBUS; } goto out; @@ -2554,14 +2543,15 @@ int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, page = lookup_swap_cache(entry); if (!page) { page = swapin_readahead(entry, - GFP_HIGHUSER_MOVABLE, vma, address); + GFP_HIGHUSER_MOVABLE, vma, fe->address); if (!page) { /* * Back out if somebody else faulted in this pte * while we released the pte lock. */ - page_table = pte_offset_map_lock(mm, pmd, address, &ptl); - if (likely(pte_same(*page_table, orig_pte))) + fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, + fe->address, &fe->ptl); + if (likely(pte_same(*fe->pte, orig_pte))) ret = VM_FAULT_OOM; delayacct_clear_flag(DELAYACCT_PF_SWAPIN); goto unlock; @@ -2570,7 +2560,7 @@ int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, /* Had to read the page from swap area: Major fault */ ret = VM_FAULT_MAJOR; count_vm_event(PGMAJFAULT); - mem_cgroup_count_vm_event(mm, PGMAJFAULT); + mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); } else if (PageHWPoison(page)) { /* * hwpoisoned dirty swapcache pages are kept for killing @@ -2583,7 +2573,7 @@ int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, } swapcache = page; - locked = lock_page_or_retry(page, mm, flags); + locked = lock_page_or_retry(page, vma->vm_mm, fe->flags); delayacct_clear_flag(DELAYACCT_PF_SWAPIN); if (!locked) { @@ -2600,14 +2590,15 @@ int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val)) goto out_page; - page = ksm_might_need_to_copy(page, vma, address); + page = ksm_might_need_to_copy(page, vma, fe->address); if (unlikely(!page)) { ret = VM_FAULT_OOM; page = swapcache; goto out_page; } - if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false)) { + if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, + &memcg, false)) { ret = VM_FAULT_OOM; goto out_page; } @@ -2615,8 +2606,9 @@ int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, /* * Back out if somebody else already faulted in this pte. */ - page_table = pte_offset_map_lock(mm, pmd, address, &ptl); - if (unlikely(!pte_same(*page_table, orig_pte))) + fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, + &fe->ptl); + if (unlikely(!pte_same(*fe->pte, orig_pte))) goto out_nomap; if (unlikely(!PageUptodate(page))) { @@ -2634,24 +2626,24 @@ int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, * must be called after the swap_free(), or it will never succeed. 
*/ - inc_mm_counter_fast(mm, MM_ANONPAGES); - dec_mm_counter_fast(mm, MM_SWAPENTS); + inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); + dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS); pte = mk_pte(page, vma->vm_page_prot); - if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) { + if ((fe->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) { pte = maybe_mkwrite(pte_mkdirty(pte), vma); - flags &= ~FAULT_FLAG_WRITE; + fe->flags &= ~FAULT_FLAG_WRITE; ret |= VM_FAULT_WRITE; exclusive = RMAP_EXCLUSIVE; } flush_icache_page(vma, page); if (pte_swp_soft_dirty(orig_pte)) pte = pte_mksoft_dirty(pte); - set_pte_at(mm, address, page_table, pte); + set_pte_at(vma->vm_mm, fe->address, fe->pte, pte); if (page == swapcache) { - do_page_add_anon_rmap(page, vma, address, exclusive); + do_page_add_anon_rmap(page, vma, fe->address, exclusive); mem_cgroup_commit_charge(page, memcg, true, false); } else { /* ksm created a completely new copy */ - page_add_new_anon_rmap(page, vma, address, false); + page_add_new_anon_rmap(page, vma, fe->address, false); mem_cgroup_commit_charge(page, memcg, false, false); lru_cache_add_active_or_unevictable(page, vma); } @@ -2674,22 +2666,22 @@ int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, put_page(swapcache); } - if (flags & FAULT_FLAG_WRITE) { - ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); + if (fe->flags & FAULT_FLAG_WRITE) { + ret |= do_wp_page(fe, pte); if (ret & VM_FAULT_ERROR) ret &= VM_FAULT_ERROR; goto out; } /* No need to invalidate - it was non-present before */ - update_mmu_cache(vma, address, page_table); + update_mmu_cache(vma, fe->address, fe->pte); unlock: - pte_unmap_unlock(page_table, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); out: return ret; out_nomap: mem_cgroup_cancel_charge(page, memcg, false); - pte_unmap_unlock(page_table, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); out_page: unlock_page(page); out_release: @@ -2740,37 +2732,36 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo * but allow concurrent faults), and pte mapped but not yet locked. * We return with mmap_sem still held, but pte unmapped and unlocked. */ -static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pte_t *page_table, pmd_t *pmd, - unsigned int flags) +static int do_anonymous_page(struct fault_env *fe) { + struct vm_area_struct *vma = fe->vma; struct mem_cgroup *memcg; struct page *page; - spinlock_t *ptl; pte_t entry; - pte_unmap(page_table); + pte_unmap(fe->pte); /* File mapping without ->vm_ops ? 
*/ if (vma->vm_flags & VM_SHARED) return VM_FAULT_SIGBUS; /* Check if we need to add a guard page to the stack */ - if (check_stack_guard_page(vma, address) < 0) + if (check_stack_guard_page(vma, fe->address) < 0) return VM_FAULT_SIGSEGV; /* Use the zero-page for reads */ - if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) { - entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), + if (!(fe->flags & FAULT_FLAG_WRITE) && + !mm_forbids_zeropage(vma->vm_mm)) { + entry = pte_mkspecial(pfn_pte(my_zero_pfn(fe->address), vma->vm_page_prot)); - page_table = pte_offset_map_lock(mm, pmd, address, &ptl); - if (!pte_none(*page_table)) + fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, + &fe->ptl); + if (!pte_none(*fe->pte)) goto unlock; /* Deliver the page fault to userland, check inside PT lock */ if (userfaultfd_missing(vma)) { - pte_unmap_unlock(page_table, ptl); - return handle_userfault(vma, address, flags, - VM_UFFD_MISSING); + pte_unmap_unlock(fe->pte, fe->ptl); + return handle_userfault(fe, VM_UFFD_MISSING); } goto setpte; } @@ -2778,11 +2769,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, /* Allocate our own private page. */ if (unlikely(anon_vma_prepare(vma))) goto oom; - page = alloc_zeroed_user_highpage_movable(vma, address); + page = alloc_zeroed_user_highpage_movable(vma, fe->address); if (!page) goto oom; - if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false)) + if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false)) goto oom_free_page; /* @@ -2796,30 +2787,30 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, if (vma->vm_flags & VM_WRITE) entry = pte_mkwrite(pte_mkdirty(entry)); - page_table = pte_offset_map_lock(mm, pmd, address, &ptl); - if (!pte_none(*page_table)) + fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, + &fe->ptl); + if (!pte_none(*fe->pte)) goto release; /* Deliver the page fault to userland, check inside PT lock */ if (userfaultfd_missing(vma)) { - pte_unmap_unlock(page_table, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); mem_cgroup_cancel_charge(page, memcg, false); put_page(page); - return handle_userfault(vma, address, flags, - VM_UFFD_MISSING); + return handle_userfault(fe, VM_UFFD_MISSING); } - inc_mm_counter_fast(mm, MM_ANONPAGES); - page_add_new_anon_rmap(page, vma, address, false); + inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); + page_add_new_anon_rmap(page, vma, fe->address, false); mem_cgroup_commit_charge(page, memcg, false, false); lru_cache_add_active_or_unevictable(page, vma); setpte: - set_pte_at(mm, address, page_table, entry); + set_pte_at(vma->vm_mm, fe->address, fe->pte, entry); /* No need to invalidate - it was non-present before */ - update_mmu_cache(vma, address, page_table); + update_mmu_cache(vma, fe->address, fe->pte); unlock: - pte_unmap_unlock(page_table, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); return 0; release: mem_cgroup_cancel_charge(page, memcg, false); @@ -2836,17 +2827,16 @@ oom: * released depending on flags and vma->vm_ops->fault() return value. * See filemap_fault() and __lock_page_retry(). 
*/ -static int __do_fault(struct vm_area_struct *vma, unsigned long address, - pgoff_t pgoff, unsigned int flags, - struct page *cow_page, struct page **page, - void **entry) +static int __do_fault(struct fault_env *fe, pgoff_t pgoff, + struct page *cow_page, struct page **page, void **entry) { + struct vm_area_struct *vma = fe->vma; struct vm_fault vmf; int ret; - vmf.virtual_address = (void __user *)(address & PAGE_MASK); + vmf.virtual_address = (void __user *)(fe->address & PAGE_MASK); vmf.pgoff = pgoff; - vmf.flags = flags; + vmf.flags = fe->flags; vmf.page = NULL; vmf.gfp_mask = __get_fault_gfp_mask(vma); vmf.cow_page = cow_page; @@ -2878,38 +2868,36 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address, /** * do_set_pte - setup new PTE entry for given page and add reverse page mapping. * - * @vma: virtual memory area - * @address: user virtual address + * @fe: fault environment * @page: page to map - * @pte: pointer to target page table entry - * @write: true, if new entry is writable - * @anon: true, if it's anonymous page * - * Caller must hold page table lock relevant for @pte. + * Caller must hold page table lock relevant for @fe->pte. * * Target users are page handler itself and implementations of * vm_ops->map_pages. */ -void do_set_pte(struct vm_area_struct *vma, unsigned long address, - struct page *page, pte_t *pte, bool write, bool anon) +void do_set_pte(struct fault_env *fe, struct page *page) { + struct vm_area_struct *vma = fe->vma; + bool write = fe->flags & FAULT_FLAG_WRITE; pte_t entry; flush_icache_page(vma, page); entry = mk_pte(page, vma->vm_page_prot); if (write) entry = maybe_mkwrite(pte_mkdirty(entry), vma); - if (anon) { + /* copy-on-write page */ + if (write && !(vma->vm_flags & VM_SHARED)) { inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); - page_add_new_anon_rmap(page, vma, address, false); + page_add_new_anon_rmap(page, vma, fe->address, false); } else { inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); page_add_file_rmap(page); } - set_pte_at(vma->vm_mm, address, pte, entry); + set_pte_at(vma->vm_mm, fe->address, fe->pte, entry); /* no need to invalidate: a not-present page won't be cached */ - update_mmu_cache(vma, address, pte); + update_mmu_cache(vma, fe->address, fe->pte); } static unsigned long fault_around_bytes __read_mostly = @@ -2976,57 +2964,53 @@ late_initcall(fault_around_debugfs); * fault_around_pages() value (and therefore to page order). This way it's * easier to guarantee that we don't cross page table boundaries. */ -static void do_fault_around(struct vm_area_struct *vma, unsigned long address, - pte_t *pte, pgoff_t pgoff, unsigned int flags) +static void do_fault_around(struct fault_env *fe, pgoff_t start_pgoff) { - unsigned long start_addr, nr_pages, mask; - pgoff_t max_pgoff; - struct vm_fault vmf; + unsigned long address = fe->address, start_addr, nr_pages, mask; + pte_t *pte = fe->pte; + pgoff_t end_pgoff; int off; nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT; mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK; - start_addr = max(address & mask, vma->vm_start); - off = ((address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); - pte -= off; - pgoff -= off; + start_addr = max(fe->address & mask, fe->vma->vm_start); + off = ((fe->address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); + fe->pte -= off; + start_pgoff -= off; /* - * max_pgoff is either end of page table or end of vma - * or fault_around_pages() from pgoff, depending what is nearest. 
+ * end_pgoff is either end of page table or end of vma + * or fault_around_pages() from start_pgoff, depending what is nearest. */ - max_pgoff = pgoff - ((start_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + + end_pgoff = start_pgoff - + ((start_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + PTRS_PER_PTE - 1; - max_pgoff = min3(max_pgoff, vma_pages(vma) + vma->vm_pgoff - 1, - pgoff + nr_pages - 1); + end_pgoff = min3(end_pgoff, vma_pages(fe->vma) + fe->vma->vm_pgoff - 1, + start_pgoff + nr_pages - 1); /* Check if it makes any sense to call ->map_pages */ - while (!pte_none(*pte)) { - if (++pgoff > max_pgoff) - return; - start_addr += PAGE_SIZE; - if (start_addr >= vma->vm_end) - return; - pte++; + fe->address = start_addr; + while (!pte_none(*fe->pte)) { + if (++start_pgoff > end_pgoff) + goto out; + fe->address += PAGE_SIZE; + if (fe->address >= fe->vma->vm_end) + goto out; + fe->pte++; } - vmf.virtual_address = (void __user *) start_addr; - vmf.pte = pte; - vmf.pgoff = pgoff; - vmf.max_pgoff = max_pgoff; - vmf.flags = flags; - vmf.gfp_mask = __get_fault_gfp_mask(vma); - vma->vm_ops->map_pages(vma, &vmf); + fe->vma->vm_ops->map_pages(fe, start_pgoff, end_pgoff); +out: + /* restore fault_env */ + fe->pte = pte; + fe->address = address; } -static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, - pgoff_t pgoff, unsigned int flags, pte_t orig_pte) +static int do_read_fault(struct fault_env *fe, pgoff_t pgoff, pte_t orig_pte) { + struct vm_area_struct *vma = fe->vma; struct page *fault_page; - spinlock_t *ptl; - pte_t *pte; int ret = 0; /* @@ -3035,66 +3019,68 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, * something). */ if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { - pte = pte_offset_map_lock(mm, pmd, address, &ptl); - do_fault_around(vma, address, pte, pgoff, flags); - if (!pte_same(*pte, orig_pte)) + fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, + &fe->ptl); + if (!pte_same(*fe->pte, orig_pte)) + goto unlock_out; + do_fault_around(fe, pgoff); + /* Check if the fault is handled by faultaround */ + if (!pte_same(*fe->pte, orig_pte)) goto unlock_out; - pte_unmap_unlock(pte, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); } - ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page, NULL); + ret = __do_fault(fe, pgoff, NULL, &fault_page, NULL); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; - pte = pte_offset_map_lock(mm, pmd, address, &ptl); - if (unlikely(!pte_same(*pte, orig_pte))) { - pte_unmap_unlock(pte, ptl); + fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, &fe->ptl); + if (unlikely(!pte_same(*fe->pte, orig_pte))) { + pte_unmap_unlock(fe->pte, fe->ptl); unlock_page(fault_page); put_page(fault_page); return ret; } - do_set_pte(vma, address, fault_page, pte, false, false); + do_set_pte(fe, fault_page); unlock_page(fault_page); unlock_out: - pte_unmap_unlock(pte, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); return ret; } -static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, - pgoff_t pgoff, unsigned int flags, pte_t orig_pte) +static int do_cow_fault(struct fault_env *fe, pgoff_t pgoff, pte_t orig_pte) { + struct vm_area_struct *vma = fe->vma; struct page *fault_page, *new_page; void *fault_entry; struct mem_cgroup *memcg; - spinlock_t *ptl; - pte_t *pte; int ret; if (unlikely(anon_vma_prepare(vma))) return VM_FAULT_OOM; - new_page = 
alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); + new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, fe->address); if (!new_page) return VM_FAULT_OOM; - if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) { + if (mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL, + &memcg, false)) { put_page(new_page); return VM_FAULT_OOM; } - ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page, - &fault_entry); + ret = __do_fault(fe, pgoff, new_page, &fault_page, &fault_entry); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) goto uncharge_out; if (!(ret & VM_FAULT_DAX_LOCKED)) - copy_user_highpage(new_page, fault_page, address, vma); + copy_user_highpage(new_page, fault_page, fe->address, vma); __SetPageUptodate(new_page); - pte = pte_offset_map_lock(mm, pmd, address, &ptl); - if (unlikely(!pte_same(*pte, orig_pte))) { - pte_unmap_unlock(pte, ptl); + fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, + &fe->ptl); + if (unlikely(!pte_same(*fe->pte, orig_pte))) { + pte_unmap_unlock(fe->pte, fe->ptl); if (!(ret & VM_FAULT_DAX_LOCKED)) { unlock_page(fault_page); put_page(fault_page); @@ -3104,10 +3090,10 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, } goto uncharge_out; } - do_set_pte(vma, address, new_page, pte, true, true); + do_set_pte(fe, new_page); mem_cgroup_commit_charge(new_page, memcg, false, false); lru_cache_add_active_or_unevictable(new_page, vma); - pte_unmap_unlock(pte, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); if (!(ret & VM_FAULT_DAX_LOCKED)) { unlock_page(fault_page); put_page(fault_page); @@ -3121,18 +3107,15 @@ uncharge_out: return ret; } -static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, - pgoff_t pgoff, unsigned int flags, pte_t orig_pte) +static int do_shared_fault(struct fault_env *fe, pgoff_t pgoff, pte_t orig_pte) { + struct vm_area_struct *vma = fe->vma; struct page *fault_page; struct address_space *mapping; - spinlock_t *ptl; - pte_t *pte; int dirtied = 0; int ret, tmp; - ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page, NULL); + ret = __do_fault(fe, pgoff, NULL, &fault_page, NULL); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; @@ -3142,7 +3125,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, */ if (vma->vm_ops->page_mkwrite) { unlock_page(fault_page); - tmp = do_page_mkwrite(vma, fault_page, address); + tmp = do_page_mkwrite(vma, fault_page, fe->address); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { put_page(fault_page); @@ -3150,15 +3133,16 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, } } - pte = pte_offset_map_lock(mm, pmd, address, &ptl); - if (unlikely(!pte_same(*pte, orig_pte))) { - pte_unmap_unlock(pte, ptl); + fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, + &fe->ptl); + if (unlikely(!pte_same(*fe->pte, orig_pte))) { + pte_unmap_unlock(fe->pte, fe->ptl); unlock_page(fault_page); put_page(fault_page); return ret; } - do_set_pte(vma, address, fault_page, pte, true, false); - pte_unmap_unlock(pte, ptl); + do_set_pte(fe, fault_page); + pte_unmap_unlock(fe->pte, fe->ptl); if (set_page_dirty(fault_page)) dirtied = 1; @@ -3190,23 +3174,20 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, * The mmap_sem may have been released depending on flags and our * return value. See filemap_fault() and __lock_page_or_retry(). 
*/ -static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pte_t *page_table, pmd_t *pmd, - unsigned int flags, pte_t orig_pte) +static int do_fault(struct fault_env *fe, pte_t orig_pte) { - pgoff_t pgoff = linear_page_index(vma, address); + struct vm_area_struct *vma = fe->vma; + pgoff_t pgoff = linear_page_index(vma, fe->address); - pte_unmap(page_table); + pte_unmap(fe->pte); /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ if (!vma->vm_ops->fault) return VM_FAULT_SIGBUS; - if (!(flags & FAULT_FLAG_WRITE)) - return do_read_fault(mm, vma, address, pmd, pgoff, flags, - orig_pte); + if (!(fe->flags & FAULT_FLAG_WRITE)) + return do_read_fault(fe, pgoff, orig_pte); if (!(vma->vm_flags & VM_SHARED)) - return do_cow_fault(mm, vma, address, pmd, pgoff, flags, - orig_pte); - return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); + return do_cow_fault(fe, pgoff, orig_pte); + return do_shared_fault(fe, pgoff, orig_pte); } static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, @@ -3224,11 +3205,10 @@ static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, return mpol_misplaced(page, vma, addr); } -static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd) +static int do_numa_page(struct fault_env *fe, pte_t pte) { + struct vm_area_struct *vma = fe->vma; struct page *page = NULL; - spinlock_t *ptl; int page_nid = -1; int last_cpupid; int target_nid; @@ -3248,10 +3228,10 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, * page table entry is not accessible, so there would be no * concurrent hardware modifications to the PTE. */ - ptl = pte_lockptr(mm, pmd); - spin_lock(ptl); - if (unlikely(!pte_same(*ptep, pte))) { - pte_unmap_unlock(ptep, ptl); + fe->ptl = pte_lockptr(vma->vm_mm, fe->pmd); + spin_lock(fe->ptl); + if (unlikely(!pte_same(*fe->pte, pte))) { + pte_unmap_unlock(fe->pte, fe->ptl); goto out; } @@ -3260,18 +3240,18 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, pte = pte_mkyoung(pte); if (was_writable) pte = pte_mkwrite(pte); - set_pte_at(mm, addr, ptep, pte); - update_mmu_cache(vma, addr, ptep); + set_pte_at(vma->vm_mm, fe->address, fe->pte, pte); + update_mmu_cache(vma, fe->address, fe->pte); - page = vm_normal_page(vma, addr, pte); + page = vm_normal_page(vma, fe->address, pte); if (!page) { - pte_unmap_unlock(ptep, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); return 0; } /* TODO: handle PTE-mapped THP */ if (PageCompound(page)) { - pte_unmap_unlock(ptep, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); return 0; } @@ -3295,8 +3275,9 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, last_cpupid = page_cpupid_last(page); page_nid = page_to_nid(page); - target_nid = numa_migrate_prep(page, vma, addr, page_nid, &flags); - pte_unmap_unlock(ptep, ptl); + target_nid = numa_migrate_prep(page, vma, fe->address, page_nid, + &flags); + pte_unmap_unlock(fe->pte, fe->ptl); if (target_nid == -1) { put_page(page); goto out; @@ -3316,24 +3297,24 @@ out: return 0; } -static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, unsigned int flags) +static int create_huge_pmd(struct fault_env *fe) { + struct vm_area_struct *vma = fe->vma; if (vma_is_anonymous(vma)) - return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags); + return do_huge_pmd_anonymous_page(fe); if 
(vma->vm_ops->pmd_fault) - return vma->vm_ops->pmd_fault(vma, address, pmd, flags); + return vma->vm_ops->pmd_fault(vma, fe->address, fe->pmd, + fe->flags); return VM_FAULT_FALLBACK; } -static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd, pmd_t orig_pmd, - unsigned int flags) +static int wp_huge_pmd(struct fault_env *fe, pmd_t orig_pmd) { - if (vma_is_anonymous(vma)) - return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd); - if (vma->vm_ops->pmd_fault) - return vma->vm_ops->pmd_fault(vma, address, pmd, flags); + if (vma_is_anonymous(fe->vma)) + return do_huge_pmd_wp_page(fe, orig_pmd); + if (fe->vma->vm_ops->pmd_fault) + return fe->vma->vm_ops->pmd_fault(fe->vma, fe->address, fe->pmd, + fe->flags); return VM_FAULT_FALLBACK; } @@ -3353,12 +3334,9 @@ static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma, * The mmap_sem may have been released depending on flags and our * return value. See filemap_fault() and __lock_page_or_retry(). */ -static int handle_pte_fault(struct mm_struct *mm, - struct vm_area_struct *vma, unsigned long address, - pte_t *pte, pmd_t *pmd, unsigned int flags) +static int handle_pte_fault(struct fault_env *fe) { pte_t entry; - spinlock_t *ptl; /* * some architectures can have larger ptes than wordsize, @@ -3368,37 +3346,34 @@ static int handle_pte_fault(struct mm_struct *mm, * we later double check anyway with the ptl lock held. So here * a barrier will do. */ - entry = *pte; + entry = *fe->pte; barrier(); if (!pte_present(entry)) { if (pte_none(entry)) { - if (vma_is_anonymous(vma)) - return do_anonymous_page(mm, vma, address, - pte, pmd, flags); + if (vma_is_anonymous(fe->vma)) + return do_anonymous_page(fe); else - return do_fault(mm, vma, address, pte, pmd, - flags, entry); + return do_fault(fe, entry); } - return do_swap_page(mm, vma, address, - pte, pmd, flags, entry); + return do_swap_page(fe, entry); } if (pte_protnone(entry)) - return do_numa_page(mm, vma, address, entry, pte, pmd); + return do_numa_page(fe, entry); - ptl = pte_lockptr(mm, pmd); - spin_lock(ptl); - if (unlikely(!pte_same(*pte, entry))) + fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd); + spin_lock(fe->ptl); + if (unlikely(!pte_same(*fe->pte, entry))) goto unlock; - if (flags & FAULT_FLAG_WRITE) { + if (fe->flags & FAULT_FLAG_WRITE) { if (!pte_write(entry)) - return do_wp_page(mm, vma, address, - pte, pmd, ptl, entry); + return do_wp_page(fe, entry); entry = pte_mkdirty(entry); } entry = pte_mkyoung(entry); - if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { - update_mmu_cache(vma, address, pte); + if (ptep_set_access_flags(fe->vma, fe->address, fe->pte, entry, + fe->flags & FAULT_FLAG_WRITE)) { + update_mmu_cache(fe->vma, fe->address, fe->pte); } else { /* * This is needed only for protection faults but the arch code @@ -3406,11 +3381,11 @@ static int handle_pte_fault(struct mm_struct *mm, * This still avoids useless tlb flushes for .text page faults * with threads. 
*/ - if (flags & FAULT_FLAG_WRITE) - flush_tlb_fix_spurious_fault(vma, address); + if (fe->flags & FAULT_FLAG_WRITE) + flush_tlb_fix_spurious_fault(fe->vma, fe->address); } unlock: - pte_unmap_unlock(pte, ptl); + pte_unmap_unlock(fe->pte, fe->ptl); return 0; } @@ -3423,51 +3398,42 @@ unlock: static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags) { + struct fault_env fe = { + .vma = vma, + .address = address, + .flags = flags, + }; struct mm_struct *mm = vma->vm_mm; pgd_t *pgd; pud_t *pud; - pmd_t *pmd; - pte_t *pte; - - if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, - flags & FAULT_FLAG_INSTRUCTION, - flags & FAULT_FLAG_REMOTE)) - return VM_FAULT_SIGSEGV; - - if (unlikely(is_vm_hugetlb_page(vma))) - return hugetlb_fault(mm, vma, address, flags); pgd = pgd_offset(mm, address); pud = pud_alloc(mm, pgd, address); if (!pud) return VM_FAULT_OOM; - pmd = pmd_alloc(mm, pud, address); - if (!pmd) + fe.pmd = pmd_alloc(mm, pud, address); + if (!fe.pmd) return VM_FAULT_OOM; - if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) { - int ret = create_huge_pmd(mm, vma, address, pmd, flags); + if (pmd_none(*fe.pmd) && transparent_hugepage_enabled(vma)) { + int ret = create_huge_pmd(&fe); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { - pmd_t orig_pmd = *pmd; + pmd_t orig_pmd = *fe.pmd; int ret; barrier(); if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) { - unsigned int dirty = flags & FAULT_FLAG_WRITE; - if (pmd_protnone(orig_pmd)) - return do_huge_pmd_numa_page(mm, vma, address, - orig_pmd, pmd); + return do_huge_pmd_numa_page(&fe, orig_pmd); - if (dirty && !pmd_write(orig_pmd)) { - ret = wp_huge_pmd(mm, vma, address, pmd, - orig_pmd, flags); + if ((fe.flags & FAULT_FLAG_WRITE) && + !pmd_write(orig_pmd)) { + ret = wp_huge_pmd(&fe, orig_pmd); if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { - huge_pmd_set_accessed(mm, vma, address, pmd, - orig_pmd, dirty); + huge_pmd_set_accessed(&fe, orig_pmd); return 0; } } @@ -3478,7 +3444,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, * run pte_offset_map on the pmd, if an huge pmd could * materialize from under us from a different thread. */ - if (unlikely(pte_alloc(mm, pmd, address))) + if (unlikely(pte_alloc(fe.vma->vm_mm, fe.pmd, fe.address))) return VM_FAULT_OOM; /* * If a huge pmd materialized under us just retry later. Use @@ -3491,7 +3457,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, * through an atomic read in C, which is what pmd_trans_unstable() * provides. */ - if (unlikely(pmd_trans_unstable(pmd) || pmd_devmap(*pmd))) + if (unlikely(pmd_trans_unstable(fe.pmd) || pmd_devmap(*fe.pmd))) return 0; /* * A regular pmd is established and it can't morph into a huge pmd @@ -3499,9 +3465,9 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, * read mode and khugepaged takes it in write mode. So now it's * safe to run pte_offset_map(). 
*/ - pte = pte_offset_map(pmd, address); + fe.pte = pte_offset_map(fe.pmd, fe.address); - return handle_pte_fault(mm, vma, address, pte, pmd, flags); + return handle_pte_fault(&fe); } /* @@ -3530,7 +3496,15 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, if (flags & FAULT_FLAG_USER) mem_cgroup_oom_enable(); - ret = __handle_mm_fault(vma, address, flags); + if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE, + flags & FAULT_FLAG_INSTRUCTION, + flags & FAULT_FLAG_REMOTE)) + return VM_FAULT_SIGSEGV; + + if (unlikely(is_vm_hugetlb_page(vma))) + ret = hugetlb_fault(vma->vm_mm, vma, address, flags); + else + ret = __handle_mm_fault(vma, address, flags); if (flags & FAULT_FLAG_USER) { mem_cgroup_oom_disable(); diff --git a/mm/nommu.c b/mm/nommu.c index c2e58880207f..95daf81a4855 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1809,7 +1809,8 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } EXPORT_SYMBOL(filemap_fault); -void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf) +void filemap_map_pages(struct fault_env *fe, + pgoff_t start_pgoff, pgoff_t end_pgoff) { BUG(); } -- cgit v1.2.3-70-g09d2 From 7267ec008b5cd8b3579e188b1ff238815643e372 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:23 -0700 Subject: mm: postpone page table allocation until we have page to map The idea (and most of code) is borrowed again: from Hugh's patchset on huge tmpfs[1]. Instead of allocation pte page table upfront, we postpone this until we have page to map in hands. This approach opens possibility to map the page as huge if filesystem supports this. Comparing to Hugh's patch I've pushed page table allocation a bit further: into do_set_pte(). This way we can postpone allocation even in faultaround case without moving do_fault_around() after __do_fault(). do_set_pte() got renamed to alloc_set_pte() as it can allocate page table if required. [1] http://lkml.kernel.org/r/alpine.LSU.2.11.1502202015090.14414@eggly.anvils Link: http://lkml.kernel.org/r/1466021202-61880-10-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 10 +- mm/filemap.c | 16 +-- mm/memory.c | 298 ++++++++++++++++++++++++++++++++--------------------- 3 files changed, 197 insertions(+), 127 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 8bd74558c0e4..192c1bbe5fcd 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -330,6 +330,13 @@ struct fault_env { * Protects pte page table if 'pte' * is not NULL, otherwise pmd. */ + pgtable_t prealloc_pte; /* Pre-allocated pte page table. + * vm_ops->map_pages() calls + * alloc_set_pte() from atomic context. + * do_fault_around() pre-allocates + * page table to avoid allocation from + * atomic context. 
+ */ }; /* @@ -618,7 +625,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) return pte; } -void do_set_pte(struct fault_env *fe, struct page *page); +int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, + struct page *page); #endif /* diff --git a/mm/filemap.c b/mm/filemap.c index 54d5318f8d3f..1efd2994dccf 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2144,11 +2144,6 @@ void filemap_map_pages(struct fault_env *fe, start_pgoff) { if (iter.index > end_pgoff) break; - fe->pte += iter.index - last_pgoff; - fe->address += (iter.index - last_pgoff) << PAGE_SHIFT; - last_pgoff = iter.index; - if (!pte_none(*fe->pte)) - goto next; repeat: page = radix_tree_deref_slot(slot); if (unlikely(!page)) @@ -2186,7 +2181,13 @@ repeat: if (file->f_ra.mmap_miss > 0) file->f_ra.mmap_miss--; - do_set_pte(fe, page); + + fe->address += (iter.index - last_pgoff) << PAGE_SHIFT; + if (fe->pte) + fe->pte += iter.index - last_pgoff; + last_pgoff = iter.index; + if (alloc_set_pte(fe, NULL, page)) + goto unlock; unlock_page(page); goto next; unlock: @@ -2194,6 +2195,9 @@ unlock: skip: put_page(page); next: + /* Huge page is mapped? No need to proceed. */ + if (pmd_trans_huge(*fe->pmd)) + break; if (iter.index == end_pgoff) break; } diff --git a/mm/memory.c b/mm/memory.c index 72b520897339..1991105bf67c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2739,8 +2739,6 @@ static int do_anonymous_page(struct fault_env *fe) struct page *page; pte_t entry; - pte_unmap(fe->pte); - /* File mapping without ->vm_ops ? */ if (vma->vm_flags & VM_SHARED) return VM_FAULT_SIGBUS; @@ -2749,6 +2747,23 @@ static int do_anonymous_page(struct fault_env *fe) if (check_stack_guard_page(vma, fe->address) < 0) return VM_FAULT_SIGSEGV; + /* + * Use pte_alloc() instead of pte_alloc_map(). We can't run + * pte_offset_map() on pmds where a huge pmd might be created + * from a different thread. + * + * pte_alloc_map() is safe to use under down_write(mmap_sem) or when + * parallel threads are excluded by other means. + * + * Here we only have down_read(mmap_sem). + */ + if (pte_alloc(vma->vm_mm, fe->pmd, fe->address)) + return VM_FAULT_OOM; + + /* See the comment in pte_alloc_one_map() */ + if (unlikely(pmd_trans_unstable(fe->pmd))) + return 0; + /* Use the zero-page for reads */ if (!(fe->flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(vma->vm_mm)) { @@ -2865,23 +2880,76 @@ static int __do_fault(struct fault_env *fe, pgoff_t pgoff, return ret; } +static int pte_alloc_one_map(struct fault_env *fe) +{ + struct vm_area_struct *vma = fe->vma; + + if (!pmd_none(*fe->pmd)) + goto map_pte; + if (fe->prealloc_pte) { + fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); + if (unlikely(!pmd_none(*fe->pmd))) { + spin_unlock(fe->ptl); + goto map_pte; + } + + atomic_long_inc(&vma->vm_mm->nr_ptes); + pmd_populate(vma->vm_mm, fe->pmd, fe->prealloc_pte); + spin_unlock(fe->ptl); + fe->prealloc_pte = 0; + } else if (unlikely(pte_alloc(vma->vm_mm, fe->pmd, fe->address))) { + return VM_FAULT_OOM; + } +map_pte: + /* + * If a huge pmd materialized under us just retry later. Use + * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd + * didn't become pmd_trans_huge under us and then back to pmd_none, as + * a result of MADV_DONTNEED running immediately after a huge pmd fault + * in a different thread of this mm, in turn leading to a misleading + * pmd_trans_huge() retval. 
All we have to ensure is that it is a + * regular pmd that we can walk with pte_offset_map() and we can do that + * through an atomic read in C, which is what pmd_trans_unstable() + * provides. + */ + if (pmd_trans_unstable(fe->pmd) || pmd_devmap(*fe->pmd)) + return VM_FAULT_NOPAGE; + + fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, + &fe->ptl); + return 0; +} + /** - * do_set_pte - setup new PTE entry for given page and add reverse page mapping. + * alloc_set_pte - setup new PTE entry for given page and add reverse page + * mapping. If needed, the fucntion allocates page table or use pre-allocated. * * @fe: fault environment + * @memcg: memcg to charge page (only for private mappings) * @page: page to map * - * Caller must hold page table lock relevant for @fe->pte. + * Caller must take care of unlocking fe->ptl, if fe->pte is non-NULL on return. * * Target users are page handler itself and implementations of * vm_ops->map_pages. */ -void do_set_pte(struct fault_env *fe, struct page *page) +int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, + struct page *page) { struct vm_area_struct *vma = fe->vma; bool write = fe->flags & FAULT_FLAG_WRITE; pte_t entry; + if (!fe->pte) { + int ret = pte_alloc_one_map(fe); + if (ret) + return ret; + } + + /* Re-check under ptl */ + if (unlikely(!pte_none(*fe->pte))) + return VM_FAULT_NOPAGE; + flush_icache_page(vma, page); entry = mk_pte(page, vma->vm_page_prot); if (write) @@ -2890,6 +2958,8 @@ void do_set_pte(struct fault_env *fe, struct page *page) if (write && !(vma->vm_flags & VM_SHARED)) { inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); page_add_new_anon_rmap(page, vma, fe->address, false); + mem_cgroup_commit_charge(page, memcg, false, false); + lru_cache_add_active_or_unevictable(page, vma); } else { inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); page_add_file_rmap(page); @@ -2898,6 +2968,8 @@ void do_set_pte(struct fault_env *fe, struct page *page) /* no need to invalidate: a not-present page won't be cached */ update_mmu_cache(vma, fe->address, fe->pte); + + return 0; } static unsigned long fault_around_bytes __read_mostly = @@ -2964,19 +3036,17 @@ late_initcall(fault_around_debugfs); * fault_around_pages() value (and therefore to page order). This way it's * easier to guarantee that we don't cross page table boundaries. */ -static void do_fault_around(struct fault_env *fe, pgoff_t start_pgoff) +static int do_fault_around(struct fault_env *fe, pgoff_t start_pgoff) { - unsigned long address = fe->address, start_addr, nr_pages, mask; - pte_t *pte = fe->pte; + unsigned long address = fe->address, nr_pages, mask; pgoff_t end_pgoff; - int off; + int off, ret = 0; nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT; mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK; - start_addr = max(fe->address & mask, fe->vma->vm_start); - off = ((fe->address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); - fe->pte -= off; + fe->address = max(address & mask, fe->vma->vm_start); + off = ((address - fe->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); start_pgoff -= off; /* @@ -2984,30 +3054,45 @@ static void do_fault_around(struct fault_env *fe, pgoff_t start_pgoff) * or fault_around_pages() from start_pgoff, depending what is nearest. 
*/ end_pgoff = start_pgoff - - ((start_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + + ((fe->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) + PTRS_PER_PTE - 1; end_pgoff = min3(end_pgoff, vma_pages(fe->vma) + fe->vma->vm_pgoff - 1, start_pgoff + nr_pages - 1); - /* Check if it makes any sense to call ->map_pages */ - fe->address = start_addr; - while (!pte_none(*fe->pte)) { - if (++start_pgoff > end_pgoff) - goto out; - fe->address += PAGE_SIZE; - if (fe->address >= fe->vma->vm_end) - goto out; - fe->pte++; + if (pmd_none(*fe->pmd)) { + fe->prealloc_pte = pte_alloc_one(fe->vma->vm_mm, fe->address); + smp_wmb(); /* See comment in __pte_alloc() */ } fe->vma->vm_ops->map_pages(fe, start_pgoff, end_pgoff); + + /* preallocated pagetable is unused: free it */ + if (fe->prealloc_pte) { + pte_free(fe->vma->vm_mm, fe->prealloc_pte); + fe->prealloc_pte = 0; + } + /* Huge page is mapped? Page fault is solved */ + if (pmd_trans_huge(*fe->pmd)) { + ret = VM_FAULT_NOPAGE; + goto out; + } + + /* ->map_pages() haven't done anything useful. Cold page cache? */ + if (!fe->pte) + goto out; + + /* check if the page fault is solved */ + fe->pte -= (fe->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT); + if (!pte_none(*fe->pte)) + ret = VM_FAULT_NOPAGE; + pte_unmap_unlock(fe->pte, fe->ptl); out: - /* restore fault_env */ - fe->pte = pte; fe->address = address; + fe->pte = NULL; + return ret; } -static int do_read_fault(struct fault_env *fe, pgoff_t pgoff, pte_t orig_pte) +static int do_read_fault(struct fault_env *fe, pgoff_t pgoff) { struct vm_area_struct *vma = fe->vma; struct page *fault_page; @@ -3019,36 +3104,25 @@ static int do_read_fault(struct fault_env *fe, pgoff_t pgoff, pte_t orig_pte) * something). */ if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { - fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, - &fe->ptl); - if (!pte_same(*fe->pte, orig_pte)) - goto unlock_out; - do_fault_around(fe, pgoff); - /* Check if the fault is handled by faultaround */ - if (!pte_same(*fe->pte, orig_pte)) - goto unlock_out; - pte_unmap_unlock(fe->pte, fe->ptl); + ret = do_fault_around(fe, pgoff); + if (ret) + return ret; } ret = __do_fault(fe, pgoff, NULL, &fault_page, NULL); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; - fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, &fe->ptl); - if (unlikely(!pte_same(*fe->pte, orig_pte))) { + ret |= alloc_set_pte(fe, NULL, fault_page); + if (fe->pte) pte_unmap_unlock(fe->pte, fe->ptl); - unlock_page(fault_page); - put_page(fault_page); - return ret; - } - do_set_pte(fe, fault_page); unlock_page(fault_page); -unlock_out: - pte_unmap_unlock(fe->pte, fe->ptl); + if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) + put_page(fault_page); return ret; } -static int do_cow_fault(struct fault_env *fe, pgoff_t pgoff, pte_t orig_pte) +static int do_cow_fault(struct fault_env *fe, pgoff_t pgoff) { struct vm_area_struct *vma = fe->vma; struct page *fault_page, *new_page; @@ -3077,29 +3151,17 @@ static int do_cow_fault(struct fault_env *fe, pgoff_t pgoff, pte_t orig_pte) copy_user_highpage(new_page, fault_page, fe->address, vma); __SetPageUptodate(new_page); - fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, - &fe->ptl); - if (unlikely(!pte_same(*fe->pte, orig_pte))) { + ret |= alloc_set_pte(fe, memcg, new_page); + if (fe->pte) pte_unmap_unlock(fe->pte, fe->ptl); - if (!(ret & VM_FAULT_DAX_LOCKED)) { - unlock_page(fault_page); - put_page(fault_page); - } else { - 
dax_unlock_mapping_entry(vma->vm_file->f_mapping, - pgoff); - } - goto uncharge_out; - } - do_set_pte(fe, new_page); - mem_cgroup_commit_charge(new_page, memcg, false, false); - lru_cache_add_active_or_unevictable(new_page, vma); - pte_unmap_unlock(fe->pte, fe->ptl); if (!(ret & VM_FAULT_DAX_LOCKED)) { unlock_page(fault_page); put_page(fault_page); } else { dax_unlock_mapping_entry(vma->vm_file->f_mapping, pgoff); } + if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) + goto uncharge_out; return ret; uncharge_out: mem_cgroup_cancel_charge(new_page, memcg, false); @@ -3107,7 +3169,7 @@ uncharge_out: return ret; } -static int do_shared_fault(struct fault_env *fe, pgoff_t pgoff, pte_t orig_pte) +static int do_shared_fault(struct fault_env *fe, pgoff_t pgoff) { struct vm_area_struct *vma = fe->vma; struct page *fault_page; @@ -3133,16 +3195,15 @@ static int do_shared_fault(struct fault_env *fe, pgoff_t pgoff, pte_t orig_pte) } } - fe->pte = pte_offset_map_lock(vma->vm_mm, fe->pmd, fe->address, - &fe->ptl); - if (unlikely(!pte_same(*fe->pte, orig_pte))) { + ret |= alloc_set_pte(fe, NULL, fault_page); + if (fe->pte) pte_unmap_unlock(fe->pte, fe->ptl); + if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | + VM_FAULT_RETRY))) { unlock_page(fault_page); put_page(fault_page); return ret; } - do_set_pte(fe, fault_page); - pte_unmap_unlock(fe->pte, fe->ptl); if (set_page_dirty(fault_page)) dirtied = 1; @@ -3174,20 +3235,19 @@ static int do_shared_fault(struct fault_env *fe, pgoff_t pgoff, pte_t orig_pte) * The mmap_sem may have been released depending on flags and our * return value. See filemap_fault() and __lock_page_or_retry(). */ -static int do_fault(struct fault_env *fe, pte_t orig_pte) +static int do_fault(struct fault_env *fe) { struct vm_area_struct *vma = fe->vma; pgoff_t pgoff = linear_page_index(vma, fe->address); - pte_unmap(fe->pte); /* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ if (!vma->vm_ops->fault) return VM_FAULT_SIGBUS; if (!(fe->flags & FAULT_FLAG_WRITE)) - return do_read_fault(fe, pgoff, orig_pte); + return do_read_fault(fe, pgoff); if (!(vma->vm_flags & VM_SHARED)) - return do_cow_fault(fe, pgoff, orig_pte); - return do_shared_fault(fe, pgoff, orig_pte); + return do_cow_fault(fe, pgoff); + return do_shared_fault(fe, pgoff); } static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, @@ -3327,37 +3387,63 @@ static int wp_huge_pmd(struct fault_env *fe, pmd_t orig_pmd) * with external mmu caches can use to update those (ie the Sparc or * PowerPC hashed page tables that act as extended TLBs). * - * We enter with non-exclusive mmap_sem (to exclude vma changes, - * but allow concurrent faults), and pte mapped but not yet locked. - * We return with pte unmapped and unlocked. + * We enter with non-exclusive mmap_sem (to exclude vma changes, but allow + * concurrent faults). * - * The mmap_sem may have been released depending on flags and our - * return value. See filemap_fault() and __lock_page_or_retry(). + * The mmap_sem may have been released depending on flags and our return value. + * See filemap_fault() and __lock_page_or_retry(). */ static int handle_pte_fault(struct fault_env *fe) { pte_t entry; - /* - * some architectures can have larger ptes than wordsize, - * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and CONFIG_32BIT=y, - * so READ_ONCE or ACCESS_ONCE cannot guarantee atomic accesses. - * The code below just needs a consistent view for the ifs and - * we later double check anyway with the ptl lock held. 
So here - * a barrier will do. - */ - entry = *fe->pte; - barrier(); - if (!pte_present(entry)) { + if (unlikely(pmd_none(*fe->pmd))) { + /* + * Leave __pte_alloc() until later: because vm_ops->fault may + * want to allocate huge page, and if we expose page table + * for an instant, it will be difficult to retract from + * concurrent faults and from rmap lookups. + */ + fe->pte = NULL; + } else { + /* See comment in pte_alloc_one_map() */ + if (pmd_trans_unstable(fe->pmd) || pmd_devmap(*fe->pmd)) + return 0; + /* + * A regular pmd is established and it can't morph into a huge + * pmd from under us anymore at this point because we hold the + * mmap_sem read mode and khugepaged takes it in write mode. + * So now it's safe to run pte_offset_map(). + */ + fe->pte = pte_offset_map(fe->pmd, fe->address); + + entry = *fe->pte; + + /* + * some architectures can have larger ptes than wordsize, + * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and + * CONFIG_32BIT=y, so READ_ONCE or ACCESS_ONCE cannot guarantee + * atomic accesses. The code below just needs a consistent + * view for the ifs and we later double check anyway with the + * ptl lock held. So here a barrier will do. + */ + barrier(); if (pte_none(entry)) { - if (vma_is_anonymous(fe->vma)) - return do_anonymous_page(fe); - else - return do_fault(fe, entry); + pte_unmap(fe->pte); + fe->pte = NULL; } - return do_swap_page(fe, entry); } + if (!fe->pte) { + if (vma_is_anonymous(fe->vma)) + return do_anonymous_page(fe); + else + return do_fault(fe); + } + + if (!pte_present(entry)) + return do_swap_page(fe, entry); + if (pte_protnone(entry)) return do_numa_page(fe, entry); @@ -3439,34 +3525,6 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, } } - /* - * Use pte_alloc() instead of pte_alloc_map, because we can't - * run pte_offset_map on the pmd, if an huge pmd could - * materialize from under us from a different thread. - */ - if (unlikely(pte_alloc(fe.vma->vm_mm, fe.pmd, fe.address))) - return VM_FAULT_OOM; - /* - * If a huge pmd materialized under us just retry later. Use - * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd - * didn't become pmd_trans_huge under us and then back to pmd_none, as - * a result of MADV_DONTNEED running immediately after a huge pmd fault - * in a different thread of this mm, in turn leading to a misleading - * pmd_trans_huge() retval. All we have to ensure is that it is a - * regular pmd that we can walk with pte_offset_map() and we can do that - * through an atomic read in C, which is what pmd_trans_unstable() - * provides. - */ - if (unlikely(pmd_trans_unstable(fe.pmd) || pmd_devmap(*fe.pmd))) - return 0; - /* - * A regular pmd is established and it can't morph into a huge pmd - * from under us anymore at this point because we hold the mmap_sem - * read mode and khugepaged takes it in write mode. So now it's - * safe to run pte_offset_map(). - */ - fe.pte = pte_offset_map(fe.pmd, fe.address); - return handle_pte_fault(&fe); } -- cgit v1.2.3-70-g09d2 From dd78fedde4b99b322f2dc849d467d365a82e23ca Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:26 -0700 Subject: rmap: support file thp Naive approach: on mapping/unmapping the page as compound we update ->_mapcount on each 4k page. That's not efficient, but it's not obvious how we can optimize this. We can look into optimization later. 
PG_double_map optimization doesn't work for file pages since lifecycle of file pages is different comparing to anon pages: file page can be mapped again at any time. Link: http://lkml.kernel.org/r/1466021202-61880-11-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rmap.h | 2 +- mm/huge_memory.c | 10 +++++++--- mm/memory.c | 4 ++-- mm/migrate.c | 2 +- mm/rmap.c | 48 +++++++++++++++++++++++++++++++++++------------- mm/util.c | 6 ++++++ 6 files changed, 52 insertions(+), 20 deletions(-) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 2b0fad83683f..b46bb5620a76 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -165,7 +165,7 @@ void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, int); void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, bool); -void page_add_file_rmap(struct page *); +void page_add_file_rmap(struct page *, bool); void page_remove_rmap(struct page *, bool); void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index bc5abcbe376e..90f5dd22b1c8 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3301,18 +3301,22 @@ static void __split_huge_page(struct page *page, struct list_head *list) int total_mapcount(struct page *page) { - int i, ret; + int i, compound, ret; VM_BUG_ON_PAGE(PageTail(page), page); if (likely(!PageCompound(page))) return atomic_read(&page->_mapcount) + 1; - ret = compound_mapcount(page); + compound = compound_mapcount(page); if (PageHuge(page)) - return ret; + return compound; + ret = compound; for (i = 0; i < HPAGE_PMD_NR; i++) ret += atomic_read(&page[i]._mapcount) + 1; + /* File pages has compound_mapcount included in _mapcount */ + if (!PageAnon(page)) + return ret - compound * HPAGE_PMD_NR; if (PageDoubleMap(page)) ret -= HPAGE_PMD_NR; return ret; diff --git a/mm/memory.c b/mm/memory.c index 1991105bf67c..30cda24ff205 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1494,7 +1494,7 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr, /* Ok, finally just insert the thing.. */ get_page(page); inc_mm_counter_fast(mm, mm_counter_file(page)); - page_add_file_rmap(page); + page_add_file_rmap(page, false); set_pte_at(mm, addr, pte, mk_pte(page, prot)); retval = 0; @@ -2962,7 +2962,7 @@ int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, lru_cache_add_active_or_unevictable(page, vma); } else { inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page)); - page_add_file_rmap(page); + page_add_file_rmap(page, false); } set_pte_at(vma->vm_mm, fe->address, fe->pte, entry); diff --git a/mm/migrate.c b/mm/migrate.c index f278005f609c..e85a72c0d6f0 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -259,7 +259,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, } else if (PageAnon(new)) page_add_anon_rmap(new, vma, addr, false); else - page_add_file_rmap(new); + page_add_file_rmap(new, false); if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new)) mlock_vma_page(new); diff --git a/mm/rmap.c b/mm/rmap.c index 701b93fea2a0..2b336c4277da 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1270,18 +1270,34 @@ void page_add_new_anon_rmap(struct page *page, * * The caller needs to hold the pte lock. 
*/ -void page_add_file_rmap(struct page *page) +void page_add_file_rmap(struct page *page, bool compound) { + int i, nr = 1; + + VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); lock_page_memcg(page); - if (atomic_inc_and_test(&page->_mapcount)) { - __inc_zone_page_state(page, NR_FILE_MAPPED); - mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); + if (compound && PageTransHuge(page)) { + for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) { + if (atomic_inc_and_test(&page[i]._mapcount)) + nr++; + } + if (!atomic_inc_and_test(compound_mapcount_ptr(page))) + goto out; + } else { + if (!atomic_inc_and_test(&page->_mapcount)) + goto out; } + __mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, nr); + mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); +out: unlock_page_memcg(page); } -static void page_remove_file_rmap(struct page *page) +static void page_remove_file_rmap(struct page *page, bool compound) { + int i, nr = 1; + + VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); lock_page_memcg(page); /* Hugepages are not counted in NR_FILE_MAPPED for now. */ @@ -1292,15 +1308,24 @@ static void page_remove_file_rmap(struct page *page) } /* page still mapped by someone else? */ - if (!atomic_add_negative(-1, &page->_mapcount)) - goto out; + if (compound && PageTransHuge(page)) { + for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) { + if (atomic_add_negative(-1, &page[i]._mapcount)) + nr++; + } + if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) + goto out; + } else { + if (!atomic_add_negative(-1, &page->_mapcount)) + goto out; + } /* * We use the irq-unsafe __{inc|mod}_zone_page_stat because * these counters are not modified in interrupt context, and * pte lock(a spinlock) is held, which implies preemption disabled. */ - __dec_zone_page_state(page, NR_FILE_MAPPED); + __mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, -nr); mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); if (unlikely(PageMlocked(page))) @@ -1356,11 +1381,8 @@ static void page_remove_anon_compound_rmap(struct page *page) */ void page_remove_rmap(struct page *page, bool compound) { - if (!PageAnon(page)) { - VM_BUG_ON_PAGE(compound && !PageHuge(page), page); - page_remove_file_rmap(page); - return; - } + if (!PageAnon(page)) + return page_remove_file_rmap(page, compound); if (compound) return page_remove_anon_compound_rmap(page); diff --git a/mm/util.c b/mm/util.c index b756ee36f7f0..8d010ef2ce1c 100644 --- a/mm/util.c +++ b/mm/util.c @@ -412,6 +412,12 @@ int __page_mapcount(struct page *page) int ret; ret = atomic_read(&page->_mapcount) + 1; + /* + * For file THP page->_mapcount contains total number of mapping + * of the page: no need to look into compound_mapcount. + */ + if (!PageAnon(page) && !PageHuge(page)) + return ret; page = compound_head(page); ret += atomic_read(compound_mapcount_ptr(page)) + 1; if (PageDoubleMap(page)) -- cgit v1.2.3-70-g09d2 From 1010245964415bb7403463115bab2cd26244b445 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:29 -0700 Subject: mm: introduce do_set_pmd() With postponed page table allocation we have chance to setup huge pages. do_set_pte() calls do_set_pmd() if following criteria met: - page is compound; - pmd entry in pmd_none(); - vma has suitable size and alignment; Link: http://lkml.kernel.org/r/1466021202-61880-12-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. 
Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 2 ++ mm/huge_memory.c | 5 ---- mm/memory.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++- mm/migrate.c | 3 +-- 4 files changed, 74 insertions(+), 8 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 9bed9249156f..254aac4c3963 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -143,6 +143,8 @@ static inline bool is_huge_zero_pmd(pmd_t pmd) struct page *get_huge_zero_page(void); void put_huge_zero_page(void); +#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot)) + #else /* CONFIG_TRANSPARENT_HUGEPAGE */ #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 90f5dd22b1c8..5bc058ad12c2 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -796,11 +796,6 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) return pmd; } -static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot) -{ - return pmd_mkhuge(mk_pmd(page, prot)); -} - static inline struct list_head *page_deferred_list(struct page *page) { /* diff --git a/mm/memory.c b/mm/memory.c index 30cda24ff205..650622a3a0a1 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2920,6 +2920,66 @@ map_pte: return 0; } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + +#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1) +static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, + unsigned long haddr) +{ + if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) != + (vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK)) + return false; + if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) + return false; + return true; +} + +static int do_set_pmd(struct fault_env *fe, struct page *page) +{ + struct vm_area_struct *vma = fe->vma; + bool write = fe->flags & FAULT_FLAG_WRITE; + unsigned long haddr = fe->address & HPAGE_PMD_MASK; + pmd_t entry; + int i, ret; + + if (!transhuge_vma_suitable(vma, haddr)) + return VM_FAULT_FALLBACK; + + ret = VM_FAULT_FALLBACK; + page = compound_head(page); + + fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); + if (unlikely(!pmd_none(*fe->pmd))) + goto out; + + for (i = 0; i < HPAGE_PMD_NR; i++) + flush_icache_page(vma, page + i); + + entry = mk_huge_pmd(page, vma->vm_page_prot); + if (write) + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); + + add_mm_counter(vma->vm_mm, MM_FILEPAGES, HPAGE_PMD_NR); + page_add_file_rmap(page, true); + + set_pmd_at(vma->vm_mm, haddr, fe->pmd, entry); + + update_mmu_cache_pmd(vma, haddr, fe->pmd); + + /* fault is handled */ + ret = 0; +out: + spin_unlock(fe->ptl); + return ret; +} +#else +static int do_set_pmd(struct fault_env *fe, struct page *page) +{ + BUILD_BUG(); + return 0; +} +#endif + /** * alloc_set_pte - setup new PTE entry for given page and add reverse page * mapping. If needed, the fucntion allocates page table or use pre-allocated. @@ -2939,9 +2999,19 @@ int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, struct vm_area_struct *vma = fe->vma; bool write = fe->flags & FAULT_FLAG_WRITE; pte_t entry; + int ret; + + if (pmd_none(*fe->pmd) && PageTransCompound(page)) { + /* THP on COW? 
*/ + VM_BUG_ON_PAGE(memcg, page); + + ret = do_set_pmd(fe, page); + if (ret != VM_FAULT_FALLBACK) + return ret; + } if (!fe->pte) { - int ret = pte_alloc_one_map(fe); + ret = pte_alloc_one_map(fe); if (ret) return ret; } diff --git a/mm/migrate.c b/mm/migrate.c index e85a72c0d6f0..2232f6923cc7 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1986,8 +1986,7 @@ fail_putback: } orig_entry = *pmd; - entry = mk_pmd(new_page, vma->vm_page_prot); - entry = pmd_mkhuge(entry); + entry = mk_huge_pmd(new_page, vma->vm_page_prot); entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); /* -- cgit v1.2.3-70-g09d2 From 95ecedcd6abbb05d8177331e2fa697888dcd634b Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:31 -0700 Subject: thp, vmstats: add counters for huge file pages THP_FILE_ALLOC: how many times huge page was allocated and put page cache. THP_FILE_MAPPED: how many times file huge page was mapped. Link: http://lkml.kernel.org/r/1466021202-61880-13-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/vm_event_item.h | 7 +++++++ mm/memory.c | 1 + mm/vmstat.c | 2 ++ 3 files changed, 10 insertions(+) diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index ec084321fe09..42604173f122 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -70,6 +70,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, THP_FAULT_FALLBACK, THP_COLLAPSE_ALLOC, THP_COLLAPSE_ALLOC_FAILED, + THP_FILE_ALLOC, + THP_FILE_MAPPED, THP_SPLIT_PAGE, THP_SPLIT_PAGE_FAILED, THP_DEFERRED_SPLIT_PAGE, @@ -100,4 +102,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, NR_VM_EVENT_ITEMS }; +#ifndef CONFIG_TRANSPARENT_HUGEPAGE +#define THP_FILE_ALLOC ({ BUILD_BUG(); 0; }) +#define THP_FILE_MAPPED ({ BUILD_BUG(); 0; }) +#endif + #endif /* VM_EVENT_ITEM_H_INCLUDED */ diff --git a/mm/memory.c b/mm/memory.c index 650622a3a0a1..10a424eca8a4 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2968,6 +2968,7 @@ static int do_set_pmd(struct fault_env *fe, struct page *page) /* fault is handled */ ret = 0; + count_vm_event(THP_FILE_MAPPED); out: spin_unlock(fe->ptl); return ret; diff --git a/mm/vmstat.c b/mm/vmstat.c index 2a0f26bdae39..cff2f4ec9cce 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -817,6 +817,8 @@ const char * const vmstat_text[] = { "thp_fault_fallback", "thp_collapse_alloc", "thp_collapse_alloc_failed", + "thp_file_alloc", + "thp_file_mapped", "thp_split_page", "thp_split_page_failed", "thp_deferred_split_page", -- cgit v1.2.3-70-g09d2 From b5072380eb619786990cd9eab3ade05d09ccd89e Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:34 -0700 Subject: thp: support file pages in zap_huge_pmd() split_huge_pmd() for file mappings (and DAX too) is implemented by just clearing pmd entry as we can re-fill this area from page cache on pte level later. This means we don't need deposit page tables when file THP is mapped. Therefore we shouldn't try to withdraw a page table on zap_huge_pmd() file THP PMD. Link: http://lkml.kernel.org/r/1466021202-61880-14-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. 
Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 5bc058ad12c2..d8ccd245947d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1689,10 +1689,16 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, struct page *page = pmd_page(orig_pmd); page_remove_rmap(page, true); VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); - add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); VM_BUG_ON_PAGE(!PageHead(page), page); - pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); - atomic_long_dec(&tlb->mm->nr_ptes); + if (PageAnon(page)) { + pgtable_t pgtable; + pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); + pte_free(tlb->mm, pgtable); + atomic_long_dec(&tlb->mm->nr_ptes); + add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); + } else { + add_mm_counter(tlb->mm, MM_FILEPAGES, -HPAGE_PMD_NR); + } spin_unlock(ptl); tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE); } -- cgit v1.2.3-70-g09d2 From d21b9e57c74ce82ac459e2ec8ce667db9b9da8b0 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:37 -0700 Subject: thp: handle file pages in split_huge_pmd() Splitting THP PMD is simple: just unmap it as in DAX case. This way we can avoid memory overhead on page table allocation to deposit. It's probably a good idea to try to allocation page table with GFP_ATOMIC in __split_huge_pmd_locked() to avoid refaulting the area, but clearing pmd should be good enough for now. Unlike DAX, we also remove the page from rmap and drop reference. pmd_young() is transfered to PageReferenced(). Link: http://lkml.kernel.org/r/1466021202-61880-15-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d8ccd245947d..e6725c5c5a62 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2987,10 +2987,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, count_vm_event(THP_SPLIT_PMD); - if (vma_is_dax(vma)) { - pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); + if (!vma_is_anonymous(vma)) { + _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); if (is_huge_zero_pmd(_pmd)) put_huge_zero_page(); + if (vma_is_dax(vma)) + return; + page = pmd_page(_pmd); + if (!PageReferenced(page) && pmd_young(_pmd)) + SetPageReferenced(page); + page_remove_rmap(page, true); + put_page(page); + add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR); return; } else if (is_huge_zero_pmd(*pmd)) { return __split_huge_zero_page_pmd(vma, haddr, pmd); -- cgit v1.2.3-70-g09d2 From af9e4d5f2de2eabdc7145e077ba48b2a638465c6 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:40 -0700 Subject: thp: handle file COW faults File COW for THP is handled on pte level: just split the pmd. It's not clear how benefitial would be allocation of huge pages on COW faults. And it would require some code to make them work. I think at some point we can consider teaching khugepaged to collapse pages in COW mappings, but allocating huge on fault is probably overkill. Link: http://lkml.kernel.org/r/1466021202-61880-16-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. 
Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/mm/memory.c b/mm/memory.c index 10a424eca8a4..8f4254798130 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3446,6 +3446,11 @@ static int wp_huge_pmd(struct fault_env *fe, pmd_t orig_pmd) if (fe->vma->vm_ops->pmd_fault) return fe->vma->vm_ops->pmd_fault(fe->vma, fe->address, fe->pmd, fe->flags); + + /* COW handled on pte level: split pmd */ + VM_BUG_ON_VMA(fe->vma->vm_flags & VM_SHARED, fe->vma); + split_huge_pmd(fe->vma, fe->pmd, fe->address); + return VM_FAULT_FALLBACK; } -- cgit v1.2.3-70-g09d2 From 628d47ce98d50860d4fc1eef250126dd50622a89 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:42 -0700 Subject: thp: skip file huge pmd on copy_huge_pmd() copy_page_range() has a check for "Don't copy ptes where a page fault will fill them correctly." It works on VMA level. We still copy all page table entries from private mappings, even if they map page cache. We can simplify copy_huge_pmd() a bit by skipping file PMDs. We don't map file private pages with PMDs, so they only can map page cache. It's safe to skip them as they can be re-faulted later. Link: http://lkml.kernel.org/r/1466021202-61880-17-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e6725c5c5a62..c11cfce4d9d9 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1098,14 +1098,15 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, struct page *src_page; pmd_t pmd; pgtable_t pgtable = NULL; - int ret; + int ret = -ENOMEM; - if (!vma_is_dax(vma)) { - ret = -ENOMEM; - pgtable = pte_alloc_one(dst_mm, addr); - if (unlikely(!pgtable)) - goto out; - } + /* Skip if can be re-fill on fault */ + if (!vma_is_anonymous(vma)) + return 0; + + pgtable = pte_alloc_one(dst_mm, addr); + if (unlikely(!pgtable)) + goto out; dst_ptl = pmd_lock(dst_mm, dst_pmd); src_ptl = pmd_lockptr(src_mm, src_pmd); @@ -1113,7 +1114,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, ret = -EAGAIN; pmd = *src_pmd; - if (unlikely(!pmd_trans_huge(pmd) && !pmd_devmap(pmd))) { + if (unlikely(!pmd_trans_huge(pmd))) { pte_free(dst_mm, pgtable); goto out_unlock; } @@ -1136,16 +1137,13 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, goto out_unlock; } - if (!vma_is_dax(vma)) { - /* thp accounting separate from pmd_devmap accounting */ - src_page = pmd_page(pmd); - VM_BUG_ON_PAGE(!PageHead(src_page), src_page); - get_page(src_page); - page_dup_rmap(src_page, true); - add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); - atomic_long_inc(&dst_mm->nr_ptes); - pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); - } + src_page = pmd_page(pmd); + VM_BUG_ON_PAGE(!PageHead(src_page), src_page); + get_page(src_page); + page_dup_rmap(src_page, true); + add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); + atomic_long_inc(&dst_mm->nr_ptes); + pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); pmdp_set_wrprotect(src_mm, addr, src_pmd); pmd = pmd_mkold(pmd_wrprotect(pmd)); -- cgit v1.2.3-70-g09d2 From b237aded41cd68f378650209e8a10c04a25da258 Mon Sep 17 00:00:00 2001 From: "Kirill A. 
Shutemov" Date: Tue, 26 Jul 2016 15:25:45 -0700 Subject: thp: prepare change_huge_pmd() for file thp change_huge_pmd() has assert which is not relvant for file page. For shared mapping it's perfectly fine to have page table entry writable, without explicit mkwrite. Link: http://lkml.kernel.org/r/1466021202-61880-18-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c11cfce4d9d9..3a20f11248a1 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1788,7 +1788,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, entry = pmd_mkwrite(entry); ret = HPAGE_PMD_NR; set_pmd_at(mm, addr, pmd, entry); - BUG_ON(!preserve_write && pmd_write(entry)); + BUG_ON(vma_is_anonymous(vma) && !preserve_write && + pmd_write(entry)); } spin_unlock(ptl); } -- cgit v1.2.3-70-g09d2 From 37f9f5595c26d3cb644ca2fab83dc4c4db119f9f Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:48 -0700 Subject: thp: run vma_adjust_trans_huge() outside i_mmap_rwsem vma_addjust_trans_huge() splits pmd if it's crossing VMA boundary. During split we munlock the huge page which requires rmap walk. rmap wants to take the lock on its own. Let's move vma_adjust_trans_huge() outside i_mmap_rwsem to fix this. Link: http://lkml.kernel.org/r/1466021202-61880-19-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/mmap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 234edffec1d0..31f9b2220b72 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -675,6 +675,8 @@ again: remove_next = 1 + (end > next->vm_end); } } + vma_adjust_trans_huge(vma, start, end, adjust_next); + if (file) { mapping = file->f_mapping; root = &mapping->i_mmap; @@ -695,8 +697,6 @@ again: remove_next = 1 + (end > next->vm_end); } } - vma_adjust_trans_huge(vma, start, end, adjust_next); - anon_vma = vma->anon_vma; if (!anon_vma && adjust_next) anon_vma = next->anon_vma; -- cgit v1.2.3-70-g09d2 From baa355fd331424526e742d41d9b90d5f9d10f716 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:51 -0700 Subject: thp: file pages support for split_huge_page() Basic scheme is the same as for anon THP. Main differences: - File pages are on radix-tree, so we have head->_count offset by HPAGE_PMD_NR. The count got distributed to small pages during split. - mapping->tree_lock prevents non-lockless access to pages under split over radix-tree; - Lockless access is prevented by setting the head->_count to 0 during split; - After split, some pages can be beyond i_size. We drop them from radix-tree. - We don't setup migration entries. Just unmap pages. It helps handling cases when i_size is in the middle of the page: no need handle unmap pages beyond i_size manually. Link: http://lkml.kernel.org/r/1466021202-61880-20-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. 
Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/gup.c | 2 + mm/huge_memory.c | 160 +++++++++++++++++++++++++++++++++++++++---------------- 2 files changed, 117 insertions(+), 45 deletions(-) diff --git a/mm/gup.c b/mm/gup.c index 9671e29f8ffd..547741f5f7a7 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -288,6 +288,8 @@ struct page *follow_page_mask(struct vm_area_struct *vma, ret = split_huge_page(page); unlock_page(page); put_page(page); + if (pmd_none(*pmd)) + return no_page_table(vma, flags); } return ret ? ERR_PTR(ret) : diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 3a20f11248a1..486077742650 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -3187,12 +3188,15 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, static void freeze_page(struct page *page) { - enum ttu_flags ttu_flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | - TTU_IGNORE_ACCESS | TTU_RMAP_LOCKED; + enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS | + TTU_RMAP_LOCKED; int i, ret; VM_BUG_ON_PAGE(!PageHead(page), page); + if (PageAnon(page)) + ttu_flags |= TTU_MIGRATION; + /* We only need TTU_SPLIT_HUGE_PMD once */ ret = try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD); for (i = 1; !ret && i < HPAGE_PMD_NR; i++) { @@ -3202,7 +3206,7 @@ static void freeze_page(struct page *page) ret = try_to_unmap(page + i, ttu_flags); } - VM_BUG_ON(ret); + VM_BUG_ON_PAGE(ret, page + i - 1); } static void unfreeze_page(struct page *page) @@ -3224,15 +3228,20 @@ static void __split_huge_page_tail(struct page *head, int tail, /* * tail_page->_refcount is zero and not changing from under us. But * get_page_unless_zero() may be running from under us on the - * tail_page. If we used atomic_set() below instead of atomic_inc(), we - * would then run atomic_set() concurrently with + * tail_page. If we used atomic_set() below instead of atomic_inc() or + * atomic_add(), we would then run atomic_set() concurrently with * get_page_unless_zero(), and atomic_set() is implemented in C not * using locked ops. spin_unlock on x86 sometime uses locked ops * because of PPro errata 66, 92, so unless somebody can guarantee * atomic_set() here would be safe on all archs (and not only on x86), - * it's safer to use atomic_inc(). + * it's safer to use atomic_inc()/atomic_add(). 
*/ - page_ref_inc(page_tail); + if (PageAnon(head)) { + page_ref_inc(page_tail); + } else { + /* Additional pin to radix tree */ + page_ref_add(page_tail, 2); + } page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; page_tail->flags |= (head->flags & @@ -3268,25 +3277,44 @@ static void __split_huge_page_tail(struct page *head, int tail, lru_add_page_tail(head, page_tail, lruvec, list); } -static void __split_huge_page(struct page *page, struct list_head *list) +static void __split_huge_page(struct page *page, struct list_head *list, + unsigned long flags) { struct page *head = compound_head(page); struct zone *zone = page_zone(head); struct lruvec *lruvec; + pgoff_t end = -1; int i; - /* prevent PageLRU to go away from under us, and freeze lru stats */ - spin_lock_irq(&zone->lru_lock); lruvec = mem_cgroup_page_lruvec(head, zone); /* complete memcg works before add pages to LRU */ mem_cgroup_split_huge_fixup(head); - for (i = HPAGE_PMD_NR - 1; i >= 1; i--) + if (!PageAnon(page)) + end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE); + + for (i = HPAGE_PMD_NR - 1; i >= 1; i--) { __split_huge_page_tail(head, i, lruvec, list); + /* Some pages can be beyond i_size: drop them from page cache */ + if (head[i].index >= end) { + __ClearPageDirty(head + i); + __delete_from_page_cache(head + i, NULL); + put_page(head + i); + } + } ClearPageCompound(head); - spin_unlock_irq(&zone->lru_lock); + /* See comment in __split_huge_page_tail() */ + if (PageAnon(head)) { + page_ref_inc(head); + } else { + /* Additional pin to radix tree */ + page_ref_add(head, 2); + spin_unlock(&head->mapping->tree_lock); + } + + spin_unlock_irqrestore(&page_zone(head)->lru_lock, flags); unfreeze_page(head); @@ -3411,36 +3439,54 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) { struct page *head = compound_head(page); struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); - struct anon_vma *anon_vma; - int count, mapcount, ret; + struct anon_vma *anon_vma = NULL; + struct address_space *mapping = NULL; + int count, mapcount, extra_pins, ret; bool mlocked; unsigned long flags; VM_BUG_ON_PAGE(is_huge_zero_page(page), page); - VM_BUG_ON_PAGE(!PageAnon(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(!PageSwapBacked(page), page); VM_BUG_ON_PAGE(!PageCompound(page), page); - /* - * The caller does not necessarily hold an mmap_sem that would prevent - * the anon_vma disappearing so we first we take a reference to it - * and then lock the anon_vma for write. This is similar to - * page_lock_anon_vma_read except the write lock is taken to serialise - * against parallel split or collapse operations. - */ - anon_vma = page_get_anon_vma(head); - if (!anon_vma) { - ret = -EBUSY; - goto out; + if (PageAnon(head)) { + /* + * The caller does not necessarily hold an mmap_sem that would + * prevent the anon_vma disappearing so we first we take a + * reference to it and then lock the anon_vma for write. This + * is similar to page_lock_anon_vma_read except the write lock + * is taken to serialise against parallel split or collapse + * operations. + */ + anon_vma = page_get_anon_vma(head); + if (!anon_vma) { + ret = -EBUSY; + goto out; + } + extra_pins = 0; + mapping = NULL; + anon_vma_lock_write(anon_vma); + } else { + mapping = head->mapping; + + /* Truncated ? 
*/ + if (!mapping) { + ret = -EBUSY; + goto out; + } + + /* Addidional pins from radix tree */ + extra_pins = HPAGE_PMD_NR; + anon_vma = NULL; + i_mmap_lock_read(mapping); } - anon_vma_lock_write(anon_vma); /* * Racy check if we can split the page, before freeze_page() will * split PMDs */ - if (total_mapcount(head) != page_count(head) - 1) { + if (total_mapcount(head) != page_count(head) - extra_pins - 1) { ret = -EBUSY; goto out_unlock; } @@ -3453,35 +3499,60 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) if (mlocked) lru_add_drain(); + /* prevent PageLRU to go away from under us, and freeze lru stats */ + spin_lock_irqsave(&page_zone(head)->lru_lock, flags); + + if (mapping) { + void **pslot; + + spin_lock(&mapping->tree_lock); + pslot = radix_tree_lookup_slot(&mapping->page_tree, + page_index(head)); + /* + * Check if the head page is present in radix tree. + * We assume all tail are present too, if head is there. + */ + if (radix_tree_deref_slot_protected(pslot, + &mapping->tree_lock) != head) + goto fail; + } + /* Prevent deferred_split_scan() touching ->_refcount */ - spin_lock_irqsave(&pgdata->split_queue_lock, flags); + spin_lock(&pgdata->split_queue_lock); count = page_count(head); mapcount = total_mapcount(head); - if (!mapcount && count == 1) { + if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) { if (!list_empty(page_deferred_list(head))) { pgdata->split_queue_len--; list_del(page_deferred_list(head)); } - spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); - __split_huge_page(page, list); + spin_unlock(&pgdata->split_queue_lock); + __split_huge_page(page, list, flags); ret = 0; - } else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { - spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); - pr_alert("total_mapcount: %u, page_count(): %u\n", - mapcount, count); - if (PageTail(page)) - dump_page(head, NULL); - dump_page(page, "total_mapcount(head) > 0"); - BUG(); } else { - spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); + if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { + pr_alert("total_mapcount: %u, page_count(): %u\n", + mapcount, count); + if (PageTail(page)) + dump_page(head, NULL); + dump_page(page, "total_mapcount(head) > 0"); + BUG(); + } + spin_unlock(&pgdata->split_queue_lock); +fail: if (mapping) + spin_unlock(&mapping->tree_lock); + spin_unlock_irqrestore(&page_zone(head)->lru_lock, flags); unfreeze_page(head); ret = -EBUSY; } out_unlock: - anon_vma_unlock_write(anon_vma); - put_anon_vma(anon_vma); + if (anon_vma) { + anon_vma_unlock_write(anon_vma); + put_anon_vma(anon_vma); + } + if (mapping) + i_mmap_unlock_read(mapping); out: count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); return ret; @@ -3604,8 +3675,7 @@ static int split_huge_pages_set(void *data, u64 val) if (zone != page_zone(page)) goto next; - if (!PageHead(page) || !PageAnon(page) || - PageHuge(page)) + if (!PageHead(page) || PageHuge(page) || !PageLRU(page)) goto next; total++; -- cgit v1.2.3-70-g09d2 From 9a73f61bdb8acdc01bbaf72a3fe0a8854f2463ad Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:53 -0700 Subject: thp, mlock: do not mlock PTE-mapped file huge pages As with anon THP, we only mlock file huge pages if we can prove that the page is not mapped with PTE. This way we can avoid mlock leak into non-mlocked vma on split. We rely on PageDoubleMap() under lock_page() to check if the the page may be PTE mapped. PG_double_map is set by page_add_file_rmap() when the page mapped with PTEs. 
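A minimal sketch of that rule, assuming mm-internal helpers and a made-up function name; the real check sits in follow_trans_huge_pmd() in the diff below.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>

/*
 * Sketch only: mlock a file THP found in a VM_LOCKED vma only when the
 * page lock proves it is not also PTE-mapped (PG_double_map clear).
 */
static void mlock_file_thp_sketch(struct vm_area_struct *vma,
				  struct page *page)
{
	if (!(vma->vm_flags & VM_LOCKED))
		return;
	if (!trylock_page(page))
		return;		/* do not block; just skip mlocking */
	if (page->mapping && !PageDoubleMap(page))
		mlock_vma_page(page);	/* declared in mm/internal.h */
	unlock_page(page);
}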
Link: http://lkml.kernel.org/r/1466021202-61880-21-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 13 ++++++++++++- mm/huge_memory.c | 27 ++++++++++++++++++++------- mm/mmap.c | 6 ++++++ mm/page_alloc.c | 2 ++ mm/rmap.c | 16 ++++++++++++++-- 5 files changed, 54 insertions(+), 10 deletions(-) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 7c8e82ac2eb7..8cf09639185a 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -581,6 +581,17 @@ static inline int PageDoubleMap(struct page *page) return PageHead(page) && test_bit(PG_double_map, &page[1].flags); } +static inline void SetPageDoubleMap(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHead(page), page); + set_bit(PG_double_map, &page[1].flags); +} + +static inline void ClearPageDoubleMap(struct page *page) +{ + VM_BUG_ON_PAGE(!PageHead(page), page); + clear_bit(PG_double_map, &page[1].flags); +} static inline int TestSetPageDoubleMap(struct page *page) { VM_BUG_ON_PAGE(!PageHead(page), page); @@ -598,7 +609,7 @@ TESTPAGEFLAG_FALSE(TransHuge) TESTPAGEFLAG_FALSE(TransCompound) TESTPAGEFLAG_FALSE(TransCompoundMap) TESTPAGEFLAG_FALSE(TransTail) -TESTPAGEFLAG_FALSE(DoubleMap) +PAGEFLAG_FALSE(DoubleMap) TESTSETFLAG_FALSE(DoubleMap) TESTCLEARFLAG_FALSE(DoubleMap) #endif diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 486077742650..3b74fea6b5db 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1437,6 +1437,8 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, * We don't mlock() pte-mapped THPs. This way we can avoid * leaking mlocked pages into non-VM_LOCKED VMAs. * + * For anon THP: + * * In most cases the pmd is the only mapping of the page as we * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for * writable private mappings in populate_vma_page_range(). @@ -1444,15 +1446,26 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, * The only scenario when we have the page shared here is if we * mlocking read-only mapping shared over fork(). We skip * mlocking such pages. + * + * For file THP: + * + * We can expect PageDoubleMap() to be stable under page lock: + * for file pages we set it in page_add_file_rmap(), which + * requires page to be locked. */ - if (compound_mapcount(page) == 1 && !PageDoubleMap(page) && - page->mapping && trylock_page(page)) { - lru_add_drain(); - if (page->mapping) - mlock_vma_page(page); - unlock_page(page); - } + + if (PageAnon(page) && compound_mapcount(page) != 1) + goto skip_mlock; + if (PageDoubleMap(page) || !page->mapping) + goto skip_mlock; + if (!trylock_page(page)) + goto skip_mlock; + lru_add_drain(); + if (page->mapping && !PageDoubleMap(page)) + mlock_vma_page(page); + unlock_page(page); } +skip_mlock: page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; VM_BUG_ON_PAGE(!PageCompound(page), page); if (flags & FOLL_GET) diff --git a/mm/mmap.c b/mm/mmap.c index 31f9b2220b72..a41872c8f2af 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2591,6 +2591,12 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, /* drop PG_Mlocked flag for over-mapped range */ for (tmp = vma; tmp->vm_start >= start + size; tmp = tmp->vm_next) { + /* + * Split pmd and munlock page on the border + * of the range. 
+ */ + vma_adjust_trans_huge(tmp, start, start + size, 0); + munlock_vma_pages_range(tmp, max(tmp->vm_start, start), min(tmp->vm_end, start + size)); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7023a31edc5c..847281eb74da 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1007,6 +1007,8 @@ static __always_inline bool free_pages_prepare(struct page *page, VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); + if (compound) + ClearPageDoubleMap(page); for (i = 1; i < (1 << order); i++) { if (compound) bad += free_tail_pages_check(page, page + i); diff --git a/mm/rmap.c b/mm/rmap.c index 2b336c4277da..9d643b7a99ce 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1284,6 +1284,12 @@ void page_add_file_rmap(struct page *page, bool compound) if (!atomic_inc_and_test(compound_mapcount_ptr(page))) goto out; } else { + if (PageTransCompound(page)) { + VM_BUG_ON_PAGE(!PageLocked(page), page); + SetPageDoubleMap(compound_head(page)); + if (PageMlocked(page)) + clear_page_mlock(compound_head(page)); + } if (!atomic_inc_and_test(&page->_mapcount)) goto out; } @@ -1458,8 +1464,14 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, */ if (!(flags & TTU_IGNORE_MLOCK)) { if (vma->vm_flags & VM_LOCKED) { - /* Holding pte lock, we do *not* need mmap_sem here */ - mlock_vma_page(page); + /* PTE-mapped THP are never mlocked */ + if (!PageTransCompound(page)) { + /* + * Holding pte lock, we do *not* need + * mmap_sem here + */ + mlock_vma_page(page); + } ret = SWAP_MLOCK; goto out_unmap; } -- cgit v1.2.3-70-g09d2 From 7751b2da6be0b59da0838a05153a646df1affbce Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:56 -0700 Subject: vmscan: split file huge pages before paging them out This is preparation of vmscan for file huge pages. We cannot write out huge pages, so we need to split them on the way out. Link: http://lkml.kernel.org/r/1466021202-61880-22-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mm/vmscan.c b/mm/vmscan.c index 93ba33789ac6..21d417ccff69 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1055,8 +1055,14 @@ static unsigned long shrink_page_list(struct list_head *page_list, /* Adding to swap updated mapping */ mapping = page_mapping(page); + } else if (unlikely(PageTransHuge(page))) { + /* Split file THP */ + if (split_huge_page_to_list(page, page_list)) + goto keep_locked; } + VM_BUG_ON_PAGE(PageTransHuge(page), page); + /* * The page is mapped into the page tables of one or more * processes. Try to unmap it here. -- cgit v1.2.3-70-g09d2 From e2f0a0db95979a4aa951d883248da9d361507abf Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:25:59 -0700 Subject: page-flags: relax policy for PG_mappedtodisk and PG_reclaim These flags are in use for file THP. Link: http://lkml.kernel.org/r/1466021202-61880-23-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. 
Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 8cf09639185a..74e4dda91238 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -295,11 +295,11 @@ PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY) */ TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND) TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND) -PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_COMPOUND) +PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL) /* PG_readahead is only used for reads; PG_reclaim is only for writes */ -PAGEFLAG(Reclaim, reclaim, PF_NO_COMPOUND) - TESTCLEARFLAG(Reclaim, reclaim, PF_NO_COMPOUND) +PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL) + TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL) PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND) TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND) -- cgit v1.2.3-70-g09d2 From c78c66d1ddfdbd2353f3fcfeba0268524537b096 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:26:02 -0700 Subject: radix-tree: implement radix_tree_maybe_preload_order() The new helper is similar to radix_tree_maybe_preload(), but tries to preload number of nodes required to insert (1 << order) continuous naturally-aligned elements. This is required to push huge pages into pagecache. Link: http://lkml.kernel.org/r/1466021202-61880-24-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/radix-tree.h | 1 + lib/radix-tree.c | 84 +++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 80 insertions(+), 5 deletions(-) diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index eca6f626c16e..cbfee507c839 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -291,6 +291,7 @@ unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, unsigned long first_index, unsigned int max_items); int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); +int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *root, unsigned long index, unsigned int tag); diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 8b7d8459bb9d..61b8fb529cef 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -38,6 +38,9 @@ #include /* in_interrupt() */ +/* Number of nodes in fully populated tree of given height */ +static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly; + /* * Radix tree node cache. */ @@ -342,7 +345,7 @@ radix_tree_node_free(struct radix_tree_node *node) * To make use of this facility, the radix tree must be initialised without * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE(). 
*/ -static int __radix_tree_preload(gfp_t gfp_mask) +static int __radix_tree_preload(gfp_t gfp_mask, int nr) { struct radix_tree_preload *rtp; struct radix_tree_node *node; @@ -350,14 +353,14 @@ static int __radix_tree_preload(gfp_t gfp_mask) preempt_disable(); rtp = this_cpu_ptr(&radix_tree_preloads); - while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) { + while (rtp->nr < nr) { preempt_enable(); node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); if (node == NULL) goto out; preempt_disable(); rtp = this_cpu_ptr(&radix_tree_preloads); - if (rtp->nr < RADIX_TREE_PRELOAD_SIZE) { + if (rtp->nr < nr) { node->private_data = rtp->nodes; rtp->nodes = node; rtp->nr++; @@ -383,7 +386,7 @@ int radix_tree_preload(gfp_t gfp_mask) { /* Warn on non-sensical use... */ WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask)); - return __radix_tree_preload(gfp_mask); + return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); } EXPORT_SYMBOL(radix_tree_preload); @@ -395,13 +398,58 @@ EXPORT_SYMBOL(radix_tree_preload); int radix_tree_maybe_preload(gfp_t gfp_mask) { if (gfpflags_allow_blocking(gfp_mask)) - return __radix_tree_preload(gfp_mask); + return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); /* Preloading doesn't help anything with this gfp mask, skip it */ preempt_disable(); return 0; } EXPORT_SYMBOL(radix_tree_maybe_preload); +/* + * The same as function above, but preload number of nodes required to insert + * (1 << order) continuous naturally-aligned elements. + */ +int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) +{ + unsigned long nr_subtrees; + int nr_nodes, subtree_height; + + /* Preloading doesn't help anything with this gfp mask, skip it */ + if (!gfpflags_allow_blocking(gfp_mask)) { + preempt_disable(); + return 0; + } + + /* + * Calculate number and height of fully populated subtrees it takes to + * store (1 << order) elements. + */ + nr_subtrees = 1 << order; + for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE; + subtree_height++) + nr_subtrees >>= RADIX_TREE_MAP_SHIFT; + + /* + * The worst case is zero height tree with a single item at index 0 and + * then inserting items starting at ULONG_MAX - (1 << order). + * + * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to + * 0-index item. + */ + nr_nodes = RADIX_TREE_MAX_PATH; + + /* Plus branch to fully populated subtrees. */ + nr_nodes += RADIX_TREE_MAX_PATH - subtree_height; + + /* Root node is shared. */ + nr_nodes--; + + /* Plus nodes required to build subtrees. 
*/ + nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height]; + + return __radix_tree_preload(gfp_mask, nr_nodes); +} + /* * The maximum index which can be stored in a radix tree */ @@ -1571,6 +1619,31 @@ radix_tree_node_ctor(void *arg) INIT_LIST_HEAD(&node->private_list); } +static __init unsigned long __maxindex(unsigned int height) +{ + unsigned int width = height * RADIX_TREE_MAP_SHIFT; + int shift = RADIX_TREE_INDEX_BITS - width; + + if (shift < 0) + return ~0UL; + if (shift >= BITS_PER_LONG) + return 0UL; + return ~0UL >> shift; +} + +static __init void radix_tree_init_maxnodes(void) +{ + unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1]; + unsigned int i, j; + + for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++) + height_to_maxindex[i] = __maxindex(i); + for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) { + for (j = i; j > 0; j--) + height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1; + } +} + static int radix_tree_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { @@ -1597,5 +1670,6 @@ void __init radix_tree_init(void) sizeof(struct radix_tree_node), 0, SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, radix_tree_node_ctor); + radix_tree_init_maxnodes(); hotcpu_notifier(radix_tree_callback, 0); } -- cgit v1.2.3-70-g09d2 From 83929372f629001568d43069a63376e13bfc497b Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:26:04 -0700 Subject: filemap: prepare find and delete operations for huge pages For now, we would have HPAGE_PMD_NR entries in radix tree for every huge page. That's suboptimal and it will be changed to use Matthew's multi-order entries later. 'add' operation is not changed, because we don't need it to implement hugetmpfs: shmem uses its own implementation. Link: http://lkml.kernel.org/r/1466021202-61880-25-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap.c | 178 ++++++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 122 insertions(+), 56 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index 1efd2994dccf..21508ea25717 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -114,14 +114,14 @@ static void page_cache_tree_delete(struct address_space *mapping, struct page *page, void *shadow) { struct radix_tree_node *node; + int i, nr = PageHuge(page) ? 1 : hpage_nr_pages(page); - VM_BUG_ON(!PageLocked(page)); - - node = radix_tree_replace_clear_tags(&mapping->page_tree, page->index, - shadow); + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(PageTail(page), page); + VM_BUG_ON_PAGE(nr != 1 && shadow, page); if (shadow) { - mapping->nrexceptional++; + mapping->nrexceptional += nr; /* * Make sure the nrexceptional update is committed before * the nrpages update so that final truncate racing @@ -130,31 +130,38 @@ static void page_cache_tree_delete(struct address_space *mapping, */ smp_wmb(); } - mapping->nrpages--; - - if (!node) - return; + mapping->nrpages -= nr; - workingset_node_pages_dec(node); - if (shadow) - workingset_node_shadows_inc(node); - else - if (__radix_tree_delete_node(&mapping->page_tree, node)) + for (i = 0; i < nr; i++) { + node = radix_tree_replace_clear_tags(&mapping->page_tree, + page->index + i, shadow); + if (!node) { + VM_BUG_ON_PAGE(nr != 1, page); return; + } - /* - * Track node that only contains shadow entries. DAX mappings contain - * no shadow entries and may contain other exceptional entries so skip - * those. 
- * - * Avoid acquiring the list_lru lock if already tracked. The - * list_empty() test is safe as node->private_list is - * protected by mapping->tree_lock. - */ - if (!dax_mapping(mapping) && !workingset_node_pages(node) && - list_empty(&node->private_list)) { - node->private_data = mapping; - list_lru_add(&workingset_shadow_nodes, &node->private_list); + workingset_node_pages_dec(node); + if (shadow) + workingset_node_shadows_inc(node); + else + if (__radix_tree_delete_node(&mapping->page_tree, node)) + continue; + + /* + * Track node that only contains shadow entries. DAX mappings + * contain no shadow entries and may contain other exceptional + * entries so skip those. + * + * Avoid acquiring the list_lru lock if already tracked. + * The list_empty() test is safe as node->private_list is + * protected by mapping->tree_lock. + */ + if (!dax_mapping(mapping) && !workingset_node_pages(node) && + list_empty(&node->private_list)) { + node->private_data = mapping; + list_lru_add(&workingset_shadow_nodes, + &node->private_list); + } } } @@ -166,6 +173,7 @@ static void page_cache_tree_delete(struct address_space *mapping, void __delete_from_page_cache(struct page *page, void *shadow) { struct address_space *mapping = page->mapping; + int nr = hpage_nr_pages(page); trace_mm_filemap_delete_from_page_cache(page); /* @@ -178,6 +186,7 @@ void __delete_from_page_cache(struct page *page, void *shadow) else cleancache_invalidate_page(mapping, page); + VM_BUG_ON_PAGE(PageTail(page), page); VM_BUG_ON_PAGE(page_mapped(page), page); if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) { int mapcount; @@ -209,9 +218,9 @@ void __delete_from_page_cache(struct page *page, void *shadow) /* hugetlb pages do not participate in page cache accounting. */ if (!PageHuge(page)) - __dec_zone_page_state(page, NR_FILE_PAGES); + __mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr); if (PageSwapBacked(page)) - __dec_zone_page_state(page, NR_SHMEM); + __mod_zone_page_state(page_zone(page), NR_SHMEM, -nr); /* * At this point page must be either written or cleaned by truncate. @@ -235,9 +244,8 @@ void __delete_from_page_cache(struct page *page, void *shadow) */ void delete_from_page_cache(struct page *page) { - struct address_space *mapping = page->mapping; + struct address_space *mapping = page_mapping(page); unsigned long flags; - void (*freepage)(struct page *); BUG_ON(!PageLocked(page)); @@ -250,7 +258,13 @@ void delete_from_page_cache(struct page *page) if (freepage) freepage(page); - put_page(page); + + if (PageTransHuge(page) && !PageHuge(page)) { + page_ref_sub(page, HPAGE_PMD_NR); + VM_BUG_ON_PAGE(page_count(page) <= 0, page); + } else { + put_page(page); + } } EXPORT_SYMBOL(delete_from_page_cache); @@ -1053,7 +1067,7 @@ EXPORT_SYMBOL(page_cache_prev_hole); struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) { void **pagep; - struct page *page; + struct page *head, *page; rcu_read_lock(); repeat: @@ -1073,8 +1087,16 @@ repeat: */ goto out; } - if (!page_cache_get_speculative(page)) + + head = compound_head(page); + if (!page_cache_get_speculative(head)) + goto repeat; + + /* The page was split under us? */ + if (compound_head(page) != head) { + put_page(head); goto repeat; + } /* * Has the page moved? @@ -1082,7 +1104,7 @@ repeat: * include/linux/pagemap.h for details. 
*/ if (unlikely(page != *pagep)) { - put_page(page); + put_page(head); goto repeat; } } @@ -1118,12 +1140,12 @@ repeat: if (page && !radix_tree_exception(page)) { lock_page(page); /* Has the page been truncated? */ - if (unlikely(page->mapping != mapping)) { + if (unlikely(page_mapping(page) != mapping)) { unlock_page(page); put_page(page); goto repeat; } - VM_BUG_ON_PAGE(page->index != offset, page); + VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); } return page; } @@ -1255,7 +1277,7 @@ unsigned find_get_entries(struct address_space *mapping, rcu_read_lock(); radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { - struct page *page; + struct page *head, *page; repeat: page = radix_tree_deref_slot(slot); if (unlikely(!page)) @@ -1272,12 +1294,20 @@ repeat: */ goto export; } - if (!page_cache_get_speculative(page)) + + head = compound_head(page); + if (!page_cache_get_speculative(head)) + goto repeat; + + /* The page was split under us? */ + if (compound_head(page) != head) { + put_page(head); goto repeat; + } /* Has the page moved? */ if (unlikely(page != *slot)) { - put_page(page); + put_page(head); goto repeat; } export: @@ -1318,7 +1348,7 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start, rcu_read_lock(); radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { - struct page *page; + struct page *head, *page; repeat: page = radix_tree_deref_slot(slot); if (unlikely(!page)) @@ -1337,12 +1367,19 @@ repeat: continue; } - if (!page_cache_get_speculative(page)) + head = compound_head(page); + if (!page_cache_get_speculative(head)) + goto repeat; + + /* The page was split under us? */ + if (compound_head(page) != head) { + put_page(head); goto repeat; + } /* Has the page moved? */ if (unlikely(page != *slot)) { - put_page(page); + put_page(head); goto repeat; } @@ -1379,7 +1416,7 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, rcu_read_lock(); radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) { - struct page *page; + struct page *head, *page; repeat: page = radix_tree_deref_slot(slot); /* The hole, there no reason to continue */ @@ -1399,12 +1436,19 @@ repeat: break; } - if (!page_cache_get_speculative(page)) + head = compound_head(page); + if (!page_cache_get_speculative(head)) + goto repeat; + + /* The page was split under us? */ + if (compound_head(page) != head) { + put_page(head); goto repeat; + } /* Has the page moved? */ if (unlikely(page != *slot)) { - put_page(page); + put_page(head); goto repeat; } @@ -1413,7 +1457,7 @@ repeat: * otherwise we can get both false positives and false * negatives, which is just confusing to the caller. */ - if (page->mapping == NULL || page->index != iter.index) { + if (page->mapping == NULL || page_to_pgoff(page) != iter.index) { put_page(page); break; } @@ -1451,7 +1495,7 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, rcu_read_lock(); radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, *index, tag) { - struct page *page; + struct page *head, *page; repeat: page = radix_tree_deref_slot(slot); if (unlikely(!page)) @@ -1476,12 +1520,19 @@ repeat: continue; } - if (!page_cache_get_speculative(page)) + head = compound_head(page); + if (!page_cache_get_speculative(head)) goto repeat; + /* The page was split under us? */ + if (compound_head(page) != head) { + put_page(head); + goto repeat; + } + /* Has the page moved? 
*/ if (unlikely(page != *slot)) { - put_page(page); + put_page(head); goto repeat; } @@ -1525,7 +1576,7 @@ unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start, rcu_read_lock(); radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, start, tag) { - struct page *page; + struct page *head, *page; repeat: page = radix_tree_deref_slot(slot); if (unlikely(!page)) @@ -1543,12 +1594,20 @@ repeat: */ goto export; } - if (!page_cache_get_speculative(page)) + + head = compound_head(page); + if (!page_cache_get_speculative(head)) goto repeat; + /* The page was split under us? */ + if (compound_head(page) != head) { + put_page(head); + goto repeat; + } + /* Has the page moved? */ if (unlikely(page != *slot)) { - put_page(page); + put_page(head); goto repeat; } export: @@ -2137,7 +2196,7 @@ void filemap_map_pages(struct fault_env *fe, struct address_space *mapping = file->f_mapping; pgoff_t last_pgoff = start_pgoff; loff_t size; - struct page *page; + struct page *head, *page; rcu_read_lock(); radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, @@ -2156,12 +2215,19 @@ repeat: goto next; } - if (!page_cache_get_speculative(page)) + head = compound_head(page); + if (!page_cache_get_speculative(head)) goto repeat; + /* The page was split under us? */ + if (compound_head(page) != head) { + put_page(head); + goto repeat; + } + /* Has the page moved? */ if (unlikely(page != *slot)) { - put_page(page); + put_page(head); goto repeat; } -- cgit v1.2.3-70-g09d2 From fc127da085c26beb89f83ad804cf73422c3b6855 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:26:07 -0700 Subject: truncate: handle file thp For shmem/tmpfs we only need to tweak truncate_inode_page() and invalidate_mapping_pages(). truncate_inode_pages_range() and invalidate_inode_pages2_range() are adjusted to use page_to_pgoff(). Link: http://lkml.kernel.org/r/1466021202-61880-26-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/truncate.c | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/mm/truncate.c b/mm/truncate.c index 4064f8f53daa..a01cce450a26 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -155,10 +155,14 @@ invalidate_complete_page(struct address_space *mapping, struct page *page) int truncate_inode_page(struct address_space *mapping, struct page *page) { + loff_t holelen; + VM_BUG_ON_PAGE(PageTail(page), page); + + holelen = PageTransHuge(page) ? 
HPAGE_PMD_SIZE : PAGE_SIZE; if (page_mapped(page)) { unmap_mapping_range(mapping, (loff_t)page->index << PAGE_SHIFT, - PAGE_SIZE, 0); + holelen, 0); } return truncate_complete_page(mapping, page); } @@ -279,7 +283,7 @@ void truncate_inode_pages_range(struct address_space *mapping, if (!trylock_page(page)) continue; - WARN_ON(page->index != index); + WARN_ON(page_to_pgoff(page) != index); if (PageWriteback(page)) { unlock_page(page); continue; @@ -367,7 +371,7 @@ void truncate_inode_pages_range(struct address_space *mapping, } lock_page(page); - WARN_ON(page->index != index); + WARN_ON(page_to_pgoff(page) != index); wait_on_page_writeback(page); truncate_inode_page(mapping, page); unlock_page(page); @@ -487,7 +491,21 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping, if (!trylock_page(page)) continue; - WARN_ON(page->index != index); + + WARN_ON(page_to_pgoff(page) != index); + + /* Middle of THP: skip */ + if (PageTransTail(page)) { + unlock_page(page); + continue; + } else if (PageTransHuge(page)) { + index += HPAGE_PMD_NR - 1; + i += HPAGE_PMD_NR - 1; + /* 'end' is in the middle of THP */ + if (index == round_down(end, HPAGE_PMD_NR)) + continue; + } + ret = invalidate_inode_page(page); unlock_page(page); /* @@ -594,7 +612,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping, } lock_page(page); - WARN_ON(page->index != index); + WARN_ON(page_to_pgoff(page) != index); if (page->mapping != mapping) { unlock_page(page); continue; -- cgit v1.2.3-70-g09d2 From 65c453778aea374a46597f4d9826274d1eaf7338 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:26:10 -0700 Subject: mm, rmap: account shmem thp pages Let's add ShmemHugePages and ShmemPmdMapped fields into meminfo and smaps. It indicates how many times we allocate and map shmem THP. NR_ANON_TRANSPARENT_HUGEPAGES is renamed to NR_ANON_THPS. Link: http://lkml.kernel.org/r/1466021202-61880-27-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. 
Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/base/node.c | 13 +++++++++---- fs/proc/meminfo.c | 7 +++++-- fs/proc/task_mmu.c | 10 +++++++++- include/linux/mmzone.h | 4 +++- mm/huge_memory.c | 4 +++- mm/page_alloc.c | 19 +++++++++++++++++++ mm/rmap.c | 14 ++++++++------ mm/vmstat.c | 2 ++ 8 files changed, 58 insertions(+), 15 deletions(-) diff --git a/drivers/base/node.c b/drivers/base/node.c index 560751bad294..51c7db2c4ee2 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -113,6 +113,8 @@ static ssize_t node_read_meminfo(struct device *dev, "Node %d SUnreclaim: %8lu kB\n" #ifdef CONFIG_TRANSPARENT_HUGEPAGE "Node %d AnonHugePages: %8lu kB\n" + "Node %d ShmemHugePages: %8lu kB\n" + "Node %d ShmemPmdMapped: %8lu kB\n" #endif , nid, K(node_page_state(nid, NR_FILE_DIRTY)), @@ -131,10 +133,13 @@ static ssize_t node_read_meminfo(struct device *dev, node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), #ifdef CONFIG_TRANSPARENT_HUGEPAGE - nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)) - , nid, - K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * - HPAGE_PMD_NR)); + nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), + nid, K(node_page_state(nid, NR_ANON_THPS) * + HPAGE_PMD_NR), + nid, K(node_page_state(nid, NR_SHMEM_THPS) * + HPAGE_PMD_NR), + nid, K(node_page_state(nid, NR_SHMEM_PMDMAPPED) * + HPAGE_PMD_NR)); #else nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); #endif diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 83720460c5bc..cf301a9ef512 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -105,6 +105,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v) #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE "AnonHugePages: %8lu kB\n" + "ShmemHugePages: %8lu kB\n" + "ShmemPmdMapped: %8lu kB\n" #endif #ifdef CONFIG_CMA "CmaTotal: %8lu kB\n" @@ -162,8 +164,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10) #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE - , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * - HPAGE_PMD_NR) + , K(global_page_state(NR_ANON_THPS) * HPAGE_PMD_NR) + , K(global_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR) + , K(global_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR) #endif #ifdef CONFIG_CMA , K(totalcma_pages) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 4648c7f63ae2..187d84ef9de9 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -448,6 +448,7 @@ struct mem_size_stats { unsigned long referenced; unsigned long anonymous; unsigned long anonymous_thp; + unsigned long shmem_thp; unsigned long swap; unsigned long shared_hugetlb; unsigned long private_hugetlb; @@ -576,7 +577,12 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); if (IS_ERR_OR_NULL(page)) return; - mss->anonymous_thp += HPAGE_PMD_SIZE; + if (PageAnon(page)) + mss->anonymous_thp += HPAGE_PMD_SIZE; + else if (PageSwapBacked(page)) + mss->shmem_thp += HPAGE_PMD_SIZE; + else + VM_BUG_ON_PAGE(1, page); smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd)); } #else @@ -770,6 +776,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) "Referenced: %8lu kB\n" "Anonymous: %8lu kB\n" "AnonHugePages: %8lu kB\n" + "ShmemPmdMapped: %8lu kB\n" "Shared_Hugetlb: %8lu kB\n" "Private_Hugetlb: %7lu kB\n" "Swap: %8lu kB\n" @@ -787,6 +794,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) mss.referenced >> 10, 
mss.anonymous >> 10, mss.anonymous_thp >> 10, + mss.shmem_thp >> 10, mss.shared_hugetlb >> 10, mss.private_hugetlb >> 10, mss.swap >> 10, diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 3d7ab30d4940..19425e988bdc 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -154,7 +154,9 @@ enum zone_stat_item { WORKINGSET_REFAULT, WORKINGSET_ACTIVATE, WORKINGSET_NODERECLAIM, - NR_ANON_TRANSPARENT_HUGEPAGES, + NR_ANON_THPS, + NR_SHMEM_THPS, + NR_SHMEM_PMDMAPPED, NR_FREE_CMA_PAGES, NR_VM_ZONE_STAT_ITEMS }; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 3b74fea6b5db..6c524984931b 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3067,7 +3067,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { /* Last compound_mapcount is gone. */ - __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); + __dec_zone_page_state(page, NR_ANON_THPS); if (TestClearPageDoubleMap(page)) { /* No need in mapcount reference anymore */ for (i = 0; i < HPAGE_PMD_NR; i++) @@ -3539,6 +3539,8 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) pgdata->split_queue_len--; list_del(page_deferred_list(head)); } + if (mapping) + __dec_zone_page_state(page, NR_SHMEM_THPS); spin_unlock(&pgdata->split_queue_lock); __split_huge_page(page, list, flags); ret = 0; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 847281eb74da..452513bf02ce 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4312,6 +4312,9 @@ void show_free_areas(unsigned int filter) " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n" " slab_reclaimable:%lu slab_unreclaimable:%lu\n" " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + " anon_thp: %lu shmem_thp: %lu shmem_pmdmapped: %lu\n" +#endif " free:%lu free_pcp:%lu free_cma:%lu\n", global_page_state(NR_ACTIVE_ANON), global_page_state(NR_INACTIVE_ANON), @@ -4329,6 +4332,11 @@ void show_free_areas(unsigned int filter) global_page_state(NR_SHMEM), global_page_state(NR_PAGETABLE), global_page_state(NR_BOUNCE), +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + global_page_state(NR_ANON_THPS) * HPAGE_PMD_NR, + global_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR, + global_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR, +#endif global_page_state(NR_FREE_PAGES), free_pcp, global_page_state(NR_FREE_CMA_PAGES)); @@ -4363,6 +4371,11 @@ void show_free_areas(unsigned int filter) " writeback:%lukB" " mapped:%lukB" " shmem:%lukB" +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + " shmem_thp: %lukB" + " shmem_pmdmapped: %lukB" + " anon_thp: %lukB" +#endif " slab_reclaimable:%lukB" " slab_unreclaimable:%lukB" " kernel_stack:%lukB" @@ -4395,6 +4408,12 @@ void show_free_areas(unsigned int filter) K(zone_page_state(zone, NR_WRITEBACK)), K(zone_page_state(zone, NR_FILE_MAPPED)), K(zone_page_state(zone, NR_SHMEM)), +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + K(zone_page_state(zone, NR_SHMEM_THPS) * HPAGE_PMD_NR), + K(zone_page_state(zone, NR_SHMEM_PMDMAPPED) + * HPAGE_PMD_NR), + K(zone_page_state(zone, NR_ANON_THPS) * HPAGE_PMD_NR), +#endif K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), zone_page_state(zone, NR_KERNEL_STACK) * diff --git a/mm/rmap.c b/mm/rmap.c index 9d643b7a99ce..8a13d9f7b566 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1212,10 +1212,8 @@ void do_page_add_anon_rmap(struct page *page, * pte lock(a spinlock) is held, which implies preemption * disabled. 
*/ - if (compound) { - __inc_zone_page_state(page, - NR_ANON_TRANSPARENT_HUGEPAGES); - } + if (compound) + __inc_zone_page_state(page, NR_ANON_THPS); __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr); } if (unlikely(PageKsm(page))) @@ -1253,7 +1251,7 @@ void page_add_new_anon_rmap(struct page *page, VM_BUG_ON_PAGE(!PageTransHuge(page), page); /* increment count (starts at -1) */ atomic_set(compound_mapcount_ptr(page), 0); - __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); + __inc_zone_page_state(page, NR_ANON_THPS); } else { /* Anon THP always mapped first with PMD */ VM_BUG_ON_PAGE(PageTransCompound(page), page); @@ -1283,6 +1281,8 @@ void page_add_file_rmap(struct page *page, bool compound) } if (!atomic_inc_and_test(compound_mapcount_ptr(page))) goto out; + VM_BUG_ON_PAGE(!PageSwapBacked(page), page); + __inc_zone_page_state(page, NR_SHMEM_PMDMAPPED); } else { if (PageTransCompound(page)) { VM_BUG_ON_PAGE(!PageLocked(page), page); @@ -1321,6 +1321,8 @@ static void page_remove_file_rmap(struct page *page, bool compound) } if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) goto out; + VM_BUG_ON_PAGE(!PageSwapBacked(page), page); + __dec_zone_page_state(page, NR_SHMEM_PMDMAPPED); } else { if (!atomic_add_negative(-1, &page->_mapcount)) goto out; @@ -1354,7 +1356,7 @@ static void page_remove_anon_compound_rmap(struct page *page) if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return; - __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); + __dec_zone_page_state(page, NR_ANON_THPS); if (TestClearPageDoubleMap(page)) { /* diff --git a/mm/vmstat.c b/mm/vmstat.c index cff2f4ec9cce..7997f52935c9 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -733,6 +733,8 @@ const char * const vmstat_text[] = { "workingset_activate", "workingset_nodereclaim", "nr_anon_transparent_hugepages", + "nr_shmem_hugepages", + "nr_shmem_pmdmapped", "nr_free_cma", /* enum writeback_stat_item counters */ -- cgit v1.2.3-70-g09d2 From 5a6e75f8110c97e2a5488894d4e922187e6cb343 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:26:13 -0700 Subject: shmem: prepare huge= mount option and sysfs knob This patch adds a new mount option, "huge=". It can have the following values: - "always": Attempt to allocate huge pages every time we need a new page; - "never": Do not allocate huge pages; - "within_size": Only allocate a huge page if it will be fully within i_size. Also respect fadvise()/madvise() hints; - "advise": Only allocate huge pages if requested with fadvise()/madvise(); Default is "never" for now. "mount -o remount,huge= /mountpoint" works fine after mount: remounting huge=never will not attempt to break up huge pages at all, just stop more from being allocated. No new config option: put this under CONFIG_TRANSPARENT_HUGEPAGE, which is the appropriate option to protect those who don't want the new bloat, and with which we shall share some pmd code. Prohibit the option when !CONFIG_TRANSPARENT_HUGEPAGE, just as mpol is invalid without CONFIG_NUMA (was hidden in mpol_parse_str(): make it explicit). Allow enabling THP only if the machine has_transparent_hugepage(). But what about Shmem with no user-visible mount? SysV SHM, memfds, shared anonymous mmaps (of /dev/zero or MAP_ANONYMOUS), GPU drivers' DRM objects, Ashmem. Though unlikely to suit all usages, provide sysfs knob /sys/kernel/mm/transparent_hugepage/shmem_enabled to experiment with huge on those.
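To see how these interfaces are meant to be exercised, here is a minimal userspace sketch. It is not part of the patch: the mount point "/mnt/tmp" and the 64M size are arbitrary examples, and it assumes a kernel carrying this series with CONFIG_TRANSPARENT_HUGEPAGE=y. It mounts a tmpfs with huge=always and then reads back the shmem_enabled knob, which lists all values with the selected one in brackets.

/* Illustrative only, not part of the patch. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mount.h>

int main(void)
{
        char buf[128];
        ssize_t n;
        int fd;

        /* "huge=" accepts always, never, within_size or advise */
        if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0, "huge=always,size=64M") < 0) {
                perror("mount");  /* rejected as a bad option on kernels without this patch */
                return 1;
        }

        /* the sysfs knob introduced above; the selected value is shown in brackets */
        fd = open("/sys/kernel/mm/transparent_hugepage/shmem_enabled", O_RDONLY);
        if (fd >= 0) {
                n = read(fd, buf, sizeof(buf) - 1);
                if (n > 0) {
                        buf[n] = '\0';
                        fputs(buf, stdout);
                }
                close(fd);
        }
        return 0;
}

A later "mount -o remount,huge=never /mnt/tmp" then behaves as described above: it stops further huge page allocations but does not break up pages that were already allocated.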
And allow shmem_enabled two further values: - "deny": For use in emergencies, to force the huge option off from all mounts; - "force": Force the huge option on for all - very useful for testing; Based on patch by Hugh Dickins. Link: http://lkml.kernel.org/r/1466021202-61880-28-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 2 + include/linux/shmem_fs.h | 3 +- mm/huge_memory.c | 3 + mm/shmem.c | 161 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 168 insertions(+), 1 deletion(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 254aac4c3963..2f757132662a 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -41,6 +41,8 @@ enum transparent_hugepage_flag { #endif }; +extern struct kobj_attribute shmem_enabled_attr; + #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) #define HPAGE_PMD_NR (1<gid = make_kgid(current_user_ns(), gid); if (!gid_valid(sbinfo->gid)) goto bad_val; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + } else if (!strcmp(this_char, "huge")) { + int huge; + huge = shmem_parse_huge(value); + if (huge < 0) + goto bad_val; + if (!has_transparent_hugepage() && + huge != SHMEM_HUGE_NEVER) + goto bad_val; + sbinfo->huge = huge; +#endif +#ifdef CONFIG_NUMA } else if (!strcmp(this_char,"mpol")) { mpol_put(mpol); mpol = NULL; if (mpol_parse_str(value, &mpol)) goto bad_val; +#endif } else { pr_err("tmpfs: Bad mount option %s\n", this_char); goto error; @@ -2910,6 +3004,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) goto out; error = 0; + sbinfo->huge = config.huge; sbinfo->max_blocks = config.max_blocks; sbinfo->max_inodes = config.max_inodes; sbinfo->free_inodes = config.max_inodes - inodes; @@ -2943,6 +3038,11 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root) if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbinfo->gid)); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ + if (sbinfo->huge) + seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); +#endif shmem_show_mpol(seq, sbinfo->mpol); return 0; } @@ -3282,6 +3382,13 @@ int __init shmem_init(void) pr_err("Could not kern_mount tmpfs\n"); goto out1; } + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY) + SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; + else + shmem_huge = 0; /* just in case it was patched */ +#endif return 0; out1: @@ -3293,6 +3400,60 @@ out3: return error; } +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS) +static ssize_t shmem_enabled_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int values[] = { + SHMEM_HUGE_ALWAYS, + SHMEM_HUGE_WITHIN_SIZE, + SHMEM_HUGE_ADVISE, + SHMEM_HUGE_NEVER, + SHMEM_HUGE_DENY, + SHMEM_HUGE_FORCE, + }; + int i, count; + + for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) { + const char *fmt = shmem_huge == values[i] ? 
"[%s] " : "%s "; + + count += sprintf(buf + count, fmt, + shmem_format_huge(values[i])); + } + buf[count - 1] = '\n'; + return count; +} + +static ssize_t shmem_enabled_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + char tmp[16]; + int huge; + + if (count + 1 > sizeof(tmp)) + return -EINVAL; + memcpy(tmp, buf, count); + tmp[count] = '\0'; + if (count && tmp[count - 1] == '\n') + tmp[count - 1] = '\0'; + + huge = shmem_parse_huge(tmp); + if (huge == -EINVAL) + return -EINVAL; + if (!has_transparent_hugepage() && + huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) + return -EINVAL; + + shmem_huge = huge; + if (shmem_huge < SHMEM_HUGE_DENY) + SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; + return count; +} + +struct kobj_attribute shmem_enabled_attr = + __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); +#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ + #else /* !CONFIG_SHMEM */ /* -- cgit v1.2.3-70-g09d2 From c01d5b300774d130a24d787825b01eb24e6e20cb Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 26 Jul 2016 15:26:15 -0700 Subject: shmem: get_unmapped_area align huge page Provide a shmem_get_unmapped_area method in file_operations, called at mmap time to decide the mapping address. It could be conditional on CONFIG_TRANSPARENT_HUGEPAGE, but save #ifdefs in other places by making it unconditional. shmem_get_unmapped_area() first calls the usual mm->get_unmapped_area (which we treat as a black box, highly dependent on architecture and config and executable layout). Lots of conditions, and in most cases it just goes with the address that chose; but when our huge stars are rightly aligned, yet that did not provide a suitable address, go back to ask for a larger arena, within which to align the mapping suitably. There have to be some direct calls to shmem_get_unmapped_area(), not via the file_operations: because of the way shmem_zero_setup() is called to create a shmem object late in the mmap sequence, when MAP_SHARED is requested with MAP_ANONYMOUS or /dev/zero. Though this only matters when /proc/sys/vm/shmem_huge has been set. Link: http://lkml.kernel.org/r/1466021202-61880-29-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Hugh Dickins Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/char/mem.c | 24 ++++++++++++ include/linux/shmem_fs.h | 2 + ipc/shm.c | 6 ++- mm/mmap.c | 16 +++++++- mm/shmem.c | 98 ++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 142 insertions(+), 4 deletions(-) diff --git a/drivers/char/mem.c b/drivers/char/mem.c index d633974e7f8b..a33163dbb913 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -657,6 +658,28 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma) return 0; } +static unsigned long get_unmapped_area_zero(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ +#ifdef CONFIG_MMU + if (flags & MAP_SHARED) { + /* + * mmap_zero() will call shmem_zero_setup() to create a file, + * so use shmem's get_unmapped_area in case it can be huge; + * and pass NULL for file as in mmap.c's get_unmapped_area(), + * so as not to confuse shmem with our handle on "/dev/zero". 
+ */ + return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags); + } + + /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */ + return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); +#else + return -ENOSYS; +#endif +} + static ssize_t write_full(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { @@ -764,6 +787,7 @@ static const struct file_operations zero_fops = { .read_iter = read_iter_zero, .write_iter = write_iter_zero, .mmap = mmap_zero, + .get_unmapped_area = get_unmapped_area_zero, #ifndef CONFIG_MMU .mmap_capabilities = zero_mmap_capabilities, #endif diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 466f18c73a49..ff2de4bab61f 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -50,6 +50,8 @@ extern struct file *shmem_file_setup(const char *name, extern struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags); extern int shmem_zero_setup(struct vm_area_struct *); +extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags); extern int shmem_lock(struct file *file, int lock, struct user_struct *user); extern bool shmem_mapping(struct address_space *mapping); extern void shmem_unlock_mapping(struct address_space *mapping); diff --git a/ipc/shm.c b/ipc/shm.c index 13282510bc0d..7fa5cbebbf19 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -476,13 +476,15 @@ static const struct file_operations shm_file_operations = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, -#ifndef CONFIG_MMU .get_unmapped_area = shm_get_unmapped_area, -#endif .llseek = noop_llseek, .fallocate = shm_fallocate, }; +/* + * shm_file_operations_huge is now identical to shm_file_operations, + * but we keep it distinct for the sake of is_file_shm_hugepages(). + */ static const struct file_operations shm_file_operations_huge = { .mmap = shm_mmap, .fsync = shm_fsync, diff --git a/mm/mmap.c b/mm/mmap.c index a41872c8f2af..86b18f334f4f 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -1897,8 +1898,19 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, return -ENOMEM; get_area = current->mm->get_unmapped_area; - if (file && file->f_op->get_unmapped_area) - get_area = file->f_op->get_unmapped_area; + if (file) { + if (file->f_op->get_unmapped_area) + get_area = file->f_op->get_unmapped_area; + } else if (flags & MAP_SHARED) { + /* + * mmap_region() will call shmem_zero_setup() to create a file, + * so use shmem's get_unmapped_area in case it can be huge. + * do_mmap_pgoff() will clear pgoff, so match alignment. 
+ */ + pgoff = 0; + get_area = shmem_get_unmapped_area; + } + addr = get_area(file, addr, len, pgoff, flags); if (IS_ERR_VALUE(addr)) return addr; diff --git a/mm/shmem.c b/mm/shmem.c index fd374f74d99f..ab02b5bb5553 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1513,6 +1513,94 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return ret; } +unsigned long shmem_get_unmapped_area(struct file *file, + unsigned long uaddr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + unsigned long (*get_area)(struct file *, + unsigned long, unsigned long, unsigned long, unsigned long); + unsigned long addr; + unsigned long offset; + unsigned long inflated_len; + unsigned long inflated_addr; + unsigned long inflated_offset; + + if (len > TASK_SIZE) + return -ENOMEM; + + get_area = current->mm->get_unmapped_area; + addr = get_area(file, uaddr, len, pgoff, flags); + + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + return addr; + if (IS_ERR_VALUE(addr)) + return addr; + if (addr & ~PAGE_MASK) + return addr; + if (addr > TASK_SIZE - len) + return addr; + + if (shmem_huge == SHMEM_HUGE_DENY) + return addr; + if (len < HPAGE_PMD_SIZE) + return addr; + if (flags & MAP_FIXED) + return addr; + /* + * Our priority is to support MAP_SHARED mapped hugely; + * and support MAP_PRIVATE mapped hugely too, until it is COWed. + * But if caller specified an address hint, respect that as before. + */ + if (uaddr) + return addr; + + if (shmem_huge != SHMEM_HUGE_FORCE) { + struct super_block *sb; + + if (file) { + VM_BUG_ON(file->f_op != &shmem_file_operations); + sb = file_inode(file)->i_sb; + } else { + /* + * Called directly from mm/mmap.c, or drivers/char/mem.c + * for "/dev/zero", to create a shared anonymous object. + */ + if (IS_ERR(shm_mnt)) + return addr; + sb = shm_mnt->mnt_sb; + } + if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER) + return addr; + } + + offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); + if (offset && offset + len < 2 * HPAGE_PMD_SIZE) + return addr; + if ((addr & (HPAGE_PMD_SIZE-1)) == offset) + return addr; + + inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; + if (inflated_len > TASK_SIZE) + return addr; + if (inflated_len < len) + return addr; + + inflated_addr = get_area(NULL, 0, inflated_len, 0, flags); + if (IS_ERR_VALUE(inflated_addr)) + return addr; + if (inflated_addr & ~PAGE_MASK) + return addr; + + inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); + inflated_addr += offset - inflated_offset; + if (inflated_offset > offset) + inflated_addr += HPAGE_PMD_SIZE; + + if (inflated_addr > TASK_SIZE - len) + return addr; + return inflated_addr; +} + #ifdef CONFIG_NUMA static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) { @@ -3261,6 +3349,7 @@ static const struct address_space_operations shmem_aops = { static const struct file_operations shmem_file_operations = { .mmap = shmem_mmap, + .get_unmapped_area = shmem_get_unmapped_area, #ifdef CONFIG_TMPFS .llseek = shmem_file_llseek, .read_iter = shmem_file_read_iter, @@ -3496,6 +3585,15 @@ void shmem_unlock_mapping(struct address_space *mapping) { } +#ifdef CONFIG_MMU +unsigned long shmem_get_unmapped_area(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); +} +#endif + void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) { truncate_inode_pages_range(inode->i_mapping, lstart, lend); -- cgit v1.2.3-70-g09d2 From 
800d8c63b2e989c2e349632d1648119bf5862f01 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:26:18 -0700 Subject: shmem: add huge pages support Here's a basic implementation of huge pages support for shmem/tmpfs. It's all pretty straightforward: - shmem_getpage() allocates a huge page if it can and tries to insert it into the radix tree with shmem_add_to_page_cache(); - shmem_add_to_page_cache() puts the page onto the radix-tree if there's space for it; - shmem_undo_range() removes huge pages if they are fully within the range. A partial truncate of a huge page zeroes out that part of the THP. This has a visible effect on fallocate(FALLOC_FL_PUNCH_HOLE) behaviour: as we don't really create a hole in this case, lseek(SEEK_HOLE) may give inconsistent results depending on what pages happened to be allocated. - no need to change shmem_fault(): core-mm will map a compound page as huge if the VMA is suitable; Link: http://lkml.kernel.org/r/1466021202-61880-30-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 2 + include/linux/shmem_fs.h | 3 + mm/filemap.c | 7 +- mm/huge_memory.c | 2 + mm/memory.c | 2 +- mm/mempolicy.c | 2 +- mm/page-writeback.c | 1 + mm/shmem.c | 380 ++++++++++++++++++++++++++++++++++++++--------- mm/swap.c | 2 + 9 files changed, 331 insertions(+), 70 deletions(-) diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 2f757132662a..7b7406e9fedf 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -156,6 +156,8 @@ void put_huge_zero_page(void); #define transparent_hugepage_enabled(__vma) 0 +static inline void prep_transhuge_page(struct page *page) {} + #define transparent_hugepage_flags 0UL static inline int split_huge_page_to_list(struct page *page, struct list_head *list) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index ff2de4bab61f..94eaaa2c6ad9 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -71,6 +71,9 @@ static inline struct page *shmem_read_mapping_page( mapping_gfp_mask(mapping)); } +extern bool shmem_charge(struct inode *inode, long pages); +extern void shmem_uncharge(struct inode *inode, long pages); + #ifdef CONFIG_TMPFS extern int shmem_add_seals(struct file *file, unsigned int seals); diff --git a/mm/filemap.c b/mm/filemap.c index 21508ea25717..e90c1543ec2d 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -219,8 +219,13 @@ void __delete_from_page_cache(struct page *page, void *shadow) /* hugetlb pages do not participate in page cache accounting. */ if (!PageHuge(page)) __mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr); - if (PageSwapBacked(page)) + if (PageSwapBacked(page)) { __mod_zone_page_state(page_zone(page), NR_SHMEM, -nr); + if (PageTransHuge(page)) + __dec_zone_page_state(page, NR_SHMEM_THPS); + } else { + VM_BUG_ON_PAGE(PageTransHuge(page) && !PageHuge(page), page); + } /* * At this point page must be either written or cleaned by truncate.
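The lseek(SEEK_HOLE) caveat described in the message above can be observed from userspace with a short sketch like the following. It is illustrative only and not part of the patch; the path "/mnt/tmp/probe" assumes a tmpfs mounted with huge=always, and the 4kB/2MB sizes assume the usual x86-64 page geometry.

/* Illustrative only: punch a 4kB hole inside what may be a 2MB huge page. */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        off_t hole;
        int fd = open("/mnt/tmp/probe", O_CREAT | O_RDWR | O_TRUNC, 0600);

        if (fd < 0)
                return 1;
        /* allocate 4MB so that pages (possibly huge) actually exist */
        if (fallocate(fd, 0, 0, 4 << 20) < 0)
                return 1;
        /* with a THP this zeroes part of the page instead of freeing it */
        if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                      1 << 20, 4096) < 0)
                perror("fallocate");
        /* may report the punched offset, or no hole before EOF */
        hole = lseek(fd, 0, SEEK_HOLE);
        printf("first hole at %lld of %d\n", (long long)hole, 4 << 20);
        close(fd);
        return 0;
}

Whether the punched range shows up as a hole depends on whether a huge or a small page happened to back it, which is exactly the inconsistency the commit message warns about.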
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index cabd53dac5b9..0f58460cd69c 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3316,6 +3316,8 @@ static void __split_huge_page(struct page *page, struct list_head *list, if (head[i].index >= end) { __ClearPageDirty(head + i); __delete_from_page_cache(head + i, NULL); + if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head)) + shmem_uncharge(head->mapping->host, 1); put_page(head + i); } } diff --git a/mm/memory.c b/mm/memory.c index 8f4254798130..712790e95f08 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1142,7 +1142,7 @@ again: * unmap shared but keep private pages. */ if (details->check_mapping && - details->check_mapping != page->mapping) + details->check_mapping != page_rmapping(page)) continue; } ptent = ptep_get_and_clear_full(mm, addr, pte, diff --git a/mm/mempolicy.c b/mm/mempolicy.c index fe90e5051012..53e40d3f3933 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -531,7 +531,7 @@ retry: nid = page_to_nid(page); if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT)) continue; - if (PageTransCompound(page) && PageAnon(page)) { + if (PageTransCompound(page)) { get_page(page); pte_unmap_unlock(pte, ptl); lock_page(page); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 8195eb454411..d578d2a56b19 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2563,6 +2563,7 @@ int set_page_dirty(struct page *page) { struct address_space *mapping = page_mapping(page); + page = compound_head(page); if (likely(mapping)) { int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; /* diff --git a/mm/shmem.c b/mm/shmem.c index ab02b5bb5553..302ae4a0af3d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -173,10 +173,13 @@ static inline int shmem_reacct_size(unsigned long flags, * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM, * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM. */ -static inline int shmem_acct_block(unsigned long flags) +static inline int shmem_acct_block(unsigned long flags, long pages) { - return (flags & VM_NORESERVE) ? 
- security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_SIZE)) : 0; + if (!(flags & VM_NORESERVE)) + return 0; + + return security_vm_enough_memory_mm(current->mm, + pages * VM_ACCT(PAGE_SIZE)); } static inline void shmem_unacct_blocks(unsigned long flags, long pages) @@ -249,6 +252,51 @@ static void shmem_recalc_inode(struct inode *inode) } } +bool shmem_charge(struct inode *inode, long pages) +{ + struct shmem_inode_info *info = SHMEM_I(inode); + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + + if (shmem_acct_block(info->flags, pages)) + return false; + spin_lock(&info->lock); + info->alloced += pages; + inode->i_blocks += pages * BLOCKS_PER_PAGE; + shmem_recalc_inode(inode); + spin_unlock(&info->lock); + inode->i_mapping->nrpages += pages; + + if (!sbinfo->max_blocks) + return true; + if (percpu_counter_compare(&sbinfo->used_blocks, + sbinfo->max_blocks - pages) > 0) { + inode->i_mapping->nrpages -= pages; + spin_lock(&info->lock); + info->alloced -= pages; + shmem_recalc_inode(inode); + spin_unlock(&info->lock); + + return false; + } + percpu_counter_add(&sbinfo->used_blocks, pages); + return true; +} + +void shmem_uncharge(struct inode *inode, long pages) +{ + struct shmem_inode_info *info = SHMEM_I(inode); + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + + spin_lock(&info->lock); + info->alloced -= pages; + inode->i_blocks -= pages * BLOCKS_PER_PAGE; + shmem_recalc_inode(inode); + spin_unlock(&info->lock); + + if (sbinfo->max_blocks) + percpu_counter_sub(&sbinfo->used_blocks, pages); +} + /* * Replace item expected in radix tree by a new item, while holding tree lock. */ @@ -376,30 +424,57 @@ static int shmem_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t index, void *expected) { - int error; + int error, nr = hpage_nr_pages(page); + VM_BUG_ON_PAGE(PageTail(page), page); + VM_BUG_ON_PAGE(index != round_down(index, nr), page); VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(!PageSwapBacked(page), page); + VM_BUG_ON(expected && PageTransHuge(page)); - get_page(page); + page_ref_add(page, nr); page->mapping = mapping; page->index = index; spin_lock_irq(&mapping->tree_lock); - if (!expected) + if (PageTransHuge(page)) { + void __rcu **results; + pgoff_t idx; + int i; + + error = 0; + if (radix_tree_gang_lookup_slot(&mapping->page_tree, + &results, &idx, index, 1) && + idx < index + HPAGE_PMD_NR) { + error = -EEXIST; + } + + if (!error) { + for (i = 0; i < HPAGE_PMD_NR; i++) { + error = radix_tree_insert(&mapping->page_tree, + index + i, page + i); + VM_BUG_ON(error); + } + count_vm_event(THP_FILE_ALLOC); + } + } else if (!expected) { error = radix_tree_insert(&mapping->page_tree, index, page); - else + } else { error = shmem_radix_tree_replace(mapping, index, expected, page); + } + if (!error) { - mapping->nrpages++; - __inc_zone_page_state(page, NR_FILE_PAGES); - __inc_zone_page_state(page, NR_SHMEM); + mapping->nrpages += nr; + if (PageTransHuge(page)) + __inc_zone_page_state(page, NR_SHMEM_THPS); + __mod_zone_page_state(page_zone(page), NR_FILE_PAGES, nr); + __mod_zone_page_state(page_zone(page), NR_SHMEM, nr); spin_unlock_irq(&mapping->tree_lock); } else { page->mapping = NULL; spin_unlock_irq(&mapping->tree_lock); - put_page(page); + page_ref_sub(page, nr); } return error; } @@ -412,6 +487,8 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap) struct address_space *mapping = page->mapping; int error; + VM_BUG_ON_PAGE(PageCompound(page), page); + spin_lock_irq(&mapping->tree_lock); error = 
shmem_radix_tree_replace(mapping, page->index, page, radswap); page->mapping = NULL; @@ -591,10 +668,33 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, continue; } + VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page); + if (!trylock_page(page)) continue; + + if (PageTransTail(page)) { + /* Middle of THP: zero out the page */ + clear_highpage(page); + unlock_page(page); + continue; + } else if (PageTransHuge(page)) { + if (index == round_down(end, HPAGE_PMD_NR)) { + /* + * Range ends in the middle of THP: + * zero out the page + */ + clear_highpage(page); + unlock_page(page); + continue; + } + index += HPAGE_PMD_NR - 1; + i += HPAGE_PMD_NR - 1; + } + if (!unfalloc || !PageUptodate(page)) { - if (page->mapping == mapping) { + VM_BUG_ON_PAGE(PageTail(page), page); + if (page_mapping(page) == mapping) { VM_BUG_ON_PAGE(PageWriteback(page), page); truncate_inode_page(mapping, page); } @@ -670,8 +770,36 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, } lock_page(page); + + if (PageTransTail(page)) { + /* Middle of THP: zero out the page */ + clear_highpage(page); + unlock_page(page); + /* + * Partial thp truncate due 'start' in middle + * of THP: don't need to look on these pages + * again on !pvec.nr restart. + */ + if (index != round_down(end, HPAGE_PMD_NR)) + start++; + continue; + } else if (PageTransHuge(page)) { + if (index == round_down(end, HPAGE_PMD_NR)) { + /* + * Range ends in the middle of THP: + * zero out the page + */ + clear_highpage(page); + unlock_page(page); + continue; + } + index += HPAGE_PMD_NR - 1; + i += HPAGE_PMD_NR - 1; + } + if (!unfalloc || !PageUptodate(page)) { - if (page->mapping == mapping) { + VM_BUG_ON_PAGE(PageTail(page), page); + if (page_mapping(page) == mapping) { VM_BUG_ON_PAGE(PageWriteback(page), page); truncate_inode_page(mapping, page); } else { @@ -929,6 +1057,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) swp_entry_t swap; pgoff_t index; + VM_BUG_ON_PAGE(PageCompound(page), page); BUG_ON(!PageLocked(page)); mapping = page->mapping; index = page->index; @@ -1065,24 +1194,63 @@ static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) #define vm_policy vm_private_data #endif +static void shmem_pseudo_vma_init(struct vm_area_struct *vma, + struct shmem_inode_info *info, pgoff_t index) +{ + /* Create a pseudo vma that just contains the policy */ + vma->vm_start = 0; + /* Bias interleave by inode number to distribute better across nodes */ + vma->vm_pgoff = index + info->vfs_inode.i_ino; + vma->vm_ops = NULL; + vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); +} + +static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) +{ + /* Drop reference taken by mpol_shared_policy_lookup() */ + mpol_cond_put(vma->vm_policy); +} + static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, struct shmem_inode_info *info, pgoff_t index) { struct vm_area_struct pvma; struct page *page; - /* Create a pseudo vma that just contains the policy */ - pvma.vm_start = 0; - /* Bias interleave by inode number to distribute better across nodes */ - pvma.vm_pgoff = index + info->vfs_inode.i_ino; - pvma.vm_ops = NULL; - pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index); - + shmem_pseudo_vma_init(&pvma, info, index); page = swapin_readahead(swap, gfp, &pvma, 0); + shmem_pseudo_vma_destroy(&pvma); - /* Drop reference taken by mpol_shared_policy_lookup() */ - mpol_cond_put(pvma.vm_policy); + return page; +} + +static struct 
page *shmem_alloc_hugepage(gfp_t gfp, + struct shmem_inode_info *info, pgoff_t index) +{ + struct vm_area_struct pvma; + struct inode *inode = &info->vfs_inode; + struct address_space *mapping = inode->i_mapping; + pgoff_t idx, hindex = round_down(index, HPAGE_PMD_NR); + void __rcu **results; + struct page *page; + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + return NULL; + + rcu_read_lock(); + if (radix_tree_gang_lookup_slot(&mapping->page_tree, &results, &idx, + hindex, 1) && idx < hindex + HPAGE_PMD_NR) { + rcu_read_unlock(); + return NULL; + } + rcu_read_unlock(); + + shmem_pseudo_vma_init(&pvma, info, hindex); + page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, + HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); + shmem_pseudo_vma_destroy(&pvma); + if (page) + prep_transhuge_page(page); return page; } @@ -1092,23 +1260,51 @@ static struct page *shmem_alloc_page(gfp_t gfp, struct vm_area_struct pvma; struct page *page; - /* Create a pseudo vma that just contains the policy */ - pvma.vm_start = 0; - /* Bias interleave by inode number to distribute better across nodes */ - pvma.vm_pgoff = index + info->vfs_inode.i_ino; - pvma.vm_ops = NULL; - pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index); + shmem_pseudo_vma_init(&pvma, info, index); + page = alloc_page_vma(gfp, &pvma, 0); + shmem_pseudo_vma_destroy(&pvma); + + return page; +} + +static struct page *shmem_alloc_and_acct_page(gfp_t gfp, + struct shmem_inode_info *info, struct shmem_sb_info *sbinfo, + pgoff_t index, bool huge) +{ + struct page *page; + int nr; + int err = -ENOSPC; + + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + huge = false; + nr = huge ? HPAGE_PMD_NR : 1; + + if (shmem_acct_block(info->flags, nr)) + goto failed; + if (sbinfo->max_blocks) { + if (percpu_counter_compare(&sbinfo->used_blocks, + sbinfo->max_blocks - nr) > 0) + goto unacct; + percpu_counter_add(&sbinfo->used_blocks, nr); + } - page = alloc_pages_vma(gfp, 0, &pvma, 0, numa_node_id(), false); + if (huge) + page = shmem_alloc_hugepage(gfp, info, index); + else + page = shmem_alloc_page(gfp, info, index); if (page) { __SetPageLocked(page); __SetPageSwapBacked(page); + return page; } - /* Drop reference taken by mpol_shared_policy_lookup() */ - mpol_cond_put(pvma.vm_policy); - - return page; + err = -ENOMEM; + if (sbinfo->max_blocks) + percpu_counter_add(&sbinfo->used_blocks, -nr); +unacct: + shmem_unacct_blocks(info->flags, nr); +failed: + return ERR_PTR(err); } /* @@ -1213,6 +1409,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, struct mem_cgroup *memcg; struct page *page; swp_entry_t swap; + pgoff_t hindex = index; int error; int once = 0; int alloced = 0; @@ -1334,47 +1531,74 @@ repeat: swap_free(swap); } else { - if (shmem_acct_block(info->flags)) { - error = -ENOSPC; - goto failed; - } - if (sbinfo->max_blocks) { - if (percpu_counter_compare(&sbinfo->used_blocks, - sbinfo->max_blocks) >= 0) { - error = -ENOSPC; - goto unacct; - } - percpu_counter_inc(&sbinfo->used_blocks); + /* shmem_symlink() */ + if (mapping->a_ops != &shmem_aops) + goto alloc_nohuge; + if (shmem_huge == SHMEM_HUGE_DENY) + goto alloc_nohuge; + if (shmem_huge == SHMEM_HUGE_FORCE) + goto alloc_huge; + switch (sbinfo->huge) { + loff_t i_size; + pgoff_t off; + case SHMEM_HUGE_NEVER: + goto alloc_nohuge; + case SHMEM_HUGE_WITHIN_SIZE: + off = round_up(index, HPAGE_PMD_NR); + i_size = round_up(i_size_read(inode), PAGE_SIZE); + if (i_size >= HPAGE_PMD_SIZE && + i_size >> PAGE_SHIFT >= off) + goto alloc_huge; + /* fallthrough */ + 
case SHMEM_HUGE_ADVISE: + /* TODO: wire up fadvise()/madvise() */ + goto alloc_nohuge; } - page = shmem_alloc_page(gfp, info, index); - if (!page) { - error = -ENOMEM; - goto decused; +alloc_huge: + page = shmem_alloc_and_acct_page(gfp, info, sbinfo, + index, true); + if (IS_ERR(page)) { +alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, info, sbinfo, + index, false); + } + if (IS_ERR(page)) { + error = PTR_ERR(page); + page = NULL; + goto failed; } + + if (PageTransHuge(page)) + hindex = round_down(index, HPAGE_PMD_NR); + else + hindex = index; + if (sgp == SGP_WRITE) __SetPageReferenced(page); error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg, - false); + PageTransHuge(page)); if (error) - goto decused; - error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK); + goto unacct; + error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK, + compound_order(page)); if (!error) { - error = shmem_add_to_page_cache(page, mapping, index, + error = shmem_add_to_page_cache(page, mapping, hindex, NULL); radix_tree_preload_end(); } if (error) { - mem_cgroup_cancel_charge(page, memcg, false); - goto decused; + mem_cgroup_cancel_charge(page, memcg, + PageTransHuge(page)); + goto unacct; } - mem_cgroup_commit_charge(page, memcg, false, false); + mem_cgroup_commit_charge(page, memcg, false, + PageTransHuge(page)); lru_cache_add_anon(page); spin_lock(&info->lock); - info->alloced++; - inode->i_blocks += BLOCKS_PER_PAGE; + info->alloced += 1 << compound_order(page); + inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); shmem_recalc_inode(inode); spin_unlock(&info->lock); alloced = true; @@ -1390,10 +1614,15 @@ clear: * but SGP_FALLOC on a page fallocated earlier must initialize * it now, lest undo on failure cancel our earlier guarantee. */ - if (sgp != SGP_WRITE) { - clear_highpage(page); - flush_dcache_page(page); - SetPageUptodate(page); + if (sgp != SGP_WRITE && !PageUptodate(page)) { + struct page *head = compound_head(page); + int i; + + for (i = 0; i < (1 << compound_order(head)); i++) { + clear_highpage(head + i); + flush_dcache_page(head + i); + } + SetPageUptodate(head); } } @@ -1410,17 +1639,23 @@ clear: error = -EINVAL; goto unlock; } - *pagep = page; + *pagep = page + index - hindex; return 0; /* * Error recovery. 
*/ -decused: - if (sbinfo->max_blocks) - percpu_counter_add(&sbinfo->used_blocks, -1); unacct: - shmem_unacct_blocks(info->flags, 1); + if (sbinfo->max_blocks) + percpu_counter_sub(&sbinfo->used_blocks, + 1 << compound_order(page)); + shmem_unacct_blocks(info->flags, 1 << compound_order(page)); + + if (PageTransHuge(page)) { + unlock_page(page); + put_page(page); + goto alloc_nohuge; + } failed: if (swap.val && !shmem_confirm_swap(mapping, index, swap)) error = -EEXIST; @@ -1758,12 +1993,23 @@ shmem_write_end(struct file *file, struct address_space *mapping, i_size_write(inode, pos + copied); if (!PageUptodate(page)) { + struct page *head = compound_head(page); + if (PageTransCompound(page)) { + int i; + + for (i = 0; i < HPAGE_PMD_NR; i++) { + if (head + i == page) + continue; + clear_highpage(head + i); + flush_dcache_page(head + i); + } + } if (copied < PAGE_SIZE) { unsigned from = pos & (PAGE_SIZE - 1); zero_user_segments(page, 0, from, from + copied, PAGE_SIZE); } - SetPageUptodate(page); + SetPageUptodate(head); } set_page_dirty(page); unlock_page(page); diff --git a/mm/swap.c b/mm/swap.c index 90530ff8ed16..616df4ddd870 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -292,6 +292,7 @@ static bool need_activate_page_drain(int cpu) void activate_page(struct page *page) { + page = compound_head(page); if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); @@ -316,6 +317,7 @@ void activate_page(struct page *page) { struct zone *zone = page_zone(page); + page = compound_head(page); spin_lock_irq(&zone->lru_lock); __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL); spin_unlock_irq(&zone->lru_lock); -- cgit v1.2.3-70-g09d2 From 657e3038c4e6fcd3cef41f2b01c655a685a7b8c7 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:26:21 -0700 Subject: shmem, thp: respect MADV_{NO,}HUGEPAGE for file mappings Let's wire up existing madvise() hugepage hints for file mappings. MADV_HUGEPAGE advise shmem to allocate huge page on page fault in the VMA. It only has effect if the filesystem is mounted with huge=advise or huge=within_size. MADV_NOHUGEPAGE prevents hugepage from being allocated on page fault in the VMA. It doesn't prevent a huge page from being allocated by other means, i.e. page fault into different mapping or write(2) into file. Link: http://lkml.kernel.org/r/1466021202-61880-31-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 19 +++++-------------- mm/shmem.c | 20 +++++++++++++++++--- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 0f58460cd69c..5eba97874ad5 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1830,7 +1830,7 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) return NULL; } -#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) +#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) @@ -1846,11 +1846,6 @@ int hugepage_madvise(struct vm_area_struct *vma, if (mm_has_pgste(vma->vm_mm)) return 0; #endif - /* - * Be somewhat over-protective like KSM for now! 
- */ - if (*vm_flags & VM_NO_THP) - return -EINVAL; *vm_flags &= ~VM_NOHUGEPAGE; *vm_flags |= VM_HUGEPAGE; /* @@ -1858,15 +1853,11 @@ int hugepage_madvise(struct vm_area_struct *vma, * register it here without waiting a page fault that * may not happen any time soon. */ - if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags))) + if (!(*vm_flags & VM_NO_KHUGEPAGED) && + khugepaged_enter_vma_merge(vma, *vm_flags)) return -ENOMEM; break; case MADV_NOHUGEPAGE: - /* - * Be somewhat over-protective like KSM for now! - */ - if (*vm_flags & VM_NO_THP) - return -EINVAL; *vm_flags &= ~VM_HUGEPAGE; *vm_flags |= VM_NOHUGEPAGE; /* @@ -1974,7 +1965,7 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma, * page fault if needed. */ return 0; - if (vma->vm_ops || (vm_flags & VM_NO_THP)) + if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED)) /* khugepaged not yet working on file or special mappings */ return 0; hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; @@ -2366,7 +2357,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) return false; if (is_vma_temporary_stack(vma)) return false; - return !(vma->vm_flags & VM_NO_THP); + return !(vma->vm_flags & VM_NO_KHUGEPAGED); } /* diff --git a/mm/shmem.c b/mm/shmem.c index 302ae4a0af3d..f19b6b44ae46 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -101,6 +101,8 @@ struct shmem_falloc { enum sgp_type { SGP_READ, /* don't exceed i_size, don't allocate page */ SGP_CACHE, /* don't exceed i_size, may allocate page */ + SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */ + SGP_HUGE, /* like SGP_CACHE, huge pages preferred */ SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ }; @@ -1409,6 +1411,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, struct mem_cgroup *memcg; struct page *page; swp_entry_t swap; + enum sgp_type sgp_huge = sgp; pgoff_t hindex = index; int error; int once = 0; @@ -1416,6 +1419,8 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) return -EFBIG; + if (sgp == SGP_NOHUGE || sgp == SGP_HUGE) + sgp = SGP_CACHE; repeat: swap.val = 0; page = find_lock_entry(mapping, index); @@ -1534,7 +1539,7 @@ repeat: /* shmem_symlink() */ if (mapping->a_ops != &shmem_aops) goto alloc_nohuge; - if (shmem_huge == SHMEM_HUGE_DENY) + if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE) goto alloc_nohuge; if (shmem_huge == SHMEM_HUGE_FORCE) goto alloc_huge; @@ -1551,7 +1556,9 @@ repeat: goto alloc_huge; /* fallthrough */ case SHMEM_HUGE_ADVISE: - /* TODO: wire up fadvise()/madvise() */ + if (sgp_huge == SGP_HUGE) + goto alloc_huge; + /* TODO: implement fadvise() hints */ goto alloc_nohuge; } @@ -1680,6 +1687,7 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct inode *inode = file_inode(vma->vm_file); gfp_t gfp = mapping_gfp_mask(inode->i_mapping); + enum sgp_type sgp; int error; int ret = VM_FAULT_LOCKED; @@ -1741,7 +1749,13 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) spin_unlock(&inode->i_lock); } - error = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE, + sgp = SGP_CACHE; + if (vma->vm_flags & VM_HUGEPAGE) + sgp = SGP_HUGE; + else if (vma->vm_flags & VM_NOHUGEPAGE) + sgp = SGP_NOHUGE; + + error = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp, gfp, vma->vm_mm, &ret); if (error) return ((error == -ENOMEM) ? 
VM_FAULT_OOM : VM_FAULT_SIGBUS); -- cgit v1.2.3-70-g09d2 From b46e756f5e47031c67658ff036e5ffe27062fa43 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:26:24 -0700 Subject: thp: extract khugepaged from mm/huge_memory.c khugepaged implementation grew to the point when it deserve separate file in source. Let's move it to mm/khugepaged.c. Link: http://lkml.kernel.org/r/1466021202-61880-32-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 10 + include/linux/khugepaged.h | 5 + mm/Makefile | 2 +- mm/huge_memory.c | 1493 +------------------------------------------- mm/khugepaged.c | 1490 +++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 1515 insertions(+), 1485 deletions(-) create mode 100644 mm/khugepaged.c diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 7b7406e9fedf..92ce91c03cd0 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -41,6 +41,16 @@ enum transparent_hugepage_flag { #endif }; +struct kobject; +struct kobj_attribute; + +extern ssize_t single_hugepage_flag_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count, + enum transparent_hugepage_flag flag); +extern ssize_t single_hugepage_flag_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf, + enum transparent_hugepage_flag flag); extern struct kobj_attribute shmem_enabled_attr; #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index eeb307985715..1e032a1ddb3e 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -4,6 +4,11 @@ #include /* MMF_VM_HUGEPAGE */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern struct attribute_group khugepaged_attr_group; + +extern int khugepaged_init(void); +extern void khugepaged_destroy(void); +extern int start_stop_khugepaged(void); extern int __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, diff --git a/mm/Makefile b/mm/Makefile index 78c6f7dedb83..fc059666c760 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -74,7 +74,7 @@ obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o obj-$(CONFIG_MEMTEST) += memtest.o obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_QUICKLIST) += quicklist.o -obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o +obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o obj-$(CONFIG_PAGE_COUNTER) += page_counter.o obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o obj-$(CONFIG_MEMCG_SWAP) += swap_cgroup.o diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 5eba97874ad5..2706182787d8 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -36,35 +35,6 @@ #include #include "internal.h" -enum scan_result { - SCAN_FAIL, - SCAN_SUCCEED, - SCAN_PMD_NULL, - SCAN_EXCEED_NONE_PTE, - SCAN_PTE_NON_PRESENT, - SCAN_PAGE_RO, - SCAN_NO_REFERENCED_PAGE, - SCAN_PAGE_NULL, - SCAN_SCAN_ABORT, - SCAN_PAGE_COUNT, - SCAN_PAGE_LRU, - SCAN_PAGE_LOCK, - SCAN_PAGE_ANON, - SCAN_PAGE_COMPOUND, - SCAN_ANY_PROCESS, - SCAN_VMA_NULL, - SCAN_VMA_CHECK, - SCAN_ADDRESS_RANGE, - SCAN_SWAP_CACHE_PAGE, - SCAN_DEL_PAGE_LRU, - SCAN_ALLOC_HUGE_PAGE_FAIL, - SCAN_CGROUP_CHARGE_FAIL, - SCAN_EXCEED_SWAP_PTE -}; - -#define CREATE_TRACE_POINTS -#include - /* * By default transparent hugepage support 
is disabled in order that avoid * to risk increase the memory footprint of applications without a guaranteed @@ -84,128 +54,8 @@ unsigned long transparent_hugepage_flags __read_mostly = (1< min_free_kbytes) { - if (user_min_free_kbytes >= 0) - pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n", - min_free_kbytes, recommended_min); - - min_free_kbytes = recommended_min; - } - setup_per_zone_wmarks(); -} - -static int start_stop_khugepaged(void) -{ - int err = 0; - if (khugepaged_enabled()) { - if (!khugepaged_thread) - khugepaged_thread = kthread_run(khugepaged, NULL, - "khugepaged"); - if (IS_ERR(khugepaged_thread)) { - pr_err("khugepaged: kthread_run(khugepaged) failed\n"); - err = PTR_ERR(khugepaged_thread); - khugepaged_thread = NULL; - goto fail; - } - - if (!list_empty(&khugepaged_scan.mm_head)) - wake_up_interruptible(&khugepaged_wait); - - set_recommended_min_free_kbytes(); - } else if (khugepaged_thread) { - kthread_stop(khugepaged_thread); - khugepaged_thread = NULL; - } -fail: - return err; -} - static atomic_t huge_zero_refcount; struct page *huge_zero_page __read_mostly; @@ -331,12 +181,7 @@ static ssize_t enabled_store(struct kobject *kobj, TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG); if (ret > 0) { - int err; - - mutex_lock(&khugepaged_mutex); - err = start_stop_khugepaged(); - mutex_unlock(&khugepaged_mutex); - + int err = start_stop_khugepaged(); if (err) ret = err; } @@ -346,7 +191,7 @@ static ssize_t enabled_store(struct kobject *kobj, static struct kobj_attribute enabled_attr = __ATTR(enabled, 0644, enabled_show, enabled_store); -static ssize_t single_flag_show(struct kobject *kobj, +ssize_t single_hugepage_flag_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, enum transparent_hugepage_flag flag) { @@ -354,7 +199,7 @@ static ssize_t single_flag_show(struct kobject *kobj, !!test_bit(flag, &transparent_hugepage_flags)); } -static ssize_t single_flag_store(struct kobject *kobj, +ssize_t single_hugepage_flag_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count, enum transparent_hugepage_flag flag) @@ -409,13 +254,13 @@ static struct kobj_attribute defrag_attr = static ssize_t use_zero_page_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return single_flag_show(kobj, attr, buf, + return single_hugepage_flag_show(kobj, attr, buf, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); } static ssize_t use_zero_page_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { - return single_flag_store(kobj, attr, buf, count, + return single_hugepage_flag_store(kobj, attr, buf, count, TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG); } static struct kobj_attribute use_zero_page_attr = @@ -424,14 +269,14 @@ static struct kobj_attribute use_zero_page_attr = static ssize_t debug_cow_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { - return single_flag_show(kobj, attr, buf, + return single_hugepage_flag_show(kobj, attr, buf, TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); } static ssize_t debug_cow_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { - return single_flag_store(kobj, attr, buf, count, + return single_hugepage_flag_store(kobj, attr, buf, count, TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG); } static struct kobj_attribute debug_cow_attr = @@ -455,199 +300,6 @@ static struct attribute_group hugepage_attr_group = { .attrs = hugepage_attr, }; -static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, - struct 
kobj_attribute *attr, - char *buf) -{ - return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs); -} - -static ssize_t scan_sleep_millisecs_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count) -{ - unsigned long msecs; - int err; - - err = kstrtoul(buf, 10, &msecs); - if (err || msecs > UINT_MAX) - return -EINVAL; - - khugepaged_scan_sleep_millisecs = msecs; - khugepaged_sleep_expire = 0; - wake_up_interruptible(&khugepaged_wait); - - return count; -} -static struct kobj_attribute scan_sleep_millisecs_attr = - __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show, - scan_sleep_millisecs_store); - -static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj, - struct kobj_attribute *attr, - char *buf) -{ - return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs); -} - -static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count) -{ - unsigned long msecs; - int err; - - err = kstrtoul(buf, 10, &msecs); - if (err || msecs > UINT_MAX) - return -EINVAL; - - khugepaged_alloc_sleep_millisecs = msecs; - khugepaged_sleep_expire = 0; - wake_up_interruptible(&khugepaged_wait); - - return count; -} -static struct kobj_attribute alloc_sleep_millisecs_attr = - __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show, - alloc_sleep_millisecs_store); - -static ssize_t pages_to_scan_show(struct kobject *kobj, - struct kobj_attribute *attr, - char *buf) -{ - return sprintf(buf, "%u\n", khugepaged_pages_to_scan); -} -static ssize_t pages_to_scan_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count) -{ - int err; - unsigned long pages; - - err = kstrtoul(buf, 10, &pages); - if (err || !pages || pages > UINT_MAX) - return -EINVAL; - - khugepaged_pages_to_scan = pages; - - return count; -} -static struct kobj_attribute pages_to_scan_attr = - __ATTR(pages_to_scan, 0644, pages_to_scan_show, - pages_to_scan_store); - -static ssize_t pages_collapsed_show(struct kobject *kobj, - struct kobj_attribute *attr, - char *buf) -{ - return sprintf(buf, "%u\n", khugepaged_pages_collapsed); -} -static struct kobj_attribute pages_collapsed_attr = - __ATTR_RO(pages_collapsed); - -static ssize_t full_scans_show(struct kobject *kobj, - struct kobj_attribute *attr, - char *buf) -{ - return sprintf(buf, "%u\n", khugepaged_full_scans); -} -static struct kobj_attribute full_scans_attr = - __ATTR_RO(full_scans); - -static ssize_t khugepaged_defrag_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -{ - return single_flag_show(kobj, attr, buf, - TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); -} -static ssize_t khugepaged_defrag_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count) -{ - return single_flag_store(kobj, attr, buf, count, - TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); -} -static struct kobj_attribute khugepaged_defrag_attr = - __ATTR(defrag, 0644, khugepaged_defrag_show, - khugepaged_defrag_store); - -/* - * max_ptes_none controls if khugepaged should collapse hugepages over - * any unmapped ptes in turn potentially increasing the memory - * footprint of the vmas. When max_ptes_none is 0 khugepaged will not - * reduce the available free memory in the system as it - * runs. Increasing max_ptes_none will instead potentially reduce the - * free memory in the system during the khugepaged scan. 
- */ -static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj, - struct kobj_attribute *attr, - char *buf) -{ - return sprintf(buf, "%u\n", khugepaged_max_ptes_none); -} -static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count) -{ - int err; - unsigned long max_ptes_none; - - err = kstrtoul(buf, 10, &max_ptes_none); - if (err || max_ptes_none > HPAGE_PMD_NR-1) - return -EINVAL; - - khugepaged_max_ptes_none = max_ptes_none; - - return count; -} -static struct kobj_attribute khugepaged_max_ptes_none_attr = - __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show, - khugepaged_max_ptes_none_store); - -static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj, - struct kobj_attribute *attr, - char *buf) -{ - return sprintf(buf, "%u\n", khugepaged_max_ptes_swap); -} - -static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj, - struct kobj_attribute *attr, - const char *buf, size_t count) -{ - int err; - unsigned long max_ptes_swap; - - err = kstrtoul(buf, 10, &max_ptes_swap); - if (err || max_ptes_swap > HPAGE_PMD_NR-1) - return -EINVAL; - - khugepaged_max_ptes_swap = max_ptes_swap; - - return count; -} - -static struct kobj_attribute khugepaged_max_ptes_swap_attr = - __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show, - khugepaged_max_ptes_swap_store); - -static struct attribute *khugepaged_attr[] = { - &khugepaged_defrag_attr.attr, - &khugepaged_max_ptes_none_attr.attr, - &pages_to_scan_attr.attr, - &pages_collapsed_attr.attr, - &full_scans_attr.attr, - &scan_sleep_millisecs_attr.attr, - &alloc_sleep_millisecs_attr.attr, - &khugepaged_max_ptes_swap_attr.attr, - NULL, -}; - -static struct attribute_group khugepaged_attr_group = { - .attrs = khugepaged_attr, - .name = "khugepaged", -}; - static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj) { int err; @@ -706,9 +358,6 @@ static int __init hugepage_init(void) return -EINVAL; } - khugepaged_pages_to_scan = HPAGE_PMD_NR * 8; - khugepaged_max_ptes_none = HPAGE_PMD_NR - 1; - khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8; /* * hugepages can't be allocated by the buddy allocator */ @@ -723,7 +372,7 @@ static int __init hugepage_init(void) if (err) goto err_sysfs; - err = khugepaged_slab_init(); + err = khugepaged_init(); if (err) goto err_slab; @@ -754,7 +403,7 @@ err_khugepaged: err_split_shrinker: unregister_shrinker(&huge_zero_page_shrinker); err_hzp_shrinker: - khugepaged_slab_exit(); + khugepaged_destroy(); err_slab: hugepage_exit_sysfs(hugepage_kobj); err_sysfs: @@ -909,12 +558,6 @@ static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) return GFP_TRANSHUGE | reclaim_flags; } -/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */ -static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void) -{ - return GFP_TRANSHUGE | (khugepaged_defrag() ? __GFP_DIRECT_RECLAIM : 0); -} - /* Caller must hold page table lock. 
*/ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, @@ -1830,1124 +1473,6 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) return NULL; } -#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) - -int hugepage_madvise(struct vm_area_struct *vma, - unsigned long *vm_flags, int advice) -{ - switch (advice) { - case MADV_HUGEPAGE: -#ifdef CONFIG_S390 - /* - * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390 - * can't handle this properly after s390_enable_sie, so we simply - * ignore the madvise to prevent qemu from causing a SIGSEGV. - */ - if (mm_has_pgste(vma->vm_mm)) - return 0; -#endif - *vm_flags &= ~VM_NOHUGEPAGE; - *vm_flags |= VM_HUGEPAGE; - /* - * If the vma become good for khugepaged to scan, - * register it here without waiting a page fault that - * may not happen any time soon. - */ - if (!(*vm_flags & VM_NO_KHUGEPAGED) && - khugepaged_enter_vma_merge(vma, *vm_flags)) - return -ENOMEM; - break; - case MADV_NOHUGEPAGE: - *vm_flags &= ~VM_HUGEPAGE; - *vm_flags |= VM_NOHUGEPAGE; - /* - * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning - * this vma even if we leave the mm registered in khugepaged if - * it got registered before VM_NOHUGEPAGE was set. - */ - break; - } - - return 0; -} - -static int __init khugepaged_slab_init(void) -{ - mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", - sizeof(struct mm_slot), - __alignof__(struct mm_slot), 0, NULL); - if (!mm_slot_cache) - return -ENOMEM; - - return 0; -} - -static void __init khugepaged_slab_exit(void) -{ - kmem_cache_destroy(mm_slot_cache); -} - -static inline struct mm_slot *alloc_mm_slot(void) -{ - if (!mm_slot_cache) /* initialization failed */ - return NULL; - return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); -} - -static inline void free_mm_slot(struct mm_slot *mm_slot) -{ - kmem_cache_free(mm_slot_cache, mm_slot); -} - -static struct mm_slot *get_mm_slot(struct mm_struct *mm) -{ - struct mm_slot *mm_slot; - - hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) - if (mm == mm_slot->mm) - return mm_slot; - - return NULL; -} - -static void insert_to_mm_slots_hash(struct mm_struct *mm, - struct mm_slot *mm_slot) -{ - mm_slot->mm = mm; - hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); -} - -static inline int khugepaged_test_exit(struct mm_struct *mm) -{ - return atomic_read(&mm->mm_users) == 0; -} - -int __khugepaged_enter(struct mm_struct *mm) -{ - struct mm_slot *mm_slot; - int wakeup; - - mm_slot = alloc_mm_slot(); - if (!mm_slot) - return -ENOMEM; - - /* __khugepaged_exit() must not run from under us */ - VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); - if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { - free_mm_slot(mm_slot); - return 0; - } - - spin_lock(&khugepaged_mm_lock); - insert_to_mm_slots_hash(mm, mm_slot); - /* - * Insert just behind the scanning cursor, to let the area settle - * down a little. - */ - wakeup = list_empty(&khugepaged_scan.mm_head); - list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); - spin_unlock(&khugepaged_mm_lock); - - atomic_inc(&mm->mm_count); - if (wakeup) - wake_up_interruptible(&khugepaged_wait); - - return 0; -} - -int khugepaged_enter_vma_merge(struct vm_area_struct *vma, - unsigned long vm_flags) -{ - unsigned long hstart, hend; - if (!vma->anon_vma) - /* - * Not yet faulted in so we will register later in the - * page fault if needed. 
- */ - return 0; - if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED)) - /* khugepaged not yet working on file or special mappings */ - return 0; - hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; - hend = vma->vm_end & HPAGE_PMD_MASK; - if (hstart < hend) - return khugepaged_enter(vma, vm_flags); - return 0; -} - -void __khugepaged_exit(struct mm_struct *mm) -{ - struct mm_slot *mm_slot; - int free = 0; - - spin_lock(&khugepaged_mm_lock); - mm_slot = get_mm_slot(mm); - if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { - hash_del(&mm_slot->hash); - list_del(&mm_slot->mm_node); - free = 1; - } - spin_unlock(&khugepaged_mm_lock); - - if (free) { - clear_bit(MMF_VM_HUGEPAGE, &mm->flags); - free_mm_slot(mm_slot); - mmdrop(mm); - } else if (mm_slot) { - /* - * This is required to serialize against - * khugepaged_test_exit() (which is guaranteed to run - * under mmap sem read mode). Stop here (after we - * return all pagetables will be destroyed) until - * khugepaged has finished working on the pagetables - * under the mmap_sem. - */ - down_write(&mm->mmap_sem); - up_write(&mm->mmap_sem); - } -} - -static void release_pte_page(struct page *page) -{ - /* 0 stands for page_is_file_cache(page) == false */ - dec_zone_page_state(page, NR_ISOLATED_ANON + 0); - unlock_page(page); - putback_lru_page(page); -} - -static void release_pte_pages(pte_t *pte, pte_t *_pte) -{ - while (--_pte >= pte) { - pte_t pteval = *_pte; - if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval))) - release_pte_page(pte_page(pteval)); - } -} - -static int __collapse_huge_page_isolate(struct vm_area_struct *vma, - unsigned long address, - pte_t *pte) -{ - struct page *page = NULL; - pte_t *_pte; - int none_or_zero = 0, result = 0; - bool referenced = false, writable = false; - - for (_pte = pte; _pte < pte+HPAGE_PMD_NR; - _pte++, address += PAGE_SIZE) { - pte_t pteval = *_pte; - if (pte_none(pteval) || (pte_present(pteval) && - is_zero_pfn(pte_pfn(pteval)))) { - if (!userfaultfd_armed(vma) && - ++none_or_zero <= khugepaged_max_ptes_none) { - continue; - } else { - result = SCAN_EXCEED_NONE_PTE; - goto out; - } - } - if (!pte_present(pteval)) { - result = SCAN_PTE_NON_PRESENT; - goto out; - } - page = vm_normal_page(vma, address, pteval); - if (unlikely(!page)) { - result = SCAN_PAGE_NULL; - goto out; - } - - VM_BUG_ON_PAGE(PageCompound(page), page); - VM_BUG_ON_PAGE(!PageAnon(page), page); - VM_BUG_ON_PAGE(!PageSwapBacked(page), page); - - /* - * We can do it before isolate_lru_page because the - * page can't be freed from under us. NOTE: PG_lock - * is needed to serialize against split_huge_page - * when invoked from the VM. - */ - if (!trylock_page(page)) { - result = SCAN_PAGE_LOCK; - goto out; - } - - /* - * cannot use mapcount: can't collapse if there's a gup pin. - * The page must only be referenced by the scanned process - * and page swap cache. - */ - if (page_count(page) != 1 + !!PageSwapCache(page)) { - unlock_page(page); - result = SCAN_PAGE_COUNT; - goto out; - } - if (pte_write(pteval)) { - writable = true; - } else { - if (PageSwapCache(page) && - !reuse_swap_page(page, NULL)) { - unlock_page(page); - result = SCAN_SWAP_CACHE_PAGE; - goto out; - } - /* - * Page is not in the swap cache. It can be collapsed - * into a THP. - */ - } - - /* - * Isolate the page to avoid collapsing an hugepage - * currently in use by the VM. 
- */ - if (isolate_lru_page(page)) { - unlock_page(page); - result = SCAN_DEL_PAGE_LRU; - goto out; - } - /* 0 stands for page_is_file_cache(page) == false */ - inc_zone_page_state(page, NR_ISOLATED_ANON + 0); - VM_BUG_ON_PAGE(!PageLocked(page), page); - VM_BUG_ON_PAGE(PageLRU(page), page); - - /* If there is no mapped pte young don't collapse the page */ - if (pte_young(pteval) || - page_is_young(page) || PageReferenced(page) || - mmu_notifier_test_young(vma->vm_mm, address)) - referenced = true; - } - if (likely(writable)) { - if (likely(referenced)) { - result = SCAN_SUCCEED; - trace_mm_collapse_huge_page_isolate(page, none_or_zero, - referenced, writable, result); - return 1; - } - } else { - result = SCAN_PAGE_RO; - } - -out: - release_pte_pages(pte, _pte); - trace_mm_collapse_huge_page_isolate(page, none_or_zero, - referenced, writable, result); - return 0; -} - -static void __collapse_huge_page_copy(pte_t *pte, struct page *page, - struct vm_area_struct *vma, - unsigned long address, - spinlock_t *ptl) -{ - pte_t *_pte; - for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { - pte_t pteval = *_pte; - struct page *src_page; - - if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { - clear_user_highpage(page, address); - add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); - if (is_zero_pfn(pte_pfn(pteval))) { - /* - * ptl mostly unnecessary. - */ - spin_lock(ptl); - /* - * paravirt calls inside pte_clear here are - * superfluous. - */ - pte_clear(vma->vm_mm, address, _pte); - spin_unlock(ptl); - } - } else { - src_page = pte_page(pteval); - copy_user_highpage(page, src_page, address, vma); - VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page); - release_pte_page(src_page); - /* - * ptl mostly unnecessary, but preempt has to - * be disabled to update the per-cpu stats - * inside page_remove_rmap(). - */ - spin_lock(ptl); - /* - * paravirt calls inside pte_clear here are - * superfluous. - */ - pte_clear(vma->vm_mm, address, _pte); - page_remove_rmap(src_page, false); - spin_unlock(ptl); - free_page_and_swap_cache(src_page); - } - - address += PAGE_SIZE; - page++; - } -} - -static void khugepaged_alloc_sleep(void) -{ - DEFINE_WAIT(wait); - - add_wait_queue(&khugepaged_wait, &wait); - freezable_schedule_timeout_interruptible( - msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); - remove_wait_queue(&khugepaged_wait, &wait); -} - -static int khugepaged_node_load[MAX_NUMNODES]; - -static bool khugepaged_scan_abort(int nid) -{ - int i; - - /* - * If zone_reclaim_mode is disabled, then no extra effort is made to - * allocate memory locally. 
- */ - if (!zone_reclaim_mode) - return false; - - /* If there is a count for this node already, it must be acceptable */ - if (khugepaged_node_load[nid]) - return false; - - for (i = 0; i < MAX_NUMNODES; i++) { - if (!khugepaged_node_load[i]) - continue; - if (node_distance(nid, i) > RECLAIM_DISTANCE) - return true; - } - return false; -} - -#ifdef CONFIG_NUMA -static int khugepaged_find_target_node(void) -{ - static int last_khugepaged_target_node = NUMA_NO_NODE; - int nid, target_node = 0, max_value = 0; - - /* find first node with max normal pages hit */ - for (nid = 0; nid < MAX_NUMNODES; nid++) - if (khugepaged_node_load[nid] > max_value) { - max_value = khugepaged_node_load[nid]; - target_node = nid; - } - - /* do some balance if several nodes have the same hit record */ - if (target_node <= last_khugepaged_target_node) - for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES; - nid++) - if (max_value == khugepaged_node_load[nid]) { - target_node = nid; - break; - } - - last_khugepaged_target_node = target_node; - return target_node; -} - -static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) -{ - if (IS_ERR(*hpage)) { - if (!*wait) - return false; - - *wait = false; - *hpage = NULL; - khugepaged_alloc_sleep(); - } else if (*hpage) { - put_page(*hpage); - *hpage = NULL; - } - - return true; -} - -static struct page * -khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, - unsigned long address, int node) -{ - VM_BUG_ON_PAGE(*hpage, *hpage); - - /* - * Before allocating the hugepage, release the mmap_sem read lock. - * The allocation can take potentially a long time if it involves - * sync compaction, and we do not need to hold the mmap_sem during - * that. We will recheck the vma after taking it again in write mode. 
- */ - up_read(&mm->mmap_sem); - - *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); - if (unlikely(!*hpage)) { - count_vm_event(THP_COLLAPSE_ALLOC_FAILED); - *hpage = ERR_PTR(-ENOMEM); - return NULL; - } - - prep_transhuge_page(*hpage); - count_vm_event(THP_COLLAPSE_ALLOC); - return *hpage; -} -#else -static int khugepaged_find_target_node(void) -{ - return 0; -} - -static inline struct page *alloc_khugepaged_hugepage(void) -{ - struct page *page; - - page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(), - HPAGE_PMD_ORDER); - if (page) - prep_transhuge_page(page); - return page; -} - -static struct page *khugepaged_alloc_hugepage(bool *wait) -{ - struct page *hpage; - - do { - hpage = alloc_khugepaged_hugepage(); - if (!hpage) { - count_vm_event(THP_COLLAPSE_ALLOC_FAILED); - if (!*wait) - return NULL; - - *wait = false; - khugepaged_alloc_sleep(); - } else - count_vm_event(THP_COLLAPSE_ALLOC); - } while (unlikely(!hpage) && likely(khugepaged_enabled())); - - return hpage; -} - -static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) -{ - if (!*hpage) - *hpage = khugepaged_alloc_hugepage(wait); - - if (unlikely(!*hpage)) - return false; - - return true; -} - -static struct page * -khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, - unsigned long address, int node) -{ - up_read(&mm->mmap_sem); - VM_BUG_ON(!*hpage); - - return *hpage; -} -#endif - -static bool hugepage_vma_check(struct vm_area_struct *vma) -{ - if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || - (vma->vm_flags & VM_NOHUGEPAGE)) - return false; - if (!vma->anon_vma || vma->vm_ops) - return false; - if (is_vma_temporary_stack(vma)) - return false; - return !(vma->vm_flags & VM_NO_KHUGEPAGED); -} - -/* - * If mmap_sem temporarily dropped, revalidate vma - * before taking mmap_sem. - * Return 0 if succeeds, otherwise return none-zero - * value (scan code). - */ - -static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address) -{ - struct vm_area_struct *vma; - unsigned long hstart, hend; - - if (unlikely(khugepaged_test_exit(mm))) - return SCAN_ANY_PROCESS; - - vma = find_vma(mm, address); - if (!vma) - return SCAN_VMA_NULL; - - hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; - hend = vma->vm_end & HPAGE_PMD_MASK; - if (address < hstart || address + HPAGE_PMD_SIZE > hend) - return SCAN_ADDRESS_RANGE; - if (!hugepage_vma_check(vma)) - return SCAN_VMA_CHECK; - return 0; -} - -/* - * Bring missing pages in from swap, to complete THP collapse. - * Only done if khugepaged_scan_pmd believes it is worthwhile. - * - * Called and returns without pte mapped or spinlocks held, - * but with mmap_sem held to protect against vma changes. 
- */ - -static bool __collapse_huge_page_swapin(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd) -{ - pte_t pteval; - int swapped_in = 0, ret = 0; - struct fault_env fe = { - .vma = vma, - .address = address, - .flags = FAULT_FLAG_ALLOW_RETRY, - .pmd = pmd, - }; - - fe.pte = pte_offset_map(pmd, address); - for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE; - fe.pte++, fe.address += PAGE_SIZE) { - pteval = *fe.pte; - if (!is_swap_pte(pteval)) - continue; - swapped_in++; - ret = do_swap_page(&fe, pteval); - /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ - if (ret & VM_FAULT_RETRY) { - down_read(&mm->mmap_sem); - /* vma is no longer available, don't continue to swapin */ - if (hugepage_vma_revalidate(mm, address)) - return false; - /* check if the pmd is still valid */ - if (mm_find_pmd(mm, address) != pmd) - return false; - } - if (ret & VM_FAULT_ERROR) { - trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0); - return false; - } - /* pte is unmapped now, we need to map it */ - fe.pte = pte_offset_map(pmd, fe.address); - } - fe.pte--; - pte_unmap(fe.pte); - trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1); - return true; -} - -static void collapse_huge_page(struct mm_struct *mm, - unsigned long address, - struct page **hpage, - struct vm_area_struct *vma, - int node) -{ - pmd_t *pmd, _pmd; - pte_t *pte; - pgtable_t pgtable; - struct page *new_page; - spinlock_t *pmd_ptl, *pte_ptl; - int isolated = 0, result = 0; - struct mem_cgroup *memcg; - unsigned long mmun_start; /* For mmu_notifiers */ - unsigned long mmun_end; /* For mmu_notifiers */ - gfp_t gfp; - - VM_BUG_ON(address & ~HPAGE_PMD_MASK); - - /* Only allocate from the target node */ - gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE; - - /* release the mmap_sem read lock. */ - new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node); - if (!new_page) { - result = SCAN_ALLOC_HUGE_PAGE_FAIL; - goto out_nolock; - } - - if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) { - result = SCAN_CGROUP_CHARGE_FAIL; - goto out_nolock; - } - - down_read(&mm->mmap_sem); - result = hugepage_vma_revalidate(mm, address); - if (result) { - mem_cgroup_cancel_charge(new_page, memcg, true); - up_read(&mm->mmap_sem); - goto out_nolock; - } - - pmd = mm_find_pmd(mm, address); - if (!pmd) { - result = SCAN_PMD_NULL; - mem_cgroup_cancel_charge(new_page, memcg, true); - up_read(&mm->mmap_sem); - goto out_nolock; - } - - /* - * __collapse_huge_page_swapin always returns with mmap_sem locked. - * If it fails, release mmap_sem and jump directly out. - * Continuing to collapse causes inconsistency. - */ - if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { - mem_cgroup_cancel_charge(new_page, memcg, true); - up_read(&mm->mmap_sem); - goto out_nolock; - } - - up_read(&mm->mmap_sem); - /* - * Prevent all access to pagetables with the exception of - * gup_fast later handled by the ptep_clear_flush and the VM - * handled by the anon_vma lock + PG_lock. 
- */ - down_write(&mm->mmap_sem); - result = hugepage_vma_revalidate(mm, address); - if (result) - goto out; - /* check if the pmd is still valid */ - if (mm_find_pmd(mm, address) != pmd) - goto out; - - anon_vma_lock_write(vma->anon_vma); - - pte = pte_offset_map(pmd, address); - pte_ptl = pte_lockptr(mm, pmd); - - mmun_start = address; - mmun_end = address + HPAGE_PMD_SIZE; - mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); - pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ - /* - * After this gup_fast can't run anymore. This also removes - * any huge TLB entry from the CPU so we won't allow - * huge and small TLB entries for the same virtual address - * to avoid the risk of CPU bugs in that area. - */ - _pmd = pmdp_collapse_flush(vma, address, pmd); - spin_unlock(pmd_ptl); - mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); - - spin_lock(pte_ptl); - isolated = __collapse_huge_page_isolate(vma, address, pte); - spin_unlock(pte_ptl); - - if (unlikely(!isolated)) { - pte_unmap(pte); - spin_lock(pmd_ptl); - BUG_ON(!pmd_none(*pmd)); - /* - * We can only use set_pmd_at when establishing - * hugepmds and never for establishing regular pmds that - * points to regular pagetables. Use pmd_populate for that - */ - pmd_populate(mm, pmd, pmd_pgtable(_pmd)); - spin_unlock(pmd_ptl); - anon_vma_unlock_write(vma->anon_vma); - result = SCAN_FAIL; - goto out; - } - - /* - * All pages are isolated and locked so anon_vma rmap - * can't run anymore. - */ - anon_vma_unlock_write(vma->anon_vma); - - __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); - pte_unmap(pte); - __SetPageUptodate(new_page); - pgtable = pmd_pgtable(_pmd); - - _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); - _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); - - /* - * spin_lock() below is not the equivalent of smp_wmb(), so - * this is needed to avoid the copy_huge_page writes to become - * visible after the set_pmd_at() write. 
- */ - smp_wmb(); - - spin_lock(pmd_ptl); - BUG_ON(!pmd_none(*pmd)); - page_add_new_anon_rmap(new_page, vma, address, true); - mem_cgroup_commit_charge(new_page, memcg, false, true); - lru_cache_add_active_or_unevictable(new_page, vma); - pgtable_trans_huge_deposit(mm, pmd, pgtable); - set_pmd_at(mm, address, pmd, _pmd); - update_mmu_cache_pmd(vma, address, pmd); - spin_unlock(pmd_ptl); - - *hpage = NULL; - - khugepaged_pages_collapsed++; - result = SCAN_SUCCEED; -out_up_write: - up_write(&mm->mmap_sem); -out_nolock: - trace_mm_collapse_huge_page(mm, isolated, result); - return; -out: - mem_cgroup_cancel_charge(new_page, memcg, true); - goto out_up_write; -} - -static int khugepaged_scan_pmd(struct mm_struct *mm, - struct vm_area_struct *vma, - unsigned long address, - struct page **hpage) -{ - pmd_t *pmd; - pte_t *pte, *_pte; - int ret = 0, none_or_zero = 0, result = 0; - struct page *page = NULL; - unsigned long _address; - spinlock_t *ptl; - int node = NUMA_NO_NODE, unmapped = 0; - bool writable = false, referenced = false; - - VM_BUG_ON(address & ~HPAGE_PMD_MASK); - - pmd = mm_find_pmd(mm, address); - if (!pmd) { - result = SCAN_PMD_NULL; - goto out; - } - - memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); - pte = pte_offset_map_lock(mm, pmd, address, &ptl); - for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; - _pte++, _address += PAGE_SIZE) { - pte_t pteval = *_pte; - if (is_swap_pte(pteval)) { - if (++unmapped <= khugepaged_max_ptes_swap) { - continue; - } else { - result = SCAN_EXCEED_SWAP_PTE; - goto out_unmap; - } - } - if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { - if (!userfaultfd_armed(vma) && - ++none_or_zero <= khugepaged_max_ptes_none) { - continue; - } else { - result = SCAN_EXCEED_NONE_PTE; - goto out_unmap; - } - } - if (!pte_present(pteval)) { - result = SCAN_PTE_NON_PRESENT; - goto out_unmap; - } - if (pte_write(pteval)) - writable = true; - - page = vm_normal_page(vma, _address, pteval); - if (unlikely(!page)) { - result = SCAN_PAGE_NULL; - goto out_unmap; - } - - /* TODO: teach khugepaged to collapse THP mapped with pte */ - if (PageCompound(page)) { - result = SCAN_PAGE_COMPOUND; - goto out_unmap; - } - - /* - * Record which node the original page is from and save this - * information to khugepaged_node_load[]. - * Khupaged will allocate hugepage from the node has the max - * hit record. - */ - node = page_to_nid(page); - if (khugepaged_scan_abort(node)) { - result = SCAN_SCAN_ABORT; - goto out_unmap; - } - khugepaged_node_load[node]++; - if (!PageLRU(page)) { - result = SCAN_PAGE_LRU; - goto out_unmap; - } - if (PageLocked(page)) { - result = SCAN_PAGE_LOCK; - goto out_unmap; - } - if (!PageAnon(page)) { - result = SCAN_PAGE_ANON; - goto out_unmap; - } - - /* - * cannot use mapcount: can't collapse if there's a gup pin. - * The page must only be referenced by the scanned process - * and page swap cache. 
- */ - if (page_count(page) != 1 + !!PageSwapCache(page)) { - result = SCAN_PAGE_COUNT; - goto out_unmap; - } - if (pte_young(pteval) || - page_is_young(page) || PageReferenced(page) || - mmu_notifier_test_young(vma->vm_mm, address)) - referenced = true; - } - if (writable) { - if (referenced) { - result = SCAN_SUCCEED; - ret = 1; - } else { - result = SCAN_NO_REFERENCED_PAGE; - } - } else { - result = SCAN_PAGE_RO; - } -out_unmap: - pte_unmap_unlock(pte, ptl); - if (ret) { - node = khugepaged_find_target_node(); - /* collapse_huge_page will return with the mmap_sem released */ - collapse_huge_page(mm, address, hpage, vma, node); - } -out: - trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, - none_or_zero, result, unmapped); - return ret; -} - -static void collect_mm_slot(struct mm_slot *mm_slot) -{ - struct mm_struct *mm = mm_slot->mm; - - VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); - - if (khugepaged_test_exit(mm)) { - /* free mm_slot */ - hash_del(&mm_slot->hash); - list_del(&mm_slot->mm_node); - - /* - * Not strictly needed because the mm exited already. - * - * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); - */ - - /* khugepaged_mm_lock actually not necessary for the below */ - free_mm_slot(mm_slot); - mmdrop(mm); - } -} - -static unsigned int khugepaged_scan_mm_slot(unsigned int pages, - struct page **hpage) - __releases(&khugepaged_mm_lock) - __acquires(&khugepaged_mm_lock) -{ - struct mm_slot *mm_slot; - struct mm_struct *mm; - struct vm_area_struct *vma; - int progress = 0; - - VM_BUG_ON(!pages); - VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); - - if (khugepaged_scan.mm_slot) - mm_slot = khugepaged_scan.mm_slot; - else { - mm_slot = list_entry(khugepaged_scan.mm_head.next, - struct mm_slot, mm_node); - khugepaged_scan.address = 0; - khugepaged_scan.mm_slot = mm_slot; - } - spin_unlock(&khugepaged_mm_lock); - - mm = mm_slot->mm; - down_read(&mm->mmap_sem); - if (unlikely(khugepaged_test_exit(mm))) - vma = NULL; - else - vma = find_vma(mm, khugepaged_scan.address); - - progress++; - for (; vma; vma = vma->vm_next) { - unsigned long hstart, hend; - - cond_resched(); - if (unlikely(khugepaged_test_exit(mm))) { - progress++; - break; - } - if (!hugepage_vma_check(vma)) { -skip: - progress++; - continue; - } - hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; - hend = vma->vm_end & HPAGE_PMD_MASK; - if (hstart >= hend) - goto skip; - if (khugepaged_scan.address > hend) - goto skip; - if (khugepaged_scan.address < hstart) - khugepaged_scan.address = hstart; - VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); - - while (khugepaged_scan.address < hend) { - int ret; - cond_resched(); - if (unlikely(khugepaged_test_exit(mm))) - goto breakouterloop; - - VM_BUG_ON(khugepaged_scan.address < hstart || - khugepaged_scan.address + HPAGE_PMD_SIZE > - hend); - ret = khugepaged_scan_pmd(mm, vma, - khugepaged_scan.address, - hpage); - /* move to next address */ - khugepaged_scan.address += HPAGE_PMD_SIZE; - progress += HPAGE_PMD_NR; - if (ret) - /* we released mmap_sem so break loop */ - goto breakouterloop_mmap_sem; - if (progress >= pages) - goto breakouterloop; - } - } -breakouterloop: - up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ -breakouterloop_mmap_sem: - - spin_lock(&khugepaged_mm_lock); - VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); - /* - * Release the current mm_slot if this mm is about to die, or - * if we scanned all vmas of this mm. 
- */ - if (khugepaged_test_exit(mm) || !vma) { - /* - * Make sure that if mm_users is reaching zero while - * khugepaged runs here, khugepaged_exit will find - * mm_slot not pointing to the exiting mm. - */ - if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { - khugepaged_scan.mm_slot = list_entry( - mm_slot->mm_node.next, - struct mm_slot, mm_node); - khugepaged_scan.address = 0; - } else { - khugepaged_scan.mm_slot = NULL; - khugepaged_full_scans++; - } - - collect_mm_slot(mm_slot); - } - - return progress; -} - -static int khugepaged_has_work(void) -{ - return !list_empty(&khugepaged_scan.mm_head) && - khugepaged_enabled(); -} - -static int khugepaged_wait_event(void) -{ - return !list_empty(&khugepaged_scan.mm_head) || - kthread_should_stop(); -} - -static void khugepaged_do_scan(void) -{ - struct page *hpage = NULL; - unsigned int progress = 0, pass_through_head = 0; - unsigned int pages = khugepaged_pages_to_scan; - bool wait = true; - - barrier(); /* write khugepaged_pages_to_scan to local stack */ - - while (progress < pages) { - if (!khugepaged_prealloc_page(&hpage, &wait)) - break; - - cond_resched(); - - if (unlikely(kthread_should_stop() || try_to_freeze())) - break; - - spin_lock(&khugepaged_mm_lock); - if (!khugepaged_scan.mm_slot) - pass_through_head++; - if (khugepaged_has_work() && - pass_through_head < 2) - progress += khugepaged_scan_mm_slot(pages - progress, - &hpage); - else - progress = pages; - spin_unlock(&khugepaged_mm_lock); - } - - if (!IS_ERR_OR_NULL(hpage)) - put_page(hpage); -} - -static bool khugepaged_should_wakeup(void) -{ - return kthread_should_stop() || - time_after_eq(jiffies, khugepaged_sleep_expire); -} - -static void khugepaged_wait_work(void) -{ - if (khugepaged_has_work()) { - const unsigned long scan_sleep_jiffies = - msecs_to_jiffies(khugepaged_scan_sleep_millisecs); - - if (!scan_sleep_jiffies) - return; - - khugepaged_sleep_expire = jiffies + scan_sleep_jiffies; - wait_event_freezable_timeout(khugepaged_wait, - khugepaged_should_wakeup(), - scan_sleep_jiffies); - return; - } - - if (khugepaged_enabled()) - wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); -} - -static int khugepaged(void *none) -{ - struct mm_slot *mm_slot; - - set_freezable(); - set_user_nice(current, MAX_NICE); - - while (!kthread_should_stop()) { - khugepaged_do_scan(); - khugepaged_wait_work(); - } - - spin_lock(&khugepaged_mm_lock); - mm_slot = khugepaged_scan.mm_slot; - khugepaged_scan.mm_slot = NULL; - if (mm_slot) - collect_mm_slot(mm_slot); - spin_unlock(&khugepaged_mm_lock); - return 0; -} - static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd) { diff --git a/mm/khugepaged.c b/mm/khugepaged.c new file mode 100644 index 000000000000..3e6d1a1b7e2c --- /dev/null +++ b/mm/khugepaged.c @@ -0,0 +1,1490 @@ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "internal.h" + +enum scan_result { + SCAN_FAIL, + SCAN_SUCCEED, + SCAN_PMD_NULL, + SCAN_EXCEED_NONE_PTE, + SCAN_PTE_NON_PRESENT, + SCAN_PAGE_RO, + SCAN_NO_REFERENCED_PAGE, + SCAN_PAGE_NULL, + SCAN_SCAN_ABORT, + SCAN_PAGE_COUNT, + SCAN_PAGE_LRU, + SCAN_PAGE_LOCK, + SCAN_PAGE_ANON, + SCAN_PAGE_COMPOUND, + SCAN_ANY_PROCESS, + SCAN_VMA_NULL, + SCAN_VMA_CHECK, + SCAN_ADDRESS_RANGE, + SCAN_SWAP_CACHE_PAGE, + SCAN_DEL_PAGE_LRU, + SCAN_ALLOC_HUGE_PAGE_FAIL, + SCAN_CGROUP_CHARGE_FAIL, + 
SCAN_EXCEED_SWAP_PTE +}; + +#define CREATE_TRACE_POINTS +#include + +/* default scan 8*512 pte (or vmas) every 30 second */ +static unsigned int khugepaged_pages_to_scan __read_mostly; +static unsigned int khugepaged_pages_collapsed; +static unsigned int khugepaged_full_scans; +static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000; +/* during fragmentation poll the hugepage allocator once every minute */ +static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000; +static unsigned long khugepaged_sleep_expire; +static DEFINE_SPINLOCK(khugepaged_mm_lock); +static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait); +/* + * default collapse hugepages if there is at least one pte mapped like + * it would have happened if the vma was large enough during page + * fault. + */ +static unsigned int khugepaged_max_ptes_none __read_mostly; +static unsigned int khugepaged_max_ptes_swap __read_mostly; + +#define MM_SLOTS_HASH_BITS 10 +static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); + +static struct kmem_cache *mm_slot_cache __read_mostly; + +/** + * struct mm_slot - hash lookup from mm to mm_slot + * @hash: hash collision list + * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head + * @mm: the mm that this information is valid for + */ +struct mm_slot { + struct hlist_node hash; + struct list_head mm_node; + struct mm_struct *mm; +}; + +/** + * struct khugepaged_scan - cursor for scanning + * @mm_head: the head of the mm list to scan + * @mm_slot: the current mm_slot we are scanning + * @address: the next address inside that to be scanned + * + * There is only the one khugepaged_scan instance of this cursor structure. + */ +struct khugepaged_scan { + struct list_head mm_head; + struct mm_slot *mm_slot; + unsigned long address; +}; + +static struct khugepaged_scan khugepaged_scan = { + .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), +}; + +static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs); +} + +static ssize_t scan_sleep_millisecs_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + unsigned long msecs; + int err; + + err = kstrtoul(buf, 10, &msecs); + if (err || msecs > UINT_MAX) + return -EINVAL; + + khugepaged_scan_sleep_millisecs = msecs; + khugepaged_sleep_expire = 0; + wake_up_interruptible(&khugepaged_wait); + + return count; +} +static struct kobj_attribute scan_sleep_millisecs_attr = + __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show, + scan_sleep_millisecs_store); + +static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs); +} + +static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + unsigned long msecs; + int err; + + err = kstrtoul(buf, 10, &msecs); + if (err || msecs > UINT_MAX) + return -EINVAL; + + khugepaged_alloc_sleep_millisecs = msecs; + khugepaged_sleep_expire = 0; + wake_up_interruptible(&khugepaged_wait); + + return count; +} +static struct kobj_attribute alloc_sleep_millisecs_attr = + __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show, + alloc_sleep_millisecs_store); + +static ssize_t pages_to_scan_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_pages_to_scan); +} 
+static ssize_t pages_to_scan_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long pages; + + err = kstrtoul(buf, 10, &pages); + if (err || !pages || pages > UINT_MAX) + return -EINVAL; + + khugepaged_pages_to_scan = pages; + + return count; +} +static struct kobj_attribute pages_to_scan_attr = + __ATTR(pages_to_scan, 0644, pages_to_scan_show, + pages_to_scan_store); + +static ssize_t pages_collapsed_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_pages_collapsed); +} +static struct kobj_attribute pages_collapsed_attr = + __ATTR_RO(pages_collapsed); + +static ssize_t full_scans_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_full_scans); +} +static struct kobj_attribute full_scans_attr = + __ATTR_RO(full_scans); + +static ssize_t khugepaged_defrag_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return single_hugepage_flag_show(kobj, attr, buf, + TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); +} +static ssize_t khugepaged_defrag_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + return single_hugepage_flag_store(kobj, attr, buf, count, + TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG); +} +static struct kobj_attribute khugepaged_defrag_attr = + __ATTR(defrag, 0644, khugepaged_defrag_show, + khugepaged_defrag_store); + +/* + * max_ptes_none controls if khugepaged should collapse hugepages over + * any unmapped ptes in turn potentially increasing the memory + * footprint of the vmas. When max_ptes_none is 0 khugepaged will not + * reduce the available free memory in the system as it + * runs. Increasing max_ptes_none will instead potentially reduce the + * free memory in the system during the khugepaged scan. 
+ */ +static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_max_ptes_none); +} +static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long max_ptes_none; + + err = kstrtoul(buf, 10, &max_ptes_none); + if (err || max_ptes_none > HPAGE_PMD_NR-1) + return -EINVAL; + + khugepaged_max_ptes_none = max_ptes_none; + + return count; +} +static struct kobj_attribute khugepaged_max_ptes_none_attr = + __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show, + khugepaged_max_ptes_none_store); + +static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + return sprintf(buf, "%u\n", khugepaged_max_ptes_swap); +} + +static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + unsigned long max_ptes_swap; + + err = kstrtoul(buf, 10, &max_ptes_swap); + if (err || max_ptes_swap > HPAGE_PMD_NR-1) + return -EINVAL; + + khugepaged_max_ptes_swap = max_ptes_swap; + + return count; +} + +static struct kobj_attribute khugepaged_max_ptes_swap_attr = + __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show, + khugepaged_max_ptes_swap_store); + +static struct attribute *khugepaged_attr[] = { + &khugepaged_defrag_attr.attr, + &khugepaged_max_ptes_none_attr.attr, + &pages_to_scan_attr.attr, + &pages_collapsed_attr.attr, + &full_scans_attr.attr, + &scan_sleep_millisecs_attr.attr, + &alloc_sleep_millisecs_attr.attr, + &khugepaged_max_ptes_swap_attr.attr, + NULL, +}; + +struct attribute_group khugepaged_attr_group = { + .attrs = khugepaged_attr, + .name = "khugepaged", +}; + +#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) + +int hugepage_madvise(struct vm_area_struct *vma, + unsigned long *vm_flags, int advice) +{ + switch (advice) { + case MADV_HUGEPAGE: +#ifdef CONFIG_S390 + /* + * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390 + * can't handle this properly after s390_enable_sie, so we simply + * ignore the madvise to prevent qemu from causing a SIGSEGV. + */ + if (mm_has_pgste(vma->vm_mm)) + return 0; +#endif + *vm_flags &= ~VM_NOHUGEPAGE; + *vm_flags |= VM_HUGEPAGE; + /* + * If the vma become good for khugepaged to scan, + * register it here without waiting a page fault that + * may not happen any time soon. + */ + if (!(*vm_flags & VM_NO_KHUGEPAGED) && + khugepaged_enter_vma_merge(vma, *vm_flags)) + return -ENOMEM; + break; + case MADV_NOHUGEPAGE: + *vm_flags &= ~VM_HUGEPAGE; + *vm_flags |= VM_NOHUGEPAGE; + /* + * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning + * this vma even if we leave the mm registered in khugepaged if + * it got registered before VM_NOHUGEPAGE was set. 
+ */ + break; + } + + return 0; +} + +int __init khugepaged_init(void) +{ + mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", + sizeof(struct mm_slot), + __alignof__(struct mm_slot), 0, NULL); + if (!mm_slot_cache) + return -ENOMEM; + + khugepaged_pages_to_scan = HPAGE_PMD_NR * 8; + khugepaged_max_ptes_none = HPAGE_PMD_NR - 1; + khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8; + + return 0; +} + +void __init khugepaged_destroy(void) +{ + kmem_cache_destroy(mm_slot_cache); +} + +static inline struct mm_slot *alloc_mm_slot(void) +{ + if (!mm_slot_cache) /* initialization failed */ + return NULL; + return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); +} + +static inline void free_mm_slot(struct mm_slot *mm_slot) +{ + kmem_cache_free(mm_slot_cache, mm_slot); +} + +static struct mm_slot *get_mm_slot(struct mm_struct *mm) +{ + struct mm_slot *mm_slot; + + hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) + if (mm == mm_slot->mm) + return mm_slot; + + return NULL; +} + +static void insert_to_mm_slots_hash(struct mm_struct *mm, + struct mm_slot *mm_slot) +{ + mm_slot->mm = mm; + hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); +} + +static inline int khugepaged_test_exit(struct mm_struct *mm) +{ + return atomic_read(&mm->mm_users) == 0; +} + +int __khugepaged_enter(struct mm_struct *mm) +{ + struct mm_slot *mm_slot; + int wakeup; + + mm_slot = alloc_mm_slot(); + if (!mm_slot) + return -ENOMEM; + + /* __khugepaged_exit() must not run from under us */ + VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); + if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { + free_mm_slot(mm_slot); + return 0; + } + + spin_lock(&khugepaged_mm_lock); + insert_to_mm_slots_hash(mm, mm_slot); + /* + * Insert just behind the scanning cursor, to let the area settle + * down a little. + */ + wakeup = list_empty(&khugepaged_scan.mm_head); + list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); + spin_unlock(&khugepaged_mm_lock); + + atomic_inc(&mm->mm_count); + if (wakeup) + wake_up_interruptible(&khugepaged_wait); + + return 0; +} + +int khugepaged_enter_vma_merge(struct vm_area_struct *vma, + unsigned long vm_flags) +{ + unsigned long hstart, hend; + if (!vma->anon_vma) + /* + * Not yet faulted in so we will register later in the + * page fault if needed. + */ + return 0; + if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED)) + /* khugepaged not yet working on file or special mappings */ + return 0; + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; + hend = vma->vm_end & HPAGE_PMD_MASK; + if (hstart < hend) + return khugepaged_enter(vma, vm_flags); + return 0; +} + +void __khugepaged_exit(struct mm_struct *mm) +{ + struct mm_slot *mm_slot; + int free = 0; + + spin_lock(&khugepaged_mm_lock); + mm_slot = get_mm_slot(mm); + if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { + hash_del(&mm_slot->hash); + list_del(&mm_slot->mm_node); + free = 1; + } + spin_unlock(&khugepaged_mm_lock); + + if (free) { + clear_bit(MMF_VM_HUGEPAGE, &mm->flags); + free_mm_slot(mm_slot); + mmdrop(mm); + } else if (mm_slot) { + /* + * This is required to serialize against + * khugepaged_test_exit() (which is guaranteed to run + * under mmap sem read mode). Stop here (after we + * return all pagetables will be destroyed) until + * khugepaged has finished working on the pagetables + * under the mmap_sem. 
+ */ + down_write(&mm->mmap_sem); + up_write(&mm->mmap_sem); + } +} + +static void release_pte_page(struct page *page) +{ + /* 0 stands for page_is_file_cache(page) == false */ + dec_zone_page_state(page, NR_ISOLATED_ANON + 0); + unlock_page(page); + putback_lru_page(page); +} + +static void release_pte_pages(pte_t *pte, pte_t *_pte) +{ + while (--_pte >= pte) { + pte_t pteval = *_pte; + if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval))) + release_pte_page(pte_page(pteval)); + } +} + +static int __collapse_huge_page_isolate(struct vm_area_struct *vma, + unsigned long address, + pte_t *pte) +{ + struct page *page = NULL; + pte_t *_pte; + int none_or_zero = 0, result = 0; + bool referenced = false, writable = false; + + for (_pte = pte; _pte < pte+HPAGE_PMD_NR; + _pte++, address += PAGE_SIZE) { + pte_t pteval = *_pte; + if (pte_none(pteval) || (pte_present(pteval) && + is_zero_pfn(pte_pfn(pteval)))) { + if (!userfaultfd_armed(vma) && + ++none_or_zero <= khugepaged_max_ptes_none) { + continue; + } else { + result = SCAN_EXCEED_NONE_PTE; + goto out; + } + } + if (!pte_present(pteval)) { + result = SCAN_PTE_NON_PRESENT; + goto out; + } + page = vm_normal_page(vma, address, pteval); + if (unlikely(!page)) { + result = SCAN_PAGE_NULL; + goto out; + } + + VM_BUG_ON_PAGE(PageCompound(page), page); + VM_BUG_ON_PAGE(!PageAnon(page), page); + VM_BUG_ON_PAGE(!PageSwapBacked(page), page); + + /* + * We can do it before isolate_lru_page because the + * page can't be freed from under us. NOTE: PG_lock + * is needed to serialize against split_huge_page + * when invoked from the VM. + */ + if (!trylock_page(page)) { + result = SCAN_PAGE_LOCK; + goto out; + } + + /* + * cannot use mapcount: can't collapse if there's a gup pin. + * The page must only be referenced by the scanned process + * and page swap cache. + */ + if (page_count(page) != 1 + !!PageSwapCache(page)) { + unlock_page(page); + result = SCAN_PAGE_COUNT; + goto out; + } + if (pte_write(pteval)) { + writable = true; + } else { + if (PageSwapCache(page) && + !reuse_swap_page(page, NULL)) { + unlock_page(page); + result = SCAN_SWAP_CACHE_PAGE; + goto out; + } + /* + * Page is not in the swap cache. It can be collapsed + * into a THP. + */ + } + + /* + * Isolate the page to avoid collapsing an hugepage + * currently in use by the VM. 
+ */ + if (isolate_lru_page(page)) { + unlock_page(page); + result = SCAN_DEL_PAGE_LRU; + goto out; + } + /* 0 stands for page_is_file_cache(page) == false */ + inc_zone_page_state(page, NR_ISOLATED_ANON + 0); + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(PageLRU(page), page); + + /* If there is no mapped pte young don't collapse the page */ + if (pte_young(pteval) || + page_is_young(page) || PageReferenced(page) || + mmu_notifier_test_young(vma->vm_mm, address)) + referenced = true; + } + if (likely(writable)) { + if (likely(referenced)) { + result = SCAN_SUCCEED; + trace_mm_collapse_huge_page_isolate(page, none_or_zero, + referenced, writable, result); + return 1; + } + } else { + result = SCAN_PAGE_RO; + } + +out: + release_pte_pages(pte, _pte); + trace_mm_collapse_huge_page_isolate(page, none_or_zero, + referenced, writable, result); + return 0; +} + +static void __collapse_huge_page_copy(pte_t *pte, struct page *page, + struct vm_area_struct *vma, + unsigned long address, + spinlock_t *ptl) +{ + pte_t *_pte; + for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { + pte_t pteval = *_pte; + struct page *src_page; + + if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { + clear_user_highpage(page, address); + add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); + if (is_zero_pfn(pte_pfn(pteval))) { + /* + * ptl mostly unnecessary. + */ + spin_lock(ptl); + /* + * paravirt calls inside pte_clear here are + * superfluous. + */ + pte_clear(vma->vm_mm, address, _pte); + spin_unlock(ptl); + } + } else { + src_page = pte_page(pteval); + copy_user_highpage(page, src_page, address, vma); + VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page); + release_pte_page(src_page); + /* + * ptl mostly unnecessary, but preempt has to + * be disabled to update the per-cpu stats + * inside page_remove_rmap(). + */ + spin_lock(ptl); + /* + * paravirt calls inside pte_clear here are + * superfluous. + */ + pte_clear(vma->vm_mm, address, _pte); + page_remove_rmap(src_page, false); + spin_unlock(ptl); + free_page_and_swap_cache(src_page); + } + + address += PAGE_SIZE; + page++; + } +} + +static void khugepaged_alloc_sleep(void) +{ + DEFINE_WAIT(wait); + + add_wait_queue(&khugepaged_wait, &wait); + freezable_schedule_timeout_interruptible( + msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); + remove_wait_queue(&khugepaged_wait, &wait); +} + +static int khugepaged_node_load[MAX_NUMNODES]; + +static bool khugepaged_scan_abort(int nid) +{ + int i; + + /* + * If zone_reclaim_mode is disabled, then no extra effort is made to + * allocate memory locally. + */ + if (!zone_reclaim_mode) + return false; + + /* If there is a count for this node already, it must be acceptable */ + if (khugepaged_node_load[nid]) + return false; + + for (i = 0; i < MAX_NUMNODES; i++) { + if (!khugepaged_node_load[i]) + continue; + if (node_distance(nid, i) > RECLAIM_DISTANCE) + return true; + } + return false; +} + +/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */ +static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void) +{ + return GFP_TRANSHUGE | (khugepaged_defrag() ? 
__GFP_DIRECT_RECLAIM : 0); +} + +#ifdef CONFIG_NUMA +static int khugepaged_find_target_node(void) +{ + static int last_khugepaged_target_node = NUMA_NO_NODE; + int nid, target_node = 0, max_value = 0; + + /* find first node with max normal pages hit */ + for (nid = 0; nid < MAX_NUMNODES; nid++) + if (khugepaged_node_load[nid] > max_value) { + max_value = khugepaged_node_load[nid]; + target_node = nid; + } + + /* do some balance if several nodes have the same hit record */ + if (target_node <= last_khugepaged_target_node) + for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES; + nid++) + if (max_value == khugepaged_node_load[nid]) { + target_node = nid; + break; + } + + last_khugepaged_target_node = target_node; + return target_node; +} + +static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) +{ + if (IS_ERR(*hpage)) { + if (!*wait) + return false; + + *wait = false; + *hpage = NULL; + khugepaged_alloc_sleep(); + } else if (*hpage) { + put_page(*hpage); + *hpage = NULL; + } + + return true; +} + +static struct page * +khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, + unsigned long address, int node) +{ + VM_BUG_ON_PAGE(*hpage, *hpage); + + /* + * Before allocating the hugepage, release the mmap_sem read lock. + * The allocation can take potentially a long time if it involves + * sync compaction, and we do not need to hold the mmap_sem during + * that. We will recheck the vma after taking it again in write mode. + */ + up_read(&mm->mmap_sem); + + *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); + if (unlikely(!*hpage)) { + count_vm_event(THP_COLLAPSE_ALLOC_FAILED); + *hpage = ERR_PTR(-ENOMEM); + return NULL; + } + + prep_transhuge_page(*hpage); + count_vm_event(THP_COLLAPSE_ALLOC); + return *hpage; +} +#else +static int khugepaged_find_target_node(void) +{ + return 0; +} + +static inline struct page *alloc_khugepaged_hugepage(void) +{ + struct page *page; + + page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(), + HPAGE_PMD_ORDER); + if (page) + prep_transhuge_page(page); + return page; +} + +static struct page *khugepaged_alloc_hugepage(bool *wait) +{ + struct page *hpage; + + do { + hpage = alloc_khugepaged_hugepage(); + if (!hpage) { + count_vm_event(THP_COLLAPSE_ALLOC_FAILED); + if (!*wait) + return NULL; + + *wait = false; + khugepaged_alloc_sleep(); + } else + count_vm_event(THP_COLLAPSE_ALLOC); + } while (unlikely(!hpage) && likely(khugepaged_enabled())); + + return hpage; +} + +static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) +{ + if (!*hpage) + *hpage = khugepaged_alloc_hugepage(wait); + + if (unlikely(!*hpage)) + return false; + + return true; +} + +static struct page * +khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, + unsigned long address, int node) +{ + up_read(&mm->mmap_sem); + VM_BUG_ON(!*hpage); + + return *hpage; +} +#endif + +static bool hugepage_vma_check(struct vm_area_struct *vma) +{ + if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || + (vma->vm_flags & VM_NOHUGEPAGE)) + return false; + if (!vma->anon_vma || vma->vm_ops) + return false; + if (is_vma_temporary_stack(vma)) + return false; + return !(vma->vm_flags & VM_NO_KHUGEPAGED); +} + +/* + * If mmap_sem temporarily dropped, revalidate vma + * before taking mmap_sem. + * Return 0 if succeeds, otherwise return none-zero + * value (scan code). 
+ */ + +static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address) +{ + struct vm_area_struct *vma; + unsigned long hstart, hend; + + if (unlikely(khugepaged_test_exit(mm))) + return SCAN_ANY_PROCESS; + + vma = find_vma(mm, address); + if (!vma) + return SCAN_VMA_NULL; + + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; + hend = vma->vm_end & HPAGE_PMD_MASK; + if (address < hstart || address + HPAGE_PMD_SIZE > hend) + return SCAN_ADDRESS_RANGE; + if (!hugepage_vma_check(vma)) + return SCAN_VMA_CHECK; + return 0; +} + +/* + * Bring missing pages in from swap, to complete THP collapse. + * Only done if khugepaged_scan_pmd believes it is worthwhile. + * + * Called and returns without pte mapped or spinlocks held, + * but with mmap_sem held to protect against vma changes. + */ + +static bool __collapse_huge_page_swapin(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd) +{ + pte_t pteval; + int swapped_in = 0, ret = 0; + struct fault_env fe = { + .vma = vma, + .address = address, + .flags = FAULT_FLAG_ALLOW_RETRY, + .pmd = pmd, + }; + + fe.pte = pte_offset_map(pmd, address); + for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE; + fe.pte++, fe.address += PAGE_SIZE) { + pteval = *fe.pte; + if (!is_swap_pte(pteval)) + continue; + swapped_in++; + ret = do_swap_page(&fe, pteval); + /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ + if (ret & VM_FAULT_RETRY) { + down_read(&mm->mmap_sem); + /* vma is no longer available, don't continue to swapin */ + if (hugepage_vma_revalidate(mm, address)) + return false; + /* check if the pmd is still valid */ + if (mm_find_pmd(mm, address) != pmd) + return false; + } + if (ret & VM_FAULT_ERROR) { + trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0); + return false; + } + /* pte is unmapped now, we need to map it */ + fe.pte = pte_offset_map(pmd, fe.address); + } + fe.pte--; + pte_unmap(fe.pte); + trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1); + return true; +} + +static void collapse_huge_page(struct mm_struct *mm, + unsigned long address, + struct page **hpage, + struct vm_area_struct *vma, + int node) +{ + pmd_t *pmd, _pmd; + pte_t *pte; + pgtable_t pgtable; + struct page *new_page; + spinlock_t *pmd_ptl, *pte_ptl; + int isolated = 0, result = 0; + struct mem_cgroup *memcg; + unsigned long mmun_start; /* For mmu_notifiers */ + unsigned long mmun_end; /* For mmu_notifiers */ + gfp_t gfp; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + /* Only allocate from the target node */ + gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE; + + /* release the mmap_sem read lock. */ + new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node); + if (!new_page) { + result = SCAN_ALLOC_HUGE_PAGE_FAIL; + goto out_nolock; + } + + if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) { + result = SCAN_CGROUP_CHARGE_FAIL; + goto out_nolock; + } + + down_read(&mm->mmap_sem); + result = hugepage_vma_revalidate(mm, address); + if (result) { + mem_cgroup_cancel_charge(new_page, memcg, true); + up_read(&mm->mmap_sem); + goto out_nolock; + } + + pmd = mm_find_pmd(mm, address); + if (!pmd) { + result = SCAN_PMD_NULL; + mem_cgroup_cancel_charge(new_page, memcg, true); + up_read(&mm->mmap_sem); + goto out_nolock; + } + + /* + * __collapse_huge_page_swapin always returns with mmap_sem locked. + * If it fails, release mmap_sem and jump directly out. + * Continuing to collapse causes inconsistency. 
+ */ + if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { + mem_cgroup_cancel_charge(new_page, memcg, true); + up_read(&mm->mmap_sem); + goto out_nolock; + } + + up_read(&mm->mmap_sem); + /* + * Prevent all access to pagetables with the exception of + * gup_fast later handled by the ptep_clear_flush and the VM + * handled by the anon_vma lock + PG_lock. + */ + down_write(&mm->mmap_sem); + result = hugepage_vma_revalidate(mm, address); + if (result) + goto out; + /* check if the pmd is still valid */ + if (mm_find_pmd(mm, address) != pmd) + goto out; + + anon_vma_lock_write(vma->anon_vma); + + pte = pte_offset_map(pmd, address); + pte_ptl = pte_lockptr(mm, pmd); + + mmun_start = address; + mmun_end = address + HPAGE_PMD_SIZE; + mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); + pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ + /* + * After this gup_fast can't run anymore. This also removes + * any huge TLB entry from the CPU so we won't allow + * huge and small TLB entries for the same virtual address + * to avoid the risk of CPU bugs in that area. + */ + _pmd = pmdp_collapse_flush(vma, address, pmd); + spin_unlock(pmd_ptl); + mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); + + spin_lock(pte_ptl); + isolated = __collapse_huge_page_isolate(vma, address, pte); + spin_unlock(pte_ptl); + + if (unlikely(!isolated)) { + pte_unmap(pte); + spin_lock(pmd_ptl); + BUG_ON(!pmd_none(*pmd)); + /* + * We can only use set_pmd_at when establishing + * hugepmds and never for establishing regular pmds that + * points to regular pagetables. Use pmd_populate for that + */ + pmd_populate(mm, pmd, pmd_pgtable(_pmd)); + spin_unlock(pmd_ptl); + anon_vma_unlock_write(vma->anon_vma); + result = SCAN_FAIL; + goto out; + } + + /* + * All pages are isolated and locked so anon_vma rmap + * can't run anymore. + */ + anon_vma_unlock_write(vma->anon_vma); + + __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); + pte_unmap(pte); + __SetPageUptodate(new_page); + pgtable = pmd_pgtable(_pmd); + + _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); + _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); + + /* + * spin_lock() below is not the equivalent of smp_wmb(), so + * this is needed to avoid the copy_huge_page writes to become + * visible after the set_pmd_at() write. 
+ */ + smp_wmb(); + + spin_lock(pmd_ptl); + BUG_ON(!pmd_none(*pmd)); + page_add_new_anon_rmap(new_page, vma, address, true); + mem_cgroup_commit_charge(new_page, memcg, false, true); + lru_cache_add_active_or_unevictable(new_page, vma); + pgtable_trans_huge_deposit(mm, pmd, pgtable); + set_pmd_at(mm, address, pmd, _pmd); + update_mmu_cache_pmd(vma, address, pmd); + spin_unlock(pmd_ptl); + + *hpage = NULL; + + khugepaged_pages_collapsed++; + result = SCAN_SUCCEED; +out_up_write: + up_write(&mm->mmap_sem); +out_nolock: + trace_mm_collapse_huge_page(mm, isolated, result); + return; +out: + mem_cgroup_cancel_charge(new_page, memcg, true); + goto out_up_write; +} + +static int khugepaged_scan_pmd(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long address, + struct page **hpage) +{ + pmd_t *pmd; + pte_t *pte, *_pte; + int ret = 0, none_or_zero = 0, result = 0; + struct page *page = NULL; + unsigned long _address; + spinlock_t *ptl; + int node = NUMA_NO_NODE, unmapped = 0; + bool writable = false, referenced = false; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + pmd = mm_find_pmd(mm, address); + if (!pmd) { + result = SCAN_PMD_NULL; + goto out; + } + + memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); + pte = pte_offset_map_lock(mm, pmd, address, &ptl); + for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; + _pte++, _address += PAGE_SIZE) { + pte_t pteval = *_pte; + if (is_swap_pte(pteval)) { + if (++unmapped <= khugepaged_max_ptes_swap) { + continue; + } else { + result = SCAN_EXCEED_SWAP_PTE; + goto out_unmap; + } + } + if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { + if (!userfaultfd_armed(vma) && + ++none_or_zero <= khugepaged_max_ptes_none) { + continue; + } else { + result = SCAN_EXCEED_NONE_PTE; + goto out_unmap; + } + } + if (!pte_present(pteval)) { + result = SCAN_PTE_NON_PRESENT; + goto out_unmap; + } + if (pte_write(pteval)) + writable = true; + + page = vm_normal_page(vma, _address, pteval); + if (unlikely(!page)) { + result = SCAN_PAGE_NULL; + goto out_unmap; + } + + /* TODO: teach khugepaged to collapse THP mapped with pte */ + if (PageCompound(page)) { + result = SCAN_PAGE_COMPOUND; + goto out_unmap; + } + + /* + * Record which node the original page is from and save this + * information to khugepaged_node_load[]. + * Khupaged will allocate hugepage from the node has the max + * hit record. + */ + node = page_to_nid(page); + if (khugepaged_scan_abort(node)) { + result = SCAN_SCAN_ABORT; + goto out_unmap; + } + khugepaged_node_load[node]++; + if (!PageLRU(page)) { + result = SCAN_PAGE_LRU; + goto out_unmap; + } + if (PageLocked(page)) { + result = SCAN_PAGE_LOCK; + goto out_unmap; + } + if (!PageAnon(page)) { + result = SCAN_PAGE_ANON; + goto out_unmap; + } + + /* + * cannot use mapcount: can't collapse if there's a gup pin. + * The page must only be referenced by the scanned process + * and page swap cache. 
+ */ + if (page_count(page) != 1 + !!PageSwapCache(page)) { + result = SCAN_PAGE_COUNT; + goto out_unmap; + } + if (pte_young(pteval) || + page_is_young(page) || PageReferenced(page) || + mmu_notifier_test_young(vma->vm_mm, address)) + referenced = true; + } + if (writable) { + if (referenced) { + result = SCAN_SUCCEED; + ret = 1; + } else { + result = SCAN_NO_REFERENCED_PAGE; + } + } else { + result = SCAN_PAGE_RO; + } +out_unmap: + pte_unmap_unlock(pte, ptl); + if (ret) { + node = khugepaged_find_target_node(); + /* collapse_huge_page will return with the mmap_sem released */ + collapse_huge_page(mm, address, hpage, vma, node); + } +out: + trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, + none_or_zero, result, unmapped); + return ret; +} + +static void collect_mm_slot(struct mm_slot *mm_slot) +{ + struct mm_struct *mm = mm_slot->mm; + + VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); + + if (khugepaged_test_exit(mm)) { + /* free mm_slot */ + hash_del(&mm_slot->hash); + list_del(&mm_slot->mm_node); + + /* + * Not strictly needed because the mm exited already. + * + * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); + */ + + /* khugepaged_mm_lock actually not necessary for the below */ + free_mm_slot(mm_slot); + mmdrop(mm); + } +} + +static unsigned int khugepaged_scan_mm_slot(unsigned int pages, + struct page **hpage) + __releases(&khugepaged_mm_lock) + __acquires(&khugepaged_mm_lock) +{ + struct mm_slot *mm_slot; + struct mm_struct *mm; + struct vm_area_struct *vma; + int progress = 0; + + VM_BUG_ON(!pages); + VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); + + if (khugepaged_scan.mm_slot) + mm_slot = khugepaged_scan.mm_slot; + else { + mm_slot = list_entry(khugepaged_scan.mm_head.next, + struct mm_slot, mm_node); + khugepaged_scan.address = 0; + khugepaged_scan.mm_slot = mm_slot; + } + spin_unlock(&khugepaged_mm_lock); + + mm = mm_slot->mm; + down_read(&mm->mmap_sem); + if (unlikely(khugepaged_test_exit(mm))) + vma = NULL; + else + vma = find_vma(mm, khugepaged_scan.address); + + progress++; + for (; vma; vma = vma->vm_next) { + unsigned long hstart, hend; + + cond_resched(); + if (unlikely(khugepaged_test_exit(mm))) { + progress++; + break; + } + if (!hugepage_vma_check(vma)) { +skip: + progress++; + continue; + } + hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; + hend = vma->vm_end & HPAGE_PMD_MASK; + if (hstart >= hend) + goto skip; + if (khugepaged_scan.address > hend) + goto skip; + if (khugepaged_scan.address < hstart) + khugepaged_scan.address = hstart; + VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); + + while (khugepaged_scan.address < hend) { + int ret; + cond_resched(); + if (unlikely(khugepaged_test_exit(mm))) + goto breakouterloop; + + VM_BUG_ON(khugepaged_scan.address < hstart || + khugepaged_scan.address + HPAGE_PMD_SIZE > + hend); + ret = khugepaged_scan_pmd(mm, vma, + khugepaged_scan.address, + hpage); + /* move to next address */ + khugepaged_scan.address += HPAGE_PMD_SIZE; + progress += HPAGE_PMD_NR; + if (ret) + /* we released mmap_sem so break loop */ + goto breakouterloop_mmap_sem; + if (progress >= pages) + goto breakouterloop; + } + } +breakouterloop: + up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ +breakouterloop_mmap_sem: + + spin_lock(&khugepaged_mm_lock); + VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); + /* + * Release the current mm_slot if this mm is about to die, or + * if we scanned all vmas of this mm. 
+ */ + if (khugepaged_test_exit(mm) || !vma) { + /* + * Make sure that if mm_users is reaching zero while + * khugepaged runs here, khugepaged_exit will find + * mm_slot not pointing to the exiting mm. + */ + if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { + khugepaged_scan.mm_slot = list_entry( + mm_slot->mm_node.next, + struct mm_slot, mm_node); + khugepaged_scan.address = 0; + } else { + khugepaged_scan.mm_slot = NULL; + khugepaged_full_scans++; + } + + collect_mm_slot(mm_slot); + } + + return progress; +} + +static int khugepaged_has_work(void) +{ + return !list_empty(&khugepaged_scan.mm_head) && + khugepaged_enabled(); +} + +static int khugepaged_wait_event(void) +{ + return !list_empty(&khugepaged_scan.mm_head) || + kthread_should_stop(); +} + +static void khugepaged_do_scan(void) +{ + struct page *hpage = NULL; + unsigned int progress = 0, pass_through_head = 0; + unsigned int pages = khugepaged_pages_to_scan; + bool wait = true; + + barrier(); /* write khugepaged_pages_to_scan to local stack */ + + while (progress < pages) { + if (!khugepaged_prealloc_page(&hpage, &wait)) + break; + + cond_resched(); + + if (unlikely(kthread_should_stop() || try_to_freeze())) + break; + + spin_lock(&khugepaged_mm_lock); + if (!khugepaged_scan.mm_slot) + pass_through_head++; + if (khugepaged_has_work() && + pass_through_head < 2) + progress += khugepaged_scan_mm_slot(pages - progress, + &hpage); + else + progress = pages; + spin_unlock(&khugepaged_mm_lock); + } + + if (!IS_ERR_OR_NULL(hpage)) + put_page(hpage); +} + +static bool khugepaged_should_wakeup(void) +{ + return kthread_should_stop() || + time_after_eq(jiffies, khugepaged_sleep_expire); +} + +static void khugepaged_wait_work(void) +{ + if (khugepaged_has_work()) { + const unsigned long scan_sleep_jiffies = + msecs_to_jiffies(khugepaged_scan_sleep_millisecs); + + if (!scan_sleep_jiffies) + return; + + khugepaged_sleep_expire = jiffies + scan_sleep_jiffies; + wait_event_freezable_timeout(khugepaged_wait, + khugepaged_should_wakeup(), + scan_sleep_jiffies); + return; + } + + if (khugepaged_enabled()) + wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); +} + +static int khugepaged(void *none) +{ + struct mm_slot *mm_slot; + + set_freezable(); + set_user_nice(current, MAX_NICE); + + while (!kthread_should_stop()) { + khugepaged_do_scan(); + khugepaged_wait_work(); + } + + spin_lock(&khugepaged_mm_lock); + mm_slot = khugepaged_scan.mm_slot; + khugepaged_scan.mm_slot = NULL; + if (mm_slot) + collect_mm_slot(mm_slot); + spin_unlock(&khugepaged_mm_lock); + return 0; +} + +static void set_recommended_min_free_kbytes(void) +{ + struct zone *zone; + int nr_zones = 0; + unsigned long recommended_min; + + for_each_populated_zone(zone) + nr_zones++; + + /* Ensure 2 pageblocks are free to assist fragmentation avoidance */ + recommended_min = pageblock_nr_pages * nr_zones * 2; + + /* + * Make sure that on average at least two pageblocks are almost free + * of another type, one for a migratetype to fall back to and a + * second to avoid subsequent fallbacks of other types There are 3 + * MIGRATE_TYPES we care about. 
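A rough worked example of the calculation this comment leads into (illustrative numbers: 4K pages so pageblock_nr_pages is 512, two populated zones, MIGRATE_PCPTYPES is 3, and assuming the 5%-of-lowmem cap below does not bite):

    base:   2 * 512 * 2      =  2048 pages
    extra:  512 * 2 * 3 * 3  =  9216 pages   -> 11264 pages in total
    kbytes: 11264 << (PAGE_SHIFT - 10) = 11264 * 4 = 45056 kB, roughly 44 MB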
+ */ + recommended_min += pageblock_nr_pages * nr_zones * + MIGRATE_PCPTYPES * MIGRATE_PCPTYPES; + + /* don't ever allow to reserve more than 5% of the lowmem */ + recommended_min = min(recommended_min, + (unsigned long) nr_free_buffer_pages() / 20); + recommended_min <<= (PAGE_SHIFT-10); + + if (recommended_min > min_free_kbytes) { + if (user_min_free_kbytes >= 0) + pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n", + min_free_kbytes, recommended_min); + + min_free_kbytes = recommended_min; + } + setup_per_zone_wmarks(); +} + +int start_stop_khugepaged(void) +{ + static struct task_struct *khugepaged_thread __read_mostly; + static DEFINE_MUTEX(khugepaged_mutex); + int err = 0; + + mutex_lock(&khugepaged_mutex); + if (khugepaged_enabled()) { + if (!khugepaged_thread) + khugepaged_thread = kthread_run(khugepaged, NULL, + "khugepaged"); + if (IS_ERR(khugepaged_thread)) { + pr_err("khugepaged: kthread_run(khugepaged) failed\n"); + err = PTR_ERR(khugepaged_thread); + khugepaged_thread = NULL; + goto fail; + } + + if (!list_empty(&khugepaged_scan.mm_head)) + wake_up_interruptible(&khugepaged_wait); + + set_recommended_min_free_kbytes(); + } else if (khugepaged_thread) { + kthread_stop(khugepaged_thread); + khugepaged_thread = NULL; + } +fail: + mutex_unlock(&khugepaged_mutex); + return err; +} -- cgit v1.2.3-70-g09d2 From 988ddb710bb5be27f793b7e50455c769118a389f Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:26:26 -0700 Subject: khugepaged: move up_read(mmap_sem) out of khugepaged_alloc_page() Both variants of khugepaged_alloc_page() do up_read(&mm->mmap_sem) first: no point keep it inside the function. Link: http://lkml.kernel.org/r/1466021202-61880-33-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/khugepaged.c | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 3e6d1a1b7e2c..639047cc6ea7 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -739,19 +739,10 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) } static struct page * -khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, - unsigned long address, int node) +khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) { VM_BUG_ON_PAGE(*hpage, *hpage); - /* - * Before allocating the hugepage, release the mmap_sem read lock. - * The allocation can take potentially a long time if it involves - * sync compaction, and we do not need to hold the mmap_sem during - * that. We will recheck the vma after taking it again in write mode. - */ - up_read(&mm->mmap_sem); - *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); if (unlikely(!*hpage)) { count_vm_event(THP_COLLAPSE_ALLOC_FAILED); @@ -812,10 +803,8 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) } static struct page * -khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, - unsigned long address, int node) +khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) { - up_read(&mm->mmap_sem); VM_BUG_ON(!*hpage); return *hpage; @@ -936,8 +925,14 @@ static void collapse_huge_page(struct mm_struct *mm, /* Only allocate from the target node */ gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE; - /* release the mmap_sem read lock. 
*/ - new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node); + /* + * Before allocating the hugepage, release the mmap_sem read lock. + * The allocation can take potentially a long time if it involves + * sync compaction, and we do not need to hold the mmap_sem during + * that. We will recheck the vma after taking it again in write mode. + */ + up_read(&mm->mmap_sem); + new_page = khugepaged_alloc_page(hpage, gfp, node); if (!new_page) { result = SCAN_ALLOC_HUGE_PAGE_FAIL; goto out_nolock; -- cgit v1.2.3-70-g09d2 From 4595ef88d136134a9470c955575640f5c96344ed Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:26:29 -0700 Subject: shmem: make shmem_inode_info::lock irq-safe We are going to need to call shmem_charge() under tree_lock to get accoutning right on collapse of small tmpfs pages into a huge one. The problem is that tree_lock is irq-safe and lockdep is not happy, that we take irq-unsafe lock under irq-safe[1]. Let's convert the lock to irq-safe. [1] https://gist.github.com/kiryl/80c0149e03ed35dfaf26628b8e03cdbc Link: http://lkml.kernel.org/r/1466021202-61880-34-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- ipc/shm.c | 4 ++-- mm/shmem.c | 50 ++++++++++++++++++++++++++------------------------ 2 files changed, 28 insertions(+), 26 deletions(-) diff --git a/ipc/shm.c b/ipc/shm.c index 7fa5cbebbf19..dbac8860c721 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -766,10 +766,10 @@ static void shm_add_rss_swap(struct shmid_kernel *shp, } else { #ifdef CONFIG_SHMEM struct shmem_inode_info *info = SHMEM_I(inode); - spin_lock(&info->lock); + spin_lock_irq(&info->lock); *rss_add += inode->i_mapping->nrpages; *swp_add += info->swapped; - spin_unlock(&info->lock); + spin_unlock_irq(&info->lock); #else *rss_add += inode->i_mapping->nrpages; #endif diff --git a/mm/shmem.c b/mm/shmem.c index f19b6b44ae46..03eb915c82eb 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -258,14 +258,15 @@ bool shmem_charge(struct inode *inode, long pages) { struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + unsigned long flags; if (shmem_acct_block(info->flags, pages)) return false; - spin_lock(&info->lock); + spin_lock_irqsave(&info->lock, flags); info->alloced += pages; inode->i_blocks += pages * BLOCKS_PER_PAGE; shmem_recalc_inode(inode); - spin_unlock(&info->lock); + spin_unlock_irqrestore(&info->lock, flags); inode->i_mapping->nrpages += pages; if (!sbinfo->max_blocks) @@ -273,10 +274,10 @@ bool shmem_charge(struct inode *inode, long pages) if (percpu_counter_compare(&sbinfo->used_blocks, sbinfo->max_blocks - pages) > 0) { inode->i_mapping->nrpages -= pages; - spin_lock(&info->lock); + spin_lock_irqsave(&info->lock, flags); info->alloced -= pages; shmem_recalc_inode(inode); - spin_unlock(&info->lock); + spin_unlock_irqrestore(&info->lock, flags); return false; } @@ -288,12 +289,13 @@ void shmem_uncharge(struct inode *inode, long pages) { struct shmem_inode_info *info = SHMEM_I(inode); struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + unsigned long flags; - spin_lock(&info->lock); + spin_lock_irqsave(&info->lock, flags); info->alloced -= pages; inode->i_blocks -= pages * BLOCKS_PER_PAGE; shmem_recalc_inode(inode); - spin_unlock(&info->lock); + spin_unlock_irqrestore(&info->lock, flags); if (sbinfo->max_blocks) percpu_counter_sub(&sbinfo->used_blocks, pages); @@ -818,10 +820,10 @@ static void shmem_undo_range(struct inode *inode, loff_t 
lstart, loff_t lend, index++; } - spin_lock(&info->lock); + spin_lock_irq(&info->lock); info->swapped -= nr_swaps_freed; shmem_recalc_inode(inode); - spin_unlock(&info->lock); + spin_unlock_irq(&info->lock); } void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) @@ -838,9 +840,9 @@ static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry, struct shmem_inode_info *info = SHMEM_I(inode); if (info->alloced - info->swapped != inode->i_mapping->nrpages) { - spin_lock(&info->lock); + spin_lock_irq(&info->lock); shmem_recalc_inode(inode); - spin_unlock(&info->lock); + spin_unlock_irq(&info->lock); } generic_fillattr(inode, stat); return 0; @@ -984,9 +986,9 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, delete_from_swap_cache(*pagep); set_page_dirty(*pagep); if (!error) { - spin_lock(&info->lock); + spin_lock_irq(&info->lock); info->swapped--; - spin_unlock(&info->lock); + spin_unlock_irq(&info->lock); swap_free(swap); } } @@ -1134,10 +1136,10 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) list_add_tail(&info->swaplist, &shmem_swaplist); if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { - spin_lock(&info->lock); + spin_lock_irq(&info->lock); shmem_recalc_inode(inode); info->swapped++; - spin_unlock(&info->lock); + spin_unlock_irq(&info->lock); swap_shmem_alloc(swap); shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); @@ -1523,10 +1525,10 @@ repeat: mem_cgroup_commit_charge(page, memcg, true, false); - spin_lock(&info->lock); + spin_lock_irq(&info->lock); info->swapped--; shmem_recalc_inode(inode); - spin_unlock(&info->lock); + spin_unlock_irq(&info->lock); if (sgp == SGP_WRITE) mark_page_accessed(page); @@ -1603,11 +1605,11 @@ alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, info, sbinfo, PageTransHuge(page)); lru_cache_add_anon(page); - spin_lock(&info->lock); + spin_lock_irq(&info->lock); info->alloced += 1 << compound_order(page); inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); shmem_recalc_inode(inode); - spin_unlock(&info->lock); + spin_unlock_irq(&info->lock); alloced = true; /* @@ -1639,9 +1641,9 @@ clear: if (alloced) { ClearPageDirty(page); delete_from_page_cache(page); - spin_lock(&info->lock); + spin_lock_irq(&info->lock); shmem_recalc_inode(inode); - spin_unlock(&info->lock); + spin_unlock_irq(&info->lock); } error = -EINVAL; goto unlock; @@ -1673,9 +1675,9 @@ unlock: } if (error == -ENOSPC && !once++) { info = SHMEM_I(inode); - spin_lock(&info->lock); + spin_lock_irq(&info->lock); shmem_recalc_inode(inode); - spin_unlock(&info->lock); + spin_unlock_irq(&info->lock); goto repeat; } if (error == -EEXIST) /* from above or from radix_tree_insert */ @@ -1874,7 +1876,7 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user) struct shmem_inode_info *info = SHMEM_I(inode); int retval = -ENOMEM; - spin_lock(&info->lock); + spin_lock_irq(&info->lock); if (lock && !(info->flags & VM_LOCKED)) { if (!user_shm_lock(inode->i_size, user)) goto out_nomem; @@ -1889,7 +1891,7 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user) retval = 0; out_nomem: - spin_unlock(&info->lock); + spin_unlock_irq(&info->lock); return retval; } -- cgit v1.2.3-70-g09d2 From f3f0e1d2150b2b99da2cbdfaad000089efe9bf30 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:26:32 -0700 Subject: khugepaged: add support of collapse for tmpfs/shmem pages This patch extends khugepaged to support collapse of tmpfs/shmem pages. 
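For a shmem VMA, eligibility boils down to the file offset and the virtual address being congruent modulo the huge page size, so that a PMD can map a naturally aligned 2M chunk of the file (see the hugepage_vma_check() hunk further down). A simplified standalone restatement of that predicate, a sketch rather than the kernel function itself:

    #include <stdbool.h>

    /*
     * Sketch only: hpage_pmd_nr is a power of two (512 for 4K pages),
     * page_shift is PAGE_SHIFT (12 for 4K pages).
     */
    static bool shmem_vma_aligned_for_collapse(unsigned long vm_start,
                                               unsigned long vm_pgoff,
                                               unsigned int page_shift,
                                               unsigned long hpage_pmd_nr)
    {
            /* VMA start (in pages) and file offset congruent mod hpage_pmd_nr */
            return (((vm_start >> page_shift) - vm_pgoff) &
                    (hpage_pmd_nr - 1)) == 0;
    }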
We share fair amount of infrastructure with anon-THP collapse. Few design points: - First we are looking for VMA which can be suitable for mapping huge page; - If the VMA maps shmem file, the rest scan/collapse operations operates on page cache, not on page tables as in anon VMA case. - khugepaged_scan_shmem() finds a range which is suitable for huge page. The scan is lockless and shouldn't disturb system too much. - once the candidate for collapse is found, collapse_shmem() attempts to create a huge page: + scan over radix tree, making the range point to new huge page; + new huge page is not-uptodate, locked and freezed (refcount is 0), so nobody can touch them until we say so. + we swap in pages during the scan. khugepaged_scan_shmem() filters out ranges with more than khugepaged_max_ptes_swap swapped out pages. It's HPAGE_PMD_NR/8 by default. + old pages are isolated, unmapped and put to local list in case to be restored back if collapse failed. - if collapse succeed, we retract pte page tables from VMAs where huge pages mapping is possible. The huge page will be mapped as PMD on next minor fault into the range. Link: http://lkml.kernel.org/r/1466021202-61880-35-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/shmem_fs.h | 23 ++ include/trace/events/huge_memory.h | 3 +- mm/khugepaged.c | 435 ++++++++++++++++++++++++++++++++++++- mm/shmem.c | 56 ++++- 4 files changed, 500 insertions(+), 17 deletions(-) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 94eaaa2c6ad9..0890f700a546 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -54,6 +54,7 @@ extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); extern int shmem_lock(struct file *file, int lock, struct user_struct *user); extern bool shmem_mapping(struct address_space *mapping); +extern bool shmem_huge_enabled(struct vm_area_struct *vma); extern void shmem_unlock_mapping(struct address_space *mapping); extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); @@ -64,6 +65,19 @@ extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, pgoff_t start, pgoff_t end); +/* Flag allocation requirements to shmem_getpage */ +enum sgp_type { + SGP_READ, /* don't exceed i_size, don't allocate page */ + SGP_CACHE, /* don't exceed i_size, may allocate page */ + SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */ + SGP_HUGE, /* like SGP_CACHE, huge pages preferred */ + SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ + SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ +}; + +extern int shmem_getpage(struct inode *inode, pgoff_t index, + struct page **pagep, enum sgp_type sgp); + static inline struct page *shmem_read_mapping_page( struct address_space *mapping, pgoff_t index) { @@ -71,6 +85,15 @@ static inline struct page *shmem_read_mapping_page( mapping_gfp_mask(mapping)); } +static inline bool shmem_file(struct file *file) +{ + if (!IS_ENABLED(CONFIG_SHMEM)) + return false; + if (!file || !file->f_mapping) + return false; + return shmem_mapping(file->f_mapping); +} + extern bool shmem_charge(struct inode *inode, long pages); extern void shmem_uncharge(struct inode *inode, long pages); diff --git a/include/trace/events/huge_memory.h 
b/include/trace/events/huge_memory.h index bda21183eb05..830d47d5ca41 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -29,7 +29,8 @@ EM( SCAN_DEL_PAGE_LRU, "could_not_delete_page_from_lru")\ EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \ EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \ - EMe( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") + EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \ + EMe(SCAN_TRUNCATED, "truncated") \ #undef EM #undef EMe diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 639047cc6ea7..573e4366d3b9 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -42,7 +43,8 @@ enum scan_result { SCAN_DEL_PAGE_LRU, SCAN_ALLOC_HUGE_PAGE_FAIL, SCAN_CGROUP_CHARGE_FAIL, - SCAN_EXCEED_SWAP_PTE + SCAN_EXCEED_SWAP_PTE, + SCAN_TRUNCATED, }; #define CREATE_TRACE_POINTS @@ -294,7 +296,7 @@ struct attribute_group khugepaged_attr_group = { .name = "khugepaged", }; -#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) +#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) @@ -816,6 +818,10 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || (vma->vm_flags & VM_NOHUGEPAGE)) return false; + if (shmem_file(vma->vm_file)) { + return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, + HPAGE_PMD_NR); + } if (!vma->anon_vma || vma->vm_ops) return false; if (is_vma_temporary_stack(vma)) @@ -1216,6 +1222,412 @@ static void collect_mm_slot(struct mm_slot *mm_slot) } } +#ifdef CONFIG_SHMEM +static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) +{ + struct vm_area_struct *vma; + unsigned long addr; + pmd_t *pmd, _pmd; + + i_mmap_lock_write(mapping); + vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { + /* probably overkill */ + if (vma->anon_vma) + continue; + addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); + if (addr & ~HPAGE_PMD_MASK) + continue; + if (vma->vm_end < addr + HPAGE_PMD_SIZE) + continue; + pmd = mm_find_pmd(vma->vm_mm, addr); + if (!pmd) + continue; + /* + * We need exclusive mmap_sem to retract page table. + * If trylock fails we would end up with pte-mapped THP after + * re-fault. Not ideal, but it's more important to not disturb + * the system too much. + */ + if (down_write_trylock(&vma->vm_mm->mmap_sem)) { + spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd); + /* assume page table is clear */ + _pmd = pmdp_collapse_flush(vma, addr, pmd); + spin_unlock(ptl); + up_write(&vma->vm_mm->mmap_sem); + atomic_long_dec(&vma->vm_mm->nr_ptes); + pte_free(vma->vm_mm, pmd_pgtable(_pmd)); + } + } + i_mmap_unlock_write(mapping); +} + +/** + * collapse_shmem - collapse small tmpfs/shmem pages into huge one. 
+ * + * Basic scheme is simple, details are more complex: + * - allocate and freeze a new huge page; + * - scan over radix tree replacing old pages the new one + * + swap in pages if necessary; + * + fill in gaps; + * + keep old pages around in case if rollback is required; + * - if replacing succeed: + * + copy data over; + * + free old pages; + * + unfreeze huge page; + * - if replacing failed; + * + put all pages back and unfreeze them; + * + restore gaps in the radix-tree; + * + free huge page; + */ +static void collapse_shmem(struct mm_struct *mm, + struct address_space *mapping, pgoff_t start, + struct page **hpage, int node) +{ + gfp_t gfp; + struct page *page, *new_page, *tmp; + struct mem_cgroup *memcg; + pgoff_t index, end = start + HPAGE_PMD_NR; + LIST_HEAD(pagelist); + struct radix_tree_iter iter; + void **slot; + int nr_none = 0, result = SCAN_SUCCEED; + + VM_BUG_ON(start & (HPAGE_PMD_NR - 1)); + + /* Only allocate from the target node */ + gfp = alloc_hugepage_khugepaged_gfpmask() | + __GFP_OTHER_NODE | __GFP_THISNODE; + + new_page = khugepaged_alloc_page(hpage, gfp, node); + if (!new_page) { + result = SCAN_ALLOC_HUGE_PAGE_FAIL; + goto out; + } + + if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) { + result = SCAN_CGROUP_CHARGE_FAIL; + goto out; + } + + new_page->index = start; + new_page->mapping = mapping; + __SetPageSwapBacked(new_page); + __SetPageLocked(new_page); + BUG_ON(!page_ref_freeze(new_page, 1)); + + + /* + * At this point the new_page is 'frozen' (page_count() is zero), locked + * and not up-to-date. It's safe to insert it into radix tree, because + * nobody would be able to map it or use it in other way until we + * unfreeze it. + */ + + index = start; + spin_lock_irq(&mapping->tree_lock); + radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { + int n = min(iter.index, end) - index; + + /* + * Handle holes in the radix tree: charge it from shmem and + * insert relevant subpage of new_page into the radix-tree. + */ + if (n && !shmem_charge(mapping->host, n)) { + result = SCAN_FAIL; + break; + } + nr_none += n; + for (; index < min(iter.index, end); index++) { + radix_tree_insert(&mapping->page_tree, index, + new_page + (index % HPAGE_PMD_NR)); + } + + /* We are done. */ + if (index >= end) + break; + + page = radix_tree_deref_slot_protected(slot, + &mapping->tree_lock); + if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) { + spin_unlock_irq(&mapping->tree_lock); + /* swap in or instantiate fallocated page */ + if (shmem_getpage(mapping->host, index, &page, + SGP_NOHUGE)) { + result = SCAN_FAIL; + goto tree_unlocked; + } + spin_lock_irq(&mapping->tree_lock); + } else if (trylock_page(page)) { + get_page(page); + } else { + result = SCAN_PAGE_LOCK; + break; + } + + /* + * The page must be locked, so we can drop the tree_lock + * without racing with truncate. 
+ */ + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(!PageUptodate(page), page); + VM_BUG_ON_PAGE(PageTransCompound(page), page); + + if (page_mapping(page) != mapping) { + result = SCAN_TRUNCATED; + goto out_unlock; + } + spin_unlock_irq(&mapping->tree_lock); + + if (isolate_lru_page(page)) { + result = SCAN_DEL_PAGE_LRU; + goto out_isolate_failed; + } + + if (page_mapped(page)) + unmap_mapping_range(mapping, index << PAGE_SHIFT, + PAGE_SIZE, 0); + + spin_lock_irq(&mapping->tree_lock); + + VM_BUG_ON_PAGE(page_mapped(page), page); + + /* + * The page is expected to have page_count() == 3: + * - we hold a pin on it; + * - one reference from radix tree; + * - one from isolate_lru_page; + */ + if (!page_ref_freeze(page, 3)) { + result = SCAN_PAGE_COUNT; + goto out_lru; + } + + /* + * Add the page to the list to be able to undo the collapse if + * something go wrong. + */ + list_add_tail(&page->lru, &pagelist); + + /* Finally, replace with the new page. */ + radix_tree_replace_slot(slot, + new_page + (index % HPAGE_PMD_NR)); + + index++; + continue; +out_lru: + spin_unlock_irq(&mapping->tree_lock); + putback_lru_page(page); +out_isolate_failed: + unlock_page(page); + put_page(page); + goto tree_unlocked; +out_unlock: + unlock_page(page); + put_page(page); + break; + } + + /* + * Handle hole in radix tree at the end of the range. + * This code only triggers if there's nothing in radix tree + * beyond 'end'. + */ + if (result == SCAN_SUCCEED && index < end) { + int n = end - index; + + if (!shmem_charge(mapping->host, n)) { + result = SCAN_FAIL; + goto tree_locked; + } + + for (; index < end; index++) { + radix_tree_insert(&mapping->page_tree, index, + new_page + (index % HPAGE_PMD_NR)); + } + nr_none += n; + } + +tree_locked: + spin_unlock_irq(&mapping->tree_lock); +tree_unlocked: + + if (result == SCAN_SUCCEED) { + unsigned long flags; + struct zone *zone = page_zone(new_page); + + /* + * Replacing old pages with new one has succeed, now we need to + * copy the content and free old pages. + */ + list_for_each_entry_safe(page, tmp, &pagelist, lru) { + copy_highpage(new_page + (page->index % HPAGE_PMD_NR), + page); + list_del(&page->lru); + unlock_page(page); + page_ref_unfreeze(page, 1); + page->mapping = NULL; + ClearPageActive(page); + ClearPageUnevictable(page); + put_page(page); + } + + local_irq_save(flags); + __inc_zone_page_state(new_page, NR_SHMEM_THPS); + if (nr_none) { + __mod_zone_page_state(zone, NR_FILE_PAGES, nr_none); + __mod_zone_page_state(zone, NR_SHMEM, nr_none); + } + local_irq_restore(flags); + + /* + * Remove pte page tables, so we can re-faulti + * the page as huge. 
+ */ + retract_page_tables(mapping, start); + + /* Everything is ready, let's unfreeze the new_page */ + set_page_dirty(new_page); + SetPageUptodate(new_page); + page_ref_unfreeze(new_page, HPAGE_PMD_NR); + mem_cgroup_commit_charge(new_page, memcg, false, true); + lru_cache_add_anon(new_page); + unlock_page(new_page); + + *hpage = NULL; + } else { + /* Something went wrong: rollback changes to the radix-tree */ + shmem_uncharge(mapping->host, nr_none); + spin_lock_irq(&mapping->tree_lock); + radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, + start) { + if (iter.index >= end) + break; + page = list_first_entry_or_null(&pagelist, + struct page, lru); + if (!page || iter.index < page->index) { + if (!nr_none) + break; + /* Put holes back where they were */ + radix_tree_replace_slot(slot, NULL); + nr_none--; + continue; + } + + VM_BUG_ON_PAGE(page->index != iter.index, page); + + /* Unfreeze the page. */ + list_del(&page->lru); + page_ref_unfreeze(page, 2); + radix_tree_replace_slot(slot, page); + spin_unlock_irq(&mapping->tree_lock); + putback_lru_page(page); + unlock_page(page); + spin_lock_irq(&mapping->tree_lock); + } + VM_BUG_ON(nr_none); + spin_unlock_irq(&mapping->tree_lock); + + /* Unfreeze new_page, caller would take care about freeing it */ + page_ref_unfreeze(new_page, 1); + mem_cgroup_cancel_charge(new_page, memcg, true); + unlock_page(new_page); + new_page->mapping = NULL; + } +out: + VM_BUG_ON(!list_empty(&pagelist)); + /* TODO: tracepoints */ +} + +static void khugepaged_scan_shmem(struct mm_struct *mm, + struct address_space *mapping, + pgoff_t start, struct page **hpage) +{ + struct page *page = NULL; + struct radix_tree_iter iter; + void **slot; + int present, swap; + int node = NUMA_NO_NODE; + int result = SCAN_SUCCEED; + + present = 0; + swap = 0; + memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); + rcu_read_lock(); + radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { + if (iter.index >= start + HPAGE_PMD_NR) + break; + + page = radix_tree_deref_slot(slot); + if (radix_tree_deref_retry(page)) { + slot = radix_tree_iter_retry(&iter); + continue; + } + + if (radix_tree_exception(page)) { + if (++swap > khugepaged_max_ptes_swap) { + result = SCAN_EXCEED_SWAP_PTE; + break; + } + continue; + } + + if (PageTransCompound(page)) { + result = SCAN_PAGE_COMPOUND; + break; + } + + node = page_to_nid(page); + if (khugepaged_scan_abort(node)) { + result = SCAN_SCAN_ABORT; + break; + } + khugepaged_node_load[node]++; + + if (!PageLRU(page)) { + result = SCAN_PAGE_LRU; + break; + } + + if (page_count(page) != 1 + page_mapcount(page)) { + result = SCAN_PAGE_COUNT; + break; + } + + /* + * We probably should check if the page is referenced here, but + * nobody would transfer pte_young() to PageReferenced() for us. + * And rmap walk here is just too costly... 
+ */ + + present++; + + if (need_resched()) { + cond_resched_rcu(); + slot = radix_tree_iter_next(&iter); + } + } + rcu_read_unlock(); + + if (result == SCAN_SUCCEED) { + if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) { + result = SCAN_EXCEED_NONE_PTE; + } else { + node = khugepaged_find_target_node(); + collapse_shmem(mm, mapping, start, hpage, node); + } + } + + /* TODO: tracepoints */ +} +#else +static void khugepaged_scan_shmem(struct mm_struct *mm, + struct address_space *mapping, + pgoff_t start, struct page **hpage) +{ + BUILD_BUG(); +} +#endif + static unsigned int khugepaged_scan_mm_slot(unsigned int pages, struct page **hpage) __releases(&khugepaged_mm_lock) @@ -1269,6 +1681,8 @@ skip: if (khugepaged_scan.address < hstart) khugepaged_scan.address = hstart; VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); + if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma)) + goto skip; while (khugepaged_scan.address < hend) { int ret; @@ -1279,9 +1693,20 @@ skip: VM_BUG_ON(khugepaged_scan.address < hstart || khugepaged_scan.address + HPAGE_PMD_SIZE > hend); - ret = khugepaged_scan_pmd(mm, vma, - khugepaged_scan.address, - hpage); + if (shmem_file(vma->vm_file)) { + struct file *file = get_file(vma->vm_file); + pgoff_t pgoff = linear_page_index(vma, + khugepaged_scan.address); + up_read(&mm->mmap_sem); + ret = 1; + khugepaged_scan_shmem(mm, file->f_mapping, + pgoff, hpage); + fput(file); + } else { + ret = khugepaged_scan_pmd(mm, vma, + khugepaged_scan.address, + hpage); + } /* move to next address */ khugepaged_scan.address += HPAGE_PMD_SIZE; progress += HPAGE_PMD_NR; diff --git a/mm/shmem.c b/mm/shmem.c index 03eb915c82eb..905c0e1cf5af 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -32,6 +32,7 @@ #include #include #include +#include static struct vfsmount *shm_mnt; @@ -97,16 +98,6 @@ struct shmem_falloc { pgoff_t nr_unswapped; /* how often writepage refused to swap out */ }; -/* Flag allocation requirements to shmem_getpage */ -enum sgp_type { - SGP_READ, /* don't exceed i_size, don't allocate page */ - SGP_CACHE, /* don't exceed i_size, may allocate page */ - SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */ - SGP_HUGE, /* like SGP_CACHE, huge pages preferred */ - SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ - SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ -}; - #ifdef CONFIG_TMPFS static unsigned long shmem_default_max_blocks(void) { @@ -126,7 +117,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, struct page **pagep, enum sgp_type sgp, gfp_t gfp, struct mm_struct *fault_mm, int *fault_type); -static inline int shmem_getpage(struct inode *inode, pgoff_t index, +int shmem_getpage(struct inode *inode, pgoff_t index, struct page **pagep, enum sgp_type sgp) { return shmem_getpage_gfp(inode, index, pagep, sgp, @@ -1899,6 +1890,11 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma) { file_accessed(file); vma->vm_ops = &shmem_vm_ops; + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < + (vma->vm_end & HPAGE_PMD_MASK)) { + khugepaged_enter(vma, vma->vm_flags); + } return 0; } @@ -3803,6 +3799,37 @@ static ssize_t shmem_enabled_store(struct kobject *kobj, struct kobj_attribute shmem_enabled_attr = __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); + +bool shmem_huge_enabled(struct vm_area_struct *vma) +{ + struct inode *inode = file_inode(vma->vm_file); + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); + loff_t i_size; + pgoff_t 
off; + + if (shmem_huge == SHMEM_HUGE_FORCE) + return true; + if (shmem_huge == SHMEM_HUGE_DENY) + return false; + switch (sbinfo->huge) { + case SHMEM_HUGE_NEVER: + return false; + case SHMEM_HUGE_ALWAYS: + return true; + case SHMEM_HUGE_WITHIN_SIZE: + off = round_up(vma->vm_pgoff, HPAGE_PMD_NR); + i_size = round_up(i_size_read(inode), PAGE_SIZE); + if (i_size >= HPAGE_PMD_SIZE && + i_size >> PAGE_SHIFT >= off) + return true; + case SHMEM_HUGE_ADVISE: + /* TODO: implement fadvise() hints */ + return (vma->vm_flags & VM_HUGEPAGE); + default: + VM_BUG_ON(1); + return false; + } +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ #else /* !CONFIG_SHMEM */ @@ -3982,6 +4009,13 @@ int shmem_zero_setup(struct vm_area_struct *vma) fput(vma->vm_file); vma->vm_file = file; vma->vm_ops = &shmem_vm_ops; + + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < + (vma->vm_end & HPAGE_PMD_MASK)) { + khugepaged_enter(vma, vma->vm_flags); + } + return 0; } -- cgit v1.2.3-70-g09d2 From e496cf3d782135c1cca0d154d4b924517ff58de0 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:26:35 -0700 Subject: thp: introduce CONFIG_TRANSPARENT_HUGE_PAGECACHE For file mappings, we don't deposit page tables on THP allocation because it's not strictly required to implement split_huge_pmd(): we can just clear pmd and let following page faults to reconstruct the page table. But Power makes use of deposited page table to address MMU quirk. Let's hide THP page cache, including huge tmpfs, under separate config option, so it can be forbidden on Power. We can revert the patch later once solution for Power found. Link: http://lkml.kernel.org/r/1466021202-61880-36-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Cc: Aneesh Kumar K.V Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/shmem_fs.h | 10 +++++++++- mm/Kconfig | 8 ++++++++ mm/huge_memory.c | 2 +- mm/khugepaged.c | 11 +++++++---- mm/memory.c | 5 +++-- mm/shmem.c | 26 +++++++++++++------------- 6 files changed, 41 insertions(+), 21 deletions(-) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 0890f700a546..54fa28dfbd89 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -54,7 +54,6 @@ extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); extern int shmem_lock(struct file *file, int lock, struct user_struct *user); extern bool shmem_mapping(struct address_space *mapping); -extern bool shmem_huge_enabled(struct vm_area_struct *vma); extern void shmem_unlock_mapping(struct address_space *mapping); extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); @@ -112,4 +111,13 @@ static inline long shmem_fcntl(struct file *f, unsigned int c, unsigned long a) #endif +#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE +extern bool shmem_huge_enabled(struct vm_area_struct *vma); +#else +static inline bool shmem_huge_enabled(struct vm_area_struct *vma) +{ + return false; +} +#endif + #endif diff --git a/mm/Kconfig b/mm/Kconfig index 3e2daef3c946..3c81803b00a3 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -439,6 +439,14 @@ choice benefit. endchoice +# +# We don't deposit page tables on file THP mapping, +# but Power makes use of them to address MMU quirk. 
+# +config TRANSPARENT_HUGE_PAGECACHE + def_bool y + depends on TRANSPARENT_HUGEPAGE && !PPC + # # UP and nommu archs use km based percpu allocator # diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 2706182787d8..d3abbf249fa0 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -287,7 +287,7 @@ static struct attribute *hugepage_attr[] = { &enabled_attr.attr, &defrag_attr.attr, &use_zero_page_attr.attr, -#ifdef CONFIG_SHMEM +#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &shmem_enabled_attr.attr, #endif #ifdef CONFIG_DEBUG_VM diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 573e4366d3b9..93d5f87c00d5 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -819,6 +819,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) (vma->vm_flags & VM_NOHUGEPAGE)) return false; if (shmem_file(vma->vm_file)) { + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) + return false; return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, HPAGE_PMD_NR); } @@ -1222,7 +1224,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot) } } -#ifdef CONFIG_SHMEM +#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) { struct vm_area_struct *vma; @@ -1681,8 +1683,6 @@ skip: if (khugepaged_scan.address < hstart) khugepaged_scan.address = hstart; VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); - if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma)) - goto skip; while (khugepaged_scan.address < hend) { int ret; @@ -1694,9 +1694,12 @@ skip: khugepaged_scan.address + HPAGE_PMD_SIZE > hend); if (shmem_file(vma->vm_file)) { - struct file *file = get_file(vma->vm_file); + struct file *file; pgoff_t pgoff = linear_page_index(vma, khugepaged_scan.address); + if (!shmem_huge_enabled(vma)) + goto skip; + file = get_file(vma->vm_file); up_read(&mm->mmap_sem); ret = 1; khugepaged_scan_shmem(mm, file->f_mapping, diff --git a/mm/memory.c b/mm/memory.c index 712790e95f08..4425b6059339 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2920,7 +2920,7 @@ map_pte: return 0; } -#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1) static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, @@ -3002,7 +3002,8 @@ int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg, pte_t entry; int ret; - if (pmd_none(*fe->pmd) && PageTransCompound(page)) { + if (pmd_none(*fe->pmd) && PageTransCompound(page) && + IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { /* THP on COW? */ VM_BUG_ON_PAGE(memcg, page); diff --git a/mm/shmem.c b/mm/shmem.c index 905c0e1cf5af..25f55a3a93b3 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -363,7 +363,7 @@ static bool shmem_confirm_swap(struct address_space *mapping, #define SHMEM_HUGE_DENY (-1) #define SHMEM_HUGE_FORCE (-2) -#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE /* ifdef here to avoid bloating shmem.o when not necessary */ int shmem_huge __read_mostly; @@ -406,11 +406,11 @@ static const char *shmem_format_huge(int huge) } } -#else /* !CONFIG_TRANSPARENT_HUGEPAGE */ +#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */ #define shmem_huge SHMEM_HUGE_DENY -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ /* * Like add_to_page_cache_locked, but error if expected item has gone. 
@@ -1229,7 +1229,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp, void __rcu **results; struct page *page; - if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) return NULL; rcu_read_lock(); @@ -1270,7 +1270,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp, int nr; int err = -ENOSPC; - if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) huge = false; nr = huge ? HPAGE_PMD_NR : 1; @@ -1773,7 +1773,7 @@ unsigned long shmem_get_unmapped_area(struct file *file, get_area = current->mm->get_unmapped_area; addr = get_area(file, uaddr, len, pgoff, flags); - if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) return addr; if (IS_ERR_VALUE(addr)) return addr; @@ -1890,7 +1890,7 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma) { file_accessed(file); vma->vm_ops = &shmem_vm_ops; - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < (vma->vm_end & HPAGE_PMD_MASK)) { khugepaged_enter(vma, vma->vm_flags); @@ -3287,7 +3287,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, sbinfo->gid = make_kgid(current_user_ns(), gid); if (!gid_valid(sbinfo->gid)) goto bad_val; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE } else if (!strcmp(this_char, "huge")) { int huge; huge = shmem_parse_huge(value); @@ -3384,7 +3384,7 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root) if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbinfo->gid)); -#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ if (sbinfo->huge) seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); @@ -3730,7 +3730,7 @@ int __init shmem_init(void) goto out1; } -#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY) SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; else @@ -3747,7 +3747,7 @@ out3: return error; } -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS) +#if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS) static ssize_t shmem_enabled_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -3830,7 +3830,7 @@ bool shmem_huge_enabled(struct vm_area_struct *vma) return false; } } -#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */ +#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ #else /* !CONFIG_SHMEM */ @@ -4010,7 +4010,7 @@ int shmem_zero_setup(struct vm_area_struct *vma) vma->vm_file = file; vma->vm_ops = &shmem_vm_ops; - if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < (vma->vm_end & HPAGE_PMD_MASK)) { khugepaged_enter(vma, vma->vm_flags); -- cgit v1.2.3-70-g09d2 From 779750d20b93bb2e0c75dfe924f31b02f6a78bfa Mon Sep 17 00:00:00 2001 From: "Kirill A. 
Shutemov" Date: Tue, 26 Jul 2016 15:26:38 -0700 Subject: shmem: split huge pages beyond i_size under memory pressure Even if user asked to allocate huge pages always (huge=always), we should be able to free up some memory by splitting pages which are partly byound i_size if memory presure comes or once we hit limit on filesystem size (-o size=). In order to do this we maintain per-superblock list of inodes, which potentially have huge pages on the border of file size. Per-fs shrinker can reclaim memory by splitting such pages. If we hit -ENOSPC during shmem_getpage_gfp(), we try to split a page to free up space on the filesystem and retry allocation if it succeed. Link: http://lkml.kernel.org/r/1466021202-61880-37-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/shmem_fs.h | 6 +- mm/shmem.c | 175 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 180 insertions(+), 1 deletion(-) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 54fa28dfbd89..ff078e7043b6 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -16,8 +16,9 @@ struct shmem_inode_info { unsigned long flags; unsigned long alloced; /* data pages alloced to file */ unsigned long swapped; /* subtotal assigned to swap */ - struct shared_policy policy; /* NUMA memory alloc policy */ + struct list_head shrinklist; /* shrinkable hpage inodes */ struct list_head swaplist; /* chain of maybes on swap */ + struct shared_policy policy; /* NUMA memory alloc policy */ struct simple_xattrs xattrs; /* list of xattrs */ struct inode vfs_inode; }; @@ -33,6 +34,9 @@ struct shmem_sb_info { kuid_t uid; /* Mount uid for root directory */ kgid_t gid; /* Mount gid for root directory */ struct mempolicy *mpol; /* default memory policy for mappings */ + spinlock_t shrinklist_lock; /* Protects shrinklist */ + struct list_head shrinklist; /* List of shinkable inodes */ + unsigned long shrinklist_len; /* Length of shrinklist */ }; static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) diff --git a/mm/shmem.c b/mm/shmem.c index 25f55a3a93b3..62e42c7d544c 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -188,6 +188,7 @@ static const struct inode_operations shmem_inode_operations; static const struct inode_operations shmem_dir_inode_operations; static const struct inode_operations shmem_special_inode_operations; static const struct vm_operations_struct shmem_vm_ops; +static struct file_system_type shmem_fs_type; static LIST_HEAD(shmem_swaplist); static DEFINE_MUTEX(shmem_swaplist_mutex); @@ -406,10 +407,122 @@ static const char *shmem_format_huge(int huge) } } +static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, + struct shrink_control *sc, unsigned long nr_to_split) +{ + LIST_HEAD(list), *pos, *next; + struct inode *inode; + struct shmem_inode_info *info; + struct page *page; + unsigned long batch = sc ? 
sc->nr_to_scan : 128; + int removed = 0, split = 0; + + if (list_empty(&sbinfo->shrinklist)) + return SHRINK_STOP; + + spin_lock(&sbinfo->shrinklist_lock); + list_for_each_safe(pos, next, &sbinfo->shrinklist) { + info = list_entry(pos, struct shmem_inode_info, shrinklist); + + /* pin the inode */ + inode = igrab(&info->vfs_inode); + + /* inode is about to be evicted */ + if (!inode) { + list_del_init(&info->shrinklist); + removed++; + goto next; + } + + /* Check if there's anything to gain */ + if (round_up(inode->i_size, PAGE_SIZE) == + round_up(inode->i_size, HPAGE_PMD_SIZE)) { + list_del_init(&info->shrinklist); + removed++; + iput(inode); + goto next; + } + + list_move(&info->shrinklist, &list); +next: + if (!--batch) + break; + } + spin_unlock(&sbinfo->shrinklist_lock); + + list_for_each_safe(pos, next, &list) { + int ret; + + info = list_entry(pos, struct shmem_inode_info, shrinklist); + inode = &info->vfs_inode; + + if (nr_to_split && split >= nr_to_split) { + iput(inode); + continue; + } + + page = find_lock_page(inode->i_mapping, + (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT); + if (!page) + goto drop; + + if (!PageTransHuge(page)) { + unlock_page(page); + put_page(page); + goto drop; + } + + ret = split_huge_page(page); + unlock_page(page); + put_page(page); + + if (ret) { + /* split failed: leave it on the list */ + iput(inode); + continue; + } + + split++; +drop: + list_del_init(&info->shrinklist); + removed++; + iput(inode); + } + + spin_lock(&sbinfo->shrinklist_lock); + list_splice_tail(&list, &sbinfo->shrinklist); + sbinfo->shrinklist_len -= removed; + spin_unlock(&sbinfo->shrinklist_lock); + + return split; +} + +static long shmem_unused_huge_scan(struct super_block *sb, + struct shrink_control *sc) +{ + struct shmem_sb_info *sbinfo = SHMEM_SB(sb); + + if (!READ_ONCE(sbinfo->shrinklist_len)) + return SHRINK_STOP; + + return shmem_unused_huge_shrink(sbinfo, sc, 0); +} + +static long shmem_unused_huge_count(struct super_block *sb, + struct shrink_control *sc) +{ + struct shmem_sb_info *sbinfo = SHMEM_SB(sb); + return READ_ONCE(sbinfo->shrinklist_len); +} #else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */ #define shmem_huge SHMEM_HUGE_DENY +static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, + struct shrink_control *sc, unsigned long nr_to_split) +{ + return 0; +} #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ /* @@ -843,6 +956,7 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); struct shmem_inode_info *info = SHMEM_I(inode); + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); int error; error = inode_change_ok(inode, attr); @@ -878,6 +992,20 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr) if (oldsize > holebegin) unmap_mapping_range(inode->i_mapping, holebegin, 0, 1); + + /* + * Part of the huge page can be beyond i_size: subject + * to shrink under memory pressure. 
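As a concrete illustration of the "anything to gain" test used by shmem_unused_huge_shrink() above (assuming 4K pages and a 2M HPAGE_PMD_SIZE):

    i_size = 3M: round_up(i_size, PAGE_SIZE) = 3M, round_up(i_size, HPAGE_PMD_SIZE) = 4M
                 -> they differ: the last huge page has 1M beyond i_size and is worth splitting
    i_size = 4M: both round up to 4M -> nothing to reclaim, the inode is dropped from the list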
+ */ + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { + spin_lock(&sbinfo->shrinklist_lock); + if (list_empty(&info->shrinklist)) { + list_add_tail(&info->shrinklist, + &sbinfo->shrinklist); + sbinfo->shrinklist_len++; + } + spin_unlock(&sbinfo->shrinklist_lock); + } } } @@ -890,11 +1018,20 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr) static void shmem_evict_inode(struct inode *inode) { struct shmem_inode_info *info = SHMEM_I(inode); + struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); if (inode->i_mapping->a_ops == &shmem_aops) { shmem_unacct_size(info->flags, inode->i_size); inode->i_size = 0; shmem_truncate_range(inode, 0, (loff_t)-1); + if (!list_empty(&info->shrinklist)) { + spin_lock(&sbinfo->shrinklist_lock); + if (!list_empty(&info->shrinklist)) { + list_del_init(&info->shrinklist); + sbinfo->shrinklist_len--; + } + spin_unlock(&sbinfo->shrinklist_lock); + } if (!list_empty(&info->swaplist)) { mutex_lock(&shmem_swaplist_mutex); list_del_init(&info->swaplist); @@ -1563,8 +1700,23 @@ alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, info, sbinfo, index, false); } if (IS_ERR(page)) { + int retry = 5; error = PTR_ERR(page); page = NULL; + if (error != -ENOSPC) + goto failed; + /* + * Try to reclaim some spece by splitting a huge page + * beyond i_size on the filesystem. + */ + while (retry--) { + int ret; + ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); + if (ret == SHRINK_STOP) + break; + if (ret) + goto alloc_nohuge; + } goto failed; } @@ -1603,6 +1755,22 @@ alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, info, sbinfo, spin_unlock_irq(&info->lock); alloced = true; + if (PageTransHuge(page) && + DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < + hindex + HPAGE_PMD_NR - 1) { + /* + * Part of the huge page is beyond i_size: subject + * to shrink under memory pressure. + */ + spin_lock(&sbinfo->shrinklist_lock); + if (list_empty(&info->shrinklist)) { + list_add_tail(&info->shrinklist, + &sbinfo->shrinklist); + sbinfo->shrinklist_len++; + } + spin_unlock(&sbinfo->shrinklist_lock); + } + /* * Let SGP_FALLOC use the SGP_WRITE optimization on a new page. */ @@ -1920,6 +2088,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode spin_lock_init(&info->lock); info->seals = F_SEAL_SEAL; info->flags = flags & VM_NORESERVE; + INIT_LIST_HEAD(&info->shrinklist); INIT_LIST_HEAD(&info->swaplist); simple_xattrs_init(&info->xattrs); cache_no_acl(inode); @@ -3518,6 +3687,8 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent) if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) goto failed; sbinfo->free_inodes = sbinfo->max_inodes; + spin_lock_init(&sbinfo->shrinklist_lock); + INIT_LIST_HEAD(&sbinfo->shrinklist); sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_SIZE; @@ -3680,6 +3851,10 @@ static const struct super_operations shmem_ops = { .evict_inode = shmem_evict_inode, .drop_inode = generic_delete_inode, .put_super = shmem_put_super, +#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE + .nr_cached_objects = shmem_unused_huge_count, + .free_cached_objects = shmem_unused_huge_scan, +#endif }; static const struct vm_operations_struct shmem_vm_ops = { -- cgit v1.2.3-70-g09d2 From 1b5946a84d6eb096158e535bdb9bda06e7cdd941 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Tue, 26 Jul 2016 15:26:40 -0700 Subject: thp: update Documentation/{vm/transhuge,filesystems/proc}.txt Add info about tmpfs/shmem with huge pages. 
Link: http://lkml.kernel.org/r/1466021202-61880-38-git-send-email-kirill.shutemov@linux.intel.com Signed-off-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/proc.txt | 9 +++ Documentation/vm/transhuge.txt | 128 ++++++++++++++++++++++++++----------- 2 files changed, 101 insertions(+), 36 deletions(-) diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 5b61eeae3f6e..68080ad6a75e 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -436,6 +436,7 @@ Private_Dirty: 0 kB Referenced: 892 kB Anonymous: 0 kB AnonHugePages: 0 kB +ShmemPmdMapped: 0 kB Shared_Hugetlb: 0 kB Private_Hugetlb: 0 kB Swap: 0 kB @@ -464,6 +465,8 @@ accessed. a mapping associated with a file may contain anonymous pages: when MAP_PRIVATE and a page is modified, the file page is replaced by a private anonymous copy. "AnonHugePages" shows the ammount of memory backed by transparent hugepage. +"ShmemPmdMapped" shows the ammount of shared (shmem/tmpfs) memory backed by +huge pages. "Shared_Hugetlb" and "Private_Hugetlb" show the ammounts of memory backed by hugetlbfs page which is *not* counted in "RSS" or "PSS" field for historical reasons. And these are not included in {Shared,Private}_{Clean,Dirty} field. @@ -868,6 +871,9 @@ VmallocTotal: 112216 kB VmallocUsed: 428 kB VmallocChunk: 111088 kB AnonHugePages: 49152 kB +ShmemHugePages: 0 kB +ShmemPmdMapped: 0 kB + MemTotal: Total usable ram (i.e. physical ram minus a few reserved bits and the kernel binary code) @@ -912,6 +918,9 @@ MemAvailable: An estimate of how much memory is available for starting new AnonHugePages: Non-file backed huge pages mapped into userspace page tables Mapped: files which have been mmaped, such as libraries Shmem: Total memory used by shared memory (shmem) and tmpfs +ShmemHugePages: Memory used by shared memory (shmem) and tmpfs allocated + with huge pages +ShmemPmdMapped: Shared memory mapped into userspace with huge pages Slab: in-kernel data structures cache SReclaimable: Part of Slab, that might be reclaimed, such as caches SUnreclaim: Part of Slab, that cannot be reclaimed on memory pressure diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt index 7c871d6beb63..2ec6adb5a4ce 100644 --- a/Documentation/vm/transhuge.txt +++ b/Documentation/vm/transhuge.txt @@ -9,8 +9,8 @@ using huge pages for the backing of virtual memory with huge pages that supports the automatic promotion and demotion of page sizes and without the shortcomings of hugetlbfs. -Currently it only works for anonymous memory mappings but in the -future it can expand over the pagecache layer starting with tmpfs. +Currently it only works for anonymous memory mappings and tmpfs/shmem. +But in the future it can expand to other filesystems. The reason applications are running faster is because of two factors. The first factor is almost completely irrelevant and it's not @@ -57,10 +57,6 @@ miss is going to run faster. feature that applies to all dynamic high order allocations in the kernel) -- this initial support only offers the feature in the anonymous memory - regions but it'd be ideal to move it to tmpfs and the pagecache - later - Transparent Hugepage Support maximizes the usefulness of free memory if compared to the reservation approach of hugetlbfs by allowing all unused memory to be used as cache or other movable (or even unmovable @@ -94,21 +90,21 @@ madvise(MADV_HUGEPAGE) on their critical mmapped regions. 
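A minimal sketch of the madvise(MADV_HUGEPAGE) usage referred to in the hunk above (error handling trimmed; the hint does not guarantee huge pages, it only makes the region eligible for the huge fault path and khugepaged):

    #include <stddef.h>
    #include <sys/mman.h>

    int map_thp_region(void **out, size_t len)
    {
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return -1;
            (void)madvise(p, len, MADV_HUGEPAGE);   /* hint, not a guarantee */
            *out = p;
            return 0;
    }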
== sysfs == -Transparent Hugepage Support can be entirely disabled (mostly for -debugging purposes) or only enabled inside MADV_HUGEPAGE regions (to -avoid the risk of consuming more memory resources) or enabled system -wide. This can be achieved with one of: +Transparent Hugepage Support for anonymous memory can be entirely disabled +(mostly for debugging purposes) or only enabled inside MADV_HUGEPAGE +regions (to avoid the risk of consuming more memory resources) or enabled +system wide. This can be achieved with one of: echo always >/sys/kernel/mm/transparent_hugepage/enabled echo madvise >/sys/kernel/mm/transparent_hugepage/enabled echo never >/sys/kernel/mm/transparent_hugepage/enabled It's also possible to limit defrag efforts in the VM to generate -hugepages in case they're not immediately free to madvise regions or -to never try to defrag memory and simply fallback to regular pages -unless hugepages are immediately available. Clearly if we spend CPU -time to defrag memory, we would expect to gain even more by the fact -we use hugepages later instead of regular pages. This isn't always +anonymous hugepages in case they're not immediately free to madvise +regions or to never try to defrag memory and simply fallback to regular +pages unless hugepages are immediately available. Clearly if we spend CPU +time to defrag memory, we would expect to gain even more by the fact we +use hugepages later instead of regular pages. This isn't always guaranteed, but it may be more likely in case the allocation is for a MADV_HUGEPAGE region. @@ -133,9 +129,9 @@ that are have used madvise(MADV_HUGEPAGE). This is the default behaviour. "never" should be self-explanatory. -By default kernel tries to use huge zero page on read page fault. -It's possible to disable huge zero page by writing 0 or enable it -back by writing 1: +By default kernel tries to use huge zero page on read page fault to +anonymous mapping. It's possible to disable huge zero page by writing 0 +or enable it back by writing 1: echo 0 >/sys/kernel/mm/transparent_hugepage/use_zero_page echo 1 >/sys/kernel/mm/transparent_hugepage/use_zero_page @@ -204,21 +200,67 @@ Support by passing the parameter "transparent_hugepage=always" or "transparent_hugepage=madvise" or "transparent_hugepage=never" (without "") to the kernel command line. +== Hugepages in tmpfs/shmem == + +You can control hugepage allocation policy in tmpfs with mount option +"huge=". It can have following values: + + - "always": + Attempt to allocate huge pages every time we need a new page; + + - "never": + Do not allocate huge pages; + + - "within_size": + Only allocate huge page if it will be fully within i_size. + Also respect fadvise()/madvise() hints; + + - "advise": + Only allocate huge pages if requested with fadvise()/madvise(); + +The default policy is "never". + +"mount -o remount,huge= /mountpoint" works fine after mount: remounting +huge=never will not attempt to break up huge pages at all, just stop more +from being allocated. + +There's also a sysfs knob to control hugepage allocation policy for internal +shmem mount: /sys/kernel/mm/transparent_hugepage/shmem_enabled. The mount +is used for SysV SHM, memfds, shared anonymous mmaps (of /dev/zero or +MAP_ANONYMOUS), GPU drivers' DRM objects, Ashmem.
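The huge= policy can also be selected programmatically. A minimal sketch using mount(2), assuming root privileges, a kernel built with CONFIG_TRANSPARENT_HUGE_PAGECACHE, and an arbitrary pre-created mount point:

  #include <stdio.h>
  #include <sys/mount.h>

  int main(void)
  {
          /* equivalent to: mount -t tmpfs -o size=1G,huge=always tmpfs /mnt/thptmp */
          if (mount("tmpfs", "/mnt/thptmp", "tmpfs", 0, "size=1G,huge=always")) {
                  perror("mount");
                  return 1;
          }
          return 0;
  }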
+ +In addition to policies listed above, shmem_enabled allows two further +values: + + - "deny": + For use in emergencies, to force the huge option off from + all mounts; + - "force": + Force the huge option on for all - very useful for testing; + == Need of application restart == -The transparent_hugepage/enabled values only affect future -behavior. So to make them effective you need to restart any -application that could have been using hugepages. This also applies to -the regions registered in khugepaged. +The transparent_hugepage/enabled values and tmpfs mount option only affect +future behavior. So to make them effective you need to restart any +application that could have been using hugepages. This also applies to the +regions registered in khugepaged. == Monitoring usage == -The number of transparent huge pages currently used by the system is -available by reading the AnonHugePages field in /proc/meminfo. To -identify what applications are using transparent huge pages, it is -necessary to read /proc/PID/smaps and count the AnonHugePages fields -for each mapping. Note that reading the smaps file is expensive and -reading it frequently will incur overhead. +The number of anonymous transparent huge pages currently used by the +system is available by reading the AnonHugePages field in /proc/meminfo. +To identify what applications are using anonymous transparent huge pages, +it is necessary to read /proc/PID/smaps and count the AnonHugePages fields +for each mapping. + +The number of file transparent huge pages mapped to userspace is available +by reading ShmemPmdMapped and ShmemHugePages fields in /proc/meminfo. +To identify what applications are mapping file transparent huge pages, it +is necessary to read /proc/PID/smaps and count the FileHugeMapped fields +for each mapping. + +Note that reading the smaps file is expensive and reading it +frequently will incur overhead. There are a number of counters in /proc/vmstat that may be used to monitor how successfully the system is providing huge pages for use. @@ -238,6 +280,12 @@ thp_collapse_alloc_failed is incremented if khugepaged found a range of pages that should be collapsed into one huge page but failed the allocation. +thp_file_alloc is incremented every time a file huge page is successfully + allocated. + +thp_file_mapped is incremented every time a file huge page is mapped into + user address space. + thp_split_page is incremented every time a huge page is split into base pages. This can happen for a variety of reasons but a common reason is that a huge page is old and is being reclaimed. @@ -403,19 +451,27 @@ pages: on relevant sub-page of the compound page. - map/unmap of the whole compound page accounted in compound_mapcount - (stored in first tail page). + (stored in first tail page). For file huge pages, we also increment + ->_mapcount of all sub-pages in order to have race-free detection of + last unmap of subpages. -PageDoubleMap() indicates that ->_mapcount in all subpages is offset up by one. -This additional reference is required to get race-free detection of unmap of -subpages when we have them mapped with both PMDs and PTEs. +PageDoubleMap() indicates that the page is *possibly* mapped with PTEs. + +For anonymous pages PageDoubleMap() also indicates ->_mapcount in all +subpages is offset up by one. This additional reference is required to +get race-free detection of unmap of subpages when we have them mapped with +both PMDs and PTEs. This is optimization required to lower overhead of per-subpage mapcount tracking.
The alternative is alter ->_mapcount in all subpages on each map/unmap of the whole compound page. -We set PG_double_map when a PMD of the page got split for the first time, -but still have PMD mapping. The additional references go away with last -compound_mapcount. +For anonymous pages, we set PG_double_map when a PMD of the page got split +for the first time, but still have PMD mapping. The additional references +go away with last compound_mapcount. + +File pages get PG_double_map set on first map of the page with PTE and +goes away when the page gets evicted from page cache. split_huge_page internally has to distribute the refcounts in the head page to the tail pages before clearing all PG_head/tail bits from the page @@ -427,7 +483,7 @@ sum of mapcount of all sub-pages plus one (split_huge_page caller must have reference for head page). split_huge_page uses migration entries to stabilize page->_refcount and -page->_mapcount. +page->_mapcount of anonymous pages. File pages just got unmapped. We safe against physical memory scanners too: the only legitimate way scanner can get reference to a page is get_page_unless_zero(). -- cgit v1.2.3-70-g09d2 From 47f863ea220067f5c87096893876f44500fcc8c9 Mon Sep 17 00:00:00 2001 From: Ebru Akagunduz Date: Tue, 26 Jul 2016 15:26:43 -0700 Subject: mm, thp: fix comment inconsistency for swapin readahead functions After fixing swapin issues, comment lines stayed as in old version. This patch updates the comments. Link: http://lkml.kernel.org/r/1468109345-32258-1-git-send-email-ebru.akagunduz@gmail.com Signed-off-by: Ebru Akagunduz Acked-by: Rik van Riel Cc: Hugh Dickins Cc: Kirill A. Shutemov Cc: Naoya Horiguchi Cc: Andrea Arcangeli Cc: Joonsoo Kim Cc: Cyrill Gorcunov Cc: Mel Gorman Cc: David Rientjes Cc: Vlastimil Babka Cc: Aneesh Kumar K.V Cc: Johannes Weiner Cc: Michal Hocko Cc: Boaz Harrosh Cc: Hillf Danton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/khugepaged.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 93d5f87c00d5..566148489e33 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -891,9 +891,10 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ if (ret & VM_FAULT_RETRY) { down_read(&mm->mmap_sem); - /* vma is no longer available, don't continue to swapin */ - if (hugepage_vma_revalidate(mm, address)) + if (hugepage_vma_revalidate(mm, address)) { + /* vma is no longer available, don't continue to swapin */ return false; + } /* check if the pmd is still valid */ if (mm_find_pmd(mm, address) != pmd) return false; @@ -969,7 +970,7 @@ static void collapse_huge_page(struct mm_struct *mm, /* * __collapse_huge_page_swapin always returns with mmap_sem locked. - * If it fails, release mmap_sem and jump directly out. + * If it fails, we release mmap_sem and jump out_nolock. * Continuing to collapse causes inconsistency. */ if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { -- cgit v1.2.3-70-g09d2 From 0db501f7a34c11d3b964205e5b6d00692a648035 Mon Sep 17 00:00:00 2001 From: Ebru Akagunduz Date: Tue, 26 Jul 2016 15:26:46 -0700 Subject: mm, thp: convert from optimistic swapin collapsing to conservative To detect whether khugepaged swapin is worthwhile, this patch checks the amount of young pages. There should be at least half of HPAGE_PMD_NR to swapin. 
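Distilled into a minimal sketch (illustrative only: the real check is added inside __collapse_huge_page_swapin() in the diff below, "referenced" is the count of young PTEs gathered during the scan, and the helper name is made up):

  /* Only swap in if at least half of the PMD range was recently referenced. */
  static bool swapin_worthwhile(int referenced)
  {
          return referenced >= HPAGE_PMD_NR / 2;
  }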
Link: http://lkml.kernel.org/r/1468109451-1615-1-git-send-email-ebru.akagunduz@gmail.com Signed-off-by: Ebru Akagunduz Suggested-by: Minchan Kim Acked-by: Rik van Riel Cc: Hugh Dickins Cc: Kirill A. Shutemov Cc: Naoya Horiguchi Cc: Andrea Arcangeli Cc: Joonsoo Kim Cc: Cyrill Gorcunov Cc: Mel Gorman Cc: David Rientjes Cc: Vlastimil Babka Cc: Aneesh Kumar K.V Cc: Johannes Weiner Cc: Michal Hocko Cc: Boaz Harrosh Cc: Hillf Danton Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/trace/events/huge_memory.h | 19 +++++++++++-------- mm/khugepaged.c | 38 +++++++++++++++++++++++--------------- 2 files changed, 34 insertions(+), 23 deletions(-) diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index 830d47d5ca41..04f58acda8e8 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -13,7 +13,7 @@ EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \ EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \ EM( SCAN_PAGE_RO, "no_writable_page") \ - EM( SCAN_NO_REFERENCED_PAGE, "no_referenced_page") \ + EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \ EM( SCAN_PAGE_NULL, "page_null") \ EM( SCAN_SCAN_ABORT, "scan_aborted") \ EM( SCAN_PAGE_COUNT, "not_suitable_page_count") \ @@ -47,7 +47,7 @@ SCAN_STATUS TRACE_EVENT(mm_khugepaged_scan_pmd, TP_PROTO(struct mm_struct *mm, struct page *page, bool writable, - bool referenced, int none_or_zero, int status, int unmapped), + int referenced, int none_or_zero, int status, int unmapped), TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped), @@ -55,7 +55,7 @@ TRACE_EVENT(mm_khugepaged_scan_pmd, __field(struct mm_struct *, mm) __field(unsigned long, pfn) __field(bool, writable) - __field(bool, referenced) + __field(int, referenced) __field(int, none_or_zero) __field(int, status) __field(int, unmapped) @@ -108,14 +108,14 @@ TRACE_EVENT(mm_collapse_huge_page, TRACE_EVENT(mm_collapse_huge_page_isolate, TP_PROTO(struct page *page, int none_or_zero, - bool referenced, bool writable, int status), + int referenced, bool writable, int status), TP_ARGS(page, none_or_zero, referenced, writable, status), TP_STRUCT__entry( __field(unsigned long, pfn) __field(int, none_or_zero) - __field(bool, referenced) + __field(int, referenced) __field(bool, writable) __field(int, status) ), @@ -138,25 +138,28 @@ TRACE_EVENT(mm_collapse_huge_page_isolate, TRACE_EVENT(mm_collapse_huge_page_swapin, - TP_PROTO(struct mm_struct *mm, int swapped_in, int ret), + TP_PROTO(struct mm_struct *mm, int swapped_in, int referenced, int ret), - TP_ARGS(mm, swapped_in, ret), + TP_ARGS(mm, swapped_in, referenced, ret), TP_STRUCT__entry( __field(struct mm_struct *, mm) __field(int, swapped_in) + __field(int, referenced) __field(int, ret) ), TP_fast_assign( __entry->mm = mm; __entry->swapped_in = swapped_in; + __entry->referenced = referenced; __entry->ret = ret; ), - TP_printk("mm=%p, swapped_in=%d, ret=%d", + TP_printk("mm=%p, swapped_in=%d, referenced=%d, ret=%d", __entry->mm, __entry->swapped_in, + __entry->referenced, __entry->ret) ); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 566148489e33..7dbee698d6aa 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -27,7 +27,7 @@ enum scan_result { SCAN_EXCEED_NONE_PTE, SCAN_PTE_NON_PRESENT, SCAN_PAGE_RO, - SCAN_NO_REFERENCED_PAGE, + SCAN_LACK_REFERENCED_PAGE, SCAN_PAGE_NULL, SCAN_SCAN_ABORT, SCAN_PAGE_COUNT, @@ -500,8 +500,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, { struct page *page = NULL; pte_t *_pte; - int none_or_zero 
= 0, result = 0; - bool referenced = false, writable = false; + int none_or_zero = 0, result = 0, referenced = 0; + bool writable = false; for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++, address += PAGE_SIZE) { @@ -580,11 +580,11 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(PageLRU(page), page); - /* If there is no mapped pte young don't collapse the page */ + /* There should be enough young pte to collapse the page */ if (pte_young(pteval) || page_is_young(page) || PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, address)) - referenced = true; + referenced++; } if (likely(writable)) { if (likely(referenced)) { @@ -869,7 +869,8 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address) static bool __collapse_huge_page_swapin(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, pmd_t *pmd) + unsigned long address, pmd_t *pmd, + int referenced) { pte_t pteval; int swapped_in = 0, ret = 0; @@ -887,12 +888,19 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, if (!is_swap_pte(pteval)) continue; swapped_in++; + /* we only decide to swapin, if there is enough young ptes */ + if (referenced < HPAGE_PMD_NR/2) { + trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); + return false; + } ret = do_swap_page(&fe, pteval); + /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */ if (ret & VM_FAULT_RETRY) { down_read(&mm->mmap_sem); if (hugepage_vma_revalidate(mm, address)) { /* vma is no longer available, don't continue to swapin */ + trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); return false; } /* check if the pmd is still valid */ @@ -900,7 +908,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, return false; } if (ret & VM_FAULT_ERROR) { - trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0); + trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); return false; } /* pte is unmapped now, we need to map it */ @@ -908,7 +916,7 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm, } fe.pte--; pte_unmap(fe.pte); - trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1); + trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1); return true; } @@ -916,7 +924,7 @@ static void collapse_huge_page(struct mm_struct *mm, unsigned long address, struct page **hpage, struct vm_area_struct *vma, - int node) + int node, int referenced) { pmd_t *pmd, _pmd; pte_t *pte; @@ -973,7 +981,7 @@ static void collapse_huge_page(struct mm_struct *mm, * If it fails, we release mmap_sem and jump out_nolock. * Continuing to collapse causes inconsistency. 
*/ - if (!__collapse_huge_page_swapin(mm, vma, address, pmd)) { + if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) { mem_cgroup_cancel_charge(new_page, memcg, true); up_read(&mm->mmap_sem); goto out_nolock; @@ -1084,12 +1092,12 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, { pmd_t *pmd; pte_t *pte, *_pte; - int ret = 0, none_or_zero = 0, result = 0; + int ret = 0, none_or_zero = 0, result = 0, referenced = 0; struct page *page = NULL; unsigned long _address; spinlock_t *ptl; int node = NUMA_NO_NODE, unmapped = 0; - bool writable = false, referenced = false; + bool writable = false; VM_BUG_ON(address & ~HPAGE_PMD_MASK); @@ -1177,14 +1185,14 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, if (pte_young(pteval) || page_is_young(page) || PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm, address)) - referenced = true; + referenced++; } if (writable) { if (referenced) { result = SCAN_SUCCEED; ret = 1; } else { - result = SCAN_NO_REFERENCED_PAGE; + result = SCAN_LACK_REFERENCED_PAGE; } } else { result = SCAN_PAGE_RO; @@ -1194,7 +1202,7 @@ out_unmap: if (ret) { node = khugepaged_find_target_node(); /* collapse_huge_page will return with the mmap_sem released */ - collapse_huge_page(mm, address, hpage, vma, node); + collapse_huge_page(mm, address, hpage, vma, node, referenced); } out: trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, -- cgit v1.2.3-70-g09d2 From dd4123f324bbaec7618b677b7bce2b11aee9594e Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 26 Jul 2016 15:26:50 -0700 Subject: mm: fix build warnings in Randy reported below build error. > In file included from ../include/linux/balloon_compaction.h:48:0, > from ../mm/balloon_compaction.c:11: > ../include/linux/compaction.h:237:51: warning: 'struct node' declared inside parameter list [enabled by default] > static inline int compaction_register_node(struct node *node) > ../include/linux/compaction.h:237:51: warning: its scope is only this definition or declaration, which is probably not what you want [enabled by default] > ../include/linux/compaction.h:242:54: warning: 'struct node' declared inside parameter list [enabled by default] > static inline void compaction_unregister_node(struct node *node) > It was caused by non-lru page migration which needs compaction.h but compaction.h doesn't include any header to be standalone. I think proper header for non-lru page migration is migrate.h rather than compaction.h because migrate.h has already headers needed to work non-lru page migration indirectly like isolate_mode_t, migrate_mode MIGRATEPAGE_SUCCESS. [akpm@linux-foundation.org: revert mm-balloon-use-general-non-lru-movable-page-feature-fix.patch temp fix] Link: http://lkml.kernel.org/r/20160610003304.GE29779@bbox Signed-off-by: Minchan Kim Reported-by: Randy Dunlap Cc: Konstantin Khlebnikov Cc: Vlastimil Babka Cc: Gioh Kim Cc: Rafael Aquini Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/vm/page_migration | 11 ++++++----- drivers/virtio/virtio_balloon.c | 2 +- include/linux/balloon_compaction.h | 3 +-- include/linux/compaction.h | 16 ---------------- include/linux/migrate.h | 15 +++++++++++++++ mm/zsmalloc.c | 4 ++-- 6 files changed, 25 insertions(+), 26 deletions(-) diff --git a/Documentation/vm/page_migration b/Documentation/vm/page_migration index 18d37c7ac50b..94bd9c11c4e0 100644 --- a/Documentation/vm/page_migration +++ b/Documentation/vm/page_migration @@ -181,11 +181,12 @@ After isolation, VM calls migratepage of driver with isolated page. 
The function of migratepage is to move content of the old page to new page and set up fields of struct page newpage. Keep in mind that you should indicate to the VM the oldpage is no longer movable via __ClearPageMovable() -under page_lock if you migrated the oldpage successfully and returns 0. -If driver cannot migrate the page at the moment, driver can return -EAGAIN. -On -EAGAIN, VM will retry page migration in a short time because VM interprets --EAGAIN as "temporal migration failure". On returning any error except -EAGAIN, -VM will give up the page migration without retrying in this time. +under page_lock if you migrated the oldpage successfully and returns +MIGRATEPAGE_SUCCESS. If driver cannot migrate the page at the moment, driver +can return -EAGAIN. On -EAGAIN, VM will retry page migration in a short time +because VM interprets -EAGAIN as "temporal migration failure". On returning +any error except -EAGAIN, VM will give up the page migration without retrying +in this time. Driver shouldn't touch page.lru field VM using in the functions. diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 88d5609375de..888d5f8322ce 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -493,7 +493,7 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, put_page(page); /* balloon reference */ - return 0; + return MIGRATEPAGE_SUCCESS; } static struct dentry *balloon_mount(struct file_system_type *fs_type, diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 504bd724e6ab..79542b2698ec 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -45,8 +45,7 @@ #define _LINUX_BALLOON_COMPACTION_H #include #include -#include -#include +#include #include #include #include diff --git a/include/linux/compaction.h b/include/linux/compaction.h index c6b47c861cea..1a02dab16646 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -54,9 +54,6 @@ enum compact_result { struct alloc_context; /* in mm/internal.h */ #ifdef CONFIG_COMPACTION -extern int PageMovable(struct page *page); -extern void __SetPageMovable(struct page *page, struct address_space *mapping); -extern void __ClearPageMovable(struct page *page); extern int sysctl_compact_memory; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos); @@ -154,19 +151,6 @@ extern void kcompactd_stop(int nid); extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); #else -static inline int PageMovable(struct page *page) -{ - return 0; -} -static inline void __SetPageMovable(struct page *page, - struct address_space *mapping) -{ -} - -static inline void __ClearPageMovable(struct page *page) -{ -} - static inline enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, int alloc_flags, const struct alloc_context *ac, diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 404fbfefeb33..ae8d475a9385 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -71,6 +71,21 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, #endif /* CONFIG_MIGRATION */ +#ifdef CONFIG_COMPACTION +extern int PageMovable(struct page *page); +extern void __SetPageMovable(struct page *page, struct address_space *mapping); +extern void __ClearPageMovable(struct page *page); +#else +static inline int PageMovable(struct page *page) { return 0; }; +static 
inline void __SetPageMovable(struct page *page, + struct address_space *mapping) +{ +} +static inline void __ClearPageMovable(struct page *page) +{ +} +#endif + #ifdef CONFIG_NUMA_BALANCING extern bool pmd_trans_migrating(pmd_t pmd); extern int migrate_misplaced_page(struct page *page, diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index e4e8081b160b..04176de6df70 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -50,7 +50,7 @@ #include #include #include -#include +#include #include #define ZSPAGE_MAGIC 0x58 @@ -2107,7 +2107,7 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage, put_page(page); page = newpage; - ret = 0; + ret = MIGRATEPAGE_SUCCESS; unpin_objects: for (addr = s_addr + offset; addr < s_addr + pos; addr += class->size) { -- cgit v1.2.3-70-g09d2 From 17408d785a2eda374b7e673afe20309687a63f08 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Tue, 26 Jul 2016 15:26:53 -0700 Subject: mm: memcontrol: remove BUG_ON in uncharge_list When calling uncharge_list, if a page is transparent huge we don't need to BUG_ON about non-transparent huge, since nobody should be able to see the page at this stage and this page cannot be raced against with a THP split. This check became unneeded after 0a31bc97c80c ("mm: memcontrol: rewrite uncharge API"). [mhocko@suse.com: changelog enhancements] Link: http://lkml.kernel.org/r/1465369248-13865-1-git-send-email-roy.qing.li@gmail.com Signed-off-by: Li RongQing Acked-by: Michal Hocko Acked-by: Johannes Weiner Cc: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 1 - 1 file changed, 1 deletion(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 1a1a3093a5c9..36d6e324fadc 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5580,7 +5580,6 @@ static void uncharge_list(struct list_head *page_list) if (PageTransHuge(page)) { nr_pages <<= compound_order(page); - VM_BUG_ON_PAGE(!PageTransHuge(page), page); nr_huge += nr_pages; } if (PageAnon(page)) -- cgit v1.2.3-70-g09d2 From 25843c2b21d675c68e512daa4a2b50a18f7a840d Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Tue, 26 Jul 2016 15:26:56 -0700 Subject: mm: memcontrol: fix documentation for compound parameter Commit f627c2f53786 ("memcg: adjust to support new THP refcounting") adds a compound parameter for several functions, and change one as compound for mem_cgroup_move_account but it does not change the comments. Link: http://lkml.kernel.org/r/1465368216-9393-1-git-send-email-roy.qing.li@gmail.com Signed-off-by: Li RongQing Acked-by: Michal Hocko Acked-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 36d6e324fadc..f3a84c64f35c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4507,7 +4507,7 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma, /** * mem_cgroup_move_account - move account of the page * @page: the page - * @nr_pages: number of regular pages (>1 for huge pages) + * @compound: charge the page as compound or small page * @from: mem_cgroup which the page is moved from. * @to: mem_cgroup which the page is moved to. @from != @to. 
* @@ -5369,6 +5369,7 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) * @mm: mm context of the victim * @gfp_mask: reclaim mode * @memcgp: charged memcg return + * @compound: charge the page as compound or small page * * Try to charge @page to the memcg that @mm belongs to, reclaiming * pages according to @gfp_mask if necessary. @@ -5431,6 +5432,7 @@ out: * @page: page to charge * @memcg: memcg to charge the page to * @lrucare: page might be on LRU already + * @compound: charge the page as compound or small page * * Finalize a charge transaction started by mem_cgroup_try_charge(), * after page->mapping has been set up. This must happen atomically @@ -5482,6 +5484,7 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, * mem_cgroup_cancel_charge - cancel a page charge * @page: page to charge * @memcg: memcg to charge the page to + * @compound: charge the page as compound or small page * * Cancel a charge transaction started by mem_cgroup_try_charge(). */ -- cgit v1.2.3-70-g09d2 From 1fe4d021acbc356723818a633fe0a10c59c2a4c1 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Tue, 26 Jul 2016 15:26:58 -0700 Subject: cgroup: fix idr leak for the first cgroup root The valid cgroup hierarchy ID range includes 0, so we can't filter for positive numbers when freeing it, or it'll leak the first ID. No big deal, just disruptive when reading the code. The ID is freed during error handling and when the reference count hits zero, so the double-free test is not necessary; remove it. Link: http://lkml.kernel.org/r/20160617162359.GB19084@cmpxchg.org Signed-off-by: Johannes Weiner Cc: Vladimir Davydov Cc: Tejun Heo Cc: Nikolay Borisov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cgroup.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 75c0ff00aca6..3108150e47b1 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1160,18 +1160,12 @@ static void cgroup_exit_root_id(struct cgroup_root *root) { lockdep_assert_held(&cgroup_mutex); - if (root->hierarchy_id) { - idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id); - root->hierarchy_id = 0; - } + idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id); } static void cgroup_free_root(struct cgroup_root *root) { if (root) { - /* hierarchy ID should already have been released */ - WARN_ON_ONCE(root->hierarchy_id); - idr_destroy(&root->cgroup_idr); kfree(root); } -- cgit v1.2.3-70-g09d2 From cb773df88a737d7d7e05ca7ca516414d3fcdcab8 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Tue, 26 Jul 2016 15:27:01 -0700 Subject: cgroup: remove unnecessary 0 check from css_from_id() css_idr allocation starts at 1, so index 0 will never point to an item. css_from_id() currently filters that before asking idr_find(), but idr_find() would also just return NULL, so this is not needed. Link: http://lkml.kernel.org/r/20160617162427.GC19084@cmpxchg.org Signed-off-by: Johannes Weiner Cc: Vladimir Davydov Cc: Tejun Heo Cc: Nikolay Borisov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/cgroup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 3108150e47b1..fa943843a32f 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -6166,7 +6166,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss) { WARN_ON_ONCE(!rcu_read_lock_held()); - return id > 0 ? 
idr_find(&ss->css_idr, id) : NULL; + return idr_find(&ss->css_idr, id); } /** -- cgit v1.2.3-70-g09d2 From 8f19b0c058d93a678a99dd6fec03af2e769943f2 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Tue, 26 Jul 2016 15:27:04 -0700 Subject: thp: fix comments of __pmd_trans_huge_lock() To make the comments consistent with the already changed code. Link: http://lkml.kernel.org/r/1466200004-6196-1-git-send-email-ying.huang@intel.com Signed-off-by: "Huang, Ying" Acked-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d3abbf249fa0..3647334c2ef9 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1458,10 +1458,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, } /* - * Returns true if a given pmd maps a thp, false otherwise. + * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise. * - * Note that if it returns true, this routine returns without unlocking page - * table lock. So callers must unlock it. + * Note that if it returns page table lock pointer, this routine returns without + * unlocking page table lock. So callers must unlock it. */ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) { -- cgit v1.2.3-70-g09d2
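The calling convention described by the fixed comment looks roughly like this in a caller. This is a sketch only: in-tree callers usually go through the pmd_trans_huge_lock() wrapper, and do_something_with_thp() is a made-up placeholder.

  static void example_caller(struct vm_area_struct *vma, pmd_t *pmd)
  {
          spinlock_t *ptl = __pmd_trans_huge_lock(pmd, vma);

          if (!ptl)
                  return;                 /* not a THP mapping: no lock was taken */

          /* the pmd is stable here because the page table lock is held */
          do_something_with_thp(pmd);

          spin_unlock(ptl);               /* callers must drop the returned lock */
  }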