author		Linus Torvalds <torvalds@linux-foundation.org>	2019-06-02 08:51:30 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-06-02 08:51:30 -0700
commit		7b3064f0e8deb55b8655dd8d36d9d1e8fb62b71b (patch)
tree		a991e83f2711bc4ee1ca7daa868fc269637b6d45 /mm
parent		3ab4436f688c2d2f221793953cd05435ca84261c (diff)
parent		e577c8b64d58fe307ea4d5149d31615df2d90861 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
"Various fixes and followups"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
mm, compaction: make sure we isolate a valid PFN
include/linux/generic-radix-tree.h: fix kerneldoc comment
kernel/signal.c: trace_signal_deliver when signal_group_exit
drivers/iommu/intel-iommu.c: fix variable 'iommu' set but not used
spdxcheck.py: fix directory structures
kasan: initialize tag to 0xff in __kasan_kmalloc
z3fold: fix scheduling while atomic
scripts/gdb: fix invocation when CONFIG_COMMON_CLK is not set
mm/gup: continue VM_FAULT_RETRY processing even for pre-faults
ocfs2: fix error path kobject memory leak
memcg: make it work on sparse non-0-node systems
mm, memcg: consider subtrees in memory.events
prctl_set_mm: downgrade mmap_sem to read lock
prctl_set_mm: refactor checks from validate_prctl_map
kernel/fork.c: make max_threads symbol static
arch/arm/boot/compressed/decompress.c: fix build error due to lz4 changes
arch/parisc/configs/c8000_defconfig: remove obsoleted CONFIG_DEBUG_SLAB_LEAK
mm/vmalloc.c: fix typo in comment
lib/sort.c: fix kernel-doc notation warnings
mm: fix Documentation/vm/hmm.rst Sphinx warnings
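
The "memcg: make it work on sparse non-0-node systems" fix in this batch replaces an inference from node 0 with a flag stored at init time; the mm/list_lru.c hunks appear in the diff below. As a hedged, userspace-only sketch of that pattern (the struct, fields, and function names here are invented for illustration, not the kernel's):

#include <stdbool.h>

struct lru {
	void *node_state[16];	/* per-node data; node 0 may never exist */
	bool memcg_aware;	/* recorded once at init, never inferred */
};

static void lru_init(struct lru *lru, bool memcg_aware)
{
	/* The fix: remember the property instead of probing node 0. */
	lru->memcg_aware = memcg_aware;
}

static bool lru_is_memcg_aware(const struct lru *lru)
{
	/* Safe even on systems where node_state[0] was never set up. */
	return lru->memcg_aware;
}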
Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c		2
-rw-r--r--	mm/gup.c		15
-rw-r--r--	mm/kasan/common.c	2
-rw-r--r--	mm/list_lru.c		8
-rw-r--r--	mm/util.c		4
-rw-r--r--	mm/vmalloc.c		2
-rw-r--r--	mm/z3fold.c		11
7 files changed, 22 insertions, 22 deletions
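
The first diff below is the compaction fix: before falling back to min_pfn, the PFN is now checked with pfn_valid() so pfn_to_page() is never fed a hole in the memory map. A minimal userspace sketch of that guard-before-convert pattern (entry_valid() and table[] are illustrative stand-ins, not kernel API):

#include <stdbool.h>
#include <stddef.h>

struct entry { int data; };

extern struct entry table[];			/* like the memory map */
extern bool entry_valid(unsigned long idx);	/* like pfn_valid() */

static struct entry *lookup(unsigned long idx)
{
	if (!entry_valid(idx))	/* the guard the fix adds */
		return NULL;
	return &table[idx];	/* like pfn_to_page(): only safe if valid */
}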
diff --git a/mm/compaction.c b/mm/compaction.c
index 9febc8cc84e7..9e1b9acb116b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1399,7 +1399,7 @@ fast_isolate_freepages(struct compact_control *cc)
 				page = pfn_to_page(highest);
 				cc->free_pfn = highest;
 			} else {
-				if (cc->direct_compaction) {
+				if (cc->direct_compaction && pfn_valid(min_pfn)) {
 					page = pfn_to_page(min_pfn);
 					cc->free_pfn = min_pfn;
 				}
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1042,10 +1042,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 			BUG_ON(ret >= nr_pages);
 		}
 
-		if (!pages)
-			/* If it's a prefault don't insist harder */
-			return ret;
-
 		if (ret > 0) {
 			nr_pages -= ret;
 			pages_done += ret;
@@ -1061,8 +1057,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 				pages_done = ret;
 			break;
 		}
-		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
-		pages += ret;
+		/*
+		 * VM_FAULT_RETRY triggered, so seek to the faulting offset.
+		 * For the prefault case (!pages) we only update counts.
+		 */
+		if (likely(pages))
+			pages += ret;
 		start += ret << PAGE_SHIFT;
 
 		/*
@@ -1085,7 +1085,8 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 		pages_done++;
 		if (!nr_pages)
 			break;
-		pages++;
+		if (likely(pages))
+			pages++;
 		start += PAGE_SIZE;
 	}
 	if (lock_dropped && *locked) {
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 36afcf64e016..242fdc01aaa9 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -464,7 +464,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
 {
 	unsigned long redzone_start;
 	unsigned long redzone_end;
-	u8 tag;
+	u8 tag = 0xff;
 
 	if (gfpflags_allow_blocking(flags))
 		quarantine_reduce();
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 0bdf3152735e..e4709fdaa8e6 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -38,11 +38,7 @@ static int lru_shrinker_id(struct list_lru *lru)
 
 static inline bool list_lru_memcg_aware(struct list_lru *lru)
 {
-	/*
-	 * This needs node 0 to be always present, even
-	 * in the systems supporting sparse numa ids.
-	 */
-	return !!lru->node[0].memcg_lrus;
+	return lru->memcg_aware;
 }
 
 static inline struct list_lru_one *
@@ -452,6 +448,8 @@ static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
 {
 	int i;
 
+	lru->memcg_aware = memcg_aware;
+
 	if (!memcg_aware)
 		return 0;
 
diff --git a/mm/util.c b/mm/util.c
index 91682a2090ee..9834c4ab7d8e 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -718,12 +718,12 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
 	if (!mm->arg_end)
 		goto out_mm;	/* Shh! No looking before we're done */
 
-	down_read(&mm->mmap_sem);
+	spin_lock(&mm->arg_lock);
 	arg_start = mm->arg_start;
 	arg_end = mm->arg_end;
 	env_start = mm->env_start;
 	env_end = mm->env_end;
-	up_read(&mm->mmap_sem);
+	spin_unlock(&mm->arg_lock);
 
 	len = arg_end - arg_start;
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 233af6936c93..7350a124524b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -815,7 +815,7 @@ find_vmap_lowest_match(unsigned long size,
 	}
 
 	/*
-	 * OK. We roll back and find the fist right sub-tree,
+	 * OK. We roll back and find the first right sub-tree,
 	 * that will satisfy the search criteria. It can happen
 	 * only once due to "vstart" restriction.
 	 */
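
The mm/gup.c change above is the subtle one: a pre-fault caller passes pages == NULL, and the old code bailed out early instead of retrying after VM_FAULT_RETRY. The fix keeps retrying and simply skips the pointer arithmetic when there is no output array. A hedged userspace sketch of that shape (fill_pages() and its arguments are invented for illustration):

#include <stddef.h>

/* Record up to nr items into *pages if the caller supplied an array. */
static long fill_pages(void **pages, long nr)
{
	long done = 0;

	while (nr > 0) {
		if (pages) {		/* NULL means "prefault": no output */
			*pages = NULL;	/* stand-in for storing a result */
			pages++;	/* advance only when the array exists */
		}
		nr--;
		done++;			/* progress is counted either way */
	}
	return done;
}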
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 99be52c5ca45..985732c8b025 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -190,10 +190,11 @@ static int size_to_chunks(size_t size)
 
 static void compact_page_work(struct work_struct *w);
 
-static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool)
+static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
+							gfp_t gfp)
 {
 	struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
-							GFP_KERNEL);
+							gfp);
 
 	if (slots) {
 		memset(slots->slot, 0, sizeof(slots->slot));
@@ -295,10 +296,10 @@ static void z3fold_unregister_migration(struct z3fold_pool *pool)
 
 /* Initializes the z3fold header of a newly allocated z3fold page */
 static struct z3fold_header *init_z3fold_page(struct page *page,
-					struct z3fold_pool *pool)
+					struct z3fold_pool *pool, gfp_t gfp)
 {
 	struct z3fold_header *zhdr = page_address(page);
-	struct z3fold_buddy_slots *slots = alloc_slots(pool);
+	struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);
 
 	if (!slots)
 		return NULL;
@@ -912,7 +913,7 @@ retry:
 	if (!page)
 		return -ENOMEM;
 
-	zhdr = init_z3fold_page(page, pool);
+	zhdr = init_z3fold_page(page, pool, gfp);
 	if (!zhdr) {
 		__free_page(page);
 		return -ENOMEM;
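
The z3fold diff above fixes the scheduling-while-atomic bug by threading the caller's gfp mask through init_z3fold_page() and alloc_slots() instead of hardcoding GFP_KERNEL. A userspace sketch of that plumbing (the enum and function names are invented; userspace malloc() has no blocking/atomic split, so the parameter only demonstrates the flow):

#include <stdlib.h>

enum alloc_mode { MODE_MAY_SLEEP, MODE_ATOMIC };  /* ~GFP_KERNEL/GFP_ATOMIC */

static void *alloc_slots(enum alloc_mode mode)
{
	(void)mode;	/* in the kernel: kmem_cache_alloc(cache, gfp) */
	return malloc(64);
}

static void *init_page(enum alloc_mode mode)
{
	/* The fix: pass the caller's mode down rather than hardcoding it. */
	return alloc_slots(mode);
}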