Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig          |  3
-rw-r--r--  mm/damon/dbgfs.c    | 20
-rw-r--r--  mm/highmem.c        | 34
-rw-r--r--  mm/hugetlb.c        | 36
-rw-r--r--  mm/memcontrol.c     |  2
-rw-r--r--  mm/memory-failure.c | 14
-rw-r--r--  mm/page-writeback.c |  2
-rw-r--r--  mm/shmem.c          | 41
-rw-r--r--  mm/slab.c           |  3
-rw-r--r--  mm/slab.h           |  2
-rw-r--r--  mm/slob.c           |  3
-rw-r--r--  mm/slub.c           |  2
-rw-r--r--  mm/swap.c           |  1
-rw-r--r--  mm/userfaultfd.c    |  5
-rw-r--r--  mm/util.c           |  2
15 files changed, 80 insertions, 90 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 068ce591a13a..28edafc820ad 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -890,6 +890,9 @@ config MAPPING_DIRTY_HELPERS
 config KMAP_LOCAL
 	bool
 
+config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
+	bool
+
 # struct io_mapping based helper. Selected by drivers that need them
 config IO_MAPPING
 	bool
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index eccc14b34901..9b520bb4a3e7 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -32,7 +32,7 @@ static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
 	if (*ppos)
 		return ERR_PTR(-EINVAL);
 
-	kbuf = kmalloc(count + 1, GFP_KERNEL);
+	kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
 	if (!kbuf)
 		return ERR_PTR(-ENOMEM);
 
@@ -133,7 +133,7 @@ static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
 	char *kbuf;
 	ssize_t len;
 
-	kbuf = kmalloc(count, GFP_KERNEL);
+	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
 	if (!kbuf)
 		return -ENOMEM;
 
@@ -452,7 +452,7 @@ static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
 	char *kbuf;
 	ssize_t len;
 
-	kbuf = kmalloc(count, GFP_KERNEL);
+	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
 	if (!kbuf)
 		return -ENOMEM;
 
@@ -578,7 +578,7 @@ static ssize_t dbgfs_kdamond_pid_read(struct file *file,
 	char *kbuf;
 	ssize_t len;
 
-	kbuf = kmalloc(count, GFP_KERNEL);
+	kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
 	if (!kbuf)
 		return -ENOMEM;
 
@@ -877,12 +877,14 @@ static ssize_t dbgfs_monitor_on_write(struct file *file,
 		return -EINVAL;
 	}
 
+	mutex_lock(&damon_dbgfs_lock);
 	if (!strncmp(kbuf, "on", count)) {
 		int i;
 
 		for (i = 0; i < dbgfs_nr_ctxs; i++) {
 			if (damon_targets_empty(dbgfs_ctxs[i])) {
 				kfree(kbuf);
+				mutex_unlock(&damon_dbgfs_lock);
 				return -EINVAL;
 			}
 		}
@@ -892,6 +894,7 @@ static ssize_t dbgfs_monitor_on_write(struct file *file,
 	} else {
 		ret = -EINVAL;
 	}
+	mutex_unlock(&damon_dbgfs_lock);
 
 	if (!ret)
 		ret = count;
@@ -944,15 +947,16 @@ static int __init __damon_dbgfs_init(void)
 
 static int __init damon_dbgfs_init(void)
 {
-	int rc;
+	int rc = -ENOMEM;
 
+	mutex_lock(&damon_dbgfs_lock);
 	dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
 	if (!dbgfs_ctxs)
-		return -ENOMEM;
+		goto out;
 	dbgfs_ctxs[0] = dbgfs_new_ctx();
 	if (!dbgfs_ctxs[0]) {
 		kfree(dbgfs_ctxs);
-		return -ENOMEM;
+		goto out;
 	}
 	dbgfs_nr_ctxs = 1;
 
@@ -963,6 +967,8 @@ static int __init damon_dbgfs_init(void)
 		pr_err("%s: dbgfs init failed\n", __func__);
 	}
 
+out:
+	mutex_unlock(&damon_dbgfs_lock);
 	return rc;
 }
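Aside: the dbgfs changes above close an init/write race by taking damon_dbgfs_lock around both paths and funnelling every early return in damon_dbgfs_init() through a single unlock. A minimal user-space sketch of that lock-and-goto-out shape (the pthread mutex and the new_ctx()/ctxs_init() names are illustrative stand-ins, not the kernel API):

    #include <pthread.h>
    #include <stdlib.h>
    #include <errno.h>

    static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
    static void **ctxs;
    static int nr_ctxs;

    /* Hypothetical stand-in for dbgfs_new_ctx(). */
    static void *new_ctx(void) { return calloc(1, 64); }

    static int ctxs_init(void)
    {
            int rc = -ENOMEM;

            pthread_mutex_lock(&ctx_lock);
            ctxs = malloc(sizeof(*ctxs));
            if (!ctxs)
                    goto out;           /* unlock exactly once, on every path */
            ctxs[0] = new_ctx();
            if (!ctxs[0]) {
                    free(ctxs);
                    goto out;
            }
            nr_ctxs = 1;
            rc = 0;
    out:
            pthread_mutex_unlock(&ctx_lock);
            return rc;
    }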
diff --git a/mm/highmem.c b/mm/highmem.c
index 88f65f155845..762679050c9a 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -359,7 +359,6 @@ void kunmap_high(struct page *page)
 }
 EXPORT_SYMBOL(kunmap_high);
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
 		unsigned start2, unsigned end2)
 {
@@ -416,7 +415,6 @@ void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
 	BUG_ON((start1 | start2 | end1 | end2) != 0);
 }
 EXPORT_SYMBOL(zero_user_segments);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif /* CONFIG_HIGHMEM */
 
 #ifdef CONFIG_KMAP_LOCAL
@@ -503,16 +501,22 @@ static inline int kmap_local_calc_idx(int idx)
 
 static pte_t *__kmap_pte;
 
-static pte_t *kmap_get_pte(void)
+static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
 {
+	if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY))
+		/*
+		 * Set by the arch if __kmap_pte[-idx] does not produce
+		 * the correct entry.
+		 */
+		return virt_to_kpte(vaddr);
 	if (!__kmap_pte)
 		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
-	return __kmap_pte;
+	return &__kmap_pte[-idx];
 }
 
 void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 {
-	pte_t pteval, *kmap_pte = kmap_get_pte();
+	pte_t pteval, *kmap_pte;
 	unsigned long vaddr;
 	int idx;
 
@@ -524,9 +528,10 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
 	preempt_disable();
 	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	BUG_ON(!pte_none(*(kmap_pte - idx)));
+	kmap_pte = kmap_get_pte(vaddr, idx);
+	BUG_ON(!pte_none(*kmap_pte));
 	pteval = pfn_pte(pfn, prot);
-	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
+	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval);
 	arch_kmap_local_post_map(vaddr, pteval);
 	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
 	preempt_enable();
@@ -559,7 +564,7 @@ EXPORT_SYMBOL(__kmap_local_page_prot);
 void kunmap_local_indexed(void *vaddr)
 {
 	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
-	pte_t *kmap_pte = kmap_get_pte();
+	pte_t *kmap_pte;
 	int idx;
 
 	if (addr < __fix_to_virt(FIX_KMAP_END) ||
@@ -584,8 +589,9 @@ void kunmap_local_indexed(void *vaddr)
 
 	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
 	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	kmap_pte = kmap_get_pte(addr, idx);
 	arch_kmap_local_pre_unmap(addr);
-	pte_clear(&init_mm, addr, kmap_pte - idx);
+	pte_clear(&init_mm, addr, kmap_pte);
 	arch_kmap_local_post_unmap(addr);
 	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
 	kmap_local_idx_pop();
@@ -607,7 +613,7 @@ EXPORT_SYMBOL(kunmap_local_indexed);
 void __kmap_local_sched_out(void)
 {
 	struct task_struct *tsk = current;
-	pte_t *kmap_pte = kmap_get_pte();
+	pte_t *kmap_pte;
 	int i;
 
 	/* Clear kmaps */
@@ -634,8 +640,9 @@ void __kmap_local_sched_out(void)
 
 		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
 		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+		kmap_pte = kmap_get_pte(addr, idx);
 		arch_kmap_local_pre_unmap(addr);
-		pte_clear(&init_mm, addr, kmap_pte - idx);
+		pte_clear(&init_mm, addr, kmap_pte);
 		arch_kmap_local_post_unmap(addr);
 	}
 }
@@ -643,7 +650,7 @@ void __kmap_local_sched_out(void)
 void __kmap_local_sched_in(void)
 {
 	struct task_struct *tsk = current;
-	pte_t *kmap_pte = kmap_get_pte();
+	pte_t *kmap_pte;
 	int i;
 
 	/* Restore kmaps */
@@ -663,7 +670,8 @@ void __kmap_local_sched_in(void)
 		/* See comment in __kmap_local_sched_out() */
 		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
 		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-		set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
+		kmap_pte = kmap_get_pte(addr, idx);
+		set_pte_at(&init_mm, addr, kmap_pte, pteval);
 		arch_kmap_local_post_map(addr, pteval);
 	}
 }
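Aside: kmap_get_pte() above now takes the target address and slot index because, on arches that select KMAP_LOCAL_NON_LINEAR_PTE_ARRAY, the fixmap PTEs are not one contiguous array that can be indexed as __kmap_pte[-idx]. A rough user-space model of the two lookup strategies (slot_table, non_linear, and lookup_pte() are invented for illustration):

    #include <stdbool.h>

    #define NR_SLOTS 16

    typedef unsigned long pte_t;

    static pte_t slot_table[NR_SLOTS]; /* models the fixmap PTE pages */
    static bool non_linear;            /* models CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY */

    /* Illustrative stand-in for virt_to_kpte(): a per-address walk. */
    static pte_t *lookup_pte(unsigned long vaddr)
    {
            return &slot_table[(vaddr / 4096) % NR_SLOTS];
    }

    static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
    {
            static pte_t *base;

            if (non_linear)
                    return lookup_pte(vaddr);         /* walk on every call */
            if (!base)
                    base = &slot_table[NR_SLOTS - 1]; /* cache the top slot once */
            return &base[-idx];                       /* valid for 0 <= idx < NR_SLOTS */
    }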
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e09159c957e3..abcd1785c629 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1037,8 +1037,10 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
 	 */
 	struct resv_map *reservations = vma_resv_map(vma);
 
-	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
 		kref_put(&reservations->refs, resv_map_release);
+	}
 
 	reset_vma_resv_huge_pages(vma);
 }
@@ -4917,9 +4919,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
 
 		move_huge_pte(vma, old_addr, new_addr, src_pte);
 	}
-	i_mmap_unlock_write(mapping);
 	flush_tlb_range(vma, old_end - len, old_end);
 	mmu_notifier_invalidate_range_end(&range);
+	i_mmap_unlock_write(mapping);
 
 	return len + old_addr - old_end;
 }
@@ -4937,6 +4939,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 	struct mmu_notifier_range range;
+	bool force_flush = false;
 
 	WARN_ON(!is_vm_hugetlb_page(vma));
 	BUG_ON(start & ~huge_page_mask(h));
@@ -4965,10 +4968,8 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 		ptl = huge_pte_lock(h, mm, ptep);
 		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
 			spin_unlock(ptl);
-			/*
-			 * We just unmapped a page of PMDs by clearing a PUD.
-			 * The caller's TLB flush range should cover this area.
-			 */
+			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
+			force_flush = true;
 			continue;
 		}
 
@@ -5025,6 +5026,22 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 	}
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_end_vma(tlb, vma);
+
+	/*
+	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
+	 * could defer the flush until now, since by holding i_mmap_rwsem we
+	 * guaranteed that the last reference would not be dropped. But we must
+	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
+	 * dropped and the last reference to the shared PMDs page might be
+	 * dropped as well.
+	 *
+	 * In theory we could defer the freeing of the PMD pages as well, but
+	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
+	 * detect sharing, so we cannot defer the release of the page either.
+	 * Instead, do flush now.
+	 */
+	if (force_flush)
+		tlb_flush_mmu_tlbonly(tlb);
 }
 
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
@@ -5734,13 +5751,14 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	int ret = -ENOMEM;
 	struct page *page;
 	int writable;
-	bool new_pagecache_page = false;
+	bool page_in_pagecache = false;
 
 	if (is_continue) {
 		ret = -EFAULT;
 		page = find_lock_page(mapping, idx);
 		if (!page)
 			goto out;
+		page_in_pagecache = true;
 	} else if (!*pagep) {
 		/* If a page already exists, then it's UFFDIO_COPY for
 		 * a non-missing case. Return -EEXIST.
 		 */
@@ -5828,7 +5846,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 		ret = huge_add_to_page_cache(page, mapping, idx);
 		if (ret)
 			goto out_release_nounlock;
-		new_pagecache_page = true;
+		page_in_pagecache = true;
 	}
 
 	ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
@@ -5892,7 +5910,7 @@ out_release_unlock:
 	if (vm_shared || is_continue)
 		unlock_page(page);
 out_release_nounlock:
-	if (!new_pagecache_page)
+	if (!page_in_pagecache)
 		restore_reserve_on_error(h, dst_vma, dst_addr, page);
 	put_page(page);
 	goto out;
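Aside: the __unmap_hugepage_range() change records the unshared PUD range via tlb_flush_pmd_range() and, when force_flush is set, invalidates the TLB before i_mmap_rwsem can be dropped. A compact model of that "note the range, flush before the pin goes away" pattern (struct gather and both helpers are illustrative, not kernel API):

    #include <stdbool.h>

    /* Hypothetical TLB bookkeeping standing in for the mmu_gather helpers. */
    struct gather {
            unsigned long start, end;
            bool need_flush;
    };

    static void gather_note(struct gather *g, unsigned long start, unsigned long len)
    {
            if (!g->need_flush) {
                    g->start = start;
                    g->end = start + len;
                    g->need_flush = true;
                    return;
            }
            if (start < g->start)
                    g->start = start;
            if (start + len > g->end)
                    g->end = start + len;
    }

    static void tlb_flush_only(struct gather *g)
    {
            /* ...issue the invalidation for [g->start, g->end)... (model only) */
            g->need_flush = false;
    }

    /*
     * Shape of the fix: when a shared page-table page is unhooked while a
     * lock still pins it, record the covering range and flush before that
     * lock is released -- never defer the flush to a caller that runs
     * after the unlock.
     */
    static void unshare_and_flush(struct gather *g, unsigned long pud_base,
                                  unsigned long pud_size)
    {
            gather_note(g, pud_base, pud_size); /* tlb_flush_pmd_range() analogue */
            tlb_flush_only(g);                  /* tlb_flush_mmu_tlbonly() analogue */
    }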
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 781605e92015..6863a834ed42 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5558,7 +5558,7 @@ static int mem_cgroup_move_account(struct page *page,
 
 	VM_BUG_ON(from == to);
 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
-	VM_BUG_ON(compound && !folio_test_multi(folio));
+	VM_BUG_ON(compound && !folio_test_large(folio));
 
 	/*
 	 * Prevent mem_cgroup_migrate() from looking at
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index f64ebb6226cb..07c875fdeaf0 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -58,7 +58,6 @@
 #include <linux/ratelimit.h>
 #include <linux/page-isolation.h>
 #include <linux/pagewalk.h>
-#include <linux/shmem_fs.h>
 #include "internal.h"
 #include "ras/ras_event.h"
 
@@ -868,7 +867,6 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
 {
 	int ret;
 	struct address_space *mapping;
-	bool extra_pins;
 
 	delete_from_lru_cache(p);
 
@@ -898,23 +896,17 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
 	}
 
 	/*
-	 * The shmem page is kept in page cache instead of truncating
-	 * so is expected to have an extra refcount after error-handling.
-	 */
-	extra_pins = shmem_mapping(mapping);
-
-	/*
 	 * Truncation is a bit tricky. Enable it per file system for now.
 	 *
 	 * Open: to take i_rwsem or not for this? Right now we don't.
 	 */
 	ret = truncate_error_page(p, page_to_pfn(p), mapping);
-	if (has_extra_refcount(ps, p, extra_pins))
-		ret = MF_FAILED;
-
 out:
 	unlock_page(p);
+
+	if (has_extra_refcount(ps, p, false))
+		ret = MF_FAILED;
+
 	return ret;
 }
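Aside: the mm/memcontrol.c hunk above and the mm/util.c hunk at the end of this diff both drop the short-lived folio_test_single()/folio_test_multi() pair for a single folio_test_large() predicate; since the two were exact complements, one test suffices. A toy model (this struct folio and flag bit are invented for illustration):

    #include <stdbool.h>

    /* Toy folio: bit 0 models "this is a multi-page (large) folio". */
    struct folio { unsigned long flags; };
    #define TOY_LARGE 0x1UL

    static bool folio_test_large(const struct folio *f)
    {
            return f->flags & TOY_LARGE;
    }

    /* The old folio_test_single(f) is just !folio_test_large(f), so one
     * predicate replaces the pair with no loss of information. */
    static bool is_single(const struct folio *f)
    {
            return !folio_test_large(f);
    }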
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2d498bb62248..a613f8ef6a02 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2967,7 +2967,7 @@ EXPORT_SYMBOL_GPL(folio_wait_writeback_killable);
  */
 void folio_wait_stable(struct folio *folio)
 {
-	if (folio->mapping->host->i_sb->s_iflags & SB_I_STABLE_WRITES)
+	if (folio_inode(folio)->i_sb->s_iflags & SB_I_STABLE_WRITES)
 		folio_wait_writeback(folio);
 }
 EXPORT_SYMBOL_GPL(folio_wait_stable);
diff --git a/mm/shmem.c b/mm/shmem.c
index f0eee4e221a7..18f93c2d68f1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2303,6 +2303,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 		INIT_LIST_HEAD(&info->swaplist);
 		simple_xattrs_init(&info->xattrs);
 		cache_no_acl(inode);
+		mapping_set_large_folios(inode->i_mapping);
 
 		switch (mode & S_IFMT) {
 		default:
@@ -2456,7 +2457,6 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	pgoff_t index = pos >> PAGE_SHIFT;
-	int ret = 0;
 
 	/* i_rwsem is held by caller */
 	if (unlikely(info->seals & (F_SEAL_GROW |
@@ -2467,15 +2467,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
 			return -EPERM;
 	}
 
-	ret = shmem_getpage(inode, index, pagep, SGP_WRITE);
-
-	if (*pagep && PageHWPoison(*pagep)) {
-		unlock_page(*pagep);
-		put_page(*pagep);
-		ret = -EIO;
-	}
-
-	return ret;
+	return shmem_getpage(inode, index, pagep, SGP_WRITE);
 }
 
 static int
@@ -2562,12 +2554,6 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 			if (sgp == SGP_CACHE)
 				set_page_dirty(page);
 			unlock_page(page);
-
-			if (PageHWPoison(page)) {
-				put_page(page);
-				error = -EIO;
-				break;
-			}
 		}
 
 		/*
@@ -3107,8 +3093,7 @@ static const char *shmem_get_link(struct dentry *dentry,
 		page = find_get_page(inode->i_mapping, 0);
 		if (!page)
 			return ERR_PTR(-ECHILD);
-		if (PageHWPoison(page) ||
-		    !PageUptodate(page)) {
+		if (!PageUptodate(page)) {
 			put_page(page);
 			return ERR_PTR(-ECHILD);
 		}
@@ -3116,11 +3101,6 @@ static const char *shmem_get_link(struct dentry *dentry,
 		error = shmem_getpage(inode, 0, &page, SGP_READ);
 		if (error)
 			return ERR_PTR(error);
-		if (page && PageHWPoison(page)) {
-			unlock_page(page);
-			put_page(page);
-			return ERR_PTR(-ECHILD);
-		}
 		unlock_page(page);
 	}
 	set_delayed_call(done, shmem_put_link, page);
@@ -3771,13 +3751,6 @@ static void shmem_destroy_inodecache(void)
 	kmem_cache_destroy(shmem_inode_cachep);
 }
 
-/* Keep the page in page cache instead of truncating it */
-static int shmem_error_remove_page(struct address_space *mapping,
-				struct page *page)
-{
-	return 0;
-}
-
 const struct address_space_operations shmem_aops = {
 	.writepage	= shmem_writepage,
 	.set_page_dirty	= __set_page_dirty_no_writeback,
@@ -3788,7 +3761,7 @@ const struct address_space_operations shmem_aops = {
 #ifdef CONFIG_MIGRATION
 	.migratepage	= migrate_page,
 #endif
-	.error_remove_page = shmem_error_remove_page,
+	.error_remove_page = generic_error_remove_page,
 };
 EXPORT_SYMBOL(shmem_aops);
 
@@ -3898,7 +3871,7 @@ static struct file_system_type shmem_fs_type = {
 	.parameters	= shmem_fs_parameters,
 #endif
 	.kill_sb	= kill_litter_super,
-	.fs_flags	= FS_USERNS_MOUNT | FS_THP_SUPPORT,
+	.fs_flags	= FS_USERNS_MOUNT,
 };
 
 int __init shmem_init(void)
@@ -4199,10 +4172,6 @@ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 		page = ERR_PTR(error);
 	else
 		unlock_page(page);
-
-	if (PageHWPoison(page))
-		page = ERR_PTR(-EIO);
-
 	return page;
 #else
 	/*
diff --git a/mm/slab.c b/mm/slab.c
index da132a9ae6f8..ca4822f6b2b6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3733,14 +3733,13 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	if (!cachep)
 		return;
 
+	trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
 	local_irq_save(flags);
 	debug_check_no_locks_freed(objp, cachep->object_size);
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(objp, cachep->object_size);
 	__cache_free(cachep, objp, _RET_IP_);
 	local_irq_restore(flags);
-
-	trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
diff --git a/mm/slab.h b/mm/slab.h
index 58c01a34e5b8..56ad7eea3ddf 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -147,7 +147,7 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
 			  SLAB_TEMPORARY | SLAB_ACCOUNT)
 #else
-#define SLAB_CACHE_FLAGS (0)
+#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
 #endif
 
 /* Common flags available with current configuration */
diff --git a/mm/slob.c b/mm/slob.c
index 74d3f6e60666..03deee1e6a94 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -666,6 +666,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
 	kmemleak_free_recursive(b, c->flags);
+	trace_kmem_cache_free(_RET_IP_, b, c->name);
 	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
 		struct slob_rcu *slob_rcu;
 		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
@@ -674,8 +675,6 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
-
-	trace_kmem_cache_free(_RET_IP_, b, c->name);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
diff --git a/mm/slub.c b/mm/slub.c
index f7368bfffb7a..a8626825a829 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3526,8 +3526,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	s = cache_from_obj(s, x);
 	if (!s)
 		return;
-	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
 	trace_kmem_cache_free(_RET_IP_, x, s->name);
+	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
diff --git a/mm/swap.c b/mm/swap.c
index 1841c24682f8..e8c9dc6d0377 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -156,6 +156,7 @@ void put_pages_list(struct list_head *pages)
 	}
 
 	free_unref_page_list(pages);
+	INIT_LIST_HEAD(pages);
 }
 EXPORT_SYMBOL(put_pages_list);
 
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 0780c2a57ff1..ac6f036298cd 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -232,11 +232,6 @@ static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
 		goto out;
 	}
 
-	if (PageHWPoison(page)) {
-		ret = -EIO;
-		goto out_release;
-	}
-
 	ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
 				       page, false, wp_copy);
 	if (ret)
diff --git a/mm/util.c b/mm/util.c
index e58151a61255..741ba32a43ac 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -670,7 +670,7 @@ bool folio_mapped(struct folio *folio)
 {
 	long i, nr;
 
-	if (folio_test_single(folio))
+	if (!folio_test_large(folio))
 		return atomic_read(&folio->_mapcount) >= 0;
 	if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
 		return true;
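Aside: the slab.c, slob.c, and slub.c hunks all move the kmem_cache_free tracepoint ahead of the actual free; once an object is handed back to the allocator it can be reused immediately by another CPU, so reporting it afterwards may describe memory that is no longer the caller's. A user-space analogue of that ordering (struct cache and cache_free() are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct cache { const char *name; };

    static void cache_free(struct cache *c, void *obj)
    {
            /*
             * Trace first: after free() the allocation may be recycled (or
             * the cache torn down) concurrently, so anything logged later
             * can describe memory we no longer own.
             */
            fprintf(stderr, "free %p from %s\n", obj, c->name);
            free(obj);
    }

    int main(void)
    {
            struct cache c = { .name = "demo" };

            cache_free(&c, malloc(32));
            return 0;
    }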