 drivers/iio/magnetometer/yamaha-yas530.c  | 13
 fs/nilfs2/dir.c                           |  2
 fs/ocfs2/dlmglue.c                        |  1
 fs/ocfs2/namei.c                          |  4
 fs/proc/vmcore.c                          | 56
 include/linux/page-flags.h                |  4
 include/linux/pgalloc_tag.h               |  4
 include/linux/scatterlist.h               |  2
 include/linux/stackdepot.h                |  6
 include/trace/events/damon.h              |  2
 kernel/sched/fair.c                       | 12
 lib/alloc_tag.c                           | 36
 lib/stackdepot.c                          | 10
 lib/stackinit_kunit.c                     |  1
 mm/debug.c                                |  7
 mm/filemap.c                              |  4
 mm/gup.c                                  | 11
 mm/kasan/report.c                         |  6
 mm/memcontrol-v1.h                        |  2
 mm/mempolicy.c                            |  4
 mm/migrate.c                              |  2
 mm/mmap.c                                 |  1
 mm/readahead.c                            |  5
 mm/vma.c                                  |  2
 mm/vmalloc.c                              |  3
 tools/testing/selftests/damon/Makefile    |  2
 tools/testing/selftests/mm/hugetlb_dio.c  | 14
 27 files changed, 128 insertions(+), 88 deletions(-)
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index 65011a8598d3..c55a38650c0d 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -372,6 +372,7 @@ static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y
 	u8 data[8];
 	u16 xy1y2[3];
 	s32 h[3], s[3];
+	int half_range = BIT(13);
 	int i, ret;
 
 	mutex_lock(&yas5xx->lock);
@@ -406,13 +407,13 @@ static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y
 	/* The second version of YAS537 needs to include calibration coefficients */
 	if (yas5xx->version == YAS537_VERSION_1) {
 		for (i = 0; i < 3; i++)
-			s[i] = xy1y2[i] - BIT(13);
-		h[0] = (c->k * (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / BIT(13);
-		h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / BIT(13);
-		h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / BIT(13);
+			s[i] = xy1y2[i] - half_range;
+		h[0] = (c->k * (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / half_range;
+		h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / half_range;
+		h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / half_range;
 		for (i = 0; i < 3; i++) {
-			clamp_val(h[i], -BIT(13), BIT(13) - 1);
-			xy1y2[i] = h[i] + BIT(13);
+			h[i] = clamp(h[i], -half_range, half_range - 1);
+			xy1y2[i] = h[i] + half_range;
 		}
 	}
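Note on the clamp change above: the old code called clamp_val() for its side effect, but clamp()/clamp_val() return the clamped value and leave the argument untouched, so the result was silently discarded. A minimal userspace sketch of the pitfall, using a local clamp() stand-in and made-up values:

#include <stdio.h>

#define clamp(val, lo, hi) \
	((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

int main(void)
{
	int half_range = 1 << 13;	/* 8192, as in the driver */
	int h = 9000;			/* deliberately out of range */

	clamp(h, -half_range, half_range - 1);	/* BUG: result discarded */
	printf("discarded: %d\n", h);		/* still 9000 */

	h = clamp(h, -half_range, half_range - 1);	/* fixed form */
	printf("assigned:  %d\n", h);			/* 8191 */
	return 0;
}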
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index 14e8d82f8629..0a3aea6c416b 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -70,7 +70,7 @@ static inline unsigned int nilfs_chunk_size(struct inode *inode)
  */
 static unsigned int nilfs_last_byte(struct inode *inode, unsigned long page_nr)
 {
-	unsigned int last_byte = inode->i_size;
+	u64 last_byte = inode->i_size;
 
 	last_byte -= page_nr << PAGE_SHIFT;
 	if (last_byte > PAGE_SIZE)
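The nilfs2 change above avoids truncating a 64-bit i_size through a 32-bit local. A standalone sketch of the failure mode, with a made-up 5 GiB file size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t i_size = 5ULL * 1024 * 1024 * 1024;	/* 5 GiB file */

	unsigned int truncated = i_size;	/* BUG: high bits dropped */
	uint64_t kept = i_size;			/* fix: 64-bit local */

	printf("truncated: %u\n", truncated);	/* 1073741824: wrapped */
	printf("kept:      %llu\n", (unsigned long long)kept);
	return 0;
}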
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 60df52e4c1f8..764ecbd5ad41 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3110,6 +3110,7 @@ static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
 	struct ocfs2_lock_res *iter = v;
 	struct ocfs2_lock_res *dummy = &priv->p_iter_res;
 
+	(*pos)++;
 	spin_lock(&ocfs2_dlm_tracking_lock);
 	iter = ocfs2_dlm_next_res(iter, priv);
 	list_del_init(&dummy->l_debug_list);
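The dlmglue fix restores the seq_file contract that a ->next() handler advances *pos on every call, even when iteration ends; otherwise the core can replay or drop records. A toy userspace model of that iterator contract (demo_next() and the item names are invented for illustration):

#include <stdio.h>

static const char *items[] = { "lockres-A", "lockres-B", "lockres-C" };

/* Analogue of ocfs2_dlm_seq_next(): advance *pos, then fetch. */
static const void *demo_next(const void *v, long *pos)
{
	(void)v;	/* a real ->next() walks on from the current record */
	(*pos)++;	/* the line the fix adds */
	return *pos < 3 ? items[*pos] : NULL;
}

int main(void)
{
	long pos = 0;
	const void *v = items[0];

	while (v) {
		printf("%ld: %s\n", pos, (const char *)v);
		v = demo_next(v, &pos);
	}
	return 0;
}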
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 59c92353151a..5550f8afa438 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -200,8 +200,10 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
 	mode = mode_strip_sgid(&nop_mnt_idmap, dir, mode);
 	inode_init_owner(&nop_mnt_idmap, inode, dir, mode);
 	status = dquot_initialize(inode);
-	if (status)
+	if (status) {
+		iput(inode);
 		return ERR_PTR(status);
+	}
 
 	return inode;
 }
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index b4521b096058..3d8a82cee63e 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -414,6 +414,34 @@ static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
 	return __read_vmcore(iter, &iocb->ki_pos);
 }
 
+/**
+ * vmcore_alloc_buf - allocate buffer in vmalloc memory
+ * @size: size of buffer
+ *
+ * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
+ * the buffer to user-space by means of remap_vmalloc_range().
+ *
+ * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
+ * disabled and there's no need to allow users to mmap the buffer.
+ */
+static inline char *vmcore_alloc_buf(size_t size)
+{
+#ifdef CONFIG_MMU
+	return vmalloc_user(size);
+#else
+	return vzalloc(size);
+#endif
+}
+
+/*
+ * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
+ * essential for mmap_vmcore() in order to map physically
+ * non-contiguous objects (ELF header, ELF note segment and memory
+ * regions in the 1st kernel pointed to by PT_LOAD entries) into
+ * virtually contiguous user-space in ELF layout.
+ */
+#ifdef CONFIG_MMU
+
 /*
  * The vmcore fault handler uses the page cache and fills data using the
  * standard __read_vmcore() function.
@@ -457,34 +485,6 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
 #endif
 }
 
-/**
- * vmcore_alloc_buf - allocate buffer in vmalloc memory
- * @size: size of buffer
- *
- * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
- * the buffer to user-space by means of remap_vmalloc_range().
- *
- * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
- * disabled and there's no need to allow users to mmap the buffer.
- */
-static inline char *vmcore_alloc_buf(size_t size)
-{
-#ifdef CONFIG_MMU
-	return vmalloc_user(size);
-#else
-	return vzalloc(size);
-#endif
-}
-
-/*
- * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
- * essential for mmap_vmcore() in order to map physically
- * non-contiguous objects (ELF header, ELF note segment and memory
- * regions in the 1st kernel pointed to by PT_LOAD entries) into
- * virtually contiguous user-space in ELF layout.
- */
-#ifdef CONFIG_MMU
-
 static const struct vm_operations_struct vmcore_mmap_ops = {
 	.fault = mmap_vmcore_fault,
 };
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 2220bfec278e..cf46ac720802 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -306,7 +306,7 @@ static const unsigned long *const_folio_flags(const struct folio *folio,
 {
 	const struct page *page = &folio->page;
 
-	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
 	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
 	return &page[n].flags;
 }
@@ -315,7 +315,7 @@ static unsigned long *folio_flags(struct folio *folio, unsigned n)
 {
 	struct page *page = &folio->page;
 
-	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+	VM_BUG_ON_PGFLAGS(page->compound_head & 1, page);
 	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
 	return &page[n].flags;
 }
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index 0e43ab653ab6..3469c4b20105 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -231,7 +231,7 @@ static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
 }
 
 void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
-void pgalloc_tag_copy(struct folio *new, struct folio *old);
+void pgalloc_tag_swap(struct folio *new, struct folio *old);
 
 void __init alloc_tag_sec_init(void);
 
@@ -245,7 +245,7 @@ static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL
 static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
 static inline void alloc_tag_sec_init(void) {}
 static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {}
-static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) {}
+static inline void pgalloc_tag_swap(struct folio *new, struct folio *old) {}
 
 #endif /* CONFIG_MEM_ALLOC_PROFILING */
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index c5e2239b550e..d836e7440ee8 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -313,7 +313,7 @@ static inline void sg_dma_mark_bus_address(struct scatterlist *sg)
 }
 
 /**
- * sg_unmark_bus_address - Unmark the scatterlist entry as a bus address
+ * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address
  * @sg: SG entry
  *
  * Description:
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index e9ec32fb97d4..2cc21ffcdaf9 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -147,7 +147,7 @@ static inline int stack_depot_early_init(void) { return 0; }
 * If the provided stack trace comes from the interrupt context, only the part
 * up to the interrupt entry is saved.
 *
- * Context: Any context, but setting STACK_DEPOT_FLAG_CAN_ALLOC is required if
+ * Context: Any context, but unsetting STACK_DEPOT_FLAG_CAN_ALLOC is required if
 *          alloc_pages() cannot be used from the current context. Currently
 *          this is the case for contexts where neither %GFP_ATOMIC nor
 *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
@@ -156,7 +156,7 @@ static inline int stack_depot_early_init(void) { return 0; }
 */
 depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 					    unsigned int nr_entries,
-					    gfp_t gfp_flags,
+					    gfp_t alloc_flags,
 					    depot_flags_t depot_flags);
 
 /**
@@ -175,7 +175,7 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 * Return: Handle of the stack trace stored in depot, 0 on failure
 */
 depot_stack_handle_t stack_depot_save(unsigned long *entries,
-				      unsigned int nr_entries, gfp_t gfp_flags);
+				      unsigned int nr_entries, gfp_t alloc_flags);
 
 /**
 * __stack_depot_get_stack_record - Get a pointer to a stack_record struct
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
index 23200aabccac..da4bd9fd1162 100644
--- a/include/trace/events/damon.h
+++ b/include/trace/events/damon.h
@@ -15,7 +15,7 @@ TRACE_EVENT_CONDITION(damos_before_apply,
 		unsigned int target_idx, struct damon_region *r,
 		unsigned int nr_regions, bool do_trace),
 
-	TP_ARGS(context_idx, target_idx, scheme_idx, r, nr_regions, do_trace),
+	TP_ARGS(context_idx, scheme_idx, target_idx, r, nr_regions, do_trace),
 
 	TP_CONDITION(do_trace),
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fbdca89c677f..a59ae2e23daf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3399,10 +3399,16 @@ retry_pids:
 
 		/* Initialise new per-VMA NUMAB state. */
 		if (!vma->numab_state) {
-			vma->numab_state = kzalloc(sizeof(struct vma_numab_state),
-				GFP_KERNEL);
-			if (!vma->numab_state)
+			struct vma_numab_state *ptr;
+
+			ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+			if (!ptr)
+				continue;
+
+			if (cmpxchg(&vma->numab_state, NULL, ptr)) {
+				kfree(ptr);
 				continue;
+			}
 
 			vma->numab_state->start_scan_seq = mm->numa_scan_seq;
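The fair.c change switches to allocate-then-publish: the new state is installed with cmpxchg() so that two racing scanners cannot both assign vma->numab_state and leak one allocation. A userspace sketch of the same publish-once pattern using C11 atomics (struct and function names are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct state { int start_scan_seq; };

static _Atomic(struct state *) shared_state;

static struct state *get_state(void)
{
	struct state *cur = atomic_load(&shared_state);
	struct state *ptr;

	if (cur)
		return cur;

	ptr = calloc(1, sizeof(*ptr));
	if (!ptr)
		return NULL;

	/* Install only if still NULL; otherwise another thread won. */
	if (!atomic_compare_exchange_strong(&shared_state, &cur, ptr)) {
		free(ptr);	/* lost the race: drop our allocation */
		return cur;	/* cur was updated to the winner's pointer */
	}
	return ptr;
}

int main(void)
{
	printf("state at %p\n", (void *)get_state());
	printf("state at %p\n", (void *)get_state());	/* same pointer */
	return 0;
}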
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 2414a7ee7ec7..35f7560a309a 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -189,26 +189,34 @@ void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
 	}
 }
 
-void pgalloc_tag_copy(struct folio *new, struct folio *old)
+void pgalloc_tag_swap(struct folio *new, struct folio *old)
 {
-	union pgtag_ref_handle handle;
-	union codetag_ref ref;
-	struct alloc_tag *tag;
+	union pgtag_ref_handle handle_old, handle_new;
+	union codetag_ref ref_old, ref_new;
+	struct alloc_tag *tag_old, *tag_new;
 
-	tag = pgalloc_tag_get(&old->page);
-	if (!tag)
+	tag_old = pgalloc_tag_get(&old->page);
+	if (!tag_old)
+		return;
+	tag_new = pgalloc_tag_get(&new->page);
+	if (!tag_new)
 		return;
 
-	if (!get_page_tag_ref(&new->page, &ref, &handle))
+	if (!get_page_tag_ref(&old->page, &ref_old, &handle_old))
 		return;
+	if (!get_page_tag_ref(&new->page, &ref_new, &handle_new)) {
+		put_page_tag_ref(handle_old);
+		return;
+	}
+
+	/* swap tags */
+	__alloc_tag_ref_set(&ref_old, tag_new);
+	update_page_tag_ref(handle_old, &ref_old);
+	__alloc_tag_ref_set(&ref_new, tag_old);
+	update_page_tag_ref(handle_new, &ref_new);
 
-	/* Clear the old ref to the original allocation tag. */
-	clear_page_tag_ref(&old->page);
-	/* Decrement the counters of the tag on get_new_folio. */
-	alloc_tag_sub(&ref, folio_size(new));
-	__alloc_tag_ref_set(&ref, tag);
-	update_page_tag_ref(handle, &ref);
-	put_page_tag_ref(handle);
+	put_page_tag_ref(handle_old);
+	put_page_tag_ref(handle_new);
 }
 
 static void shutdown_mem_profiling(bool remove_file)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 5ed34cc963fc..245d5b416699 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -630,7 +630,15 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
 		prealloc = page_address(page);
 	}
 
-	raw_spin_lock_irqsave(&pool_lock, flags);
+	if (in_nmi()) {
+		/* We can never allocate in NMI context. */
+		WARN_ON_ONCE(can_alloc);
+		/* Best effort; bail if we fail to take the lock. */
+		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
+			goto exit;
+	} else {
+		raw_spin_lock_irqsave(&pool_lock, flags);
+	}
 	printk_deferred_enter();
 
 	/* Try to find again, to avoid concurrently inserting duplicates. */
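The stackdepot change takes pool_lock opportunistically from NMI context, where blocking on a lock the interrupted CPU may already hold would deadlock. A userspace sketch of the trylock-or-bail pattern with pthreads (save_record() and atomic_ctx are made-up stand-ins for the kernel's in_nmi() path):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static bool save_record(bool atomic_ctx)
{
	if (atomic_ctx) {
		/* Best effort: give up rather than block in NMI-like context. */
		if (pthread_mutex_trylock(&pool_lock) != 0)
			return false;
	} else {
		pthread_mutex_lock(&pool_lock);
	}

	/* ... insert into the shared pool here ... */

	pthread_mutex_unlock(&pool_lock);
	return true;
}

int main(void)
{
	printf("saved: %d\n", save_record(true));
	printf("saved: %d\n", save_record(false));
	return 0;
}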
diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c
index c14c6f8e6308..c40818ec9c18 100644
--- a/lib/stackinit_kunit.c
+++ b/lib/stackinit_kunit.c
@@ -212,6 +212,7 @@ static noinline void test_ ## name (struct kunit *test)	\
 static noinline DO_NOTHING_TYPE_ ## which(var_type)		\
 do_nothing_ ## name(var_type *ptr)				\
 {								\
+	OPTIMIZER_HIDE_VAR(ptr);				\
 	/* Will always be true, but compiler doesn't know. */	\
 	if ((unsigned long)ptr > 0x2)				\
 		return DO_NOTHING_RETURN_ ## which(ptr);	\
diff --git a/mm/debug.c b/mm/debug.c
index aa57d3ffd4ed..95b6ab809c0e 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -124,19 +124,22 @@ static void __dump_page(const struct page *page)
 {
 	struct folio *foliop, folio;
 	struct page precise;
+	unsigned long head;
 	unsigned long pfn = page_to_pfn(page);
 	unsigned long idx, nr_pages = 1;
 	int loops = 5;
 
 again:
 	memcpy(&precise, page, sizeof(*page));
-	foliop = page_folio(&precise);
-	if (foliop == (struct folio *)&precise) {
+	head = precise.compound_head;
+	if ((head & 1) == 0) {
+		foliop = (struct folio *)&precise;
 		idx = 0;
 		if (!folio_test_large(foliop))
 			goto dump;
 		foliop = (struct folio *)page;
 	} else {
+		foliop = (struct folio *)(head - 1);
 		idx = folio_page_idx(foliop, page);
 	}
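The debug.c change (like the page-flags.h one earlier) decodes page->compound_head by hand: bit 0 set marks a tail page, and clearing it recovers the head page pointer. A self-contained sketch of that tagged-pointer encoding, with struct page reduced to the one relevant field:

#include <stdio.h>

struct page {
	unsigned long compound_head;	/* bit 0 set => tail page */
};

static struct page *page_head(struct page *p)
{
	unsigned long head = p->compound_head;

	if (head & 1)			/* tail: word holds head pointer | 1 */
		return (struct page *)(head - 1);
	return p;			/* not a tail page */
}

int main(void)
{
	struct page pages[4] = { { 0 } };

	/* Mark pages 1..3 as tails of page 0, kernel-style. */
	for (int i = 1; i < 4; i++)
		pages[i].compound_head = (unsigned long)&pages[0] | 1;

	printf("head of pages[2]: index %td\n", page_head(&pages[2]) - pages);
	printf("head of pages[0]: index %td\n", page_head(&pages[0]) - pages);
	return 0;
}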
diff --git a/mm/filemap.c b/mm/filemap.c
index 7c76a123ba18..f61cf51c2238 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3501,10 +3501,10 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
 			continue;
 		if (xa_is_value(folio))
 			continue;
-		if (folio_test_locked(folio))
-			continue;
 		if (!folio_try_get(folio))
 			continue;
+		if (folio_test_locked(folio))
+			goto skip;
 		/* Has the page moved or been split? */
 		if (unlikely(folio != xas_reload(xas)))
 			goto skip;
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -52,7 +52,12 @@ static inline void sanity_check_pinned_pages(struct page **pages,
 	 */
 	for (; npages; npages--, pages++) {
 		struct page *page = *pages;
-		struct folio *folio = page_folio(page);
+		struct folio *folio;
+
+		if (!page)
+			continue;
+
+		folio = page_folio(page);
 
 		if (is_zero_page(page) ||
 		    !folio_test_anon(folio))
@@ -409,6 +414,10 @@ void unpin_user_pages(struct page **pages, unsigned long npages)
 
 	sanity_check_pinned_pages(pages, npages);
 	for (i = 0; i < npages; i += nr) {
+		if (!pages[i]) {
+			nr = 1;
+			continue;
+		}
 		folio = gup_folio_next(pages, npages, i, &nr);
 		gup_put_folio(folio, nr, FOLL_PIN);
 	}
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 50fb19ad4388..3fe77a360f1c 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -201,7 +201,7 @@ static inline void fail_non_kasan_kunit_test(void) { }
 
 #endif /* CONFIG_KUNIT */
 
-static DEFINE_SPINLOCK(report_lock);
+static DEFINE_RAW_SPINLOCK(report_lock);
 
 static void start_report(unsigned long *flags, bool sync)
 {
@@ -212,7 +212,7 @@ static void start_report(unsigned long *flags, bool sync)
 	lockdep_off();
 	/* Make sure we don't end up in loop. */
 	report_suppress_start();
-	spin_lock_irqsave(&report_lock, *flags);
+	raw_spin_lock_irqsave(&report_lock, *flags);
 	pr_err("==================================================================\n");
 }
 
@@ -222,7 +222,7 @@ static void end_report(unsigned long *flags, const void *addr, bool is_write)
 		trace_error_report_end(ERROR_DETECTOR_KASAN, (unsigned long)addr);
 	pr_err("==================================================================\n");
-	spin_unlock_irqrestore(&report_lock, *flags);
+	raw_spin_unlock_irqrestore(&report_lock, *flags);
 	if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
 		check_panic_on_warn("KASAN");
 	switch (kasan_arg_fault) {
diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h
index 0e3b82951d91..144d71b65907 100644
--- a/mm/memcontrol-v1.h
+++ b/mm/memcontrol-v1.h
@@ -38,7 +38,7 @@ void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n);
 	     iter = mem_cgroup_iter(NULL, iter, NULL))
 
 /* Whether legacy memory+swap accounting is active */
-static bool do_memsw_account(void)
+static inline bool do_memsw_account(void)
 {
 	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
 }
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index bb37cd1a51d8..04f35659717a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1080,6 +1080,10 @@ static long migrate_to_node(struct mm_struct *mm, int source, int dest,
 
 	mmap_read_lock(mm);
 	vma = find_vma(mm, 0);
+	if (unlikely(!vma)) {
+		mmap_read_unlock(mm);
+		return 0;
+	}
 
 	/*
 	 * This does not migrate the range, but isolates all pages that
diff --git a/mm/migrate.c b/mm/migrate.c
index 2ce6b4b814df..cc68583c86f9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -745,7 +745,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 		folio_set_readahead(newfolio);
 
 	folio_copy_owner(newfolio, folio);
-	pgalloc_tag_copy(newfolio, folio);
+	pgalloc_tag_swap(newfolio, folio);
 
 	mem_cgroup_migrate(folio, newfolio);
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index 386429f7db5a..d32b7e701058 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -889,6 +889,7 @@ __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 	if (get_area) {
 		addr = get_area(file, addr, len, pgoff, flags);
 	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)
+		   && !addr /* no hint */
 		   && IS_ALIGNED(len, PMD_SIZE)) {
 		/* Ensures that larger anonymous mappings are THP aligned. */
 		addr = thp_get_unmapped_area_vmflags(file, addr, len,
diff --git a/mm/readahead.c b/mm/readahead.c
index 8f1cf599b572..ea650b8b02fb 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -458,8 +458,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
 		struct file_ra_state *ra, unsigned int new_order)
 {
 	struct address_space *mapping = ractl->mapping;
-	pgoff_t start = readahead_index(ractl);
-	pgoff_t index = start;
+	pgoff_t index = readahead_index(ractl);
 	unsigned int min_order = mapping_min_folio_order(mapping);
 	pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
 	pgoff_t mark = index + ra->size - ra->async_size;
@@ -522,7 +521,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
 	if (!err)
 		return;
 fallback:
-	do_page_cache_ra(ractl, ra->size - (index - start), ra->async_size);
+	do_page_cache_ra(ractl, ra->size, ra->async_size);
 }
 
 static unsigned long ractl_max_pages(struct readahead_control *ractl,
diff --git a/mm/vma.c b/mm/vma.c
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -35,7 +35,7 @@ struct mmap_state {
 		.mm = mm_,					\
 		.vmi = vmi_,					\
 		.addr = addr_,					\
-		.end = (addr_) + len,				\
+		.end = (addr_) + (len_),			\
 		.pgoff = pgoff_,				\
 		.pglen = PHYS_PFN(len_),			\
 		.flags = flags_,				\
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7ed39d104201..f009b21705c1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4093,7 +4093,8 @@ void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
 		/* Zero out spare memory. */
 		if (want_init_on_alloc(flags))
 			memset((void *)p + size, 0, old_size - size);
-
+		kasan_poison_vmalloc(p + size, old_size - size);
+		kasan_unpoison_vmalloc(p, size, KASAN_VMALLOC_PROT_NORMAL);
 		return (void *)p;
 	}
diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile
index 5b2a6a5dd1af..812f656260fb 100644
--- a/tools/testing/selftests/damon/Makefile
+++ b/tools/testing/selftests/damon/Makefile
@@ -6,7 +6,7 @@ TEST_GEN_FILES += debugfs_target_ids_read_before_terminate_race
 TEST_GEN_FILES += debugfs_target_ids_pid_leak
 TEST_GEN_FILES += access_memory access_memory_even
 
-TEST_FILES = _chk_dependency.sh _debugfs_common.sh
+TEST_FILES = _chk_dependency.sh _debugfs_common.sh _damon_sysfs.py
 
 # functionality tests
 TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh
diff --git a/tools/testing/selftests/mm/hugetlb_dio.c b/tools/testing/selftests/mm/hugetlb_dio.c
index 432d5af15e66..db63abe5ee5e 100644
--- a/tools/testing/selftests/mm/hugetlb_dio.c
+++ b/tools/testing/selftests/mm/hugetlb_dio.c
@@ -76,19 +76,15 @@ void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off)
 	/* Get the free huge pages after unmap*/
 	free_hpage_a = get_free_hugepages();
 
+	ksft_print_msg("No. Free pages before allocation : %d\n", free_hpage_b);
+	ksft_print_msg("No. Free pages after munmap : %d\n", free_hpage_a);
+
 	/*
 	 * If the no. of free hugepages before allocation and after unmap does
 	 * not match - that means there could still be a page which is pinned.
 	 */
-	if (free_hpage_a != free_hpage_b) {
-		ksft_print_msg("No. Free pages before allocation : %d\n", free_hpage_b);
-		ksft_print_msg("No. Free pages after munmap : %d\n", free_hpage_a);
-		ksft_test_result_fail(": Huge pages not freed!\n");
-	} else {
-		ksft_print_msg("No. Free pages before allocation : %d\n", free_hpage_b);
-		ksft_print_msg("No. Free pages after munmap : %d\n", free_hpage_a);
-		ksft_test_result_pass(": Huge pages freed successfully !\n");
-	}
+	ksft_test_result(free_hpage_a == free_hpage_b,
+			 "free huge pages from %u-%u\n", start_off, end_off);
 }
 
 int main(void)
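The hugetlb_dio cleanup above folds the duplicated pass/fail branches into a single ksft_test_result(condition, fmt, ...) call, which emits "ok"/"not ok" based on the condition. A minimal harness sketch with a local stand-in for the kselftest helper (the values here are invented):

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

/* Local stand-in for the kselftest helper of the same name. */
static void ksft_test_result(bool ok, const char *fmt, ...)
{
	va_list ap;

	printf("%s ", ok ? "ok" : "not ok");
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	int free_before = 32, free_after = 32;

	/* One call replaces the duplicated pass/fail branches. */
	ksft_test_result(free_before == free_after,
			 "free huge pages from %u-%u\n", 0u, 0u);
	return 0;
}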