Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig               |   8
-rw-r--r--   lib/Kconfig.debug         |   9
-rw-r--r--   lib/Makefile              |   4
-rw-r--r--   lib/closure.c             | 205
-rw-r--r--   lib/cpumask.c             |   4
-rw-r--r--   lib/errname.c             |   1
-rw-r--r--   lib/generic-radix-tree.c  |  76
-rw-r--r--   lib/iov_iter.c            | 437
-rw-r--r--   lib/kobject_uevent.c      |   8
-rw-r--r--   lib/llist.c               |  28
-rw-r--r--   lib/lwq.c                 | 158
-rw-r--r--   lib/rcuref.c              |   2
-rw-r--r--   lib/string_helpers.c      |  10
13 files changed, 633 insertions, 317 deletions
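The headline addition is lwq, a light-weight single-linked queue (lib/lwq.c, below). A minimal usage sketch, modelled on the CONFIG_LWQ_TEST exercise code in that file: struct work_item, produce() and consume() are hypothetical names, while lwq_init(), lwq_enqueue() and lwq_dequeue() are the API this series adds in <linux/lwq.h>.

#include <linux/lwq.h>
#include <linux/wait_bit.h>
#include <linux/slab.h>

struct work_item {
        struct lwq_node node;
        int payload;
};

static struct lwq work_queue;   /* call lwq_init(&work_queue) once at startup */

/* Producer side: lock-free, safe from BH or IRQ context. */
static void produce(int payload)
{
        struct work_item *w = kmalloc(sizeof(*w), GFP_ATOMIC);

        if (!w)
                return;
        w->payload = payload;
        /* returns true if the queue was empty, i.e. a wakeup is needed */
        if (lwq_enqueue(&w->node, &work_queue))
                wake_up_var(&work_queue);
}

/* Consumer side: a dedicated thread handling one item at a time. */
static void consume(void)
{
        struct work_item *w;

        wait_var_event(&work_queue,
                       (w = lwq_dequeue(&work_queue, struct work_item, node)) != NULL);
        /* ... process w->payload ... */
        kfree(w);
}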
diff --git a/lib/Kconfig b/lib/Kconfig index c686f4adc124..2d90935d5a21 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -506,6 +506,9 @@ config ASSOCIATIVE_ARRAY for more information. +config CLOSURES + bool + config HAS_IOMEM bool depends on !NO_IOMEM @@ -729,6 +732,11 @@ config PARMAN config OBJAGG tristate "objagg" if COMPILE_TEST +config LWQ_TEST + bool "Boot-time test for lwq queuing" + help + Run boot-time test of light-weight queuing. + endmenu config GENERIC_IOREMAP diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index fa307f93fa2e..ce3a4abf40f8 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1720,6 +1720,15 @@ config DEBUG_NOTIFIERS This is a relatively cheap check but if you care about maximum performance, say N. +config DEBUG_CLOSURES + bool "Debug closures (bcache async widgits)" + depends on CLOSURES + select DEBUG_FS + help + Keeps all active closures in a linked list and provides a debugfs + interface to list them, which makes it possible to see asynchronous + operations that get stuck. + config DEBUG_MAPLE_TREE bool "Debug maple trees" depends on DEBUG_KERNEL diff --git a/lib/Makefile b/lib/Makefile index 740109b6e2c8..1b311c7fd32b 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -45,7 +45,7 @@ obj-y += lockref.o obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \ bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ list_sort.o uuid.o iov_iter.o clz_ctz.o \ - bsearch.o find_bit.o llist.o memweight.o kfifo.o \ + bsearch.o find_bit.o llist.o lwq.o memweight.o kfifo.o \ percpu-refcount.o rhashtable.o base64.o \ once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \ generic-radix-tree.o @@ -255,6 +255,8 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o +obj-$(CONFIG_CLOSURES) += closure.o + obj-$(CONFIG_DQL) += dynamic_queue_limits.o obj-$(CONFIG_GLOB) += glob.o diff --git a/lib/closure.c b/lib/closure.c new file mode 100644 index 000000000000..0855e698ced1 --- /dev/null +++ b/lib/closure.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Asynchronous refcounty things + * + * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com> + * Copyright 2012 Google, Inc. 
+ */ + +#include <linux/closure.h> +#include <linux/debugfs.h> +#include <linux/export.h> +#include <linux/rcupdate.h> +#include <linux/seq_file.h> +#include <linux/sched/debug.h> + +static inline void closure_put_after_sub(struct closure *cl, int flags) +{ + int r = flags & CLOSURE_REMAINING_MASK; + + BUG_ON(flags & CLOSURE_GUARD_MASK); + BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR)); + + if (!r) { + if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) { + atomic_set(&cl->remaining, + CLOSURE_REMAINING_INITIALIZER); + closure_queue(cl); + } else { + struct closure *parent = cl->parent; + closure_fn *destructor = cl->fn; + + closure_debug_destroy(cl); + + if (destructor) + destructor(cl); + + if (parent) + closure_put(parent); + } + } +} + +/* For clearing flags with the same atomic op as a put */ +void closure_sub(struct closure *cl, int v) +{ + closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining)); +} +EXPORT_SYMBOL(closure_sub); + +/* + * closure_put - decrement a closure's refcount + */ +void closure_put(struct closure *cl) +{ + closure_put_after_sub(cl, atomic_dec_return(&cl->remaining)); +} +EXPORT_SYMBOL(closure_put); + +/* + * closure_wake_up - wake up all closures on a wait list, without memory barrier + */ +void __closure_wake_up(struct closure_waitlist *wait_list) +{ + struct llist_node *list; + struct closure *cl, *t; + struct llist_node *reverse = NULL; + + list = llist_del_all(&wait_list->list); + + /* We first reverse the list to preserve FIFO ordering and fairness */ + reverse = llist_reverse_order(list); + + /* Then do the wakeups */ + llist_for_each_entry_safe(cl, t, reverse, list) { + closure_set_waiting(cl, 0); + closure_sub(cl, CLOSURE_WAITING + 1); + } +} +EXPORT_SYMBOL(__closure_wake_up); + +/** + * closure_wait - add a closure to a waitlist + * @waitlist: will own a ref on @cl, which will be released when + * closure_wake_up() is called on @waitlist. + * @cl: closure pointer. 
+ * + */ +bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl) +{ + if (atomic_read(&cl->remaining) & CLOSURE_WAITING) + return false; + + closure_set_waiting(cl, _RET_IP_); + atomic_add(CLOSURE_WAITING + 1, &cl->remaining); + llist_add(&cl->list, &waitlist->list); + + return true; +} +EXPORT_SYMBOL(closure_wait); + +struct closure_syncer { + struct task_struct *task; + int done; +}; + +static void closure_sync_fn(struct closure *cl) +{ + struct closure_syncer *s = cl->s; + struct task_struct *p; + + rcu_read_lock(); + p = READ_ONCE(s->task); + s->done = 1; + wake_up_process(p); + rcu_read_unlock(); +} + +void __sched __closure_sync(struct closure *cl) +{ + struct closure_syncer s = { .task = current }; + + cl->s = &s; + continue_at(cl, closure_sync_fn, NULL); + + while (1) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (s.done) + break; + schedule(); + } + + __set_current_state(TASK_RUNNING); +} +EXPORT_SYMBOL(__closure_sync); + +#ifdef CONFIG_DEBUG_CLOSURES + +static LIST_HEAD(closure_list); +static DEFINE_SPINLOCK(closure_list_lock); + +void closure_debug_create(struct closure *cl) +{ + unsigned long flags; + + BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE); + cl->magic = CLOSURE_MAGIC_ALIVE; + + spin_lock_irqsave(&closure_list_lock, flags); + list_add(&cl->all, &closure_list); + spin_unlock_irqrestore(&closure_list_lock, flags); +} +EXPORT_SYMBOL(closure_debug_create); + +void closure_debug_destroy(struct closure *cl) +{ + unsigned long flags; + + BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE); + cl->magic = CLOSURE_MAGIC_DEAD; + + spin_lock_irqsave(&closure_list_lock, flags); + list_del(&cl->all); + spin_unlock_irqrestore(&closure_list_lock, flags); +} +EXPORT_SYMBOL(closure_debug_destroy); + +static int debug_show(struct seq_file *f, void *data) +{ + struct closure *cl; + + spin_lock_irq(&closure_list_lock); + + list_for_each_entry(cl, &closure_list, all) { + int r = atomic_read(&cl->remaining); + + seq_printf(f, "%p: %pS -> %pS p %p r %i ", + cl, (void *) cl->ip, cl->fn, cl->parent, + r & CLOSURE_REMAINING_MASK); + + seq_printf(f, "%s%s\n", + test_bit(WORK_STRUCT_PENDING_BIT, + work_data_bits(&cl->work)) ? "Q" : "", + r & CLOSURE_RUNNING ? "R" : ""); + + if (r & CLOSURE_WAITING) + seq_printf(f, " W %pS\n", + (void *) cl->waiting_on); + + seq_puts(f, "\n"); + } + + spin_unlock_irq(&closure_list_lock); + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(debug); + +static int __init closure_debug_init(void) +{ + debugfs_create_file("closures", 0400, NULL, NULL, &debug_fops); + return 0; +} +late_initcall(closure_debug_init) + +#endif diff --git a/lib/cpumask.c b/lib/cpumask.c index a7fd02b5ae26..34335c1e7265 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -146,9 +146,7 @@ unsigned int cpumask_local_spread(unsigned int i, int node) /* Wrap: we always want a cpu. */ i %= num_online_cpus(); - cpu = (node == NUMA_NO_NODE) ? - cpumask_nth(i, cpu_online_mask) : - sched_numa_find_nth_cpu(cpu_online_mask, i, node); + cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node); WARN_ON(cpu >= nr_cpu_ids); return cpu; diff --git a/lib/errname.c b/lib/errname.c index 67739b174a8c..dd1b998552cd 100644 --- a/lib/errname.c +++ b/lib/errname.c @@ -228,3 +228,4 @@ const char *errname(int err) return err > 0 ? 
name + 1 : name; } +EXPORT_SYMBOL(errname); diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c index f25eb111c051..41f1bcdc4488 100644 --- a/lib/generic-radix-tree.c +++ b/lib/generic-radix-tree.c @@ -1,4 +1,5 @@ +#include <linux/atomic.h> #include <linux/export.h> #include <linux/generic-radix-tree.h> #include <linux/gfp.h> @@ -166,6 +167,10 @@ void *__genradix_iter_peek(struct genradix_iter *iter, struct genradix_root *r; struct genradix_node *n; unsigned level, i; + + if (iter->offset == SIZE_MAX) + return NULL; + restart: r = READ_ONCE(radix->root); if (!r) @@ -184,10 +189,17 @@ restart: (GENRADIX_ARY - 1); while (!n->children[i]) { + size_t objs_per_ptr = genradix_depth_size(level); + + if (iter->offset + objs_per_ptr < iter->offset) { + iter->offset = SIZE_MAX; + iter->pos = SIZE_MAX; + return NULL; + } + i++; - iter->offset = round_down(iter->offset + - genradix_depth_size(level), - genradix_depth_size(level)); + iter->offset = round_down(iter->offset + objs_per_ptr, + objs_per_ptr); iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page; if (i == GENRADIX_ARY) @@ -201,6 +213,64 @@ restart: } EXPORT_SYMBOL(__genradix_iter_peek); +void *__genradix_iter_peek_prev(struct genradix_iter *iter, + struct __genradix *radix, + size_t objs_per_page, + size_t obj_size_plus_page_remainder) +{ + struct genradix_root *r; + struct genradix_node *n; + unsigned level, i; + + if (iter->offset == SIZE_MAX) + return NULL; + +restart: + r = READ_ONCE(radix->root); + if (!r) + return NULL; + + n = genradix_root_to_node(r); + level = genradix_root_to_depth(r); + + if (ilog2(iter->offset) >= genradix_depth_shift(level)) { + iter->offset = genradix_depth_size(level); + iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page; + + iter->offset -= obj_size_plus_page_remainder; + iter->pos--; + } + + while (level) { + level--; + + i = (iter->offset >> genradix_depth_shift(level)) & + (GENRADIX_ARY - 1); + + while (!n->children[i]) { + size_t objs_per_ptr = genradix_depth_size(level); + + iter->offset = round_down(iter->offset, objs_per_ptr); + iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page; + + if (!iter->offset) + return NULL; + + iter->offset -= obj_size_plus_page_remainder; + iter->pos--; + + if (!i) + goto restart; + --i; + } + + n = n->children[i]; + } + + return &n->data[iter->offset & (PAGE_SIZE - 1)]; +} +EXPORT_SYMBOL(__genradix_iter_peek_prev); + static void genradix_free_recurse(struct genradix_node *n, unsigned level) { if (level) { diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 27234a820eeb..de7d11cf4c63 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1,5 +1,4 @@ // SPDX-License-Identifier: GPL-2.0-only -#include <crypto/hash.h> #include <linux/export.h> #include <linux/bvec.h> #include <linux/fault-inject-usercopy.h> @@ -10,192 +9,71 @@ #include <linux/vmalloc.h> #include <linux/splice.h> #include <linux/compat.h> -#include <net/checksum.h> #include <linux/scatterlist.h> #include <linux/instrumented.h> +#include <linux/iov_iter.h> -/* covers ubuf and kbuf alike */ -#define iterate_buf(i, n, base, len, off, __p, STEP) { \ - size_t __maybe_unused off = 0; \ - len = n; \ - base = __p + i->iov_offset; \ - len -= (STEP); \ - i->iov_offset += len; \ - n = len; \ -} - -/* covers iovec and kvec alike */ -#define iterate_iovec(i, n, base, len, off, __p, STEP) { \ - size_t off = 0; \ - size_t skip = i->iov_offset; \ - do { \ - len = min(n, __p->iov_len - skip); \ - if (likely(len)) { \ - base = __p->iov_base + skip; \ - len -= (STEP); \ - off += len; \ - skip += len; \ - 
n -= len; \ - if (skip < __p->iov_len) \ - break; \ - } \ - __p++; \ - skip = 0; \ - } while (n); \ - i->iov_offset = skip; \ - n = off; \ -} - -#define iterate_bvec(i, n, base, len, off, p, STEP) { \ - size_t off = 0; \ - unsigned skip = i->iov_offset; \ - while (n) { \ - unsigned offset = p->bv_offset + skip; \ - unsigned left; \ - void *kaddr = kmap_local_page(p->bv_page + \ - offset / PAGE_SIZE); \ - base = kaddr + offset % PAGE_SIZE; \ - len = min(min(n, (size_t)(p->bv_len - skip)), \ - (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \ - left = (STEP); \ - kunmap_local(kaddr); \ - len -= left; \ - off += len; \ - skip += len; \ - if (skip == p->bv_len) { \ - skip = 0; \ - p++; \ - } \ - n -= len; \ - if (left) \ - break; \ - } \ - i->iov_offset = skip; \ - n = off; \ -} - -#define iterate_xarray(i, n, base, len, __off, STEP) { \ - __label__ __out; \ - size_t __off = 0; \ - struct folio *folio; \ - loff_t start = i->xarray_start + i->iov_offset; \ - pgoff_t index = start / PAGE_SIZE; \ - XA_STATE(xas, i->xarray, index); \ - \ - len = PAGE_SIZE - offset_in_page(start); \ - rcu_read_lock(); \ - xas_for_each(&xas, folio, ULONG_MAX) { \ - unsigned left; \ - size_t offset; \ - if (xas_retry(&xas, folio)) \ - continue; \ - if (WARN_ON(xa_is_value(folio))) \ - break; \ - if (WARN_ON(folio_test_hugetlb(folio))) \ - break; \ - offset = offset_in_folio(folio, start + __off); \ - while (offset < folio_size(folio)) { \ - base = kmap_local_folio(folio, offset); \ - len = min(n, len); \ - left = (STEP); \ - kunmap_local(base); \ - len -= left; \ - __off += len; \ - n -= len; \ - if (left || n == 0) \ - goto __out; \ - offset += len; \ - len = PAGE_SIZE; \ - } \ - } \ -__out: \ - rcu_read_unlock(); \ - i->iov_offset += __off; \ - n = __off; \ -} - -#define __iterate_and_advance(i, n, base, len, off, I, K) { \ - if (unlikely(i->count < n)) \ - n = i->count; \ - if (likely(n)) { \ - if (likely(iter_is_ubuf(i))) { \ - void __user *base; \ - size_t len; \ - iterate_buf(i, n, base, len, off, \ - i->ubuf, (I)) \ - } else if (likely(iter_is_iovec(i))) { \ - const struct iovec *iov = iter_iov(i); \ - void __user *base; \ - size_t len; \ - iterate_iovec(i, n, base, len, off, \ - iov, (I)) \ - i->nr_segs -= iov - iter_iov(i); \ - i->__iov = iov; \ - } else if (iov_iter_is_bvec(i)) { \ - const struct bio_vec *bvec = i->bvec; \ - void *base; \ - size_t len; \ - iterate_bvec(i, n, base, len, off, \ - bvec, (K)) \ - i->nr_segs -= bvec - i->bvec; \ - i->bvec = bvec; \ - } else if (iov_iter_is_kvec(i)) { \ - const struct kvec *kvec = i->kvec; \ - void *base; \ - size_t len; \ - iterate_iovec(i, n, base, len, off, \ - kvec, (K)) \ - i->nr_segs -= kvec - i->kvec; \ - i->kvec = kvec; \ - } else if (iov_iter_is_xarray(i)) { \ - void *base; \ - size_t len; \ - iterate_xarray(i, n, base, len, off, \ - (K)) \ - } \ - i->count -= n; \ - } \ -} -#define iterate_and_advance(i, n, base, len, off, I, K) \ - __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0)) - -static int copyout(void __user *to, const void *from, size_t n) +static __always_inline +size_t copy_to_user_iter(void __user *iter_to, size_t progress, + size_t len, void *from, void *priv2) { if (should_fail_usercopy()) - return n; - if (access_ok(to, n)) { - instrument_copy_to_user(to, from, n); - n = raw_copy_to_user(to, from, n); + return len; + if (access_ok(iter_to, len)) { + from += progress; + instrument_copy_to_user(iter_to, from, len); + len = raw_copy_to_user(iter_to, from, len); } - return n; + return len; } -static int copyout_nofault(void __user *to, 
const void *from, size_t n) +static __always_inline +size_t copy_to_user_iter_nofault(void __user *iter_to, size_t progress, + size_t len, void *from, void *priv2) { - long res; + ssize_t res; if (should_fail_usercopy()) - return n; - - res = copy_to_user_nofault(to, from, n); + return len; - return res < 0 ? n : res; + from += progress; + res = copy_to_user_nofault(iter_to, from, len); + return res < 0 ? len : res; } -static int copyin(void *to, const void __user *from, size_t n) +static __always_inline +size_t copy_from_user_iter(void __user *iter_from, size_t progress, + size_t len, void *to, void *priv2) { - size_t res = n; + size_t res = len; if (should_fail_usercopy()) - return n; - if (access_ok(from, n)) { - instrument_copy_from_user_before(to, from, n); - res = raw_copy_from_user(to, from, n); - instrument_copy_from_user_after(to, from, n, res); + return len; + if (access_ok(iter_from, len)) { + to += progress; + instrument_copy_from_user_before(to, iter_from, len); + res = raw_copy_from_user(to, iter_from, len); + instrument_copy_from_user_after(to, iter_from, len, res); } return res; } +static __always_inline +size_t memcpy_to_iter(void *iter_to, size_t progress, + size_t len, void *from, void *priv2) +{ + memcpy(iter_to, from + progress, len); + return 0; +} + +static __always_inline +size_t memcpy_from_iter(void *iter_from, size_t progress, + size_t len, void *to, void *priv2) +{ + memcpy(to + progress, iter_from, len); + return 0; +} + /* * fault_in_iov_iter_readable - fault in iov iterator for reading * @i: iterator @@ -290,7 +168,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, .iter_type = ITER_IOVEC, .copy_mc = false, .nofault = false, - .user_backed = true, .data_source = direction, .__iov = iov, .nr_segs = nr_segs, @@ -300,36 +177,35 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, } EXPORT_SYMBOL(iov_iter_init); -static __wsum csum_and_memcpy(void *to, const void *from, size_t len, - __wsum sum, size_t off) -{ - __wsum next = csum_partial_copy_nocheck(from, to, len); - return csum_block_add(sum, next, off); -} - size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(i->data_source)) return 0; if (user_backed_iter(i)) might_fault(); - iterate_and_advance(i, bytes, base, len, off, - copyout(base, addr + off, len), - memcpy(base, addr + off, len) - ) - - return bytes; + return iterate_and_advance(i, bytes, (void *)addr, + copy_to_user_iter, memcpy_to_iter); } EXPORT_SYMBOL(_copy_to_iter); #ifdef CONFIG_ARCH_HAS_COPY_MC -static int copyout_mc(void __user *to, const void *from, size_t n) -{ - if (access_ok(to, n)) { - instrument_copy_to_user(to, from, n); - n = copy_mc_to_user((__force void *) to, from, n); +static __always_inline +size_t copy_to_user_iter_mc(void __user *iter_to, size_t progress, + size_t len, void *from, void *priv2) +{ + if (access_ok(iter_to, len)) { + from += progress; + instrument_copy_to_user(iter_to, from, len); + len = copy_mc_to_user(iter_to, from, len); } - return n; + return len; +} + +static __always_inline +size_t memcpy_to_iter_mc(void *iter_to, size_t progress, + size_t len, void *from, void *priv2) +{ + return copy_mc_to_kernel(iter_to, from + progress, len); } /** @@ -362,22 +238,35 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) return 0; if (user_backed_iter(i)) might_fault(); - __iterate_and_advance(i, bytes, base, len, off, - copyout_mc(base, addr + off, len), - copy_mc_to_kernel(base, addr + off, len) - ) - - return bytes; + return 
iterate_and_advance(i, bytes, (void *)addr, + copy_to_user_iter_mc, memcpy_to_iter_mc); } EXPORT_SYMBOL_GPL(_copy_mc_to_iter); #endif /* CONFIG_ARCH_HAS_COPY_MC */ -static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from, - size_t size) +static __always_inline +size_t memcpy_from_iter_mc(void *iter_from, size_t progress, + size_t len, void *to, void *priv2) +{ + return copy_mc_to_kernel(to + progress, iter_from, len); +} + +static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i) +{ + if (unlikely(i->count < bytes)) + bytes = i->count; + if (unlikely(!bytes)) + return 0; + return iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc); +} + +static __always_inline +size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { - if (iov_iter_is_copy_mc(i)) - return (void *)copy_mc_to_kernel(to, from, size); - return memcpy(to, from, size); + if (unlikely(iov_iter_is_copy_mc(i))) + return __copy_from_iter_mc(addr, bytes, i); + return iterate_and_advance(i, bytes, addr, + copy_from_user_iter, memcpy_from_iter); } size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) @@ -387,30 +276,44 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) if (user_backed_iter(i)) might_fault(); - iterate_and_advance(i, bytes, base, len, off, - copyin(addr + off, base, len), - memcpy_from_iter(i, addr + off, base, len) - ) - - return bytes; + return __copy_from_iter(addr, bytes, i); } EXPORT_SYMBOL(_copy_from_iter); +static __always_inline +size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress, + size_t len, void *to, void *priv2) +{ + return __copy_from_user_inatomic_nocache(to + progress, iter_from, len); +} + size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(!i->data_source)) return 0; - iterate_and_advance(i, bytes, base, len, off, - __copy_from_user_inatomic_nocache(addr + off, base, len), - memcpy(addr + off, base, len) - ) - - return bytes; + return iterate_and_advance(i, bytes, addr, + copy_from_user_iter_nocache, + memcpy_from_iter); } EXPORT_SYMBOL(_copy_from_iter_nocache); #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE +static __always_inline +size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress, + size_t len, void *to, void *priv2) +{ + return __copy_from_user_flushcache(to + progress, iter_from, len); +} + +static __always_inline +size_t memcpy_from_iter_flushcache(void *iter_from, size_t progress, + size_t len, void *to, void *priv2) +{ + memcpy_flushcache(to + progress, iter_from, len); + return 0; +} + /** * _copy_from_iter_flushcache - write destination through cpu cache * @addr: destination kernel address @@ -432,12 +335,9 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) if (WARN_ON_ONCE(!i->data_source)) return 0; - iterate_and_advance(i, bytes, base, len, off, - __copy_from_user_flushcache(addr + off, base, len), - memcpy_flushcache(addr + off, base, len) - ) - - return bytes; + return iterate_and_advance(i, bytes, addr, + copy_from_user_iter_flushcache, + memcpy_from_iter_flushcache); } EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); #endif @@ -509,10 +409,9 @@ size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t byte void *kaddr = kmap_local_page(page); size_t n = min(bytes, (size_t)PAGE_SIZE - offset); - iterate_and_advance(i, n, base, len, off, - copyout_nofault(base, kaddr + offset + off, len), - memcpy(base, kaddr + offset + off, len) - ) + n = 
iterate_and_advance(i, bytes, kaddr, + copy_to_user_iter_nofault, + memcpy_to_iter); kunmap_local(kaddr); res += n; bytes -= n; @@ -555,14 +454,25 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, } EXPORT_SYMBOL(copy_page_from_iter); -size_t iov_iter_zero(size_t bytes, struct iov_iter *i) +static __always_inline +size_t zero_to_user_iter(void __user *iter_to, size_t progress, + size_t len, void *priv, void *priv2) { - iterate_and_advance(i, bytes, base, len, count, - clear_user(base, len), - memset(base, 0, len) - ) + return clear_user(iter_to, len); +} + +static __always_inline +size_t zero_to_iter(void *iter_to, size_t progress, + size_t len, void *priv, void *priv2) +{ + memset(iter_to, 0, len); + return 0; +} - return bytes; +size_t iov_iter_zero(size_t bytes, struct iov_iter *i) +{ + return iterate_and_advance(i, bytes, NULL, + zero_to_user_iter, zero_to_iter); } EXPORT_SYMBOL(iov_iter_zero); @@ -587,10 +497,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset, } p = kmap_atomic(page) + offset; - iterate_and_advance(i, n, base, len, off, - copyin(p + off, base, len), - memcpy_from_iter(i, p + off, base, len) - ) + n = __copy_from_iter(p, n, i); kunmap_atomic(p); copied += n; offset += n; @@ -1181,78 +1088,6 @@ ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, } EXPORT_SYMBOL(iov_iter_get_pages_alloc2); -size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, - struct iov_iter *i) -{ - __wsum sum, next; - sum = *csum; - if (WARN_ON_ONCE(!i->data_source)) - return 0; - - iterate_and_advance(i, bytes, base, len, off, ({ - next = csum_and_copy_from_user(base, addr + off, len); - sum = csum_block_add(sum, next, off); - next ? 0 : len; - }), ({ - sum = csum_and_memcpy(addr + off, base, len, sum, off); - }) - ) - *csum = sum; - return bytes; -} -EXPORT_SYMBOL(csum_and_copy_from_iter); - -size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, - struct iov_iter *i) -{ - struct csum_state *csstate = _csstate; - __wsum sum, next; - - if (WARN_ON_ONCE(i->data_source)) - return 0; - if (unlikely(iov_iter_is_discard(i))) { - // can't use csum_memcpy() for that one - data is not copied - csstate->csum = csum_block_add(csstate->csum, - csum_partial(addr, bytes, 0), - csstate->off); - csstate->off += bytes; - return bytes; - } - - sum = csum_shift(csstate->csum, csstate->off); - iterate_and_advance(i, bytes, base, len, off, ({ - next = csum_and_copy_to_user(addr + off, base, len); - sum = csum_block_add(sum, next, off); - next ? 
0 : len; - }), ({ - sum = csum_and_memcpy(base, addr + off, len, sum, off); - }) - ) - csstate->csum = csum_shift(sum, csstate->off); - csstate->off += bytes; - return bytes; -} -EXPORT_SYMBOL(csum_and_copy_to_iter); - -size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, - struct iov_iter *i) -{ -#ifdef CONFIG_CRYPTO_HASH - struct ahash_request *hash = hashp; - struct scatterlist sg; - size_t copied; - - copied = copy_to_iter(addr, bytes, i); - sg_init_one(&sg, addr, copied); - ahash_request_set_crypt(hash, &sg, NULL, copied); - crypto_ahash_update(hash); - return copied; -#else - return 0; -#endif -} -EXPORT_SYMBOL(hash_and_copy_to_iter); - static int iov_npages(const struct iov_iter *i, int maxpages) { size_t skip = i->iov_offset, size = i->count; diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 7c44b7ae4c5c..fb9a2f06dd1e 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -254,10 +254,10 @@ static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem) int buffer_size = sizeof(env->buf) - env->buflen; int len; - len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size); - if (len >= buffer_size) { - pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n", - buffer_size, len); + len = strscpy(&env->buf[env->buflen], subsystem, buffer_size); + if (len < 0) { + pr_warn("%s: insufficient buffer space (%u left) for %s\n", + __func__, buffer_size, subsystem); return -ENOMEM; } diff --git a/lib/llist.c b/lib/llist.c index 6e668fa5a2c6..f21d0cfbbaaa 100644 --- a/lib/llist.c +++ b/lib/llist.c @@ -66,6 +66,34 @@ struct llist_node *llist_del_first(struct llist_head *head) EXPORT_SYMBOL_GPL(llist_del_first); /** + * llist_del_first_this - delete given entry of lock-less list if it is first + * @head: the head for your lock-less list + * @this: a list entry. + * + * If head of the list is given entry, delete and return %true else + * return %false. + * + * Multiple callers can safely call this concurrently with multiple + * llist_add() callers, providing all the callers offer a different @this. + */ +bool llist_del_first_this(struct llist_head *head, + struct llist_node *this) +{ + struct llist_node *entry, *next; + + /* acquire ensures orderig wrt try_cmpxchg() is llist_del_first() */ + entry = smp_load_acquire(&head->first); + do { + if (entry != this) + return false; + next = READ_ONCE(entry->next); + } while (!try_cmpxchg(&head->first, &entry, next)); + + return true; +} +EXPORT_SYMBOL_GPL(llist_del_first_this); + +/** * llist_reverse_order - reverse order of a llist chain * @head: first item of the list to be reversed * diff --git a/lib/lwq.c b/lib/lwq.c new file mode 100644 index 000000000000..57d080a4d53d --- /dev/null +++ b/lib/lwq.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Light-weight single-linked queue. + * + * Entries are enqueued to the head of an llist, with no blocking. + * This can happen in any context. + * + * Entries are dequeued using a spinlock to protect against multiple + * access. The llist is staged in reverse order, and refreshed + * from the llist when it exhausts. + * + * This is particularly suitable when work items are queued in BH or + * IRQ context, and where work items are handled one at a time by + * dedicated threads. 
+ */ +#include <linux/rcupdate.h> +#include <linux/lwq.h> + +struct llist_node *__lwq_dequeue(struct lwq *q) +{ + struct llist_node *this; + + if (lwq_empty(q)) + return NULL; + spin_lock(&q->lock); + this = q->ready; + if (!this && !llist_empty(&q->new)) { + /* ensure queue doesn't appear transiently lwq_empty */ + smp_store_release(&q->ready, (void *)1); + this = llist_reverse_order(llist_del_all(&q->new)); + if (!this) + q->ready = NULL; + } + if (this) + q->ready = llist_next(this); + spin_unlock(&q->lock); + return this; +} +EXPORT_SYMBOL_GPL(__lwq_dequeue); + +/** + * lwq_dequeue_all - dequeue all currently enqueued objects + * @q: the queue to dequeue from + * + * Remove and return a linked list of llist_nodes of all the objects that were + * in the queue. The first on the list will be the object that was least + * recently enqueued. + */ +struct llist_node *lwq_dequeue_all(struct lwq *q) +{ + struct llist_node *r, *t, **ep; + + if (lwq_empty(q)) + return NULL; + + spin_lock(&q->lock); + r = q->ready; + q->ready = NULL; + t = llist_del_all(&q->new); + spin_unlock(&q->lock); + ep = &r; + while (*ep) + ep = &(*ep)->next; + *ep = llist_reverse_order(t); + return r; +} +EXPORT_SYMBOL_GPL(lwq_dequeue_all); + +#if IS_ENABLED(CONFIG_LWQ_TEST) + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/wait_bit.h> +#include <linux/kthread.h> +#include <linux/delay.h> +struct tnode { + struct lwq_node n; + int i; + int c; +}; + +static int lwq_exercise(void *qv) +{ + struct lwq *q = qv; + int cnt; + struct tnode *t; + + for (cnt = 0; cnt < 10000; cnt++) { + wait_var_event(q, (t = lwq_dequeue(q, struct tnode, n)) != NULL); + t->c++; + if (lwq_enqueue(&t->n, q)) + wake_up_var(q); + } + while (!kthread_should_stop()) + schedule_timeout_idle(1); + return 0; +} + +static int lwq_test(void) +{ + int i; + struct lwq q; + struct llist_node *l, **t1, *t2; + struct tnode *t; + struct task_struct *threads[8]; + + printk(KERN_INFO "testing lwq....\n"); + lwq_init(&q); + printk(KERN_INFO " lwq: run some threads\n"); + for (i = 0; i < ARRAY_SIZE(threads); i++) + threads[i] = kthread_run(lwq_exercise, &q, "lwq-test-%d", i); + for (i = 0; i < 100; i++) { + t = kmalloc(sizeof(*t), GFP_KERNEL); + if (!t) + break; + t->i = i; + t->c = 0; + if (lwq_enqueue(&t->n, &q)) + wake_up_var(&q); + } + /* wait for threads to exit */ + for (i = 0; i < ARRAY_SIZE(threads); i++) + if (!IS_ERR_OR_NULL(threads[i])) + kthread_stop(threads[i]); + printk(KERN_INFO " lwq: dequeue first 50:"); + for (i = 0; i < 50 ; i++) { + if (i && (i % 10) == 0) { + printk(KERN_CONT "\n"); + printk(KERN_INFO " lwq: ... "); + } + t = lwq_dequeue(&q, struct tnode, n); + if (t) + printk(KERN_CONT " %d(%d)", t->i, t->c); + kfree(t); + } + printk(KERN_CONT "\n"); + l = lwq_dequeue_all(&q); + printk(KERN_INFO " lwq: delete the multiples of 3 (test lwq_for_each_safe())\n"); + lwq_for_each_safe(t, t1, t2, &l, n) { + if ((t->i % 3) == 0) { + t->i = -1; + kfree(t); + t = NULL; + } + } + if (l) + lwq_enqueue_batch(l, &q); + printk(KERN_INFO " lwq: dequeue remaining:"); + while ((t = lwq_dequeue(&q, struct tnode, n)) != NULL) { + printk(KERN_CONT " %d", t->i); + kfree(t); + } + printk(KERN_CONT "\n"); + return 0; +} + +module_init(lwq_test); +#endif /* CONFIG_LWQ_TEST*/ diff --git a/lib/rcuref.c b/lib/rcuref.c index 5ec00a4a64d1..97f300eca927 100644 --- a/lib/rcuref.c +++ b/lib/rcuref.c @@ -248,7 +248,7 @@ bool rcuref_put_slowpath(rcuref_t *ref) * require a retry. If this fails the caller is not * allowed to deconstruct the object. 
*/ - if (atomic_cmpxchg_release(&ref->refcnt, RCUREF_NOREF, RCUREF_DEAD) != RCUREF_NOREF) + if (!atomic_try_cmpxchg_release(&ref->refcnt, &cnt, RCUREF_DEAD)) return false; /* diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 9982344cca34..7713f73e66b0 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c @@ -31,9 +31,11 @@ * giving the size in the required units. @buf should have room for * at least 9 bytes and will always be zero terminated. * + * Return value: number of characters of output that would have been written + * (which may be greater than len, if output was truncated). */ -void string_get_size(u64 size, u64 blk_size, const enum string_size_units units, - char *buf, int len) +int string_get_size(u64 size, u64 blk_size, const enum string_size_units units, + char *buf, int len) { static const char *const units_10[] = { "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB" @@ -126,8 +128,8 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units, else unit = units_str[units][i]; - snprintf(buf, len, "%u%s %s", (u32)size, - tmp, unit); + return snprintf(buf, len, "%u%s %s", (u32)size, + tmp, unit); } EXPORT_SYMBOL(string_get_size); |
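For reference, the wait/wake pattern that lib/closure.c implements — a hedged sketch, assuming the closure_init_stack(), closure_sync() and closure_wake_up() wrappers from <linux/closure.h> (the header is not part of this diff); my_waitlist and the two functions are hypothetical.

#include <linux/closure.h>

static struct closure_waitlist my_waitlist;

static void wait_for_event(void)
{
        struct closure cl;

        closure_init_stack(&cl);
        /* closure_wait() takes a ref that the waitlist holds until wakeup */
        if (closure_wait(&my_waitlist, &cl))
                closure_sync(&cl);      /* sleep until that ref is dropped */
}

static void signal_event(void)
{
        /* wakes every waiter, FIFO order per __closure_wake_up() above */
        closure_wake_up(&my_waitlist);
}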
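The new llist_del_first_this() exists so that multiple consumers can each try to remove only their own node, concurrently with llist_add() callers. A hedged sketch of that contract; struct worker and the idle-list naming are hypothetical, and the pattern mirrors the kernel-doc comment in lib/llist.c above.

#include <linux/llist.h>

struct worker {
        struct llist_node idle;
};

static LLIST_HEAD(idle_workers);

/* Park: plain lock-free llist_add(), any context. */
static void worker_park(struct worker *w)
{
        llist_add(&w->idle, &idle_workers);
}

/*
 * Unpark: succeeds only if @w is still at the head. Because every
 * caller passes a different node, concurrent unparkers never race
 * over the same deletion.
 */
static bool worker_unpark(struct worker *w)
{
        return llist_del_first_this(&idle_workers, &w->idle);
}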
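lib/iov_iter.c is converted from the old iterate_and_advance() macros to step functions with a common signature. A hedged sketch of a caller-supplied step pair in the new style: iterate_and_advance() is assumed from the new <linux/iov_iter.h> (that header is not in this hunk), and the byte-counting helpers are hypothetical. The framework hands kernel-mapped spans to the step and user spans to the ustep; each returns the number of bytes it could NOT process, 0 on full success.

#include <linux/iov_iter.h>
#include <linux/uaccess.h>
#include <linux/minmax.h>

struct byte_count {
        unsigned char c;
        size_t total;
};

/* Kernel-mapped span: scan it directly. */
static __always_inline
size_t count_byte_step(void *iter_from, size_t progress,
                       size_t len, void *priv, void *priv2)
{
        struct byte_count *bc = priv;
        size_t k;

        for (k = 0; k < len; k++)
                if (((unsigned char *)iter_from)[k] == bc->c)
                        bc->total++;
        return 0;
}

/* User span: bounce through a small kernel buffer; may stop short. */
static __always_inline
size_t count_byte_ustep(void __user *iter_from, size_t progress,
                        size_t len, void *priv, void *priv2)
{
        unsigned char buf[64];
        size_t done = 0;

        while (done < len) {
                size_t n = min(len - done, sizeof(buf));

                if (copy_from_user(buf, iter_from + done, n))
                        break;
                count_byte_step(buf, 0, n, priv, priv2);
                done += n;
        }
        return len - done;
}

static size_t iov_iter_count_byte(struct byte_count *bc, size_t bytes,
                                  struct iov_iter *i)
{
        return iterate_and_advance(i, bytes, bc,
                                   count_byte_ustep, count_byte_step);
}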
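Finally, string_get_size() now returns the snprintf()-style would-be length, so callers can detect truncation. A brief hedged sketch; the function and buffer names are arbitrary.

#include <linux/string_helpers.h>
#include <linux/printk.h>

static void report_size(u64 blocks)
{
        char buf[16];
        int ret = string_get_size(blocks, 512, STRING_UNITS_2,
                                  buf, sizeof(buf));

        if (ret >= sizeof(buf))         /* snprintf() convention: truncated */
                pr_warn("size string truncated: %s\n", buf);
}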