Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig            |   3
-rw-r--r-- | lib/Makefile           |   2
-rw-r--r-- | lib/dma-noop.c         |  21
-rw-r--r-- | lib/dma-virt.c         |  12
-rw-r--r-- | lib/errseq.c           | 208
-rw-r--r-- | lib/flex_proportions.c |   6
-rw-r--r-- | lib/iov_iter.c         | 116
-rw-r--r-- | lib/percpu_counter.c   |   4
-rw-r--r-- | lib/strnlen_user.c     |  34
-rw-r--r-- | lib/usercopy.c         |  10
-rw-r--r-- | lib/vsprintf.c         | 136
11 files changed, 456 insertions, 96 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index d2fd2623721e..6762529ad9e4 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -556,6 +556,9 @@ config ARCH_HAS_SG_CHAIN
 config ARCH_HAS_PMEM_API
 	bool
 
+config ARCH_HAS_UACCESS_FLUSHCACHE
+	bool
+
 config ARCH_HAS_MMIO_FLUSH
 	bool
diff --git a/lib/Makefile b/lib/Makefile
index 7fb6ab799b8e..5a008329324e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -38,7 +38,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-	 once.o refcount.o usercopy.o
+	 once.o refcount.o usercopy.o errseq.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
diff --git a/lib/dma-noop.c b/lib/dma-noop.c
index de26c8b68f34..acc4190e2731 100644
--- a/lib/dma-noop.c
+++ b/lib/dma-noop.c
@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
+#include <linux/pfn.h>
 
 static void *dma_noop_alloc(struct device *dev, size_t size,
 			    dma_addr_t *dma_handle, gfp_t gfp,
@@ -16,7 +17,8 @@ static void *dma_noop_alloc(struct device *dev, size_t size,
 
 	ret = (void *)__get_free_pages(gfp, get_order(size));
 	if (ret)
-		*dma_handle = virt_to_phys(ret);
+		*dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
+
 	return ret;
 }
 
@@ -32,7 +34,7 @@ static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
 				      enum dma_data_direction dir,
 				      unsigned long attrs)
 {
-	return page_to_phys(page) + offset;
+	return page_to_phys(page) + offset - PFN_PHYS(dev->dma_pfn_offset);
 }
 
 static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
@@ -43,34 +45,23 @@ static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nent
 	struct scatterlist *sg;
 
 	for_each_sg(sgl, sg, nents, i) {
+		dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset);
 		void *va;
 
 		BUG_ON(!sg_page(sg));
 		va = sg_virt(sg);
-		sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
+		sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va) - offset;
 		sg_dma_len(sg) = sg->length;
 	}
 
 	return nents;
 }
 
-static int dma_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-static int dma_noop_supported(struct device *dev, u64 mask)
-{
-	return 1;
-}
-
 const struct dma_map_ops dma_noop_ops = {
 	.alloc = dma_noop_alloc,
 	.free = dma_noop_free,
 	.map_page = dma_noop_map_page,
 	.map_sg = dma_noop_map_sg,
-	.mapping_error = dma_noop_mapping_error,
-	.dma_supported = dma_noop_supported,
 };
 
 EXPORT_SYMBOL(dma_noop_ops);
diff --git a/lib/dma-virt.c b/lib/dma-virt.c
index dcd4df1f7174..5c4f11329721 100644
--- a/lib/dma-virt.c
+++ b/lib/dma-virt.c
@@ -51,22 +51,10 @@ static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
 	return nents;
 }
 
-static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return false;
-}
-
-static int dma_virt_supported(struct device *dev, u64 mask)
-{
-	return true;
-}
-
 const struct dma_map_ops dma_virt_ops = {
 	.alloc = dma_virt_alloc,
 	.free = dma_virt_free,
 	.map_page = dma_virt_map_page,
 	.map_sg = dma_virt_map_sg,
-	.mapping_error = dma_virt_mapping_error,
-	.dma_supported = dma_virt_supported,
 };
 EXPORT_SYMBOL(dma_virt_ops);
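All three dma-noop changes apply the same translation: the CPU physical address is lowered by PFN_PHYS(dev->dma_pfn_offset) to produce the address the device actually decodes. A worked example with made-up numbers (device and addresses are purely illustrative):

	/*
	 * A device whose view of RAM is shifted down by 2 GiB has
	 * dev->dma_pfn_offset = PHYS_PFN(0x80000000).  Then:
	 *
	 *	phys = virt_to_phys(buf);			/* e.g. 0x90000000 */
	 *	dma  = phys - PFN_PHYS(dev->dma_pfn_offset);	/*      0x10000000 */
	 *
	 * 0x10000000 is what the device must be handed to reach buf --
	 * exactly the subtraction dma_noop_alloc(), dma_noop_map_page()
	 * and dma_noop_map_sg() now perform.
	 */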
diff --git a/lib/errseq.c b/lib/errseq.c
new file mode 100644
index 000000000000..841fa24e6e00
--- /dev/null
+++ b/lib/errseq.c
@@ -0,0 +1,208 @@
+#include <linux/err.h>
+#include <linux/bug.h>
+#include <linux/atomic.h>
+#include <linux/errseq.h>
+
+/*
+ * An errseq_t is a way of recording errors in one place, and allowing any
+ * number of "subscribers" to tell whether it has changed since a previous
+ * point where it was sampled.
+ *
+ * It's implemented as an unsigned 32-bit value. The low order bits are
+ * designated to hold an error code (between 0 and -MAX_ERRNO). The upper bits
+ * are used as a counter. This is done with atomics instead of locking so that
+ * these functions can be called from any context.
+ *
+ * The general idea is for consumers to sample an errseq_t value. That value
+ * can later be used to tell whether any new errors have occurred since that
+ * sampling was done.
+ *
+ * Note that there is a risk of collisions if new errors are being recorded
+ * frequently, since we have so few bits to use as a counter.
+ *
+ * To mitigate this, one bit is used as a flag to tell whether the value has
+ * been sampled since a new value was recorded. That allows us to avoid bumping
+ * the counter if no one has sampled it since the last time an error was
+ * recorded.
+ *
+ * A new errseq_t should always be zeroed out. A errseq_t value of all zeroes
+ * is the special (but common) case where there has never been an error. An all
+ * zero value thus serves as the "epoch" if one wishes to know whether there
+ * has ever been an error set since it was first initialized.
+ */
+
+/* The low bits are designated for error code (max of MAX_ERRNO) */
+#define ERRSEQ_SHIFT		ilog2(MAX_ERRNO + 1)
+
+/* This bit is used as a flag to indicate whether the value has been seen */
+#define ERRSEQ_SEEN		(1 << ERRSEQ_SHIFT)
+
+/* The lowest bit of the counter */
+#define ERRSEQ_CTR_INC		(1 << (ERRSEQ_SHIFT + 1))
+
+/**
+ * __errseq_set - set a errseq_t for later reporting
+ * @eseq: errseq_t field that should be set
+ * @err: error to set
+ *
+ * This function sets the error in *eseq, and increments the sequence counter
+ * if the last sequence was sampled at some point in the past.
+ *
+ * Any error set will always overwrite an existing error.
+ *
+ * Most callers will want to use the errseq_set inline wrapper to efficiently
+ * handle the common case where err is 0.
+ *
+ * We do return an errseq_t here, primarily for debugging purposes. The return
+ * value should not be used as a previously sampled value in later calls as it
+ * will not have the SEEN flag set.
+ */
+errseq_t __errseq_set(errseq_t *eseq, int err)
+{
+	errseq_t cur, old;
+
+	/* MAX_ERRNO must be able to serve as a mask */
+	BUILD_BUG_ON_NOT_POWER_OF_2(MAX_ERRNO + 1);
+
+	/*
+	 * Ensure the error code actually fits where we want it to go. If it
+	 * doesn't then just throw a warning and don't record anything. We
+	 * also don't accept zero here as that would effectively clear a
+	 * previous error.
+	 */
+	old = READ_ONCE(*eseq);
+
+	if (WARN(unlikely(err == 0 || (unsigned int)-err > MAX_ERRNO),
+				"err = %d\n", err))
+		return old;
+
+	for (;;) {
+		errseq_t new;
+
+		/* Clear out error bits and set new error */
+		new = (old & ~(MAX_ERRNO|ERRSEQ_SEEN)) | -err;
+
+		/* Only increment if someone has looked at it */
+		if (old & ERRSEQ_SEEN)
+			new += ERRSEQ_CTR_INC;
+
+		/* If there would be no change, then call it done */
+		if (new == old) {
+			cur = new;
+			break;
+		}
+
+		/* Try to swap the new value into place */
+		cur = cmpxchg(eseq, old, new);
+
+		/*
+		 * Call it success if we did the swap or someone else beat us
+		 * to it for the same value.
+		 */
+		if (likely(cur == old || cur == new))
+			break;
+
+		/* Raced with an update, try again */
+		old = cur;
+	}
+	return cur;
+}
+EXPORT_SYMBOL(__errseq_set);
+
+/**
+ * errseq_sample - grab current errseq_t value
+ * @eseq: pointer to errseq_t to be sampled
+ *
+ * This function allows callers to sample an errseq_t value, marking it as
+ * "seen" if required.
+ */
+errseq_t errseq_sample(errseq_t *eseq)
+{
+	errseq_t old = READ_ONCE(*eseq);
+	errseq_t new = old;
+
+	/*
+	 * For the common case of no errors ever having been set, we can skip
+	 * marking the SEEN bit. Once an error has been set, the value will
+	 * never go back to zero.
+	 */
+	if (old != 0) {
+		new |= ERRSEQ_SEEN;
+		if (old != new)
+			cmpxchg(eseq, old, new);
+	}
+	return new;
+}
+EXPORT_SYMBOL(errseq_sample);
+
+/**
+ * errseq_check - has an error occurred since a particular sample point?
+ * @eseq: pointer to errseq_t value to be checked
+ * @since: previously-sampled errseq_t from which to check
+ *
+ * Grab the value that eseq points to, and see if it has changed "since"
+ * the given value was sampled. The "since" value is not advanced, so there
+ * is no need to mark the value as seen.
+ *
+ * Returns the latest error set in the errseq_t or 0 if it hasn't changed.
+ */
+int errseq_check(errseq_t *eseq, errseq_t since)
+{
+	errseq_t cur = READ_ONCE(*eseq);
+
+	if (likely(cur == since))
+		return 0;
+	return -(cur & MAX_ERRNO);
+}
+EXPORT_SYMBOL(errseq_check);
+
+/**
+ * errseq_check_and_advance - check an errseq_t and advance to current value
+ * @eseq: pointer to value being checked and reported
+ * @since: pointer to previously-sampled errseq_t to check against and advance
+ *
+ * Grab the eseq value, and see whether it matches the value that "since"
+ * points to. If it does, then just return 0.
+ *
+ * If it doesn't, then the value has changed. Set the "seen" flag, and try to
+ * swap it into place as the new eseq value. Then, set that value as the new
+ * "since" value, and return whatever the error portion is set to.
+ *
+ * Note that no locking is provided here for concurrent updates to the "since"
+ * value. The caller must provide that if necessary. Because of this, callers
+ * may want to do a lockless errseq_check before taking the lock and calling
+ * this.
+ */
+int errseq_check_and_advance(errseq_t *eseq, errseq_t *since)
+{
+	int err = 0;
+	errseq_t old, new;
+
+	/*
+	 * Most callers will want to use the inline wrapper to check this,
+	 * so that the common case of no error is handled without needing
+	 * to take the lock that protects the "since" value.
+	 */
+	old = READ_ONCE(*eseq);
+	if (old != *since) {
+		/*
+		 * Set the flag and try to swap it into place if it has
+		 * changed.
+		 *
+		 * We don't care about the outcome of the swap here. If the
+		 * swap doesn't occur, then it has either been updated by a
+		 * writer who is altering the value in some way (updating
+		 * counter or resetting the error), or another reader who is
+		 * just setting the "seen" flag. Either outcome is OK, and we
+		 * can advance "since" and return an error based on what we
+		 * have.
+		 */
+		new = old | ERRSEQ_SEEN;
+		if (new != old)
+			cmpxchg(eseq, old, new);
+		*since = new;
+		err = -(new & MAX_ERRNO);
+	}
+	return err;
+}
+EXPORT_SYMBOL(errseq_check_and_advance);
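With the kernel's MAX_ERRNO of 4095, ERRSEQ_SHIFT evaluates to 12: bits 0-11 hold the negated error code, bit 12 (0x1000) is the SEEN flag, and the counter advances in steps of ERRSEQ_CTR_INC (0x2000). A minimal usage sketch for the API added above; errseq_set() is the inline wrapper mentioned in the __errseq_set() kerneldoc, and the surrounding structure is made up for illustration:

	#include <linux/errseq.h>

	struct my_mapping {
		errseq_t wb_err;	/* shared error record; starts at 0 */
	};

	/* Writer side: record an I/O error.  Safe from any context. */
	static void record_io_error(struct my_mapping *m, int err)
	{
		errseq_set(&m->wb_err, err);	/* wrapper no-ops when err == 0 */
	}

	/* Reader side: report anything that failed since *since was taken. */
	static int check_err(struct my_mapping *m, errseq_t *since)
	{
		/* lockless fast path first, as the kerneldoc recommends */
		if (likely(!errseq_check(&m->wb_err, *since)))
			return 0;
		return errseq_check_and_advance(&m->wb_err, since);
	}

A new subscriber would initialize its cursor with errseq_sample(&m->wb_err), so that only errors recorded after that point are reported.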
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index a71cf1bdd4c9..2cc1f94e03a1 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -207,7 +207,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 		if (val < (nr_cpu_ids * PROP_BATCH))
 			val = percpu_counter_sum(&pl->events);
 
-		__percpu_counter_add(&pl->events,
+		percpu_counter_add_batch(&pl->events,
 			-val + (val >> (period-pl->period)), PROP_BATCH);
 	} else
 		percpu_counter_set(&pl->events, 0);
@@ -219,7 +219,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
 {
 	fprop_reflect_period_percpu(p, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
+	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
 	percpu_counter_add(&p->events, 1);
 }
 
@@ -267,6 +267,6 @@ void __fprop_inc_percpu_max(struct fprop_global *p,
 		return;
 	} else
 		fprop_reflect_period_percpu(p, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
+	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
 	percpu_counter_add(&p->events, 1);
 }
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f835964c9485..52c8dd6d8e82 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -130,6 +130,24 @@
 	}						\
 }
 
+static int copyout(void __user *to, const void *from, size_t n)
+{
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		kasan_check_read(from, n);
+		n = raw_copy_to_user(to, from, n);
+	}
+	return n;
+}
+
+static int copyin(void *to, const void __user *from, size_t n)
+{
+	if (access_ok(VERIFY_READ, from, n)) {
+		kasan_check_write(to, n);
+		n = raw_copy_from_user(to, from, n);
+	}
+	return n;
+}
+
 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
@@ -144,6 +162,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 	if (unlikely(!bytes))
 		return 0;
 
+	might_fault();
 	wanted = bytes;
 	iov = i->iov;
 	skip = i->iov_offset;
@@ -155,7 +174,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 	from = kaddr + offset;
 
 	/* first chunk, usually the only one */
-	left = __copy_to_user_inatomic(buf, from, copy);
+	left = copyout(buf, from, copy);
 	copy -= left;
 	skip += copy;
 	from += copy;
@@ -165,7 +184,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 		iov++;
 		buf = iov->iov_base;
 		copy = min(bytes, iov->iov_len);
-		left = __copy_to_user_inatomic(buf, from, copy);
+		left = copyout(buf, from, copy);
 		copy -= left;
 		skip = copy;
 		from += copy;
@@ -184,7 +203,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 
 	kaddr = kmap(page);
 	from = kaddr + offset;
-	left = __copy_to_user(buf, from, copy);
+	left = copyout(buf, from, copy);
 	copy -= left;
 	skip += copy;
 	from += copy;
@@ -193,7 +212,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 		iov++;
 		buf = iov->iov_base;
 		copy = min(bytes, iov->iov_len);
-		left = __copy_to_user(buf, from, copy);
+		left = copyout(buf, from, copy);
 		copy -= left;
 		skip = copy;
 		from += copy;
@@ -227,6 +246,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 	if (unlikely(!bytes))
 		return 0;
 
+	might_fault();
 	wanted = bytes;
 	iov = i->iov;
 	skip = i->iov_offset;
@@ -238,7 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 	to = kaddr + offset;
 
 	/* first chunk, usually the only one */
-	left = __copy_from_user_inatomic(to, buf, copy);
+	left = copyin(to, buf, copy);
 	copy -= left;
 	skip += copy;
 	to += copy;
@@ -248,7 +268,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 		iov++;
 		buf = iov->iov_base;
 		copy = min(bytes, iov->iov_len);
-		left = __copy_from_user_inatomic(to, buf, copy);
+		left = copyin(to, buf, copy);
 		copy -= left;
 		skip = copy;
 		to += copy;
@@ -267,7 +287,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 
 	kaddr = kmap(page);
 	to = kaddr + offset;
-	left = __copy_from_user(to, buf, copy);
+	left = copyin(to, buf, copy);
 	copy -= left;
 	skip += copy;
 	to += copy;
@@ -276,7 +296,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 		iov++;
 		buf = iov->iov_base;
 		copy = min(bytes, iov->iov_len);
-		left = __copy_from_user(to, buf, copy);
+		left = copyin(to, buf, copy);
 		copy -= left;
 		skip = copy;
 		to += copy;
@@ -535,14 +555,15 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
 	return bytes;
 }
 
-size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
 	const char *from = addr;
 	if (unlikely(i->type & ITER_PIPE))
 		return copy_pipe_to_iter(addr, bytes, i);
+	if (iter_is_iovec(i))
+		might_fault();
 	iterate_and_advance(i, bytes, v,
-		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
-			       v.iov_len),
+		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
 		memcpy_to_page(v.bv_page, v.bv_offset,
 			       (from += v.bv_len) - v.bv_len, v.bv_len),
 		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
@@ -550,18 +571,19 @@ size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 
 	return bytes;
 }
-EXPORT_SYMBOL(copy_to_iter);
+EXPORT_SYMBOL(_copy_to_iter);
 
-size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
 	if (unlikely(i->type & ITER_PIPE)) {
 		WARN_ON(1);
 		return 0;
 	}
+	if (iter_is_iovec(i))
+		might_fault();
 	iterate_and_advance(i, bytes, v,
-		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
-				 v.iov_len),
+		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 				 v.bv_offset, v.bv_len),
 		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
@@ -569,9 +591,9 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 
 	return bytes;
 }
-EXPORT_SYMBOL(copy_from_iter);
+EXPORT_SYMBOL(_copy_from_iter);
 
-bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
 	if (unlikely(i->type & ITER_PIPE)) {
@@ -581,8 +603,10 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 	if (unlikely(i->count < bytes))
 		return false;
 
+	if (iter_is_iovec(i))
+		might_fault();
 	iterate_all_kinds(i, bytes, v, ({
-		if (__copy_from_user((to += v.iov_len) - v.iov_len,
+		if (copyin((to += v.iov_len) - v.iov_len,
 				      v.iov_base, v.iov_len))
 			return false;
 		0;}),
@@ -594,9 +618,9 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 	iov_iter_advance(i, bytes);
 	return true;
 }
-EXPORT_SYMBOL(copy_from_iter_full);
+EXPORT_SYMBOL(_copy_from_iter_full);
 
-size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
 	if (unlikely(i->type & ITER_PIPE)) {
@@ -613,9 +637,31 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 
 	return bytes;
 }
-EXPORT_SYMBOL(copy_from_iter_nocache);
+EXPORT_SYMBOL(_copy_from_iter_nocache);
 
-bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+{
+	char *to = addr;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return 0;
+	}
+	iterate_and_advance(i, bytes, v,
+		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
+					 v.iov_base, v.iov_len),
+		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len),
+		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
+			v.iov_len)
+	)
+
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
+#endif
+
+bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
 	if (unlikely(i->type & ITER_PIPE)) {
@@ -637,11 +683,22 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 	iov_iter_advance(i, bytes);
 	return true;
 }
-EXPORT_SYMBOL(copy_from_iter_full_nocache);
+EXPORT_SYMBOL(_copy_from_iter_full_nocache);
+
+static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
+{
+	size_t v = n + offset;
+	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page))))
+		return true;
+	WARN_ON(1);
+	return false;
+}
 
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
+	if (unlikely(!page_copy_sane(page, offset, bytes)))
+		return 0;
 	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 		void *kaddr = kmap_atomic(page);
 		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
@@ -657,13 +714,15 @@ EXPORT_SYMBOL(copy_page_to_iter);
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
+	if (unlikely(!page_copy_sane(page, offset, bytes)))
+		return 0;
 	if (unlikely(i->type & ITER_PIPE)) {
 		WARN_ON(1);
 		return 0;
 	}
 	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 		void *kaddr = kmap_atomic(page);
-		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
+		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
 		kunmap_atomic(kaddr);
 		return wanted;
 	} else
@@ -700,7 +759,7 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 	if (unlikely(i->type & ITER_PIPE))
 		return pipe_zero(bytes, i);
 	iterate_and_advance(i, bytes, v,
-		__clear_user(v.iov_base, v.iov_len),
+		clear_user(v.iov_base, v.iov_len),
 		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
 		memset(v.iov_base, 0, v.iov_len)
 	)
@@ -713,14 +772,17 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes)
 {
 	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+	if (unlikely(!page_copy_sane(page, offset, bytes))) {
+		kunmap_atomic(kaddr);
+		return 0;
+	}
 	if (unlikely(i->type & ITER_PIPE)) {
 		kunmap_atomic(kaddr);
 		WARN_ON(1);
 		return 0;
 	}
 	iterate_all_kinds(i, bytes, v,
-		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
-					  v.iov_base, v.iov_len),
+		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
 		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
 				 v.bv_offset, v.bv_len),
 		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
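Two notes on the iov_iter rework. First, every user copy now funnels through copyout()/copyin(), picking up the access_ok() check and the KASAN annotation that the old __copy_*_user(_inatomic) calls skipped. Second, the unprefixed copy_to_iter()/copy_from_iter() names now belong to header-side inline wrappers that are not visible in this diff; the following is only a plausible sketch of their shape:

	/* Hypothetical shape of the new inline wrapper -- not part of this diff */
	static __always_inline size_t copy_from_iter(void *addr, size_t bytes,
						     struct iov_iter *i)
	{
		/* e.g. a cheap sanity check on the kernel-side object ... */
		return _copy_from_iter(addr, bytes, i);	/* ... then the real work */
	}

The new page_copy_sane() check also deserves a number: with offset = 16 and n = SIZE_MAX - 8, the sum v = n + offset wraps around to 7, so a naive "v <= page size" test alone would pass; requiring n <= v as well fails exactly when the addition wrapped.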
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 9c21000df0b5..8ee7e5ec21be 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,7 +72,7 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
-void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
 
@@ -89,7 +89,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	}
 	preempt_enable();
 }
-EXPORT_SYMBOL(__percpu_counter_add);
+EXPORT_SYMBOL(percpu_counter_add_batch);
 
 /*
  * Add up all the per-cpu counts, return the result. This is a more accurate
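percpu_counter_add_batch() is a pure rename of __percpu_counter_add(); the semantics are unchanged: deltas accumulate in a per-CPU slot and are folded into the global count once their magnitude reaches the given batch. A short usage sketch (the counter and batch size are illustrative):

	#include <linux/percpu_counter.h>

	static struct percpu_counter nr_events;

	static int __init events_init(void)
	{
		/* start at 0; GFP_KERNEL covers the per-CPU allocation */
		return percpu_counter_init(&nr_events, 0, GFP_KERNEL);
	}

	static void note_event(void)
	{
		/* cheap per-CPU increment; global count touched once per 64 */
		percpu_counter_add_batch(&nr_events, 1, 64);
	}

	static s64 events_total(void)
	{
		return percpu_counter_sum(&nr_events);	/* exact but slower */
	}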
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 8e105ed4df12..a5f567747ced 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -121,37 +121,3 @@ long strnlen_user(const char __user *str, long count)
 	return 0;
 }
 EXPORT_SYMBOL(strnlen_user);
-
-/**
- * strlen_user: - Get the size of a user string INCLUDING final NUL.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-long strlen_user(const char __user *str)
-{
-	unsigned long max_addr, src_addr;
-
-	max_addr = user_addr_max();
-	src_addr = (unsigned long)str;
-	if (likely(src_addr < max_addr)) {
-		unsigned long max = max_addr - src_addr;
-		long retval;
-
-		user_access_begin();
-		retval = do_strnlen_user(str, ~0ul, max);
-		user_access_end();
-		return retval;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(strlen_user);
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 1b6010a3beb8..f5d9f08ee032 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -6,8 +6,11 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	unsigned long res = n;
-	if (likely(access_ok(VERIFY_READ, from, n)))
+	might_fault();
+	if (likely(access_ok(VERIFY_READ, from, n))) {
+		kasan_check_write(to, n);
 		res = raw_copy_from_user(to, from, n);
+	}
 	if (unlikely(res))
 		memset(to + (n - res), 0, res);
 	return res;
@@ -18,8 +21,11 @@ EXPORT_SYMBOL(_copy_from_user);
 #ifndef INLINE_COPY_TO_USER
 unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n)
 {
-	if (likely(access_ok(VERIFY_WRITE, to, n)))
+	might_fault();
+	if (likely(access_ok(VERIFY_WRITE, to, n))) {
+		kasan_check_read(from, n);
 		n = raw_copy_to_user(to, from, n);
+	}
 	return n;
 }
 EXPORT_SYMBOL(_copy_to_user);
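The usercopy.c hunks harden the out-of-line paths without changing their contract: might_fault() documents that these may sleep, kasan_check_write()/kasan_check_read() tell KASAN about the kernel-side buffer, and _copy_from_user() still zero-fills whatever could not be copied. That last guarantee, with illustrative numbers (user_ptr stands in for any userspace pointer):

	char buf[64];
	unsigned long left;

	/* Suppose the fault hits after 40 of 64 bytes were copied in. */
	left = _copy_from_user(buf, user_ptr, sizeof(buf));

	/*
	 * left == 24, and buf[40..63] has been memset() to zero, so a
	 * short copy never leaks stale kernel memory to the caller.
	 */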
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 9f37d6208e99..86c3385b9eb3 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -31,6 +31,7 @@
 #include <linux/dcache.h>
 #include <linux/cred.h>
 #include <linux/uuid.h>
+#include <linux/of.h>
 #include <net/addrconf.h>
 #ifdef CONFIG_BLOCK
 #include <linux/blkdev.h>
@@ -1470,6 +1471,126 @@ char *flags_string(char *buf, char *end, void *flags_ptr, const char *fmt)
 	return format_flags(buf, end, flags, names);
 }
 
+static const char *device_node_name_for_depth(const struct device_node *np, int depth)
+{
+	for ( ; np && depth; depth--)
+		np = np->parent;
+
+	return kbasename(np->full_name);
+}
+
+static noinline_for_stack
+char *device_node_gen_full_name(const struct device_node *np, char *buf, char *end)
+{
+	int depth;
+	const struct device_node *parent = np->parent;
+	static const struct printf_spec strspec = {
+		.field_width = -1,
+		.precision = -1,
+	};
+
+	/* special case for root node */
+	if (!parent)
+		return string(buf, end, "/", strspec);
+
+	for (depth = 0; parent->parent; depth++)
+		parent = parent->parent;
+
+	for ( ; depth >= 0; depth--) {
+		buf = string(buf, end, "/", strspec);
+		buf = string(buf, end, device_node_name_for_depth(np, depth),
+			     strspec);
+	}
+	return buf;
+}
+
+static noinline_for_stack
+char *device_node_string(char *buf, char *end, struct device_node *dn,
+			 struct printf_spec spec, const char *fmt)
+{
+	char tbuf[sizeof("xxxx") + 1];
+	const char *p;
+	int ret;
+	char *buf_start = buf;
+	struct property *prop;
+	bool has_mult, pass;
+	static const struct printf_spec num_spec = {
+		.flags = SMALL,
+		.field_width = -1,
+		.precision = -1,
+		.base = 10,
+	};
+
+	struct printf_spec str_spec = spec;
+	str_spec.field_width = -1;
+
+	if (!IS_ENABLED(CONFIG_OF))
+		return string(buf, end, "(!OF)", spec);
+
+	if ((unsigned long)dn < PAGE_SIZE)
+		return string(buf, end, "(null)", spec);
+
+	/* simple case without anything any more format specifiers */
+	fmt++;
+	if (fmt[0] == '\0' || strcspn(fmt,"fnpPFcC") > 0)
+		fmt = "f";
+
+	for (pass = false; strspn(fmt,"fnpPFcC"); fmt++, pass = true) {
+		if (pass) {
+			if (buf < end)
+				*buf = ':';
+			buf++;
+		}
+
+		switch (*fmt) {
+		case 'f':	/* full_name */
+			buf = device_node_gen_full_name(dn, buf, end);
+			break;
+		case 'n':	/* name */
+			buf = string(buf, end, dn->name, str_spec);
+			break;
+		case 'p':	/* phandle */
+			buf = number(buf, end, (unsigned int)dn->phandle, num_spec);
+			break;
+		case 'P':	/* path-spec */
+			p = kbasename(of_node_full_name(dn));
+			if (!p[1])
+				p = "/";
+			buf = string(buf, end, p, str_spec);
+			break;
+		case 'F':	/* flags */
+			tbuf[0] = of_node_check_flag(dn, OF_DYNAMIC) ? 'D' : '-';
+			tbuf[1] = of_node_check_flag(dn, OF_DETACHED) ? 'd' : '-';
+			tbuf[2] = of_node_check_flag(dn, OF_POPULATED) ? 'P' : '-';
+			tbuf[3] = of_node_check_flag(dn, OF_POPULATED_BUS) ? 'B' : '-';
+			tbuf[4] = 0;
+			buf = string(buf, end, tbuf, str_spec);
+			break;
+		case 'c':	/* major compatible string */
+			ret = of_property_read_string(dn, "compatible", &p);
+			if (!ret)
+				buf = string(buf, end, p, str_spec);
+			break;
+		case 'C':	/* full compatible string */
+			has_mult = false;
+			of_property_for_each_string(dn, "compatible", prop, p) {
+				if (has_mult)
+					buf = string(buf, end, ",", str_spec);
+				buf = string(buf, end, "\"", str_spec);
+				buf = string(buf, end, p, str_spec);
+				buf = string(buf, end, "\"", str_spec);
+
+				has_mult = true;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	return widen_string(buf, buf - buf_start, end, spec);
+}
+
 int kptr_restrict __read_mostly;
 
 /*
@@ -1566,6 +1687,16 @@ int kptr_restrict __read_mostly;
  *            p page flags (see struct page) given as pointer to unsigned long
  *            g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t
  *            v vma flags (VM_*) given as pointer to unsigned long
+ * - 'O' For a kobject based struct. Must be one of the following:
+ *       - 'OF[fnpPcCF]' For a device tree object
+ *                       Without any optional arguments prints the full_name
+ *                       f device node full_name
+ *                       n device node name
+ *                       p device node phandle
+ *                       P device node path spec (name + @unit)
+ *                       F device node flags
+ *                       c major compatible string
+ *                       C full compatible string
  *
  * ** Please update also Documentation/printk-formats.txt when making changes **
  *
@@ -1721,6 +1852,11 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 
 	case 'G':
 		return flags_string(buf, end, ptr, fmt);
+	case 'O':
+		switch (fmt[1]) {
+		case 'F':
+			return device_node_string(buf, end, ptr, spec, fmt + 1);
+		}
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
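Example output for the new %pOF extension, following the specifier table added to the kerneldoc above (the node, its phandle and the compatible string are invented for the example):

	static void show_node(const struct device_node *np)
	{
		/* assume np is /soc/i2c@40012000, phandle 42,
		 * compatible = "acme,board-i2c" */
		pr_info("%pOF\n",   np);	/* /soc/i2c@40012000    */
		pr_info("%pOFn\n",  np);	/* i2c                  */
		pr_info("%pOFP\n",  np);	/* i2c@40012000         */
		pr_info("%pOFfp\n", np);	/* /soc/i2c@40012000:42 */
		pr_info("%pOFc\n",  np);	/* acme,board-i2c       */
	}

Multiple sub-specifiers are printed in one pass, separated by ':', which is why %pOFfp yields the full name followed by the phandle.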