author    | Jakub Kicinski <kuba@kernel.org> | 2023-02-09 12:05:25 -0800
committer | Jakub Kicinski <kuba@kernel.org> | 2023-02-09 12:25:40 -0800
commit    | 8697a258ae24703267d2a37d91ab757c91ef027e (patch)
tree      | 7a1798898ac64c72e6cf77d2320f036c963a16c1 /lib
parent    | 0b34d68049b09821499b93d50b5a9d7d2ca449f6 (diff)
parent    | 35674e787518768626d3a0ffce1c13a7eeed922d (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
net/devlink/leftover.c / net/core/devlink.c:
565b4824c39f ("devlink: change port event netdev notifier from per-net to global")
f05bd8ebeb69 ("devlink: move code to a dedicated directory")
687125b5799c ("devlink: split out core code")
https://lore.kernel.org/all/20230208094657.379f2b1a@canb.auug.org.au/
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig.debug     |  3
-rw-r--r-- | lib/dec_and_lock.c    | 31
-rw-r--r-- | lib/maple_tree.c      | 22
-rw-r--r-- | lib/test_maple_tree.c | 89
4 files changed, 133 insertions, 12 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 36c34c9909fa..3509115a0e85 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -763,6 +763,7 @@ config DEBUG_KMEMLEAK
 	select KALLSYMS
 	select CRC32
 	select STACKDEPOT
+	select STACKDEPOT_ALWAYS_INIT if !DEBUG_KMEMLEAK_DEFAULT_OFF
 	help
 	  Say Y here if you want to enable the memory leak
 	  detector. The memory allocation/freeing is traced in a way
@@ -1216,7 +1217,7 @@ config SCHED_DEBUG
 	depends on DEBUG_KERNEL && PROC_FS
 	default y
 	help
-	  If you say Y here, the /proc/sched_debug file will be provided
+	  If you say Y here, the /sys/kernel/debug/sched file will be provided
 	  that can help debug the scheduler. The runtime overhead of this
 	  option is minimal.
 
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 9555b68bb774..1dcca8f2e194 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -49,3 +49,34 @@ int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
 	return 0;
 }
 EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
+
+int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
+{
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+
+	/* Otherwise do it the slow way */
+	raw_spin_lock(lock);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	raw_spin_unlock(lock);
+	return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock);
+
+int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
+				     unsigned long *flags)
+{
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+
+	/* Otherwise do it the slow way */
+	raw_spin_lock_irqsave(lock, *flags);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	raw_spin_unlock_irqrestore(lock, *flags);
+	return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);
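The two helpers added above follow the same contract as the existing _atomic_dec_and_lock(): the fast path decrements without touching the lock, and the raw spinlock is acquired, and left held on return, only when the counter actually drops to zero. Below is a minimal usage sketch, not part of this commit; struct my_obj, my_obj_lock and my_obj_put() are hypothetical names used only for illustration, and it assumes the matching declaration of _atomic_dec_and_raw_lock() in <linux/spinlock.h> from the same series (this diff is limited to lib/).

```c
/* Hypothetical example (not from this commit): the usual
 * "drop a reference, lock only on the final put" pattern, using the
 * raw_spinlock_t variant added in lib/dec_and_lock.c above.
 */
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	atomic_t refcnt;		/* reference count */
	struct list_head node;		/* protected by my_obj_lock */
};

static DEFINE_RAW_SPINLOCK(my_obj_lock);

static void my_obj_put(struct my_obj *obj)
{
	/* Fast path: count stays above zero, the lock is never taken. */
	if (!_atomic_dec_and_raw_lock(&obj->refcnt, &my_obj_lock))
		return;

	/* Count hit zero: the lock is held here, tear the object down. */
	list_del(&obj->node);
	raw_spin_unlock(&my_obj_lock);
	kfree(obj);
}
```

The _irqsave variant is used the same way when the final put can race with the lock being taken from interrupt context.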
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 26e2045d3cda..5a976393c9ae 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -670,12 +670,13 @@ static inline unsigned long mte_pivot(const struct maple_enode *mn,
 				      unsigned char piv)
 {
 	struct maple_node *node = mte_to_node(mn);
+	enum maple_type type = mte_node_type(mn);
 
-	if (piv >= mt_pivots[piv]) {
+	if (piv >= mt_pivots[type]) {
 		WARN_ON(1);
 		return 0;
 	}
-	switch (mte_node_type(mn)) {
+	switch (type) {
 	case maple_arange_64:
 		return node->ma64.pivot[piv];
 	case maple_range_64:
@@ -4887,7 +4888,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
 	unsigned long *pivots, *gaps;
 	void __rcu **slots;
 	unsigned long gap = 0;
-	unsigned long max, min, index;
+	unsigned long max, min;
 	unsigned char offset;
 
 	if (unlikely(mas_is_err(mas)))
@@ -4909,8 +4910,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
 
 	min = mas_safe_min(mas, pivots, --offset);
 	max = mas_safe_pivot(mas, pivots, offset, type);
-	index = mas->index;
-	while (index <= max) {
+	while (mas->index <= max) {
 		gap = 0;
 		if (gaps)
 			gap = gaps[offset];
@@ -4941,10 +4941,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
 		min = mas_safe_min(mas, pivots, offset);
 	}
 
-	if (unlikely(index > max)) {
-		mas_set_err(mas, -EBUSY);
-		return false;
-	}
+	if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
+		goto no_space;
 
 	if (unlikely(ma_is_leaf(type))) {
 		mas->offset = offset;
@@ -4961,9 +4959,11 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
 		return false;
 
 ascend:
-	if (mte_is_root(mas->node))
-		mas_set_err(mas, -EBUSY);
+	if (!mte_is_root(mas->node))
+		return false;
 
+no_space:
+	mas_set_err(mas, -EBUSY);
 	return false;
 }
 
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 497fc93ccf9e..ec847bf4dcb4 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -2517,6 +2517,91 @@ static noinline void check_bnode_min_spanning(struct maple_tree *mt)
 	mt_set_non_kernel(0);
 }
 
+static noinline void check_empty_area_window(struct maple_tree *mt)
+{
+	unsigned long i, nr_entries = 20;
+	MA_STATE(mas, mt, 0, 0);
+
+	for (i = 1; i <= nr_entries; i++)
+		mtree_store_range(mt, i*10, i*10 + 9,
+				  xa_mk_value(i), GFP_KERNEL);
+
+	/* Create another hole besides the one at 0 */
+	mtree_store_range(mt, 160, 169, NULL, GFP_KERNEL);
+
+	/* Check lower bounds that don't fit */
+	rcu_read_lock();
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 10) != -EBUSY);
+
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 6, 90, 5) != -EBUSY);
+
+	/* Check lower bound that does fit */
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 90, 5) != 0);
+	MT_BUG_ON(mt, mas.index != 5);
+	MT_BUG_ON(mt, mas.last != 9);
+	rcu_read_unlock();
+
+	/* Check one gap that doesn't fit and one that does */
+	rcu_read_lock();
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 5, 217, 9) != 0);
+	MT_BUG_ON(mt, mas.index != 161);
+	MT_BUG_ON(mt, mas.last != 169);
+
+	/* Check one gap that does fit above the min */
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 3) != 0);
+	MT_BUG_ON(mt, mas.index != 216);
+	MT_BUG_ON(mt, mas.last != 218);
+
+	/* Check size that doesn't fit any gap */
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 218, 16) != -EBUSY);
+
+	/*
+	 * Check size that doesn't fit the lower end of the window but
+	 * does fit the gap
+	 */
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 167, 200, 4) != -EBUSY);
+
+	/*
+	 * Check size that doesn't fit the upper end of the window but
+	 * does fit the gap
+	 */
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area_rev(&mas, 100, 162, 4) != -EBUSY);
+
+	/* Check mas_empty_area forward */
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 9) != 0);
+	MT_BUG_ON(mt, mas.index != 0);
+	MT_BUG_ON(mt, mas.last != 8);
+
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 4) != 0);
+	MT_BUG_ON(mt, mas.index != 0);
+	MT_BUG_ON(mt, mas.last != 3);
+
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area(&mas, 0, 100, 11) != -EBUSY);
+
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area(&mas, 5, 100, 6) != -EBUSY);
+
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area(&mas, 0, 8, 10) != -EBUSY);
+
+	mas_reset(&mas);
+	mas_empty_area(&mas, 100, 165, 3);
+
+	mas_reset(&mas);
+	MT_BUG_ON(mt, mas_empty_area(&mas, 100, 163, 6) != -EBUSY);
+	rcu_read_unlock();
+}
+
 static DEFINE_MTREE(tree);
 static int maple_tree_seed(void)
 {
@@ -2765,6 +2850,10 @@ static int maple_tree_seed(void)
 	check_bnode_min_spanning(&tree);
 	mtree_destroy(&tree);
 
+	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+	check_empty_area_window(&tree);
+	mtree_destroy(&tree);
+
 #if defined(BENCH)
 skip:
 #endif
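The mas_rev_awalk() change and the new check_empty_area_window() test fit together: the old code only failed the reverse walk once mas->index had moved past max, so the requested size was never checked against the window's lower bound at that point, while the added condition (size - 1 > max - mas->index) also fails the search when fewer than size slots remain above the lower bound, via the shared no_space error path. As a worked example taken from the test above, the tree holds entries at 10..209 plus an extra hole at 160..169, so a reverse search for a size-10 area within [5, 90] only ever has the 5 slots [5, 9] available (9 - 5 + 1 = 5 < 10) and must return -EBUSY, whereas the size-5 request in the same window still succeeds with mas.index == 5 and mas.last == 9.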