Diffstat (limited to 'lib')
-rw-r--r--  lib/debugobjects.c  27
1 file changed, 13 insertions, 14 deletions
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 7cea91e193a8..5ce473ad499b 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -70,10 +70,10 @@ static HLIST_HEAD(obj_to_free);
* made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
* can be off.
*/
-static int obj_pool_min_free = ODEBUG_POOL_SIZE;
-static int obj_pool_free = ODEBUG_POOL_SIZE;
+static int __data_racy obj_pool_min_free = ODEBUG_POOL_SIZE;
+static int __data_racy obj_pool_free = ODEBUG_POOL_SIZE;
static int obj_pool_used;
-static int obj_pool_max_used;
+static int __data_racy obj_pool_max_used;
static bool obj_freeing;
/* The number of objs on the global free list */
static int obj_nr_tofree;
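For context, not part of the patch: __data_racy marks variables whose lockless readers knowingly tolerate stale or torn values, so KCSAN stops reporting those accesses as data races. A minimal sketch of the idea, assuming the compiler_types.h-style definition in which the qualifier only takes effect under the thread sanitizer (the exact definition is an assumption here):

    /* Sketch: in KCSAN builds (__SANITIZE_THREAD__) the qualifier makes
     * every access volatile, which KCSAN treats as intentionally marked
     * and does not report; in normal builds it compiles away. */
    #ifdef __SANITIZE_THREAD__
    # define __data_racy volatile
    #else
    # define __data_racy
    #endif

    /* Stats like obj_pool_min_free can then be read locklessly, e.g.
     * by debug_stats_show(), without triggering KCSAN reports. */
    static int __data_racy obj_pool_min_free;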
@@ -84,9 +84,9 @@ static int __data_racy debug_objects_fixups __read_mostly;
static int __data_racy debug_objects_warnings __read_mostly;
static int __data_racy debug_objects_enabled __read_mostly
= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-static int __data_racy debug_objects_pool_size __read_mostly
+static int debug_objects_pool_size __ro_after_init
= ODEBUG_POOL_SIZE;
-static int __data_racy debug_objects_pool_min_level __read_mostly
+static int debug_objects_pool_min_level __ro_after_init
= ODEBUG_POOL_MIN_LEVEL;

static const struct debug_obj_descr *descr_test __read_mostly;
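This hunk goes the other way: debug_objects_pool_size and debug_objects_pool_min_level are only written while the kernel boots, so rather than annotating a race that no longer exists, the patch moves them to __ro_after_init, which write-protects them once init completes. A short sketch of the pattern; the tuning function below is hypothetical, not from debugobjects.c:

    /* Lives in .data..ro_after_init: writable during boot, after which
     * the section is remapped read-only and any later store faults. */
    static int debug_objects_pool_size __ro_after_init = ODEBUG_POOL_SIZE;

    static int __init tune_pool_size(void)  /* hypothetical example */
    {
            /* Last point at which a write to the variable is legal. */
            debug_objects_pool_size += num_possible_cpus() * 4;
            return 0;
    }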
@@ -95,8 +95,8 @@ static struct kmem_cache *obj_cache __ro_after_init;

/*
* Track numbers of kmem_cache_alloc()/free() calls done.
*/
-static int debug_objects_allocated;
-static int debug_objects_freed;
+static int __data_racy debug_objects_allocated;
+static int __data_racy debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
@@ -135,20 +135,19 @@ static void fill_pool(void)
return;

/*
- * Reuse objs from the global free list; they will be reinitialized
- * when allocating.
+ * Reuse objs from the global obj_to_free list; they will be
+ * reinitialized when allocating.
*
- * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
- * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
- * sections.
+ * obj_nr_tofree is checked locklessly; the READ_ONCE() pairs with
+ * the WRITE_ONCE() in pool_lock critical sections.
*/
- while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
+ if (READ_ONCE(obj_nr_tofree)) {
raw_spin_lock_irqsave(&pool_lock, flags);
/*
* Recheck with the lock held as the worker thread might have
* won the race and freed the global free list already.
*/
- while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+ while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
hlist_del(&obj->node);
WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
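Taken together, the hunk turns the outer loop into a plain lockless peek: the inner loop re-checks both conditions under pool_lock anyway, so looping on the unlocked test was redundant. For readers outside the kernel tree, a standalone sketch of this "peek locklessly, then lock and recheck" pattern, using C11 atomics in place of READ_ONCE()/WRITE_ONCE() (all names here are illustrative, not from debugobjects.c):

    #include <pthread.h>
    #include <stdatomic.h>

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_int nr_tofree;           /* stand-in for obj_nr_tofree  */
    static int pool_free, pool_min_level;  /* written only under the lock */

    static void fill_pool_sketch(void)
    {
            /* Cheap lockless peek: the relaxed load pairs with the
             * relaxed store below, mirroring READ_ONCE()/WRITE_ONCE(). */
            if (!atomic_load_explicit(&nr_tofree, memory_order_relaxed))
                    return;

            pthread_mutex_lock(&pool_lock);
            /* Recheck under the lock: another thread may have drained
             * the free list between the peek and lock acquisition. */
            while (atomic_load_explicit(&nr_tofree, memory_order_relaxed) &&
                   pool_free < pool_min_level) {
                    /* ... move one object from the free list to the pool ... */
                    atomic_fetch_sub_explicit(&nr_tofree, 1, memory_order_relaxed);
                    pool_free++;
            }
            pthread_mutex_unlock(&pool_lock);
    }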