author     Linus Torvalds <torvalds@linux-foundation.org>  2024-09-18 06:59:44 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2024-09-18 06:59:44 +0200
commit     85a77db95af4915b235b3cffb7eff9a1a2206d14 (patch)
tree       8af6e1f0d48f6bafad99e291e68df1faa7f4eb52
parent     78567e2bc723b444228644d2e34ae5255d4ab8a0 (diff)
parent     b4722b8593b8815785bbadf87f13c88e89a0ebef (diff)
Merge tag 'wq-for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue updates from Tejun Heo:
 "Nothing major:

   - workqueue.panic_on_stall boot param added

   - alloc_workqueue_lockdep_map() added (used by DRM)

   - Other cleanups and doc updates"

* tag 'wq-for-6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  kernel/workqueue.c: fix DEFINE_PER_CPU_SHARED_ALIGNED expansion
  workqueue: Fix another htmldocs build warning
  workqueue: fix null-ptr-deref on __alloc_workqueue() error
  workqueue: Don't call va_start / va_end twice
  workqueue: Fix htmldocs build warning
  workqueue: Add interface for user-defined workqueue lockdep map
  workqueue: Change workqueue lockdep map to pointer
  workqueue: Split alloc_workqueue into internal function and lockdep init
  Documentation: kernel-parameters: add workqueue.panic_on_stall
  workqueue: add cmdline parameter workqueue.panic_on_stall
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt    7
-rw-r--r--  include/linux/workqueue.h                          41
-rw-r--r--  kernel/workqueue.c                                 107
3 files changed, 130 insertions(+), 25 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index e5489df3ea37..726c52f52f50 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -7386,6 +7386,13 @@
it can be updated at runtime by writing to the
corresponding sysfs file.
+ workqueue.panic_on_stall=<uint>
+ Panic when a workqueue stall is detected by
+ CONFIG_WQ_WATCHDOG. The value sets the number of
+ stall detections required to trigger the panic.
+
+ The default is 0, which disables the panic on stall.
+
workqueue.cpu_intensive_thresh_us=
Per-cpu work items which run for longer than this
threshold are automatically considered CPU intensive
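
A quick aside on usage: this is a boot-time knob for the CONFIG_WQ_WATCHDOG
stall watchdog. After the configured number of stall detections, the kernel
panics instead of only logging. A hypothetical kernel command line enabling
it (the threshold of 3 is illustrative) would include:

    workqueue.panic_on_stall=3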
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4eb8f9563136..59c2695e12e7 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -507,6 +507,47 @@ void workqueue_softirq_dead(unsigned int cpu);
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
+#ifdef CONFIG_LOCKDEP
+/**
+ * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags
+ * @max_active: max in-flight work items, 0 for default
+ * @lockdep_map: user-defined lockdep_map
+ * @...: args for @fmt
+ *
+ * Same as alloc_workqueue() but with a user-defined lockdep_map. Useful for
+ * workqueues created for the same purpose, to avoid leaking a lockdep_map
+ * on each workqueue creation.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+__printf(1, 5) struct workqueue_struct *
+alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
+ struct lockdep_map *lockdep_map, ...);
+
+/**
+ * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
+ * user-defined lockdep_map
+ *
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @lockdep_map: user-defined lockdep_map
+ * @args: args for @fmt
+ *
+ * Same as alloc_ordered_workqueue() but with a user-defined lockdep_map.
+ * Useful for workqueues created for the same purpose, to avoid leaking a
+ * lockdep_map on each workqueue creation.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...) \
+ alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), \
+ 1, lockdep_map, ##args)
+#endif
+
/**
* alloc_ordered_workqueue - allocate an ordered workqueue
* @fmt: printf format for the name of the workqueue
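
For illustration, a minimal sketch of a hypothetical caller of the new
interface (the "foo" driver name, helper, and flags are illustrative, not
part of this patch; STATIC_LOCKDEP_MAP_INIT comes from <linux/lockdep.h>):

    #include <linux/workqueue.h>
    #include <linux/lockdep.h>

    /* One static lockdep map shared by every instance of this workqueue,
     * so repeated create/destroy cycles do not register fresh lockdep
     * keys. */
    static struct lockdep_map foo_wq_lockdep_map =
            STATIC_LOCKDEP_MAP_INIT("foo_wq", &foo_wq_lockdep_map);

    static struct workqueue_struct *foo_create_wq(int id)
    {
            /* Ordered variant; WQ_MEM_RECLAIM is one of the flags the
             * kernel-doc above calls out as meaningful here. */
            return alloc_ordered_workqueue_lockdep_map("foo_wq/%d",
                                                       WQ_MEM_RECLAIM,
                                                       &foo_wq_lockdep_map,
                                                       id);
    }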
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6f2545037e57..9949ffad8df0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -364,7 +364,8 @@ struct workqueue_struct {
#ifdef CONFIG_LOCKDEP
char *lock_name;
struct lock_class_key key;
- struct lockdep_map lockdep_map;
+ struct lockdep_map __lockdep_map;
+ struct lockdep_map *lockdep_map;
#endif
char name[WQ_NAME_LEN]; /* I: workqueue name */
@@ -476,16 +477,13 @@ static bool wq_debug_force_rr_cpu = false;
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
/* to raise softirq for the BH worker pools on other CPUs */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS],
- bh_pool_irq_works);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_work [NR_STD_WORKER_POOLS], bh_pool_irq_works);
/* the BH worker pools */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
- bh_worker_pools);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], bh_worker_pools);
/* the per-cpu worker pools */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
- cpu_worker_pools);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
static DEFINE_IDR(worker_pool_idr); /* PR: idr of all pools */
@@ -3203,7 +3201,7 @@ __acquires(&pool->lock)
lockdep_start_depth = lockdep_depth(current);
/* see drain_dead_softirq_workfn() */
if (!bh_draining)
- lock_map_acquire(&pwq->wq->lockdep_map);
+ lock_map_acquire(pwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
/*
* Strictly speaking we should mark the invariant state without holding
@@ -3237,7 +3235,7 @@ __acquires(&pool->lock)
pwq->stats[PWQ_STAT_COMPLETED]++;
lock_map_release(&lockdep_map);
if (!bh_draining)
- lock_map_release(&pwq->wq->lockdep_map);
+ lock_map_release(pwq->wq->lockdep_map);
if (unlikely((worker->task && in_atomic()) ||
lockdep_depth(current) != lockdep_start_depth ||
@@ -3873,11 +3871,14 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
static void touch_wq_lockdep_map(struct workqueue_struct *wq)
{
#ifdef CONFIG_LOCKDEP
+ if (unlikely(!wq->lockdep_map))
+ return;
+
if (wq->flags & WQ_BH)
local_bh_disable();
- lock_map_acquire(&wq->lockdep_map);
- lock_map_release(&wq->lockdep_map);
+ lock_map_acquire(wq->lockdep_map);
+ lock_map_release(wq->lockdep_map);
if (wq->flags & WQ_BH)
local_bh_enable();
@@ -3911,7 +3912,7 @@ void __flush_workqueue(struct workqueue_struct *wq)
struct wq_flusher this_flusher = {
.list = LIST_HEAD_INIT(this_flusher.list),
.flush_color = -1,
- .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
+ .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, (*wq->lockdep_map)),
};
int next_color;
@@ -4776,16 +4777,23 @@ static void wq_init_lockdep(struct workqueue_struct *wq)
lock_name = wq->name;
wq->lock_name = lock_name;
- lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
+ wq->lockdep_map = &wq->__lockdep_map;
+ lockdep_init_map(wq->lockdep_map, lock_name, &wq->key, 0);
}
static void wq_unregister_lockdep(struct workqueue_struct *wq)
{
+ if (wq->lockdep_map != &wq->__lockdep_map)
+ return;
+
lockdep_unregister_key(&wq->key);
}
static void wq_free_lockdep(struct workqueue_struct *wq)
{
+ if (wq->lockdep_map != &wq->__lockdep_map)
+ return;
+
if (wq->lock_name != wq->name)
kfree(wq->lock_name);
}
@@ -5619,12 +5627,10 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
} while (activated);
}
-__printf(1, 4)
-struct workqueue_struct *alloc_workqueue(const char *fmt,
- unsigned int flags,
- int max_active, ...)
+static struct workqueue_struct *__alloc_workqueue(const char *fmt,
+ unsigned int flags,
+ int max_active, va_list args)
{
- va_list args;
struct workqueue_struct *wq;
size_t wq_size;
int name_len;
@@ -5656,9 +5662,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
goto err_free_wq;
}
- va_start(args, max_active);
name_len = vsnprintf(wq->name, sizeof(wq->name), fmt, args);
- va_end(args);
if (name_len >= WQ_NAME_LEN)
pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n",
@@ -5688,12 +5692,11 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
INIT_LIST_HEAD(&wq->flusher_overflow);
INIT_LIST_HEAD(&wq->maydays);
- wq_init_lockdep(wq);
INIT_LIST_HEAD(&wq->list);
if (flags & WQ_UNBOUND) {
if (alloc_node_nr_active(wq->node_nr_active) < 0)
- goto err_unreg_lockdep;
+ goto err_free_wq;
}
/*
@@ -5732,9 +5735,6 @@ err_unlock_free_node_nr_active:
kthread_flush_worker(pwq_release_worker);
free_node_nr_active(wq->node_nr_active);
}
-err_unreg_lockdep:
- wq_unregister_lockdep(wq);
- wq_free_lockdep(wq);
err_free_wq:
free_workqueue_attrs(wq->unbound_attrs);
kfree(wq);
@@ -5745,8 +5745,49 @@ err_destroy:
destroy_workqueue(wq);
return NULL;
}
+
+__printf(1, 4)
+struct workqueue_struct *alloc_workqueue(const char *fmt,
+ unsigned int flags,
+ int max_active, ...)
+{
+ struct workqueue_struct *wq;
+ va_list args;
+
+ va_start(args, max_active);
+ wq = __alloc_workqueue(fmt, flags, max_active, args);
+ va_end(args);
+ if (!wq)
+ return NULL;
+
+ wq_init_lockdep(wq);
+
+ return wq;
+}
EXPORT_SYMBOL_GPL(alloc_workqueue);
+#ifdef CONFIG_LOCKDEP
+__printf(1, 5)
+struct workqueue_struct *
+alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags,
+ int max_active, struct lockdep_map *lockdep_map, ...)
+{
+ struct workqueue_struct *wq;
+ va_list args;
+
+ va_start(args, lockdep_map);
+ wq = __alloc_workqueue(fmt, flags, max_active, args);
+ va_end(args);
+ if (!wq)
+ return NULL;
+
+ wq->lockdep_map = lockdep_map;
+
+ return wq;
+}
+EXPORT_SYMBOL_GPL(alloc_workqueue_lockdep_map);
+#endif
+
static bool pwq_busy(struct pool_workqueue *pwq)
{
int i;
@@ -7406,6 +7447,9 @@ static struct timer_list wq_watchdog_timer;
static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
+static unsigned int wq_panic_on_stall;
+module_param_named(panic_on_stall, wq_panic_on_stall, uint, 0644);
+
/*
* Show workers that might prevent the processing of pending work items.
* The only candidates are CPU-bound workers in the running state.
@@ -7457,6 +7501,16 @@ static void show_cpu_pools_hogs(void)
rcu_read_unlock();
}
+static void panic_on_wq_watchdog(void)
+{
+ static unsigned int wq_stall;
+
+ if (wq_panic_on_stall) {
+ wq_stall++;
+ BUG_ON(wq_stall >= wq_panic_on_stall);
+ }
+}
+
static void wq_watchdog_reset_touched(void)
{
int cpu;
@@ -7529,6 +7583,9 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
if (cpu_pool_stall)
show_cpu_pools_hogs();
+ if (lockup_detected)
+ panic_on_wq_watchdog();
+
wq_watchdog_reset_touched();
mod_timer(&wq_watchdog_timer, jiffies + thresh);
}
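
One closing note on panic_on_stall: because the parameter is registered
above with mode 0644 via module_param_named(), it can also be read or
changed at runtime through sysfs, e.g. (threshold value illustrative):

    echo 2 > /sys/module/workqueue/parameters/panic_on_stall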