author     Oleg Nesterov <oleg@redhat.com>            2023-10-30 16:57:10 +0100
committer  Andrew Morton <akpm@linux-foundation.org>  2023-12-10 17:21:25 -0800
commit     61a7a5e25fe79b6c43f1c49705a0294be113c4a5
tree       247d5acb3d1621879885943b83802dfc064687a9
parent     a9a1d6ad668f2ea0b5a77d0c4c7a41446d0801a8
introduce for_other_threads(p, t)
Cosmetic, but IMHO it makes the usage look clearer and simpler; the new
helper doesn't require initializing "t".

After this change, while_each_thread() has only 3 users, and it is only
used in do/while loops.
Link: https://lkml.kernel.org/r/20231030155710.GA9095@redhat.com
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Christian Brauner <brauner@kernel.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
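For illustration only, here is a minimal, self-contained user-space sketch (not kernel code) of the pattern the new helper captures: a circular thread list walked by a stand-in next_thread(), showing why for_other_threads(p, t) needs no separate "t = p" statement while while_each_thread(g, t) does. The "struct task" type and next_thread() below are hypothetical stand-ins, not the kernel's definitions.

#include <stdio.h>

struct task {
        const char *comm;
        struct task *next;      /* circular list of threads in the group */
};

/* Stand-in for the kernel's next_thread(): step to the next list entry. */
static struct task *next_thread(struct task *t)
{
        return t->next;
}

/* Old pattern: the caller must set t = g before entering the loop. */
#define while_each_thread(g, t) \
        while ((t = next_thread(t)) != g)

/* New helper: the for-loop initializer sets t for the caller. */
#define for_other_threads(p, t) \
        for (t = p; (t = next_thread(t)) != p; )

int main(void)
{
        struct task c = { "thread-2", NULL };
        struct task b = { "thread-1", &c };
        struct task a = { "leader",   &b };
        struct task *t;

        c.next = &a;            /* close the circle: a -> b -> c -> a */

        for_other_threads(&a, t)        /* visits b and c, skips the leader */
                printf("%s\n", t->comm);

        return 0;
}

Running the sketch prints the two non-leader threads, mirroring what the converted call sites in fs/exec.c and kernel/signal.c iterate over.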
-rw-r--r--  fs/exec.c                    |  3
-rw-r--r--  include/linux/sched/signal.h |  3
-rw-r--r--  kernel/signal.c              | 11
3 files changed, 8 insertions, 9 deletions
diff --git a/fs/exec.c b/fs/exec.c
index 4aa19b24f281..ee43597cb453 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1578,11 +1578,10 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
         * will be able to manipulate the current directory, etc.
         * It would be nice to force an unshare instead...
         */
-       t = p;
        n_fs = 1;
        spin_lock(&p->fs->lock);
        rcu_read_lock();
-       while_each_thread(p, t) {
+       for_other_threads(p, t) {
                if (t->fs == p->fs)
                        n_fs++;
        }
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 3499c1a8b929..41d6759d6a4a 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -646,6 +646,9 @@ extern bool current_is_single_threaded(void);
 #define while_each_thread(g, t) \
        while ((t = next_thread(t)) != g)
 
+#define for_other_threads(p, t) \
+       for (t = p; (t = next_thread(t)) != p; )
+
 #define __for_each_thread(signal, t) \
        list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
                                lockdep_is_held(&tasklist_lock))
diff --git a/kernel/signal.c b/kernel/signal.c
index 47a7602dfe8d..5aa216e841a2 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1376,12 +1376,12 @@ int force_sig_info(struct kernel_siginfo *info)
  */
 int zap_other_threads(struct task_struct *p)
 {
-       struct task_struct *t = p;
+       struct task_struct *t;
        int count = 0;
 
        p->signal->group_stop_count = 0;
 
-       while_each_thread(p, t) {
+       for_other_threads(p, t) {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                /* Don't require de_thread to wait for the vhost_worker */
                if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
@@ -2465,12 +2465,10 @@ static bool do_signal_stop(int signr)
                sig->group_exit_code = signr;
 
                sig->group_stop_count = 0;
-
                if (task_set_jobctl_pending(current, signr | gstop))
                        sig->group_stop_count++;
 
-               t = current;
-               while_each_thread(current, t) {
+               for_other_threads(current, t) {
                        /*
                         * Setting state to TASK_STOPPED for a group
                         * stop is always done with the siglock held,
@@ -2966,8 +2964,7 @@ static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
        if (sigisemptyset(&retarget))
                return;
 
-       t = tsk;
-       while_each_thread(tsk, t) {
+       for_other_threads(tsk, t) {
                if (t->flags & PF_EXITING)
                        continue;