Diffstat (limited to 'kernel/trace/bpf_trace.c')
-rw-r--r-- | kernel/trace/bpf_trace.c | 110
1 file changed, 86 insertions(+), 24 deletions(-)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index f86c78961708..949a3870946c 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -802,6 +802,8 @@ struct send_signal_irq_work {
 	struct task_struct *task;
 	u32 sig;
 	enum pid_type type;
+	bool has_siginfo;
+	struct kernel_siginfo info;
 };
 
 static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
@@ -809,27 +811,46 @@ static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
 static void do_bpf_send_signal(struct irq_work *entry)
 {
 	struct send_signal_irq_work *work;
+	struct kernel_siginfo *siginfo;
 
 	work = container_of(entry, struct send_signal_irq_work, irq_work);
-	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
+	siginfo = work->has_siginfo ? &work->info : SEND_SIG_PRIV;
+
+	group_send_sig_info(work->sig, siginfo, work->task, work->type);
 	put_task_struct(work->task);
 }
 
-static int bpf_send_signal_common(u32 sig, enum pid_type type)
+static int bpf_send_signal_common(u32 sig, enum pid_type type, struct task_struct *task, u64 value)
 {
 	struct send_signal_irq_work *work = NULL;
+	struct kernel_siginfo info;
+	struct kernel_siginfo *siginfo;
+
+	if (!task) {
+		task = current;
+		siginfo = SEND_SIG_PRIV;
+	} else {
+		clear_siginfo(&info);
+		info.si_signo = sig;
+		info.si_errno = 0;
+		info.si_code = SI_KERNEL;
+		info.si_pid = 0;
+		info.si_uid = 0;
+		info.si_value.sival_ptr = (void *)(unsigned long)value;
+		siginfo = &info;
+	}
 
 	/* Similar to bpf_probe_write_user, task needs to be
 	 * in a sound condition and kernel memory access be
 	 * permitted in order to send signal to the current
 	 * task.
 	 */
-	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
+	if (unlikely(task->flags & (PF_KTHREAD | PF_EXITING)))
 		return -EPERM;
 	if (unlikely(!nmi_uaccess_okay()))
 		return -EPERM;
 
 	/* Task should not be pid=1 to avoid kernel panic. */
-	if (unlikely(is_global_init(current)))
+	if (unlikely(is_global_init(task)))
 		return -EPERM;
 
 	if (irqs_disabled()) {
@@ -847,19 +868,22 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
 		 * to the irq_work. The current task may change when queued
 		 * irq works get executed.
 		 */
-		work->task = get_task_struct(current);
+		work->task = get_task_struct(task);
+		work->has_siginfo = siginfo == &info;
+		if (work->has_siginfo)
+			copy_siginfo(&work->info, &info);
 		work->sig = sig;
 		work->type = type;
 		irq_work_queue(&work->irq_work);
 		return 0;
 	}
 
-	return group_send_sig_info(sig, SEND_SIG_PRIV, current, type);
+	return group_send_sig_info(sig, siginfo, task, type);
 }
 
 BPF_CALL_1(bpf_send_signal, u32, sig)
 {
-	return bpf_send_signal_common(sig, PIDTYPE_TGID);
+	return bpf_send_signal_common(sig, PIDTYPE_TGID, NULL, 0);
 }
 
 static const struct bpf_func_proto bpf_send_signal_proto = {
@@ -871,7 +895,7 @@ static const struct bpf_func_proto bpf_send_signal_proto = {
 
 BPF_CALL_1(bpf_send_signal_thread, u32, sig)
 {
-	return bpf_send_signal_common(sig, PIDTYPE_PID);
+	return bpf_send_signal_common(sig, PIDTYPE_PID, NULL, 0);
 }
 
 static const struct bpf_func_proto bpf_send_signal_thread_proto = {
@@ -1557,6 +1581,17 @@ static inline bool is_kprobe_session(const struct bpf_prog *prog)
 	return prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION;
 }
 
+static inline bool is_uprobe_multi(const struct bpf_prog *prog)
+{
+	return prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI ||
+	       prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
+}
+
+static inline bool is_uprobe_session(const struct bpf_prog *prog)
+{
+	return prog->expected_attach_type == BPF_TRACE_UPROBE_SESSION;
+}
+
 static const struct bpf_func_proto *
 kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -1574,13 +1609,13 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 	case BPF_FUNC_get_func_ip:
 		if (is_kprobe_multi(prog))
 			return &bpf_get_func_ip_proto_kprobe_multi;
-		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
+		if (is_uprobe_multi(prog))
 			return &bpf_get_func_ip_proto_uprobe_multi;
 		return &bpf_get_func_ip_proto_kprobe;
 	case BPF_FUNC_get_attach_cookie:
 		if (is_kprobe_multi(prog))
 			return &bpf_get_attach_cookie_proto_kmulti;
-		if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI)
+		if (is_uprobe_multi(prog))
 			return &bpf_get_attach_cookie_proto_umulti;
 		return &bpf_get_attach_cookie_proto_trace;
 	default:
@@ -3072,6 +3107,7 @@ struct bpf_uprobe {
 	u64 cookie;
 	struct uprobe *uprobe;
 	struct uprobe_consumer consumer;
+	bool session;
 };
 
 struct bpf_uprobe_multi_link {
@@ -3084,7 +3120,7 @@ struct bpf_uprobe_multi_link {
 };
 
 struct bpf_uprobe_multi_run_ctx {
-	struct bpf_run_ctx run_ctx;
+	struct bpf_session_run_ctx session_ctx;
 	unsigned long entry_ip;
 	struct bpf_uprobe *uprobe;
 };
@@ -3195,17 +3231,22 @@ static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
 
 static int uprobe_prog_run(struct bpf_uprobe *uprobe,
 			   unsigned long entry_ip,
-			   struct pt_regs *regs)
+			   struct pt_regs *regs,
+			   bool is_return, void *data)
 {
 	struct bpf_uprobe_multi_link *link = uprobe->link;
 	struct bpf_uprobe_multi_run_ctx run_ctx = {
+		.session_ctx = {
+			.is_return = is_return,
+			.data = data,
+		},
 		.entry_ip = entry_ip,
 		.uprobe = uprobe,
 	};
 	struct bpf_prog *prog = link->link.prog;
 	bool sleepable = prog->sleepable;
 	struct bpf_run_ctx *old_run_ctx;
-	int err = 0;
+	int err;
 
 	if (link->task && !same_thread_group(current, link->task))
 		return 0;
@@ -3217,7 +3258,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
 
 	migrate_disable();
 
-	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+	old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
 	err = bpf_prog_run(link->link.prog, regs);
 	bpf_reset_run_ctx(old_run_ctx);
 
@@ -3244,9 +3285,13 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs,
 			  __u64 *data)
 {
 	struct bpf_uprobe *uprobe;
+	int ret;
 
 	uprobe = container_of(con, struct bpf_uprobe, consumer);
-	return uprobe_prog_run(uprobe, instruction_pointer(regs), regs);
+	ret = uprobe_prog_run(uprobe, instruction_pointer(regs), regs, false, data);
+	if (uprobe->session)
+		return ret ? UPROBE_HANDLER_IGNORE : 0;
+	return 0;
 }
 
 static int
@@ -3256,14 +3301,16 @@ uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, s
 	struct bpf_uprobe *uprobe;
 
 	uprobe = container_of(con, struct bpf_uprobe, consumer);
-	return uprobe_prog_run(uprobe, func, regs);
+	uprobe_prog_run(uprobe, func, regs, true, data);
+	return 0;
 }
 
 static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
 {
 	struct bpf_uprobe_multi_run_ctx *run_ctx;
 
-	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
+	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
+			       session_ctx.run_ctx);
 	return run_ctx->entry_ip;
 }
 
@@ -3271,7 +3318,8 @@ static u64 bpf_uprobe_multi_cookie(struct bpf_run_ctx *ctx)
 {
 	struct bpf_uprobe_multi_run_ctx *run_ctx;
 
-	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx, run_ctx);
+	run_ctx = container_of(current->bpf_ctx, struct bpf_uprobe_multi_run_ctx,
+			       session_ctx.run_ctx);
 	return run_ctx->uprobe->cookie;
 }
 
@@ -3295,7 +3343,7 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
 	if (sizeof(u64) != sizeof(void *))
 		return -EOPNOTSUPP;
 
-	if (prog->expected_attach_type != BPF_TRACE_UPROBE_MULTI)
+	if (!is_uprobe_multi(prog))
 		return -EINVAL;
 
 	flags = attr->link_create.uprobe_multi.flags;
@@ -3371,11 +3419,12 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
 
 		uprobes[i].link = link;
 
-		if (flags & BPF_F_UPROBE_MULTI_RETURN)
-			uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
-		else
+		if (!(flags & BPF_F_UPROBE_MULTI_RETURN))
 			uprobes[i].consumer.handler = uprobe_multi_link_handler;
-
+		if (flags & BPF_F_UPROBE_MULTI_RETURN || is_uprobe_session(prog))
+			uprobes[i].consumer.ret_handler = uprobe_multi_link_ret_handler;
+		if (is_uprobe_session(prog))
+			uprobes[i].session = true;
 		if (pid)
 			uprobes[i].consumer.filter = uprobe_multi_link_filter;
 	}
@@ -3464,7 +3513,7 @@ static int bpf_kprobe_multi_filter(const struct bpf_prog *prog, u32 kfunc_id)
 	if (!btf_id_set8_contains(&kprobe_multi_kfunc_set_ids, kfunc_id))
 		return 0;
 
-	if (!is_kprobe_session(prog))
+	if (!is_kprobe_session(prog) && !is_uprobe_session(prog))
 		return -EACCES;
 
 	return 0;
@@ -3482,3 +3531,16 @@ static int __init bpf_kprobe_multi_kfuncs_init(void)
 }
 
 late_initcall(bpf_kprobe_multi_kfuncs_init);
+
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc int bpf_send_signal_task(struct task_struct *task, int sig, enum pid_type type,
+				     u64 value)
+{
+	if (type != PIDTYPE_PID && type != PIDTYPE_TGID)
+		return -EINVAL;
+
+	return bpf_send_signal_common(sig, type, task, value);
+}
+
+__bpf_kfunc_end_defs();
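
Taken together, the diff adds two user-visible pieces: a BPF_TRACE_UPROBE_SESSION attach type whose single program runs on both function entry and return through the same uprobe consumer, and a bpf_send_signal_task() kfunc that can signal an arbitrary task with a caller-chosen value. As a rough BPF-side sketch only (not part of the commit): the "uprobe.session" section name, the placeholder binary path and symbol, and the assumption that the kfuncs are registered for this program type all depend on a matching libbpf and on registration code not shown in this diff.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch. Assumes vmlinux.h generated from a kernel with this
 * diff and a libbpf that understands the "uprobe.session" section. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* kfunc added by this diff (see the __bpf_kfunc definition above). */
extern int bpf_send_signal_task(struct task_struct *task, int sig,
				enum pid_type type, u64 value) __ksym;
/* Session kfunc shared with kprobe sessions; the bpf_kprobe_multi_filter()
 * change above is what permits it for uprobe session programs. */
extern bool bpf_session_is_return(void) __ksym;

char LICENSE[] SEC("license") = "GPL";

/* Binary path and symbol are placeholders. */
SEC("uprobe.session//usr/bin/example:handle_request")
int handle_request_session(struct pt_regs *ctx)
{
	struct task_struct *task = bpf_get_current_task_btf();

	if (bpf_session_is_return())
		return 0;	/* return-side run of the same program */

	/* Signal the current thread; 'value' travels to user space in
	 * siginfo, per the SI_KERNEL kernel_siginfo filled in
	 * bpf_send_signal_common(). */
	bpf_send_signal_task(task, 10 /* SIGUSR1 */, PIDTYPE_PID, 0xcafe);

	/* A non-zero return from the entry run would make
	 * uprobe_multi_link_handler() return UPROBE_HANDLER_IGNORE and
	 * skip the return probe for this invocation. */
	return 0;
}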
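
On the receiving side, because bpf_send_signal_common() now builds a SI_KERNEL kernel_siginfo with si_value populated whenever a target task is supplied, a handler installed with SA_SIGINFO can read the 64-bit value back. A minimal, hypothetical receiver in plain POSIX C:

/* Hypothetical receiver for the sketch above. */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_sigusr1(int sig, siginfo_t *info, void *ucontext)
{
	(void)ucontext;
	/* si_value carries the u64 passed to bpf_send_signal_task(),
	 * cast through sival_ptr to mirror the kernel side. printf()
	 * is not async-signal-safe; acceptable for a demo only. */
	printf("sig=%d value=0x%lx\n", sig,
	       (unsigned long)info->si_value.sival_ptr);
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction = on_sigusr1,
		.sa_flags = SA_SIGINFO,
	};

	sigaction(SIGUSR1, &sa, NULL);
	for (;;)
		pause();
	return 0;
}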