author:    Daniel Borkmann <daniel@iogearbox.net>  2020-05-15 17:29:42 +0200
committer: Daniel Borkmann <daniel@iogearbox.net>  2020-05-15 17:29:46 +0200
commit:    ed24a7a852b542911479383d5c80b9a2b4bb8caa
tree:      6315790b1ff6943b35aab55167ec86996184b770 /kernel/bpf/syscall.c
parent:    0ee52c0f6c67e187ff1906f6048af7c96df320c7
parent:    81626001187609b9c49696a5b48d5abcf0e5f9be
Merge branch 'bpf-cap'
Alexei Starovoitov says:

====================
v6->v7:
- permit the SK_REUSEPORT program type under CAP_BPF, as suggested by Marek Majkowski. It's equivalent to SOCKET_FILTER, which is unpriv.

v5->v6:
- split allow_ptr_leaks into four flags.
- retain bpf_jit_limit under cap_sys_admin.
- fixed a few other issues spotted by Daniel.

v4->v5:
Split BPF operations that are allowed under CAP_SYS_ADMIN into a combination of CAP_BPF, CAP_PERFMON, and CAP_NET_ADMIN, and keep some of them under CAP_SYS_ADMIN.

The user process has to have
- CAP_BPF to create maps, do other sys_bpf() commands, and load SK_REUSEPORT progs.
  Note: the dev_map, sock_hash, and sock_map map types still require CAP_NET_ADMIN. That could be relaxed in the future.
- CAP_BPF and CAP_PERFMON to load tracing programs.
- CAP_BPF and CAP_NET_ADMIN to load networking programs (or CAP_SYS_ADMIN for backward compatibility).

CAP_BPF solves three main goals:
1. It provides isolation to user space processes that drop CAP_SYS_ADMIN and switch to CAP_BPF. More on this below. This is the major difference vs. the v4 set from Sep 2019.
2. It makes networking BPF progs more secure, since CAP_BPF + CAP_NET_ADMIN prevents pointer leaks and arbitrary kernel memory access.
3. It enables fuzzers to exercise all of the verifier logic, eventually finding bugs and making the BPF infra more secure. Currently fuzzers run unprivileged; they will be able to run with CAP_BPF.

The patchset is a long-overdue follow-up from the last plumbers conference. Compared to what was discussed at LPC, the CAP* checks at attach time are gone. For tracing progs the CAP_SYS_ADMIN check was done at load time only; there was no check at attach time. For networking and cgroup progs CAP_SYS_ADMIN was required at load time and CAP_NET_ADMIN at attach time, but there are several ways to bypass CAP_NET_ADMIN:
- if a networking prog uses tail_call, writing an FD into the prog_array will effectively attach it, but bpf_map_update_elem is an unprivileged operation.
- an freplace prog with CAP_SYS_ADMIN can replace a networking prog.

Consolidating all CAP checks at load time makes the security model similar to the open() syscall. Once the user has an FD, it can do everything with it; read/write/poll don't check permissions. In the same way, once the bpf_prog_load command returns an FD, the user can do everything with it (including attaching, detaching, and bpf_test_run).

The important design decision is to allow the ID->FD transition for CAP_SYS_ADMIN only. This means that user processes can run with CAP_BPF and CAP_NET_ADMIN and will not be able to affect each other unless they pass FDs via scm_rights or via pinning in bpffs. ID->FD is a mechanism for human override and introspection: an admin can do 'sudo bpftool prog ...'. It's possible to enforce via LSM that only the bpftool binary does the bpf syscall with CAP_SYS_ADMIN while the rest of the user space processes do the bpf syscall with CAP_BPF, isolating the bpf objects (progs, maps, links) owned by such processes from each other.

Another significant change from LPC is that the verifier checks are split into four flags:
- allow_ptr_leaks allows pointer manipulations.
- bpf_capable enables all modern verifier features like bpf-to-bpf calls, BTF, bounded loops, dead code elimination, etc. All the goodness.
- bypass_spec_v1 enables indirect stack access from bpf programs and disables speculative analysis and bpf array mitigations.
- bypass_spec_v4 disables store sanitation.

That allows networking progs with CAP_BPF + CAP_NET_ADMIN to enjoy modern verifier features while being more secure.
Some networking progs may need CAP_BPF + CAP_NET_ADMIN + CAP_PERFMON, since subtracting pointers (like skb->data_end - skb->data) is a pointer leak, but the verifier may get smarter in the future.
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
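The bpf_capable() and perfmon_capable() checks used throughout the diff below are introduced by companion patches elsewhere in this series, not in syscall.c itself. Per the description above, their semantics, together with the four verifier flags, reduce to roughly the following sketch (paraphrased from the series; the exact header locations are in the other patches):

/* Sketch only: paraphrased from the companion patches in this series.
 * Each new capability deliberately falls back to CAP_SYS_ADMIN so that
 * existing privileged loaders keep working.
 */
static inline bool bpf_capable(void)
{
        return capable(CAP_BPF) || capable(CAP_SYS_ADMIN);
}

static inline bool perfmon_capable(void)
{
        return capable(CAP_PERFMON) || capable(CAP_SYS_ADMIN);
}

/* The three "dangerous" verifier flags all key off CAP_PERFMON;
 * bpf_capable() alone unlocks the modern-but-safe verifier features.
 */
static inline bool bpf_allow_ptr_leaks(void)
{
        return perfmon_capable();
}

static inline bool bpf_bypass_spec_v1(void)
{
        return perfmon_capable();
}

static inline bool bpf_bypass_spec_v4(void)
{
        return perfmon_capable();
}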
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--  kernel/bpf/syscall.c | 89
1 file changed, 68 insertions(+), 21 deletions(-)
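As an illustration of the consolidated load-time model (a hypothetical user space demo, not part of the series): after this merge, a trivial SCHED_CLS program can be loaded via bpf(2) by a process holding CAP_BPF + CAP_NET_ADMIN (or CAP_SYS_ADMIN for backward compatibility), and the attach-time capability checks removed in the diff below no longer apply:

/* demo.c - hypothetical illustration, not part of this series.
 * Loads a do-nothing SCHED_CLS program via bpf(2). With this merge
 * applied, the load succeeds for a process holding CAP_BPF and
 * CAP_NET_ADMIN and fails with EPERM otherwise.
 */
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        /* r0 = 0; exit; -- the smallest valid program */
        struct bpf_insn insns[] = {
                { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 },
                { .code = BPF_JMP | BPF_EXIT },
        };
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.prog_type = BPF_PROG_TYPE_SCHED_CLS; /* is_net_admin_prog_type() */
        attr.insns     = (__u64)(unsigned long)insns;
        attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
        attr.license   = (__u64)(unsigned long)"GPL";

        int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
        if (fd < 0)
                perror("BPF_PROG_LOAD"); /* EPERM without the needed caps */
        return fd < 0;
}

Once the FD is returned, attaching, detaching, and bpf_test_run need no further capability checks, matching the open()-like model described above.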
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index de2a75500233..79bcd8d056d2 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1534,7 +1534,7 @@ static int map_freeze(const union bpf_attr *attr)
err = -EBUSY;
goto err_put;
}
- if (!capable(CAP_SYS_ADMIN)) {
+ if (!bpf_capable()) {
err = -EPERM;
goto err_put;
}
@@ -2009,6 +2009,55 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
}
}
+static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
+{
+ switch (prog_type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_SCHED_ACT:
+ case BPF_PROG_TYPE_XDP:
+ case BPF_PROG_TYPE_LWT_IN:
+ case BPF_PROG_TYPE_LWT_OUT:
+ case BPF_PROG_TYPE_LWT_XMIT:
+ case BPF_PROG_TYPE_LWT_SEG6LOCAL:
+ case BPF_PROG_TYPE_SK_SKB:
+ case BPF_PROG_TYPE_SK_MSG:
+ case BPF_PROG_TYPE_LIRC_MODE2:
+ case BPF_PROG_TYPE_FLOW_DISSECTOR:
+ case BPF_PROG_TYPE_CGROUP_DEVICE:
+ case BPF_PROG_TYPE_CGROUP_SOCK:
+ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+ case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+ case BPF_PROG_TYPE_CGROUP_SYSCTL:
+ case BPF_PROG_TYPE_SOCK_OPS:
+ case BPF_PROG_TYPE_EXT: /* extends any prog */
+ return true;
+ case BPF_PROG_TYPE_CGROUP_SKB:
+ /* always unpriv */
+ case BPF_PROG_TYPE_SK_REUSEPORT:
+ /* equivalent to SOCKET_FILTER. need CAP_BPF only */
+ default:
+ return false;
+ }
+}
+
+static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
+{
+ switch (prog_type) {
+ case BPF_PROG_TYPE_KPROBE:
+ case BPF_PROG_TYPE_TRACEPOINT:
+ case BPF_PROG_TYPE_PERF_EVENT:
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
+ case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_LSM:
+ case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
+ case BPF_PROG_TYPE_EXT: /* extends any prog */
+ return true;
+ default:
+ return false;
+ }
+}
+
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
@@ -2031,7 +2080,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
(attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
- !capable(CAP_SYS_ADMIN))
+ !bpf_capable())
return -EPERM;
/* copy eBPF program license from user space */
@@ -2044,11 +2093,16 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
is_gpl = license_is_gpl_compatible(license);
if (attr->insn_cnt == 0 ||
- attr->insn_cnt > (capable(CAP_SYS_ADMIN) ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
+ attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
return -E2BIG;
if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
type != BPF_PROG_TYPE_CGROUP_SKB &&
- !capable(CAP_SYS_ADMIN))
+ !bpf_capable())
+ return -EPERM;
+
+ if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (is_perfmon_prog_type(type) && !perfmon_capable())
return -EPERM;
bpf_prog_load_fixup_attach_type(attr);
@@ -2682,6 +2736,11 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
case BPF_PROG_TYPE_CGROUP_SOCKOPT:
return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
case BPF_PROG_TYPE_CGROUP_SKB:
+ if (!capable(CAP_NET_ADMIN))
+ /* cg-skb progs can be loaded by unpriv user.
+ * check permissions at attach time.
+ */
+ return -EPERM;
return prog->enforce_expected_attach_type &&
prog->expected_attach_type != attach_type ?
-EINVAL : 0;
@@ -2747,9 +2806,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
struct bpf_prog *prog;
int ret;
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
if (CHECK_ATTR(BPF_PROG_ATTACH))
return -EINVAL;
@@ -2804,9 +2860,6 @@ static int bpf_prog_detach(const union bpf_attr *attr)
{
enum bpf_prog_type ptype;
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
if (CHECK_ATTR(BPF_PROG_DETACH))
return -EINVAL;
@@ -2819,6 +2872,8 @@ static int bpf_prog_detach(const union bpf_attr *attr)
case BPF_PROG_TYPE_LIRC_MODE2:
return lirc_prog_detach(attr);
case BPF_PROG_TYPE_FLOW_DISSECTOR:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
return skb_flow_dissector_bpf_prog_detach(attr);
case BPF_PROG_TYPE_CGROUP_DEVICE:
case BPF_PROG_TYPE_CGROUP_SKB:
@@ -2882,8 +2937,6 @@ static int bpf_prog_test_run(const union bpf_attr *attr,
struct bpf_prog *prog;
int ret = -ENOTSUPP;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
if (CHECK_ATTR(BPF_PROG_TEST_RUN))
return -EINVAL;
@@ -3184,7 +3237,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
info.run_time_ns = stats.nsecs;
info.run_cnt = stats.cnt;
- if (!capable(CAP_SYS_ADMIN)) {
+ if (!bpf_capable()) {
info.jited_prog_len = 0;
info.xlated_prog_len = 0;
info.nr_jited_ksyms = 0;
@@ -3543,7 +3596,7 @@ static int bpf_btf_load(const union bpf_attr *attr)
if (CHECK_ATTR(BPF_BTF_LOAD))
return -EINVAL;
- if (!capable(CAP_SYS_ADMIN))
+ if (!bpf_capable())
return -EPERM;
return btf_new_fd(attr);
@@ -3766,9 +3819,6 @@ static int link_create(union bpf_attr *attr)
struct bpf_prog *prog;
int ret;
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
if (CHECK_ATTR(BPF_LINK_CREATE))
return -EINVAL;
@@ -3817,9 +3867,6 @@ static int link_update(union bpf_attr *attr)
u32 flags;
int ret;
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
if (CHECK_ATTR(BPF_LINK_UPDATE))
return -EINVAL;
@@ -3988,7 +4035,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
union bpf_attr attr;
int err;
- if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
+ if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
return -EPERM;
err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);