From 9fa02892857ae2b3b699630e5ede28f72106e7e7 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 21 Feb 2023 10:05:18 -0800 Subject: selftests/bpf: Fix BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL for empty flow label Kernel's flow dissector continues to parse the packet when the (optional) IPv6 flow label is empty even when instructed to stop (via BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL). Do the same in our reference BPF reimplementation. Signed-off-by: Stanislav Fomichev Acked-by: Willem de Bruijn Link: https://lore.kernel.org/r/20230221180518.2139026-1-sdf@google.com Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/flow_dissector.c | 24 ++++++++++++++++++++++ tools/testing/selftests/bpf/progs/bpf_flow.c | 2 +- 2 files changed, 25 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index 7acca37a3d2b..c4773173a4e4 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -345,6 +345,30 @@ struct test tests[] = { .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL, .retval = BPF_OK, }, + { + .name = "ipv6-empty-flow-label", + .pkt.ipv6 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_TCP, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .iph.flow_lbl = { 0x00, 0x00, 0x00 }, + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL, + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct ipv6hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .sport = 80, + .dport = 8080, + }, + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL, + .retval = BPF_OK, + }, { .name = "ipip-encap", .pkt.ipip = { diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c index a20c5ed5e454..b04e092fac94 100644 --- a/tools/testing/selftests/bpf/progs/bpf_flow.c +++ b/tools/testing/selftests/bpf/progs/bpf_flow.c @@ -337,7 +337,7 @@ PROG(IPV6)(struct __sk_buff *skb) keys->ip_proto = ip6h->nexthdr; keys->flow_label = ip6_flowlabel(ip6h); - if (keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) + if (keys->flow_label && keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) return export_flow_keys(keys, BPF_OK); return parse_ipv6_proto(skb, ip6h->nexthdr); -- cgit v1.2.3-70-g09d2 From d0093aaefa35b80990c05a424dad2396ba4549d7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 22 Feb 2023 15:29:58 -1000 Subject: selftests/bpf: Add a test case for bpf_cgroup_from_id() Add a test case for bpf_cgroup_from_id. 
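For reference, the pattern the new test exercises is roughly the following (a minimal sketch of the intended kfunc usage; the kfunc declaration itself is added to cgrp_kfunc_common.h in this patch):

	struct cgroup *cgrp;

	/* Look up a cgroup by its ID; returns NULL if no such cgroup exists. */
	cgrp = bpf_cgroup_from_id(cgid);
	if (!cgrp)
		return 0;

	/* ... inspect cgrp ... */

	/* The lookup acquires a reference, which must be released. */
	bpf_cgroup_release(cgrp);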
Signed-off-by: Tejun Heo Link: https://lore.kernel.org/r/Y/bBlt+tPozcQgws@slm.duckdns.org Signed-off-by: Alexei Starovoitov --- .../testing/selftests/bpf/prog_tests/cgrp_kfunc.c | 1 + .../selftests/bpf/progs/cgrp_kfunc_common.h | 1 + .../selftests/bpf/progs/cgrp_kfunc_success.c | 42 ++++++++++++++++++++++ 3 files changed, 44 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c index b3f7985c8504..adda85f97058 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c +++ b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c @@ -84,6 +84,7 @@ static const char * const success_tests[] = { "test_cgrp_xchg_release", "test_cgrp_get_release", "test_cgrp_get_ancestors", + "test_cgrp_from_id", }; void test_cgrp_kfunc(void) diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h index 7d30855bfe78..2f8de933b957 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h @@ -24,6 +24,7 @@ struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym; struct cgroup *bpf_cgroup_kptr_get(struct cgroup **pp) __ksym; void bpf_cgroup_release(struct cgroup *p) __ksym; struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym; +struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; static inline struct __cgrps_kfunc_map_value *cgrps_kfunc_map_value_lookup(struct cgroup *cgrp) { diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c index 0c23ea32df9f..42e13aebdd62 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c @@ -168,3 +168,45 @@ int BPF_PROG(test_cgrp_get_ancestors, struct cgroup *cgrp, const char *path) return 0; } + +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(test_cgrp_from_id, struct cgroup *cgrp, const char *path) +{ + struct cgroup *parent, *res; + u64 parent_cgid; + + if (!is_test_kfunc_task()) + return 0; + + /* @cgrp's ID is not visible yet, let's test with the parent */ + parent = bpf_cgroup_ancestor(cgrp, cgrp->level - 1); + if (!parent) { + err = 1; + return 0; + } + + parent_cgid = parent->kn->id; + bpf_cgroup_release(parent); + + res = bpf_cgroup_from_id(parent_cgid); + if (!res) { + err = 2; + return 0; + } + + bpf_cgroup_release(res); + + if (res != parent) { + err = 3; + return 0; + } + + res = bpf_cgroup_from_id((u64)-1); + if (res) { + bpf_cgroup_release(res); + err = 4; + return 0; + } + + return 0; +} -- cgit v1.2.3-70-g09d2 From b61987d37cbee3c44e80304598c60b163553926b Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Fri, 24 Feb 2023 14:13:42 +0800 Subject: selftests/bpf: move SYS() macro into the test_progs.h A lot of tests defined SYS() macro to run system calls with goto label. Let's move this macro to test_progs.h and add configurable "goto_label" as the first arg. 
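For example, a call site that previously read

	SYS("ip netns add %s", NS_TEST);

and jumped to whatever label the per-file macro hardcoded now names that label explicitly:

	SYS(fail, "ip netns add %s", NS_TEST);

A SYS_NOFAIL() variant (previously duplicated in a couple of tests) is likewise moved into test_progs.h for cleanup paths that want to run a command, e.g. "ip netns del ...", without asserting on its exit code; those paths previously called system() directly.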
Suggested-by: Martin KaFai Lau Signed-off-by: Hangbin Liu Link: https://lore.kernel.org/r/20230224061343.506571-2-liuhangbin@gmail.com Signed-off-by: Martin KaFai Lau --- .../selftests/bpf/prog_tests/decap_sanity.c | 16 +--- tools/testing/selftests/bpf/prog_tests/empty_skb.c | 25 +++--- .../testing/selftests/bpf/prog_tests/fib_lookup.c | 28 +++--- .../testing/selftests/bpf/prog_tests/tc_redirect.c | 100 ++++++++++----------- .../testing/selftests/bpf/prog_tests/test_tunnel.c | 71 ++++++--------- .../testing/selftests/bpf/prog_tests/xdp_bonding.c | 40 ++++----- .../selftests/bpf/prog_tests/xdp_do_redirect.c | 30 +++---- .../selftests/bpf/prog_tests/xdp_metadata.c | 23 ++--- .../selftests/bpf/prog_tests/xdp_synproxy.c | 41 ++++----- tools/testing/selftests/bpf/prog_tests/xfrm_info.c | 67 ++++++-------- tools/testing/selftests/bpf/test_progs.h | 15 ++++ 11 files changed, 193 insertions(+), 263 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c index 2853883b7cbb..5c0ebe6ba866 100644 --- a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c +++ b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c @@ -10,14 +10,6 @@ #include "network_helpers.h" #include "decap_sanity.skel.h" -#define SYS(fmt, ...) \ - ({ \ - char cmd[1024]; \ - snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ - if (!ASSERT_OK(system(cmd), cmd)) \ - goto fail; \ - }) - #define NS_TEST "decap_sanity_ns" #define IPV6_IFACE_ADDR "face::1" #define UDP_TEST_PORT 7777 @@ -37,9 +29,9 @@ void test_decap_sanity(void) if (!ASSERT_OK_PTR(skel, "skel open_and_load")) return; - SYS("ip netns add %s", NS_TEST); - SYS("ip -net %s -6 addr add %s/128 dev lo nodad", NS_TEST, IPV6_IFACE_ADDR); - SYS("ip -net %s link set dev lo up", NS_TEST); + SYS(fail, "ip netns add %s", NS_TEST); + SYS(fail, "ip -net %s -6 addr add %s/128 dev lo nodad", NS_TEST, IPV6_IFACE_ADDR); + SYS(fail, "ip -net %s link set dev lo up", NS_TEST); nstoken = open_netns(NS_TEST); if (!ASSERT_OK_PTR(nstoken, "open_netns")) @@ -80,6 +72,6 @@ fail: bpf_tc_hook_destroy(&qdisc_hook); close_netns(nstoken); } - system("ip netns del " NS_TEST " &> /dev/null"); + SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null"); decap_sanity__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/empty_skb.c b/tools/testing/selftests/bpf/prog_tests/empty_skb.c index 32dd731e9070..3b77d8a422db 100644 --- a/tools/testing/selftests/bpf/prog_tests/empty_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/empty_skb.c @@ -4,11 +4,6 @@ #include #include "empty_skb.skel.h" -#define SYS(cmd) ({ \ - if (!ASSERT_OK(system(cmd), (cmd))) \ - goto out; \ -}) - void test_empty_skb(void) { LIBBPF_OPTS(bpf_test_run_opts, tattr); @@ -93,18 +88,18 @@ void test_empty_skb(void) }, }; - SYS("ip netns add empty_skb"); + SYS(out, "ip netns add empty_skb"); tok = open_netns("empty_skb"); - SYS("ip link add veth0 type veth peer veth1"); - SYS("ip link set dev veth0 up"); - SYS("ip link set dev veth1 up"); - SYS("ip addr add 10.0.0.1/8 dev veth0"); - SYS("ip addr add 10.0.0.2/8 dev veth1"); + SYS(out, "ip link add veth0 type veth peer veth1"); + SYS(out, "ip link set dev veth0 up"); + SYS(out, "ip link set dev veth1 up"); + SYS(out, "ip addr add 10.0.0.1/8 dev veth0"); + SYS(out, "ip addr add 10.0.0.2/8 dev veth1"); veth_ifindex = if_nametoindex("veth0"); - SYS("ip link add ipip0 type ipip local 10.0.0.1 remote 10.0.0.2"); - SYS("ip link set ipip0 up"); - SYS("ip addr add 192.168.1.1/16 dev 
ipip0"); + SYS(out, "ip link add ipip0 type ipip local 10.0.0.1 remote 10.0.0.2"); + SYS(out, "ip link set ipip0 up"); + SYS(out, "ip addr add 192.168.1.1/16 dev ipip0"); ipip_ifindex = if_nametoindex("ipip0"); bpf_obj = empty_skb__open_and_load(); @@ -142,5 +137,5 @@ out: empty_skb__destroy(bpf_obj); if (tok) close_netns(tok); - system("ip netns del empty_skb"); + SYS_NOFAIL("ip netns del empty_skb"); } diff --git a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c index 61ccddccf485..429393caf612 100644 --- a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c @@ -8,14 +8,6 @@ #include "network_helpers.h" #include "fib_lookup.skel.h" -#define SYS(fmt, ...) \ - ({ \ - char cmd[1024]; \ - snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ - if (!ASSERT_OK(system(cmd), cmd)) \ - goto fail; \ - }) - #define NS_TEST "fib_lookup_ns" #define IPV6_IFACE_ADDR "face::face" #define IPV6_NUD_FAILED_ADDR "face::1" @@ -59,16 +51,16 @@ static int setup_netns(void) { int err; - SYS("ip link add veth1 type veth peer name veth2"); - SYS("ip link set dev veth1 up"); + SYS(fail, "ip link add veth1 type veth peer name veth2"); + SYS(fail, "ip link set dev veth1 up"); - SYS("ip addr add %s/64 dev veth1 nodad", IPV6_IFACE_ADDR); - SYS("ip neigh add %s dev veth1 nud failed", IPV6_NUD_FAILED_ADDR); - SYS("ip neigh add %s dev veth1 lladdr %s nud stale", IPV6_NUD_STALE_ADDR, DMAC); + SYS(fail, "ip addr add %s/64 dev veth1 nodad", IPV6_IFACE_ADDR); + SYS(fail, "ip neigh add %s dev veth1 nud failed", IPV6_NUD_FAILED_ADDR); + SYS(fail, "ip neigh add %s dev veth1 lladdr %s nud stale", IPV6_NUD_STALE_ADDR, DMAC); - SYS("ip addr add %s/24 dev veth1 nodad", IPV4_IFACE_ADDR); - SYS("ip neigh add %s dev veth1 nud failed", IPV4_NUD_FAILED_ADDR); - SYS("ip neigh add %s dev veth1 lladdr %s nud stale", IPV4_NUD_STALE_ADDR, DMAC); + SYS(fail, "ip addr add %s/24 dev veth1 nodad", IPV4_IFACE_ADDR); + SYS(fail, "ip neigh add %s dev veth1 nud failed", IPV4_NUD_FAILED_ADDR); + SYS(fail, "ip neigh add %s dev veth1 lladdr %s nud stale", IPV4_NUD_STALE_ADDR, DMAC); err = write_sysctl("/proc/sys/net/ipv4/conf/veth1/forwarding", "1"); if (!ASSERT_OK(err, "write_sysctl(net.ipv4.conf.veth1.forwarding)")) @@ -140,7 +132,7 @@ void test_fib_lookup(void) return; prog_fd = bpf_program__fd(skel->progs.fib_lookup); - SYS("ip netns add %s", NS_TEST); + SYS(fail, "ip netns add %s", NS_TEST); nstoken = open_netns(NS_TEST); if (!ASSERT_OK_PTR(nstoken, "open_netns")) @@ -182,6 +174,6 @@ void test_fib_lookup(void) fail: if (nstoken) close_netns(nstoken); - system("ip netns del " NS_TEST " &> /dev/null"); + SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null"); fib_lookup__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index bca5e6839ac4..6ee22c3b251a 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -137,24 +137,16 @@ static int get_ifaddr(const char *name, char *ifaddr) return 0; } -#define SYS(fmt, ...) 
\ - ({ \ - char cmd[1024]; \ - snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ - if (!ASSERT_OK(system(cmd), cmd)) \ - goto fail; \ - }) - static int netns_setup_links_and_routes(struct netns_setup_result *result) { struct nstoken *nstoken = NULL; char veth_src_fwd_addr[IFADDR_STR_LEN+1] = {}; - SYS("ip link add veth_src type veth peer name veth_src_fwd"); - SYS("ip link add veth_dst type veth peer name veth_dst_fwd"); + SYS(fail, "ip link add veth_src type veth peer name veth_src_fwd"); + SYS(fail, "ip link add veth_dst type veth peer name veth_dst_fwd"); - SYS("ip link set veth_dst_fwd address " MAC_DST_FWD); - SYS("ip link set veth_dst address " MAC_DST); + SYS(fail, "ip link set veth_dst_fwd address " MAC_DST_FWD); + SYS(fail, "ip link set veth_dst address " MAC_DST); if (get_ifaddr("veth_src_fwd", veth_src_fwd_addr)) goto fail; @@ -175,27 +167,27 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result) if (!ASSERT_GT(result->ifindex_veth_dst_fwd, 0, "ifindex_veth_dst_fwd")) goto fail; - SYS("ip link set veth_src netns " NS_SRC); - SYS("ip link set veth_src_fwd netns " NS_FWD); - SYS("ip link set veth_dst_fwd netns " NS_FWD); - SYS("ip link set veth_dst netns " NS_DST); + SYS(fail, "ip link set veth_src netns " NS_SRC); + SYS(fail, "ip link set veth_src_fwd netns " NS_FWD); + SYS(fail, "ip link set veth_dst_fwd netns " NS_FWD); + SYS(fail, "ip link set veth_dst netns " NS_DST); /** setup in 'src' namespace */ nstoken = open_netns(NS_SRC); if (!ASSERT_OK_PTR(nstoken, "setns src")) goto fail; - SYS("ip addr add " IP4_SRC "/32 dev veth_src"); - SYS("ip addr add " IP6_SRC "/128 dev veth_src nodad"); - SYS("ip link set dev veth_src up"); + SYS(fail, "ip addr add " IP4_SRC "/32 dev veth_src"); + SYS(fail, "ip addr add " IP6_SRC "/128 dev veth_src nodad"); + SYS(fail, "ip link set dev veth_src up"); - SYS("ip route add " IP4_DST "/32 dev veth_src scope global"); - SYS("ip route add " IP4_NET "/16 dev veth_src scope global"); - SYS("ip route add " IP6_DST "/128 dev veth_src scope global"); + SYS(fail, "ip route add " IP4_DST "/32 dev veth_src scope global"); + SYS(fail, "ip route add " IP4_NET "/16 dev veth_src scope global"); + SYS(fail, "ip route add " IP6_DST "/128 dev veth_src scope global"); - SYS("ip neigh add " IP4_DST " dev veth_src lladdr %s", + SYS(fail, "ip neigh add " IP4_DST " dev veth_src lladdr %s", veth_src_fwd_addr); - SYS("ip neigh add " IP6_DST " dev veth_src lladdr %s", + SYS(fail, "ip neigh add " IP6_DST " dev veth_src lladdr %s", veth_src_fwd_addr); close_netns(nstoken); @@ -209,15 +201,15 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result) * needs v4 one in order to start ARP probing. IP4_NET route is added * to the endpoints so that the ARP processing will reply. 
*/ - SYS("ip addr add " IP4_SLL "/32 dev veth_src_fwd"); - SYS("ip addr add " IP4_DLL "/32 dev veth_dst_fwd"); - SYS("ip link set dev veth_src_fwd up"); - SYS("ip link set dev veth_dst_fwd up"); + SYS(fail, "ip addr add " IP4_SLL "/32 dev veth_src_fwd"); + SYS(fail, "ip addr add " IP4_DLL "/32 dev veth_dst_fwd"); + SYS(fail, "ip link set dev veth_src_fwd up"); + SYS(fail, "ip link set dev veth_dst_fwd up"); - SYS("ip route add " IP4_SRC "/32 dev veth_src_fwd scope global"); - SYS("ip route add " IP6_SRC "/128 dev veth_src_fwd scope global"); - SYS("ip route add " IP4_DST "/32 dev veth_dst_fwd scope global"); - SYS("ip route add " IP6_DST "/128 dev veth_dst_fwd scope global"); + SYS(fail, "ip route add " IP4_SRC "/32 dev veth_src_fwd scope global"); + SYS(fail, "ip route add " IP6_SRC "/128 dev veth_src_fwd scope global"); + SYS(fail, "ip route add " IP4_DST "/32 dev veth_dst_fwd scope global"); + SYS(fail, "ip route add " IP6_DST "/128 dev veth_dst_fwd scope global"); close_netns(nstoken); @@ -226,16 +218,16 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result) if (!ASSERT_OK_PTR(nstoken, "setns dst")) goto fail; - SYS("ip addr add " IP4_DST "/32 dev veth_dst"); - SYS("ip addr add " IP6_DST "/128 dev veth_dst nodad"); - SYS("ip link set dev veth_dst up"); + SYS(fail, "ip addr add " IP4_DST "/32 dev veth_dst"); + SYS(fail, "ip addr add " IP6_DST "/128 dev veth_dst nodad"); + SYS(fail, "ip link set dev veth_dst up"); - SYS("ip route add " IP4_SRC "/32 dev veth_dst scope global"); - SYS("ip route add " IP4_NET "/16 dev veth_dst scope global"); - SYS("ip route add " IP6_SRC "/128 dev veth_dst scope global"); + SYS(fail, "ip route add " IP4_SRC "/32 dev veth_dst scope global"); + SYS(fail, "ip route add " IP4_NET "/16 dev veth_dst scope global"); + SYS(fail, "ip route add " IP6_SRC "/128 dev veth_dst scope global"); - SYS("ip neigh add " IP4_SRC " dev veth_dst lladdr " MAC_DST_FWD); - SYS("ip neigh add " IP6_SRC " dev veth_dst lladdr " MAC_DST_FWD); + SYS(fail, "ip neigh add " IP4_SRC " dev veth_dst lladdr " MAC_DST_FWD); + SYS(fail, "ip neigh add " IP6_SRC " dev veth_dst lladdr " MAC_DST_FWD); close_netns(nstoken); @@ -375,7 +367,7 @@ done: static int test_ping(int family, const char *addr) { - SYS("ip netns exec " NS_SRC " %s " PING_ARGS " %s > /dev/null", ping_command(family), addr); + SYS(fail, "ip netns exec " NS_SRC " %s " PING_ARGS " %s > /dev/null", ping_command(family), addr); return 0; fail: return -1; @@ -953,7 +945,7 @@ static int tun_open(char *name) if (!ASSERT_OK(err, "ioctl TUNSETIFF")) goto fail; - SYS("ip link set dev %s up", name); + SYS(fail, "ip link set dev %s up", name); return fd; fail: @@ -1076,23 +1068,23 @@ static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result) XGRESS_FILTER_ADD(&qdisc_veth_dst_fwd, BPF_TC_EGRESS, skel->progs.tc_chk, 0); /* Setup route and neigh tables */ - SYS("ip -netns " NS_SRC " addr add dev tun_src " IP4_TUN_SRC "/24"); - SYS("ip -netns " NS_FWD " addr add dev tun_fwd " IP4_TUN_FWD "/24"); + SYS(fail, "ip -netns " NS_SRC " addr add dev tun_src " IP4_TUN_SRC "/24"); + SYS(fail, "ip -netns " NS_FWD " addr add dev tun_fwd " IP4_TUN_FWD "/24"); - SYS("ip -netns " NS_SRC " addr add dev tun_src " IP6_TUN_SRC "/64 nodad"); - SYS("ip -netns " NS_FWD " addr add dev tun_fwd " IP6_TUN_FWD "/64 nodad"); + SYS(fail, "ip -netns " NS_SRC " addr add dev tun_src " IP6_TUN_SRC "/64 nodad"); + SYS(fail, "ip -netns " NS_FWD " addr add dev tun_fwd " IP6_TUN_FWD "/64 nodad"); - SYS("ip -netns " NS_SRC " route del " 
IP4_DST "/32 dev veth_src scope global"); - SYS("ip -netns " NS_SRC " route add " IP4_DST "/32 via " IP4_TUN_FWD + SYS(fail, "ip -netns " NS_SRC " route del " IP4_DST "/32 dev veth_src scope global"); + SYS(fail, "ip -netns " NS_SRC " route add " IP4_DST "/32 via " IP4_TUN_FWD " dev tun_src scope global"); - SYS("ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev veth_dst scope global"); - SYS("ip -netns " NS_SRC " route del " IP6_DST "/128 dev veth_src scope global"); - SYS("ip -netns " NS_SRC " route add " IP6_DST "/128 via " IP6_TUN_FWD + SYS(fail, "ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev veth_dst scope global"); + SYS(fail, "ip -netns " NS_SRC " route del " IP6_DST "/128 dev veth_src scope global"); + SYS(fail, "ip -netns " NS_SRC " route add " IP6_DST "/128 via " IP6_TUN_FWD " dev tun_src scope global"); - SYS("ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev veth_dst scope global"); + SYS(fail, "ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev veth_dst scope global"); - SYS("ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD); - SYS("ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD); + SYS(fail, "ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD); + SYS(fail, "ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD); if (!ASSERT_OK(set_forwarding(false), "disable forwarding")) goto fail; diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c index 07ad457f3370..47f1d482fe39 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c +++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c @@ -91,30 +91,15 @@ #define PING_ARGS "-i 0.01 -c 3 -w 10 -q" -#define SYS(fmt, ...) \ - ({ \ - char cmd[1024]; \ - snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ - if (!ASSERT_OK(system(cmd), cmd)) \ - goto fail; \ - }) - -#define SYS_NOFAIL(fmt, ...) 
\ - ({ \ - char cmd[1024]; \ - snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ - system(cmd); \ - }) - static int config_device(void) { - SYS("ip netns add at_ns0"); - SYS("ip link add veth0 address " MAC_VETH1 " type veth peer name veth1"); - SYS("ip link set veth0 netns at_ns0"); - SYS("ip addr add " IP4_ADDR1_VETH1 "/24 dev veth1"); - SYS("ip link set dev veth1 up mtu 1500"); - SYS("ip netns exec at_ns0 ip addr add " IP4_ADDR_VETH0 "/24 dev veth0"); - SYS("ip netns exec at_ns0 ip link set dev veth0 up mtu 1500"); + SYS(fail, "ip netns add at_ns0"); + SYS(fail, "ip link add veth0 address " MAC_VETH1 " type veth peer name veth1"); + SYS(fail, "ip link set veth0 netns at_ns0"); + SYS(fail, "ip addr add " IP4_ADDR1_VETH1 "/24 dev veth1"); + SYS(fail, "ip link set dev veth1 up mtu 1500"); + SYS(fail, "ip netns exec at_ns0 ip addr add " IP4_ADDR_VETH0 "/24 dev veth0"); + SYS(fail, "ip netns exec at_ns0 ip link set dev veth0 up mtu 1500"); return 0; fail: @@ -132,23 +117,23 @@ static void cleanup(void) static int add_vxlan_tunnel(void) { /* at_ns0 namespace */ - SYS("ip netns exec at_ns0 ip link add dev %s type vxlan external gbp dstport 4789", + SYS(fail, "ip netns exec at_ns0 ip link add dev %s type vxlan external gbp dstport 4789", VXLAN_TUNL_DEV0); - SYS("ip netns exec at_ns0 ip link set dev %s address %s up", + SYS(fail, "ip netns exec at_ns0 ip link set dev %s address %s up", VXLAN_TUNL_DEV0, MAC_TUNL_DEV0); - SYS("ip netns exec at_ns0 ip addr add dev %s %s/24", + SYS(fail, "ip netns exec at_ns0 ip addr add dev %s %s/24", VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0); - SYS("ip netns exec at_ns0 ip neigh add %s lladdr %s dev %s", + SYS(fail, "ip netns exec at_ns0 ip neigh add %s lladdr %s dev %s", IP4_ADDR_TUNL_DEV1, MAC_TUNL_DEV1, VXLAN_TUNL_DEV0); - SYS("ip netns exec at_ns0 ip neigh add %s lladdr %s dev veth0", + SYS(fail, "ip netns exec at_ns0 ip neigh add %s lladdr %s dev veth0", IP4_ADDR2_VETH1, MAC_VETH1); /* root namespace */ - SYS("ip link add dev %s type vxlan external gbp dstport 4789", + SYS(fail, "ip link add dev %s type vxlan external gbp dstport 4789", VXLAN_TUNL_DEV1); - SYS("ip link set dev %s address %s up", VXLAN_TUNL_DEV1, MAC_TUNL_DEV1); - SYS("ip addr add dev %s %s/24", VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1); - SYS("ip neigh add %s lladdr %s dev %s", + SYS(fail, "ip link set dev %s address %s up", VXLAN_TUNL_DEV1, MAC_TUNL_DEV1); + SYS(fail, "ip addr add dev %s %s/24", VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1); + SYS(fail, "ip neigh add %s lladdr %s dev %s", IP4_ADDR_TUNL_DEV0, MAC_TUNL_DEV0, VXLAN_TUNL_DEV1); return 0; @@ -165,26 +150,26 @@ static void delete_vxlan_tunnel(void) static int add_ip6vxlan_tunnel(void) { - SYS("ip netns exec at_ns0 ip -6 addr add %s/96 dev veth0", + SYS(fail, "ip netns exec at_ns0 ip -6 addr add %s/96 dev veth0", IP6_ADDR_VETH0); - SYS("ip netns exec at_ns0 ip link set dev veth0 up"); - SYS("ip -6 addr add %s/96 dev veth1", IP6_ADDR1_VETH1); - SYS("ip -6 addr add %s/96 dev veth1", IP6_ADDR2_VETH1); - SYS("ip link set dev veth1 up"); + SYS(fail, "ip netns exec at_ns0 ip link set dev veth0 up"); + SYS(fail, "ip -6 addr add %s/96 dev veth1", IP6_ADDR1_VETH1); + SYS(fail, "ip -6 addr add %s/96 dev veth1", IP6_ADDR2_VETH1); + SYS(fail, "ip link set dev veth1 up"); /* at_ns0 namespace */ - SYS("ip netns exec at_ns0 ip link add dev %s type vxlan external dstport 4789", + SYS(fail, "ip netns exec at_ns0 ip link add dev %s type vxlan external dstport 4789", IP6VXLAN_TUNL_DEV0); - SYS("ip netns exec at_ns0 ip addr add dev %s %s/24", + SYS(fail, "ip netns exec 
at_ns0 ip addr add dev %s %s/24", IP6VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0); - SYS("ip netns exec at_ns0 ip link set dev %s address %s up", + SYS(fail, "ip netns exec at_ns0 ip link set dev %s address %s up", IP6VXLAN_TUNL_DEV0, MAC_TUNL_DEV0); /* root namespace */ - SYS("ip link add dev %s type vxlan external dstport 4789", + SYS(fail, "ip link add dev %s type vxlan external dstport 4789", IP6VXLAN_TUNL_DEV1); - SYS("ip addr add dev %s %s/24", IP6VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1); - SYS("ip link set dev %s address %s up", + SYS(fail, "ip addr add dev %s %s/24", IP6VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1); + SYS(fail, "ip link set dev %s address %s up", IP6VXLAN_TUNL_DEV1, MAC_TUNL_DEV1); return 0; @@ -205,7 +190,7 @@ static void delete_ip6vxlan_tunnel(void) static int test_ping(int family, const char *addr) { - SYS("%s %s %s > /dev/null", ping_command(family), PING_ARGS, addr); + SYS(fail, "%s %s %s > /dev/null", ping_command(family), PING_ARGS, addr); return 0; fail: return -1; diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c index 5e3a26b15ec6..d19f79048ff6 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c @@ -141,41 +141,33 @@ static const char * const xmit_policy_names[] = { static int bonding_setup(struct skeletons *skeletons, int mode, int xmit_policy, int bond_both_attach) { -#define SYS(fmt, ...) \ - ({ \ - char cmd[1024]; \ - snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ - if (!ASSERT_OK(system(cmd), cmd)) \ - return -1; \ - }) - - SYS("ip netns add ns_dst"); - SYS("ip link add veth1_1 type veth peer name veth2_1 netns ns_dst"); - SYS("ip link add veth1_2 type veth peer name veth2_2 netns ns_dst"); - - SYS("ip link add bond1 type bond mode %s xmit_hash_policy %s", + SYS(fail, "ip netns add ns_dst"); + SYS(fail, "ip link add veth1_1 type veth peer name veth2_1 netns ns_dst"); + SYS(fail, "ip link add veth1_2 type veth peer name veth2_2 netns ns_dst"); + + SYS(fail, "ip link add bond1 type bond mode %s xmit_hash_policy %s", mode_names[mode], xmit_policy_names[xmit_policy]); - SYS("ip link set bond1 up address " BOND1_MAC_STR " addrgenmode none"); - SYS("ip -netns ns_dst link add bond2 type bond mode %s xmit_hash_policy %s", + SYS(fail, "ip link set bond1 up address " BOND1_MAC_STR " addrgenmode none"); + SYS(fail, "ip -netns ns_dst link add bond2 type bond mode %s xmit_hash_policy %s", mode_names[mode], xmit_policy_names[xmit_policy]); - SYS("ip -netns ns_dst link set bond2 up address " BOND2_MAC_STR " addrgenmode none"); + SYS(fail, "ip -netns ns_dst link set bond2 up address " BOND2_MAC_STR " addrgenmode none"); - SYS("ip link set veth1_1 master bond1"); + SYS(fail, "ip link set veth1_1 master bond1"); if (bond_both_attach == BOND_BOTH_AND_ATTACH) { - SYS("ip link set veth1_2 master bond1"); + SYS(fail, "ip link set veth1_2 master bond1"); } else { - SYS("ip link set veth1_2 up addrgenmode none"); + SYS(fail, "ip link set veth1_2 up addrgenmode none"); if (xdp_attach(skeletons, skeletons->xdp_dummy->progs.xdp_dummy_prog, "veth1_2")) return -1; } - SYS("ip -netns ns_dst link set veth2_1 master bond2"); + SYS(fail, "ip -netns ns_dst link set veth2_1 master bond2"); if (bond_both_attach == BOND_BOTH_AND_ATTACH) - SYS("ip -netns ns_dst link set veth2_2 master bond2"); + SYS(fail, "ip -netns ns_dst link set veth2_2 master bond2"); else - SYS("ip -netns ns_dst link set veth2_2 up addrgenmode none"); + SYS(fail, "ip -netns ns_dst link set 
veth2_2 up addrgenmode none"); /* Load a dummy program on sending side as with veth peer needs to have a * XDP program loaded as well. @@ -194,8 +186,8 @@ static int bonding_setup(struct skeletons *skeletons, int mode, int xmit_policy, } return 0; - -#undef SYS +fail: + return -1; } static void bonding_cleanup(struct skeletons *skeletons) diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c index 2666c84dbd01..856cbc29e6a1 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c @@ -12,14 +12,6 @@ #include #include "test_xdp_do_redirect.skel.h" -#define SYS(fmt, ...) \ - ({ \ - char cmd[1024]; \ - snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ - if (!ASSERT_OK(system(cmd), cmd)) \ - goto out; \ - }) - struct udp_packet { struct ethhdr eth; struct ipv6hdr iph; @@ -126,19 +118,19 @@ void test_xdp_do_redirect(void) * iface and NUM_PKTS-2 in the TC hook. We match the packets on the UDP * payload. */ - SYS("ip netns add testns"); + SYS(out, "ip netns add testns"); nstoken = open_netns("testns"); if (!ASSERT_OK_PTR(nstoken, "setns")) goto out; - SYS("ip link add veth_src type veth peer name veth_dst"); - SYS("ip link set dev veth_src address 00:11:22:33:44:55"); - SYS("ip link set dev veth_dst address 66:77:88:99:aa:bb"); - SYS("ip link set dev veth_src up"); - SYS("ip link set dev veth_dst up"); - SYS("ip addr add dev veth_src fc00::1/64"); - SYS("ip addr add dev veth_dst fc00::2/64"); - SYS("ip neigh add fc00::2 dev veth_src lladdr 66:77:88:99:aa:bb"); + SYS(out, "ip link add veth_src type veth peer name veth_dst"); + SYS(out, "ip link set dev veth_src address 00:11:22:33:44:55"); + SYS(out, "ip link set dev veth_dst address 66:77:88:99:aa:bb"); + SYS(out, "ip link set dev veth_src up"); + SYS(out, "ip link set dev veth_dst up"); + SYS(out, "ip addr add dev veth_src fc00::1/64"); + SYS(out, "ip addr add dev veth_dst fc00::2/64"); + SYS(out, "ip neigh add fc00::2 dev veth_src lladdr 66:77:88:99:aa:bb"); /* We enable forwarding in the test namespace because that will cause * the packets that go through the kernel stack (with XDP_PASS) to be @@ -151,7 +143,7 @@ void test_xdp_do_redirect(void) * code didn't have this, so we keep the test behaviour to make sure the * bug doesn't resurface. */ - SYS("sysctl -qw net.ipv6.conf.all.forwarding=1"); + SYS(out, "sysctl -qw net.ipv6.conf.all.forwarding=1"); ifindex_src = if_nametoindex("veth_src"); ifindex_dst = if_nametoindex("veth_dst"); @@ -225,6 +217,6 @@ out_tc: out: if (nstoken) close_netns(nstoken); - system("ip netns del testns"); + SYS_NOFAIL("ip netns del testns"); test_xdp_do_redirect__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c index aa4beae99f4f..490e851dc27d 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_metadata.c @@ -34,11 +34,6 @@ #define PREFIX_LEN "8" #define FAMILY AF_INET -#define SYS(cmd) ({ \ - if (!ASSERT_OK(system(cmd), (cmd))) \ - goto out; \ -}) - struct xsk { void *umem_area; struct xsk_umem *umem; @@ -298,16 +293,16 @@ void test_xdp_metadata(void) /* Setup new networking namespace, with a veth pair. 
*/ - SYS("ip netns add xdp_metadata"); + SYS(out, "ip netns add xdp_metadata"); tok = open_netns("xdp_metadata"); - SYS("ip link add numtxqueues 1 numrxqueues 1 " TX_NAME + SYS(out, "ip link add numtxqueues 1 numrxqueues 1 " TX_NAME " type veth peer " RX_NAME " numtxqueues 1 numrxqueues 1"); - SYS("ip link set dev " TX_NAME " address 00:00:00:00:00:01"); - SYS("ip link set dev " RX_NAME " address 00:00:00:00:00:02"); - SYS("ip link set dev " TX_NAME " up"); - SYS("ip link set dev " RX_NAME " up"); - SYS("ip addr add " TX_ADDR "/" PREFIX_LEN " dev " TX_NAME); - SYS("ip addr add " RX_ADDR "/" PREFIX_LEN " dev " RX_NAME); + SYS(out, "ip link set dev " TX_NAME " address 00:00:00:00:00:01"); + SYS(out, "ip link set dev " RX_NAME " address 00:00:00:00:00:02"); + SYS(out, "ip link set dev " TX_NAME " up"); + SYS(out, "ip link set dev " RX_NAME " up"); + SYS(out, "ip addr add " TX_ADDR "/" PREFIX_LEN " dev " TX_NAME); + SYS(out, "ip addr add " RX_ADDR "/" PREFIX_LEN " dev " RX_NAME); rx_ifindex = if_nametoindex(RX_NAME); tx_ifindex = if_nametoindex(TX_NAME); @@ -405,5 +400,5 @@ out: xdp_metadata__destroy(bpf_obj); if (tok) close_netns(tok); - system("ip netns del xdp_metadata"); + SYS_NOFAIL("ip netns del xdp_metadata"); } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c index c72083885b6d..8b50a992d233 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c @@ -8,11 +8,6 @@ #define CMD_OUT_BUF_SIZE 1023 -#define SYS(cmd) ({ \ - if (!ASSERT_OK(system(cmd), (cmd))) \ - goto out; \ -}) - #define SYS_OUT(cmd, ...) ({ \ char buf[1024]; \ snprintf(buf, sizeof(buf), (cmd), ##__VA_ARGS__); \ @@ -69,37 +64,37 @@ static void test_synproxy(bool xdp) char buf[CMD_OUT_BUF_SIZE]; size_t size; - SYS("ip netns add synproxy"); + SYS(out, "ip netns add synproxy"); - SYS("ip link add tmp0 type veth peer name tmp1"); - SYS("ip link set tmp1 netns synproxy"); - SYS("ip link set tmp0 up"); - SYS("ip addr replace 198.18.0.1/24 dev tmp0"); + SYS(out, "ip link add tmp0 type veth peer name tmp1"); + SYS(out, "ip link set tmp1 netns synproxy"); + SYS(out, "ip link set tmp0 up"); + SYS(out, "ip addr replace 198.18.0.1/24 dev tmp0"); /* When checksum offload is enabled, the XDP program sees wrong * checksums and drops packets. */ - SYS("ethtool -K tmp0 tx off"); + SYS(out, "ethtool -K tmp0 tx off"); if (xdp) /* Workaround required for veth. 
*/ - SYS("ip link set tmp0 xdp object xdp_dummy.bpf.o section xdp 2> /dev/null"); + SYS(out, "ip link set tmp0 xdp object xdp_dummy.bpf.o section xdp 2> /dev/null"); ns = open_netns("synproxy"); if (!ASSERT_OK_PTR(ns, "setns")) goto out; - SYS("ip link set lo up"); - SYS("ip link set tmp1 up"); - SYS("ip addr replace 198.18.0.2/24 dev tmp1"); - SYS("sysctl -w net.ipv4.tcp_syncookies=2"); - SYS("sysctl -w net.ipv4.tcp_timestamps=1"); - SYS("sysctl -w net.netfilter.nf_conntrack_tcp_loose=0"); - SYS("iptables-legacy -t raw -I PREROUTING \ + SYS(out, "ip link set lo up"); + SYS(out, "ip link set tmp1 up"); + SYS(out, "ip addr replace 198.18.0.2/24 dev tmp1"); + SYS(out, "sysctl -w net.ipv4.tcp_syncookies=2"); + SYS(out, "sysctl -w net.ipv4.tcp_timestamps=1"); + SYS(out, "sysctl -w net.netfilter.nf_conntrack_tcp_loose=0"); + SYS(out, "iptables-legacy -t raw -I PREROUTING \ -i tmp1 -p tcp -m tcp --syn --dport 8080 -j CT --notrack"); - SYS("iptables-legacy -t filter -A INPUT \ + SYS(out, "iptables-legacy -t filter -A INPUT \ -i tmp1 -p tcp -m tcp --dport 8080 -m state --state INVALID,UNTRACKED \ -j SYNPROXY --sack-perm --timestamp --wscale 7 --mss 1460"); - SYS("iptables-legacy -t filter -A INPUT \ + SYS(out, "iptables-legacy -t filter -A INPUT \ -i tmp1 -m state --state INVALID -j DROP"); ctrl_file = SYS_OUT("./xdp_synproxy --iface tmp1 --ports 8080 \ @@ -170,8 +165,8 @@ out: if (ns) close_netns(ns); - system("ip link del tmp0"); - system("ip netns del synproxy"); + SYS_NOFAIL("ip link del tmp0"); + SYS_NOFAIL("ip netns del synproxy"); } void test_xdp_synproxy(void) diff --git a/tools/testing/selftests/bpf/prog_tests/xfrm_info.c b/tools/testing/selftests/bpf/prog_tests/xfrm_info.c index 8b03c9bb4862..d37f5394e199 100644 --- a/tools/testing/selftests/bpf/prog_tests/xfrm_info.c +++ b/tools/testing/selftests/bpf/prog_tests/xfrm_info.c @@ -69,21 +69,6 @@ "proto esp aead 'rfc4106(gcm(aes))' " \ "0xe4d8f4b4da1df18a3510b3781496daa82488b713 128 mode tunnel " -#define SYS(fmt, ...) \ - ({ \ - char cmd[1024]; \ - snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ - if (!ASSERT_OK(system(cmd), cmd)) \ - goto fail; \ - }) - -#define SYS_NOFAIL(fmt, ...) 
\ - ({ \ - char cmd[1024]; \ - snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ - system(cmd); \ - }) - static int attach_tc_prog(struct bpf_tc_hook *hook, int igr_fd, int egr_fd) { LIBBPF_OPTS(bpf_tc_opts, opts1, .handle = 1, .priority = 1, @@ -126,23 +111,23 @@ static void cleanup(void) static int config_underlay(void) { - SYS("ip netns add " NS0); - SYS("ip netns add " NS1); - SYS("ip netns add " NS2); + SYS(fail, "ip netns add " NS0); + SYS(fail, "ip netns add " NS1); + SYS(fail, "ip netns add " NS2); /* NS0 <-> NS1 [veth01 <-> veth10] */ - SYS("ip link add veth01 netns " NS0 " type veth peer name veth10 netns " NS1); - SYS("ip -net " NS0 " addr add " IP4_ADDR_VETH01 "/24 dev veth01"); - SYS("ip -net " NS0 " link set dev veth01 up"); - SYS("ip -net " NS1 " addr add " IP4_ADDR_VETH10 "/24 dev veth10"); - SYS("ip -net " NS1 " link set dev veth10 up"); + SYS(fail, "ip link add veth01 netns " NS0 " type veth peer name veth10 netns " NS1); + SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH01 "/24 dev veth01"); + SYS(fail, "ip -net " NS0 " link set dev veth01 up"); + SYS(fail, "ip -net " NS1 " addr add " IP4_ADDR_VETH10 "/24 dev veth10"); + SYS(fail, "ip -net " NS1 " link set dev veth10 up"); /* NS0 <-> NS2 [veth02 <-> veth20] */ - SYS("ip link add veth02 netns " NS0 " type veth peer name veth20 netns " NS2); - SYS("ip -net " NS0 " addr add " IP4_ADDR_VETH02 "/24 dev veth02"); - SYS("ip -net " NS0 " link set dev veth02 up"); - SYS("ip -net " NS2 " addr add " IP4_ADDR_VETH20 "/24 dev veth20"); - SYS("ip -net " NS2 " link set dev veth20 up"); + SYS(fail, "ip link add veth02 netns " NS0 " type veth peer name veth20 netns " NS2); + SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH02 "/24 dev veth02"); + SYS(fail, "ip -net " NS0 " link set dev veth02 up"); + SYS(fail, "ip -net " NS2 " addr add " IP4_ADDR_VETH20 "/24 dev veth20"); + SYS(fail, "ip -net " NS2 " link set dev veth20 up"); return 0; fail: @@ -153,20 +138,20 @@ static int setup_xfrm_tunnel_ns(const char *ns, const char *ipv4_local, const char *ipv4_remote, int if_id) { /* State: local -> remote */ - SYS("ip -net %s xfrm state add src %s dst %s spi 1 " + SYS(fail, "ip -net %s xfrm state add src %s dst %s spi 1 " ESP_DUMMY_PARAMS "if_id %d", ns, ipv4_local, ipv4_remote, if_id); /* State: local <- remote */ - SYS("ip -net %s xfrm state add src %s dst %s spi 1 " + SYS(fail, "ip -net %s xfrm state add src %s dst %s spi 1 " ESP_DUMMY_PARAMS "if_id %d", ns, ipv4_remote, ipv4_local, if_id); /* Policy: local -> remote */ - SYS("ip -net %s xfrm policy add dir out src 0.0.0.0/0 dst 0.0.0.0/0 " + SYS(fail, "ip -net %s xfrm policy add dir out src 0.0.0.0/0 dst 0.0.0.0/0 " "if_id %d tmpl src %s dst %s proto esp mode tunnel if_id %d", ns, if_id, ipv4_local, ipv4_remote, if_id); /* Policy: local <- remote */ - SYS("ip -net %s xfrm policy add dir in src 0.0.0.0/0 dst 0.0.0.0/0 " + SYS(fail, "ip -net %s xfrm policy add dir in src 0.0.0.0/0 dst 0.0.0.0/0 " "if_id %d tmpl src %s dst %s proto esp mode tunnel if_id %d", ns, if_id, ipv4_remote, ipv4_local, if_id); @@ -274,16 +259,16 @@ static int config_overlay(void) if (!ASSERT_OK(setup_xfrmi_external_dev(NS0), "xfrmi")) goto fail; - SYS("ip -net " NS0 " addr add 192.168.1.100/24 dev ipsec0"); - SYS("ip -net " NS0 " link set dev ipsec0 up"); + SYS(fail, "ip -net " NS0 " addr add 192.168.1.100/24 dev ipsec0"); + SYS(fail, "ip -net " NS0 " link set dev ipsec0 up"); - SYS("ip -net " NS1 " link add ipsec0 type xfrm if_id %d", IF_ID_1); - SYS("ip -net " NS1 " addr add 192.168.1.200/24 dev ipsec0"); - 
SYS("ip -net " NS1 " link set dev ipsec0 up"); + SYS(fail, "ip -net " NS1 " link add ipsec0 type xfrm if_id %d", IF_ID_1); + SYS(fail, "ip -net " NS1 " addr add 192.168.1.200/24 dev ipsec0"); + SYS(fail, "ip -net " NS1 " link set dev ipsec0 up"); - SYS("ip -net " NS2 " link add ipsec0 type xfrm if_id %d", IF_ID_2); - SYS("ip -net " NS2 " addr add 192.168.1.200/24 dev ipsec0"); - SYS("ip -net " NS2 " link set dev ipsec0 up"); + SYS(fail, "ip -net " NS2 " link add ipsec0 type xfrm if_id %d", IF_ID_2); + SYS(fail, "ip -net " NS2 " addr add 192.168.1.200/24 dev ipsec0"); + SYS(fail, "ip -net " NS2 " link set dev ipsec0 up"); return 0; fail: @@ -294,7 +279,7 @@ static int test_xfrm_ping(struct xfrm_info *skel, u32 if_id) { skel->bss->req_if_id = if_id; - SYS("ping -i 0.01 -c 3 -w 10 -q 192.168.1.200 > /dev/null"); + SYS(fail, "ping -i 0.01 -c 3 -w 10 -q 192.168.1.200 > /dev/null"); if (!ASSERT_EQ(skel->bss->resp_if_id, if_id, "if_id")) goto fail; diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index d5d51ec97ec8..9fbdc57c5b57 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -376,6 +376,21 @@ int test__join_cgroup(const char *path); ___ok; \ }) +#define SYS(goto_label, fmt, ...) \ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + if (!ASSERT_OK(system(cmd), cmd)) \ + goto goto_label; \ + }) + +#define SYS_NOFAIL(fmt, ...) \ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + system(cmd); \ + }) + static inline __u64 ptr_to_u64(const void *ptr) { return (__u64) (unsigned long) ptr; -- cgit v1.2.3-70-g09d2 From 02d6a057c7bee44902c843949de6bbd439e33092 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Fri, 24 Feb 2023 14:13:43 +0800 Subject: selftests/bpf: run mptcp in a dedicated netns The current mptcp test is run in init netns. If the user or default system config disabled mptcp, the test will fail. Let's run the mptcp test in a dedicated netns to avoid none kernel default mptcp setting. 
Suggested-by: Martin KaFai Lau Signed-off-by: Hangbin Liu Acked-by: Matthieu Baerts Link: https://lore.kernel.org/r/20230224061343.506571-3-liuhangbin@gmail.com Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/prog_tests/mptcp.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c index 59f08d6d1d53..cd0c42fff7c0 100644 --- a/tools/testing/selftests/bpf/prog_tests/mptcp.c +++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c @@ -7,6 +7,8 @@ #include "network_helpers.h" #include "mptcp_sock.skel.h" +#define NS_TEST "mptcp_ns" + #ifndef TCP_CA_NAME_MAX #define TCP_CA_NAME_MAX 16 #endif @@ -138,12 +140,20 @@ out: static void test_base(void) { + struct nstoken *nstoken = NULL; int server_fd, cgroup_fd; cgroup_fd = test__join_cgroup("/mptcp"); if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup")) return; + SYS(fail, "ip netns add %s", NS_TEST); + SYS(fail, "ip -net %s link set dev lo up", NS_TEST); + + nstoken = open_netns(NS_TEST); + if (!ASSERT_OK_PTR(nstoken, "open_netns")) + goto fail; + /* without MPTCP */ server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); if (!ASSERT_GE(server_fd, 0, "start_server")) @@ -157,13 +167,18 @@ with_mptcp: /* with MPTCP */ server_fd = start_mptcp_server(AF_INET, NULL, 0, 0); if (!ASSERT_GE(server_fd, 0, "start_mptcp_server")) - goto close_cgroup_fd; + goto fail; ASSERT_OK(run_test(cgroup_fd, server_fd, true), "run_test mptcp"); close(server_fd); -close_cgroup_fd: +fail: + if (nstoken) + close_netns(nstoken); + + SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null"); + close(cgroup_fd); } -- cgit v1.2.3-70-g09d2 From 84c22fa83f9cb97061359f97d85f53a9ccde02c4 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Fri, 24 Feb 2023 18:36:55 +0800 Subject: selftests/bpf: Use __NR_prlimit64 instead of __NR_getrlimit in user_ringbuf test After commit 80d7da1cac62 ("asm-generic: Drop getrlimit and setrlimit syscalls from default list"), new architectures won't need to include getrlimit and setrlimit, they are superseded with prlimit64. 
In order to maintain compatibility for the new architectures, such as LoongArch which does not define __NR_getrlimit, it is better to use __NR_prlimit64 instead of __NR_getrlimit in user_ringbuf test to fix the following build error: TEST-OBJ [test_progs] user_ringbuf.test.o tools/testing/selftests/bpf/prog_tests/user_ringbuf.c: In function 'kick_kernel_cb': tools/testing/selftests/bpf/prog_tests/user_ringbuf.c:593:17: error: '__NR_getrlimit' undeclared (first use in this function) 593 | syscall(__NR_getrlimit); | ^~~~~~~~~~~~~~ tools/testing/selftests/bpf/prog_tests/user_ringbuf.c:593:17: note: each undeclared identifier is reported only once for each function it appears in make: *** [Makefile:573: tools/testing/selftests/bpf/user_ringbuf.test.o] Error 1 make: Leaving directory 'tools/testing/selftests/bpf' Signed-off-by: Tiezhu Yang Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/1677235015-21717-4-git-send-email-yangtiezhu@loongson.cn --- tools/testing/selftests/bpf/prog_tests/user_ringbuf.c | 2 +- tools/testing/selftests/bpf/progs/user_ringbuf_success.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c index 3a13e102c149..e51721df14fc 100644 --- a/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c +++ b/tools/testing/selftests/bpf/prog_tests/user_ringbuf.c @@ -590,7 +590,7 @@ static void *kick_kernel_cb(void *arg) /* Kick the kernel, causing it to drain the ring buffer and then wake * up the test thread waiting on epoll. */ - syscall(__NR_getrlimit); + syscall(__NR_prlimit64); return NULL; } diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_success.c b/tools/testing/selftests/bpf/progs/user_ringbuf_success.c index b39093dd5715..0ade1110613b 100644 --- a/tools/testing/selftests/bpf/progs/user_ringbuf_success.c +++ b/tools/testing/selftests/bpf/progs/user_ringbuf_success.c @@ -202,7 +202,7 @@ do_nothing_cb(struct bpf_dynptr *dynptr, void *context) return 0; } -SEC("fentry/" SYS_PREFIX "sys_getrlimit") +SEC("fentry/" SYS_PREFIX "sys_prlimit64") int test_user_ringbuf_epoll(void *ctx) { long num_samples; -- cgit v1.2.3-70-g09d2 From 11e456cae91e9044cb12c2b037b52c9b268925f7 Mon Sep 17 00:00:00 2001 From: Rong Tao Date: Fri, 24 Feb 2023 23:10:02 +0800 Subject: selftests/bpf: Fix compilation errors: Assign a value to a constant Commit bc292ab00f6c("mm: introduce vma->vm_flags wrapper functions") turns the vm_flags into a const variable. Added bpf_find_vma test in commit f108662b27c9("selftests/bpf: Add tests for bpf_find_vma") to assign values to variables that declare const in find_vma_fail1.c programs, which is an error to the compiler and does not test BPF verifiers. It is better to replace 'const vm_flags_t vm_flags' with 'unsigned long vm_start' for testing. $ make -C tools/testing/selftests/bpf/ -j8 ... 
progs/find_vma_fail1.c:16:16: error: cannot assign to non-static data member 'vm_flags' with const-qualified type 'const vm_flags_t' (aka 'const unsigned long') vma->vm_flags |= 0x55; ~~~~~~~~~~~~~ ^ ../tools/testing/selftests/bpf/tools/include/vmlinux.h:1898:20: note: non-static data member 'vm_flags' declared const here const vm_flags_t vm_flags; ~~~~~~~~~~~`~~~~~~^~~~~~~~ Signed-off-by: Rong Tao Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/tencent_CB281722B3C1BD504C16CDE586CACC2BE706@qq.com --- tools/testing/selftests/bpf/progs/find_vma_fail1.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/find_vma_fail1.c b/tools/testing/selftests/bpf/progs/find_vma_fail1.c index b3b326b8e2d1..47d5dedff554 100644 --- a/tools/testing/selftests/bpf/progs/find_vma_fail1.c +++ b/tools/testing/selftests/bpf/progs/find_vma_fail1.c @@ -13,7 +13,7 @@ static long write_vma(struct task_struct *task, struct vm_area_struct *vma, struct callback_ctx *data) { /* writing to vma, which is illegal */ - vma->vm_flags |= 0x55; + vma->vm_start = 0xffffffffff600000; return 0; } -- cgit v1.2.3-70-g09d2 From cfa7b011894d689cccfa88a25da324fa5c34e4ed Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Wed, 1 Mar 2023 07:49:53 -0800 Subject: selftests/bpf: tests for using dynptrs to parse skb and xdp buffers Test skb and xdp dynptr functionality in the following ways: 1) progs/test_cls_redirect_dynptr.c * Rewrite "progs/test_cls_redirect.c" test to use dynptrs to parse skb data * This is a great example of how dynptrs can be used to simplify a lot of the parsing logic for non-statically known values. When measuring the user + system time between the original version vs. using dynptrs, and averaging the time for 10 runs (using "time ./test_progs -t cls_redirect"): original version: 0.092 sec with dynptrs: 0.078 sec 2) progs/test_xdp_dynptr.c * Rewrite "progs/test_xdp.c" test to use dynptrs to parse xdp data When measuring the user + system time between the original version vs. using dynptrs, and averaging the time for 10 runs (using "time ./test_progs -t xdp_attach"): original version: 0.118 sec with dynptrs: 0.094 sec 3) progs/test_l4lb_noinline_dynptr.c * Rewrite "progs/test_l4lb_noinline.c" test to use dynptrs to parse skb data When measuring the user + system time between the original version vs. using dynptrs, and averaging the time for 10 runs (using "time ./test_progs -t l4lb_all"): original version: 0.062 sec with dynptrs: 0.081 sec For number of processed verifier instructions: original version: 6268 insns with dynptrs: 2588 insns 4) progs/test_parse_tcp_hdr_opt_dynptr.c * Add sample code for parsing tcp hdr opt lookup using dynptrs. This logic is lifted from a real-world use case of packet parsing in katran [0], a layer 4 load balancer. The original version "progs/test_parse_tcp_hdr_opt.c" (not using dynptrs) is included here as well, for comparison. When measuring the user + system time between the original version vs. using dynptrs, and averaging the time for 10 runs (using "time ./test_progs -t parse_tcp_hdr_opt"): original version: 0.031 sec with dynptrs: 0.045 sec 5) progs/dynptr_success.c * Add test case "test_skb_readonly" for testing attempts at writes on a prog type with read-only skb ctx. * Add "test_dynptr_skb_data" for testing that bpf_dynptr_data isn't supported for skb progs. 
6) progs/dynptr_fail.c * Add test cases "skb_invalid_data_slice{1,2,3,4}" and "xdp_invalid_data_slice{1,2}" for testing that helpers that modify the underlying packet buffer automatically invalidate the associated data slice. * Add test cases "skb_invalid_ctx" and "xdp_invalid_ctx" for testing that prog types that do not support bpf_dynptr_from_skb/xdp don't have access to the API. * Add test case "dynptr_slice_var_len{1,2}" for testing that variable-sized len can't be passed in to bpf_dynptr_slice * Add test case "skb_invalid_slice_write" for testing that writes to a read-only data slice are rejected by the verifier. * Add test case "data_slice_out_of_bounds_skb" for testing that writes to an area outside the slice are rejected. * Add test case "invalid_slice_rdwr_rdonly" for testing that prog types that don't allow writes to packet data don't accept any calls to bpf_dynptr_slice_rdwr. [0] https://github.com/facebookincubator/katran/blob/main/katran/lib/bpf/pckt_parsing.h Signed-off-by: Joanne Koong Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230301154953.641654-11-joannelkoong@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/DENYLIST.s390x | 2 + tools/testing/selftests/bpf/bpf_kfuncs.h | 38 + .../selftests/bpf/prog_tests/cls_redirect.c | 25 + tools/testing/selftests/bpf/prog_tests/dynptr.c | 74 +- tools/testing/selftests/bpf/prog_tests/l4lb_all.c | 2 + .../selftests/bpf/prog_tests/parse_tcp_hdr_opt.c | 93 ++ .../testing/selftests/bpf/prog_tests/xdp_attach.c | 11 +- tools/testing/selftests/bpf/progs/dynptr_fail.c | 287 +++++- tools/testing/selftests/bpf/progs/dynptr_success.c | 55 +- .../selftests/bpf/progs/test_cls_redirect_dynptr.c | 980 +++++++++++++++++++++ .../bpf/progs/test_l4lb_noinline_dynptr.c | 487 ++++++++++ .../selftests/bpf/progs/test_parse_tcp_hdr_opt.c | 119 +++ .../bpf/progs/test_parse_tcp_hdr_opt_dynptr.c | 114 +++ .../testing/selftests/bpf/progs/test_xdp_dynptr.c | 257 ++++++ tools/testing/selftests/bpf/test_tcp_hdr_options.h | 1 + 15 files changed, 2522 insertions(+), 23 deletions(-) create mode 100644 tools/testing/selftests/bpf/bpf_kfuncs.h create mode 100644 tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c create mode 100644 tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c create mode 100644 tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c create mode 100644 tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c create mode 100644 tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c create mode 100644 tools/testing/selftests/bpf/progs/test_xdp_dynptr.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x index b89eb87034e4..a02a085e7f32 100644 --- a/tools/testing/selftests/bpf/DENYLIST.s390x +++ b/tools/testing/selftests/bpf/DENYLIST.s390x @@ -4,6 +4,8 @@ bloom_filter_map # failed to find kernel BTF type ID of bpf_cookie # failed to open_and_load program: -524 (trampoline) bpf_loop # attaches to __x64_sys_nanosleep cgrp_local_storage # prog_attach unexpected error: -524 (trampoline) +dynptr/test_dynptr_skb_data +dynptr/test_skb_readonly fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline) get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace) kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95 diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h new file mode 100644 index 
000000000000..8c993ec8ceea --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_kfuncs.h @@ -0,0 +1,38 @@ +#ifndef __BPF_KFUNCS__ +#define __BPF_KFUNCS__ + +/* Description + * Initializes an skb-type dynptr + * Returns + * Error code + */ +extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags, + struct bpf_dynptr *ptr__uninit) __ksym; + +/* Description + * Initializes an xdp-type dynptr + * Returns + * Error code + */ +extern int bpf_dynptr_from_xdp(struct xdp_md *xdp, __u64 flags, + struct bpf_dynptr *ptr__uninit) __ksym; + +/* Description + * Obtain a read-only pointer to the dynptr's data + * Returns + * Either a direct pointer to the dynptr data or a pointer to the user-provided + * buffer if unable to obtain a direct pointer + */ +extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset, + void *buffer, __u32 buffer__szk) __ksym; + +/* Description + * Obtain a read-write pointer to the dynptr's data + * Returns + * Either a direct pointer to the dynptr data or a pointer to the user-provided + * buffer if unable to obtain a direct pointer + */ +extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u32 offset, + void *buffer, __u32 buffer__szk) __ksym; + +#endif diff --git a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c index 224f016b0a53..2a55f717fc07 100644 --- a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c @@ -13,6 +13,7 @@ #include "progs/test_cls_redirect.h" #include "test_cls_redirect.skel.h" +#include "test_cls_redirect_dynptr.skel.h" #include "test_cls_redirect_subprogs.skel.h" #define ENCAP_IP INADDR_LOOPBACK @@ -446,6 +447,28 @@ cleanup: close_fds((int *)conns, sizeof(conns) / sizeof(conns[0][0])); } +static void test_cls_redirect_dynptr(void) +{ + struct test_cls_redirect_dynptr *skel; + int err; + + skel = test_cls_redirect_dynptr__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP); + skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT); + + err = test_cls_redirect_dynptr__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + test_cls_redirect_common(skel->progs.cls_redirect); + +cleanup: + test_cls_redirect_dynptr__destroy(skel); +} + static void test_cls_redirect_inlined(void) { struct test_cls_redirect *skel; @@ -496,4 +519,6 @@ void test_cls_redirect(void) test_cls_redirect_inlined(); if (test__start_subtest("cls_redirect_subprogs")) test_cls_redirect_subprogs(); + if (test__start_subtest("cls_redirect_dynptr")) + test_cls_redirect_dynptr(); } diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c index b99264ec0d9c..d176c34a7d2e 100644 --- a/tools/testing/selftests/bpf/prog_tests/dynptr.c +++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c @@ -2,20 +2,32 @@ /* Copyright (c) 2022 Facebook */ #include +#include #include "dynptr_fail.skel.h" #include "dynptr_success.skel.h" -static const char * const success_tests[] = { - "test_read_write", - "test_data_slice", - "test_ringbuf", +enum test_setup_type { + SETUP_SYSCALL_SLEEP, + SETUP_SKB_PROG, }; -static void verify_success(const char *prog_name) +static struct { + const char *prog_name; + enum test_setup_type type; +} success_tests[] = { + {"test_read_write", SETUP_SYSCALL_SLEEP}, + {"test_dynptr_data", SETUP_SYSCALL_SLEEP}, + {"test_ringbuf", SETUP_SYSCALL_SLEEP}, + {"test_skb_readonly", SETUP_SKB_PROG}, + 
{"test_dynptr_skb_data", SETUP_SKB_PROG}, +}; + +static void verify_success(const char *prog_name, enum test_setup_type setup_type) { struct dynptr_success *skel; struct bpf_program *prog; struct bpf_link *link; + int err; skel = dynptr_success__open(); if (!ASSERT_OK_PTR(skel, "dynptr_success__open")) @@ -23,23 +35,53 @@ static void verify_success(const char *prog_name) skel->bss->pid = getpid(); - dynptr_success__load(skel); - if (!ASSERT_OK_PTR(skel, "dynptr_success__load")) - goto cleanup; - prog = bpf_object__find_program_by_name(skel->obj, prog_name); if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) goto cleanup; - link = bpf_program__attach(prog); - if (!ASSERT_OK_PTR(link, "bpf_program__attach")) + bpf_program__set_autoload(prog, true); + + err = dynptr_success__load(skel); + if (!ASSERT_OK(err, "dynptr_success__load")) goto cleanup; - usleep(1); + switch (setup_type) { + case SETUP_SYSCALL_SLEEP: + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "bpf_program__attach")) + goto cleanup; - ASSERT_EQ(skel->bss->err, 0, "err"); + usleep(1); + + bpf_link__destroy(link); + break; + case SETUP_SKB_PROG: + { + int prog_fd; + char buf[64]; + + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = 1, + ); - bpf_link__destroy(link); + prog_fd = bpf_program__fd(prog); + if (!ASSERT_GE(prog_fd, 0, "prog_fd")) + goto cleanup; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + + if (!ASSERT_OK(err, "test_run")) + goto cleanup; + + break; + } + } + + ASSERT_EQ(skel->bss->err, 0, "err"); cleanup: dynptr_success__destroy(skel); @@ -50,10 +92,10 @@ void test_dynptr(void) int i; for (i = 0; i < ARRAY_SIZE(success_tests); i++) { - if (!test__start_subtest(success_tests[i])) + if (!test__start_subtest(success_tests[i].prog_name)) continue; - verify_success(success_tests[i]); + verify_success(success_tests[i].prog_name, success_tests[i].type); } RUN_TESTS(dynptr_fail); diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c index 9c1a18573ffd..1eab286b14fe 100644 --- a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c +++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c @@ -93,4 +93,6 @@ void test_l4lb_all(void) test_l4lb("test_l4lb.bpf.o"); if (test__start_subtest("l4lb_noinline")) test_l4lb("test_l4lb_noinline.bpf.o"); + if (test__start_subtest("l4lb_noinline_dynptr")) + test_l4lb("test_l4lb_noinline_dynptr.bpf.o"); } diff --git a/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c new file mode 100644 index 000000000000..daa952711d8f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "test_parse_tcp_hdr_opt.skel.h" +#include "test_parse_tcp_hdr_opt_dynptr.skel.h" +#include "test_tcp_hdr_options.h" + +struct test_pkt { + struct ipv6_packet pk6_v6; + u8 options[16]; +} __packed; + +struct test_pkt pkt = { + .pk6_v6.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .pk6_v6.iph.nexthdr = IPPROTO_TCP, + .pk6_v6.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .pk6_v6.tcp.urg_ptr = 123, + .pk6_v6.tcp.doff = 9, /* 16 bytes of options */ + + .options = { + TCPOPT_MSS, 4, 0x05, 0xB4, TCPOPT_NOP, TCPOPT_NOP, + 0, 6, 0xBB, 0xBB, 0xBB, 0xBB, TCPOPT_EOL + }, +}; + +static void test_parse_opt(void) +{ + struct test_parse_tcp_hdr_opt *skel; + 
struct bpf_program *prog; + char buf[128]; + int err; + + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt, + .data_size_in = sizeof(pkt), + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = 3, + ); + + skel = test_parse_tcp_hdr_opt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + pkt.options[6] = skel->rodata->tcp_hdr_opt_kind_tpr; + prog = skel->progs.xdp_ingress_v6; + + err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts); + ASSERT_OK(err, "ipv6 test_run"); + ASSERT_EQ(topts.retval, XDP_PASS, "ipv6 test_run retval"); + ASSERT_EQ(skel->bss->server_id, 0xBBBBBBBB, "server id"); + + test_parse_tcp_hdr_opt__destroy(skel); +} + +static void test_parse_opt_dynptr(void) +{ + struct test_parse_tcp_hdr_opt_dynptr *skel; + struct bpf_program *prog; + char buf[128]; + int err; + + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt, + .data_size_in = sizeof(pkt), + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = 3, + ); + + skel = test_parse_tcp_hdr_opt_dynptr__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + pkt.options[6] = skel->rodata->tcp_hdr_opt_kind_tpr; + prog = skel->progs.xdp_ingress_v6; + + err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts); + ASSERT_OK(err, "ipv6 test_run"); + ASSERT_EQ(topts.retval, XDP_PASS, "ipv6 test_run retval"); + ASSERT_EQ(skel->bss->server_id, 0xBBBBBBBB, "server id"); + + test_parse_tcp_hdr_opt_dynptr__destroy(skel); +} + +void test_parse_tcp_hdr_opt(void) +{ + if (test__start_subtest("parse_tcp_hdr_opt")) + test_parse_opt(); + if (test__start_subtest("parse_tcp_hdr_opt_dynptr")) + test_parse_opt_dynptr(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c index d4cd9f873c14..fa3cac5488f5 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c @@ -4,11 +4,10 @@ #define IFINDEX_LO 1 #define XDP_FLAGS_REPLACE (1U << 4) -void serial_test_xdp_attach(void) +static void test_xdp_attach(const char *file) { __u32 duration = 0, id1, id2, id0 = 0, len; struct bpf_object *obj1, *obj2, *obj3; - const char *file = "./test_xdp.bpf.o"; struct bpf_prog_info info = {}; int err, fd1, fd2, fd3; LIBBPF_OPTS(bpf_xdp_attach_opts, opts); @@ -85,3 +84,11 @@ out_2: out_1: bpf_object__close(obj1); } + +void serial_test_xdp_attach(void) +{ + if (test__start_subtest("xdp_attach")) + test_xdp_attach("./test_xdp.bpf.o"); + if (test__start_subtest("xdp_attach_dynptr")) + test_xdp_attach("./test_xdp_dynptr.bpf.o"); +} diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index aa5b69354b91..20ce920d891d 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -5,7 +5,9 @@ #include #include #include +#include #include "bpf_misc.h" +#include "bpf_kfuncs.h" char _license[] SEC("license") = "GPL"; @@ -244,6 +246,27 @@ done: return 0; } +/* A data slice can't be accessed out of bounds */ +SEC("?tc") +__failure __msg("value is outside of the allowed memory range") +int data_slice_out_of_bounds_skb(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + /* this should fail */ + *(__u8*)(hdr + 1) = 1; + + return SK_PASS; +} + SEC("?raw_tp") 
__failure __msg("value is outside of the allowed memory range") int data_slice_out_of_bounds_map_value(void *ctx) @@ -399,7 +422,6 @@ int invalid_helper2(void *ctx) /* this should fail */ bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 8, 0, 0); - return 0; } @@ -1044,6 +1066,193 @@ int dynptr_read_into_slot(void *ctx) return 0; } +/* bpf_dynptr_slice()s are read-only and cannot be written to */ +SEC("?tc") +__failure __msg("R0 cannot write into rdonly_mem") +int skb_invalid_slice_write(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + /* this should fail */ + hdr->h_proto = 1; + + return SK_PASS; +} + +/* The read-only data slice is invalidated whenever a helper changes packet data */ +SEC("?tc") +__failure __msg("invalid mem access 'scalar'") +int skb_invalid_data_slice1(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + val = hdr->h_proto; + + if (bpf_skb_pull_data(skb, skb->len)) + return SK_DROP; + + /* this should fail */ + val = hdr->h_proto; + + return SK_PASS; +} + +/* The read-write data slice is invalidated whenever a helper changes packet data */ +SEC("?tc") +__failure __msg("invalid mem access 'scalar'") +int skb_invalid_data_slice2(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + hdr->h_proto = 123; + + if (bpf_skb_pull_data(skb, skb->len)) + return SK_DROP; + + /* this should fail */ + hdr->h_proto = 1; + + return SK_PASS; +} + +/* The read-only data slice is invalidated whenever bpf_dynptr_write() is called */ +SEC("?tc") +__failure __msg("invalid mem access 'scalar'") +int skb_invalid_data_slice3(struct __sk_buff *skb) +{ + char write_data[64] = "hello there, world!!"; + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + hdr = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + val = hdr->h_proto; + + bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0); + + /* this should fail */ + val = hdr->h_proto; + + return SK_PASS; +} + +/* The read-write data slice is invalidated whenever bpf_dynptr_write() is called */ +SEC("?tc") +__failure __msg("invalid mem access 'scalar'") +int skb_invalid_data_slice4(struct __sk_buff *skb) +{ + char write_data[64] = "hello there, world!!"; + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_skb(skb, 0, &ptr); + hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + hdr->h_proto = 123; + + bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0); + + /* this should fail */ + hdr->h_proto = 1; + + return SK_PASS; +} + +/* The read-only data slice is invalidated whenever a helper changes packet data */ +SEC("?xdp") +__failure __msg("invalid mem access 'scalar'") +int xdp_invalid_data_slice1(struct xdp_md *xdp) +{ + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_xdp(xdp, 0, &ptr); + hdr = bpf_dynptr_slice(&ptr, 
0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + val = hdr->h_proto; + + if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr))) + return XDP_DROP; + + /* this should fail */ + val = hdr->h_proto; + + return XDP_PASS; +} + +/* The read-write data slice is invalidated whenever a helper changes packet data */ +SEC("?xdp") +__failure __msg("invalid mem access 'scalar'") +int xdp_invalid_data_slice2(struct xdp_md *xdp) +{ + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_xdp(xdp, 0, &ptr); + hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer)); + if (!hdr) + return SK_DROP; + + hdr->h_proto = 9; + + if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr))) + return XDP_DROP; + + /* this should fail */ + hdr->h_proto = 1; + + return XDP_PASS; +} + +/* Only supported prog type can create skb-type dynptrs */ +SEC("?raw_tp") +__failure __msg("calling kernel function bpf_dynptr_from_skb is not allowed") +int skb_invalid_ctx(void *ctx) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_from_skb(ctx, 0, &ptr); + + return 0; +} + /* Reject writes to dynptr slot for uninit arg */ SEC("?raw_tp") __failure __msg("potential write to dynptr at off=-16") @@ -1061,6 +1270,61 @@ int uninit_write_into_slot(void *ctx) return 0; } +/* Only supported prog type can create xdp-type dynptrs */ +SEC("?raw_tp") +__failure __msg("calling kernel function bpf_dynptr_from_xdp is not allowed") +int xdp_invalid_ctx(void *ctx) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_from_xdp(ctx, 0, &ptr); + + return 0; +} + +__u32 hdr_size = sizeof(struct ethhdr); +/* Can't pass in variable-sized len to bpf_dynptr_slice */ +SEC("?tc") +__failure __msg("unbounded memory access") +int dynptr_slice_var_len1(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + struct ethhdr *hdr; + char buffer[sizeof(*hdr)] = {}; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + /* this should fail */ + hdr = bpf_dynptr_slice(&ptr, 0, buffer, hdr_size); + if (!hdr) + return SK_DROP; + + return SK_PASS; +} + +/* Can't pass in variable-sized len to bpf_dynptr_slice */ +SEC("?tc") +__failure __msg("must be a known constant") +int dynptr_slice_var_len2(struct __sk_buff *skb) +{ + char buffer[sizeof(struct ethhdr)] = {}; + struct bpf_dynptr ptr; + struct ethhdr *hdr; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + if (hdr_size <= sizeof(buffer)) { + /* this should fail */ + hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, hdr_size); + if (!hdr) + return SK_DROP; + hdr->h_proto = 12; + } + + return SK_PASS; +} + static int callback(__u32 index, void *data) { *(__u32 *)data = 123; @@ -1092,3 +1356,24 @@ int invalid_data_slices(void *ctx) return 0; } + +/* Program types that don't allow writes to packet data should fail if + * bpf_dynptr_slice_rdwr is called + */ +SEC("cgroup_skb/ingress") +__failure __msg("the prog does not allow writes to packet data") +int invalid_slice_rdwr_rdonly(struct __sk_buff *skb) +{ + char buffer[sizeof(struct ethhdr)] = {}; + struct bpf_dynptr ptr; + struct ethhdr *hdr; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + /* this should fail since cgroup_skb doesn't allow + * changing packet data + */ + hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer)); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c index 35db7c6c1fc7..c8358a7c7924 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_success.c +++ b/tools/testing/selftests/bpf/progs/dynptr_success.c @@ -5,6 +5,7 
@@ #include #include #include "bpf_misc.h" +#include "bpf_kfuncs.h" #include "errno.h" char _license[] SEC("license") = "GPL"; @@ -30,7 +31,7 @@ struct { __type(value, __u32); } array_map SEC(".maps"); -SEC("tp/syscalls/sys_enter_nanosleep") +SEC("?tp/syscalls/sys_enter_nanosleep") int test_read_write(void *ctx) { char write_data[64] = "hello there, world!!"; @@ -61,8 +62,8 @@ int test_read_write(void *ctx) return 0; } -SEC("tp/syscalls/sys_enter_nanosleep") -int test_data_slice(void *ctx) +SEC("?tp/syscalls/sys_enter_nanosleep") +int test_dynptr_data(void *ctx) { __u32 key = 0, val = 235, *map_val; struct bpf_dynptr ptr; @@ -131,7 +132,7 @@ static int ringbuf_callback(__u32 index, void *data) return 0; } -SEC("tp/syscalls/sys_enter_nanosleep") +SEC("?tp/syscalls/sys_enter_nanosleep") int test_ringbuf(void *ctx) { struct bpf_dynptr ptr; @@ -163,3 +164,49 @@ done: bpf_ringbuf_discard_dynptr(&ptr, 0); return 0; } + +SEC("?cgroup_skb/egress") +int test_skb_readonly(struct __sk_buff *skb) +{ + __u8 write_data[2] = {1, 2}; + struct bpf_dynptr ptr; + __u64 *data; + int ret; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) { + err = 1; + return 1; + } + + /* since cgroup skbs are read only, writes should fail */ + ret = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0); + if (ret != -EINVAL) { + err = 2; + return 1; + } + + return 1; +} + +SEC("?cgroup_skb/egress") +int test_dynptr_skb_data(struct __sk_buff *skb) +{ + __u8 write_data[2] = {1, 2}; + struct bpf_dynptr ptr; + __u64 *data; + int ret; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) { + err = 1; + return 1; + } + + /* This should return NULL. Must use bpf_dynptr_slice API */ + data = bpf_dynptr_data(&ptr, 0, 1); + if (data) { + err = 2; + return 1; + } + + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c new file mode 100644 index 000000000000..f45a7095de7a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c @@ -0,0 +1,980 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +// Copyright (c) 2019, 2020 Cloudflare + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "test_cls_redirect.h" +#include "bpf_kfuncs.h" + +#define offsetofend(TYPE, MEMBER) \ + (offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER))) + +#define IP_OFFSET_MASK (0x1FFF) +#define IP_MF (0x2000) + +char _license[] SEC("license") = "Dual BSD/GPL"; + +/** + * Destination port and IP used for UDP encapsulation. 
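+ * These are declared zero here and filled in from userspace via the skeleton's .rodata before load (see test_cls_redirect_dynptr() in prog_tests/cls_redirect.c earlier in this patch).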
+ */ +volatile const __be16 ENCAPSULATION_PORT; +volatile const __be32 ENCAPSULATION_IP; + +typedef struct { + uint64_t processed_packets_total; + uint64_t l3_protocol_packets_total_ipv4; + uint64_t l3_protocol_packets_total_ipv6; + uint64_t l4_protocol_packets_total_tcp; + uint64_t l4_protocol_packets_total_udp; + uint64_t accepted_packets_total_syn; + uint64_t accepted_packets_total_syn_cookies; + uint64_t accepted_packets_total_last_hop; + uint64_t accepted_packets_total_icmp_echo_request; + uint64_t accepted_packets_total_established; + uint64_t forwarded_packets_total_gue; + uint64_t forwarded_packets_total_gre; + + uint64_t errors_total_unknown_l3_proto; + uint64_t errors_total_unknown_l4_proto; + uint64_t errors_total_malformed_ip; + uint64_t errors_total_fragmented_ip; + uint64_t errors_total_malformed_icmp; + uint64_t errors_total_unwanted_icmp; + uint64_t errors_total_malformed_icmp_pkt_too_big; + uint64_t errors_total_malformed_tcp; + uint64_t errors_total_malformed_udp; + uint64_t errors_total_icmp_echo_replies; + uint64_t errors_total_malformed_encapsulation; + uint64_t errors_total_encap_adjust_failed; + uint64_t errors_total_encap_buffer_too_small; + uint64_t errors_total_redirect_loop; + uint64_t errors_total_encap_mtu_violate; +} metrics_t; + +typedef enum { + INVALID = 0, + UNKNOWN, + ECHO_REQUEST, + SYN, + SYN_COOKIE, + ESTABLISHED, +} verdict_t; + +typedef struct { + uint16_t src, dst; +} flow_ports_t; + +_Static_assert( + sizeof(flow_ports_t) != + offsetofend(struct bpf_sock_tuple, ipv4.dport) - + offsetof(struct bpf_sock_tuple, ipv4.sport) - 1, + "flow_ports_t must match sport and dport in struct bpf_sock_tuple"); +_Static_assert( + sizeof(flow_ports_t) != + offsetofend(struct bpf_sock_tuple, ipv6.dport) - + offsetof(struct bpf_sock_tuple, ipv6.sport) - 1, + "flow_ports_t must match sport and dport in struct bpf_sock_tuple"); + +struct iphdr_info { + void *hdr; + __u64 len; +}; + +typedef int ret_t; + +/* This is a bit of a hack. We need a return value which allows us to + * indicate that the regular flow of the program should continue, + * while allowing functions to use XDP_PASS and XDP_DROP, etc. + */ +static const ret_t CONTINUE_PROCESSING = -1; + +/* Convenience macro to call functions which return ret_t. + */ +#define MAYBE_RETURN(x) \ + do { \ + ret_t __ret = x; \ + if (__ret != CONTINUE_PROCESSING) \ + return __ret; \ + } while (0) + +static bool ipv4_is_fragment(const struct iphdr *ip) +{ + uint16_t frag_off = ip->frag_off & bpf_htons(IP_OFFSET_MASK); + return (ip->frag_off & bpf_htons(IP_MF)) != 0 || frag_off > 0; +} + +static int pkt_parse_ipv4(struct bpf_dynptr *dynptr, __u64 *offset, struct iphdr *iphdr) +{ + if (bpf_dynptr_read(iphdr, sizeof(*iphdr), dynptr, *offset, 0)) + return -1; + + *offset += sizeof(*iphdr); + + if (iphdr->ihl < 5) + return -1; + + /* skip ipv4 options */ + *offset += (iphdr->ihl - 5) * 4; + + return 0; +} + +/* Parse the L4 ports from a packet, assuming a layout like TCP or UDP. */ +static bool pkt_parse_icmp_l4_ports(struct bpf_dynptr *dynptr, __u64 *offset, flow_ports_t *ports) +{ + if (bpf_dynptr_read(ports, sizeof(*ports), dynptr, *offset, 0)) + return false; + + *offset += sizeof(*ports); + + /* Ports in the L4 headers are reversed, since we are parsing an ICMP + * payload which is going towards the eyeball. 
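+ * For example (illustrative values), an ICMP error triggered by an outbound flow with sport=1234 and dport=443 embeds that original header; swapping the two fields here yields the inbound tuple used for the socket lookup.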
+ */ + uint16_t dst = ports->src; + ports->src = ports->dst; + ports->dst = dst; + return true; +} + +static uint16_t pkt_checksum_fold(uint32_t csum) +{ + /* The highest reasonable value for an IPv4 header + * checksum requires two folds, so we just do that always. + */ + csum = (csum & 0xffff) + (csum >> 16); + csum = (csum & 0xffff) + (csum >> 16); + return (uint16_t)~csum; +} + +static void pkt_ipv4_checksum(struct iphdr *iph) +{ + iph->check = 0; + + /* An IP header without options is 20 bytes. Two of those + * are the checksum, which we always set to zero. Hence, + * the maximum accumulated value is 18 / 2 * 0xffff = 0x8fff7, + * which fits in 32 bit. + */ + _Static_assert(sizeof(struct iphdr) == 20, "iphdr must be 20 bytes"); + uint32_t acc = 0; + uint16_t *ipw = (uint16_t *)iph; + + for (size_t i = 0; i < sizeof(struct iphdr) / 2; i++) + acc += ipw[i]; + + iph->check = pkt_checksum_fold(acc); +} + +static bool pkt_skip_ipv6_extension_headers(struct bpf_dynptr *dynptr, __u64 *offset, + const struct ipv6hdr *ipv6, uint8_t *upper_proto, + bool *is_fragment) +{ + /* We understand five extension headers. + * https://tools.ietf.org/html/rfc8200#section-4.1 states that all + * headers should occur once, except Destination Options, which may + * occur twice. Hence we give up after 6 headers. + */ + struct { + uint8_t next; + uint8_t len; + } exthdr = { + .next = ipv6->nexthdr, + }; + *is_fragment = false; + + for (int i = 0; i < 6; i++) { + switch (exthdr.next) { + case IPPROTO_FRAGMENT: + *is_fragment = true; + /* NB: We don't check that hdrlen == 0 as per spec. */ + /* fallthrough; */ + + case IPPROTO_HOPOPTS: + case IPPROTO_ROUTING: + case IPPROTO_DSTOPTS: + case IPPROTO_MH: + if (bpf_dynptr_read(&exthdr, sizeof(exthdr), dynptr, *offset, 0)) + return false; + + /* hdrlen is in 8-octet units, and excludes the first 8 octets. */ + *offset += (exthdr.len + 1) * 8; + + /* Decode next header */ + break; + + default: + /* The next header is not one of the known extension + * headers, treat it as the upper layer header. + * + * This handles IPPROTO_NONE. + * + * Encapsulating Security Payload (50) and Authentication + * Header (51) also end up here (and will trigger an + * unknown proto error later). They have a custom header + * format and seem too esoteric to care about. + */ + *upper_proto = exthdr.next; + return true; + } + } + + /* We never found an upper layer header. 
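+ * (For instance, a packet chaining more than six Hop-by-Hop/Routing/Destination Options headers lands here; the caller then treats it as malformed.)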
*/ + return false; +} + +static int pkt_parse_ipv6(struct bpf_dynptr *dynptr, __u64 *offset, struct ipv6hdr *ipv6, + uint8_t *proto, bool *is_fragment) +{ + if (bpf_dynptr_read(ipv6, sizeof(*ipv6), dynptr, *offset, 0)) + return -1; + + *offset += sizeof(*ipv6); + + if (!pkt_skip_ipv6_extension_headers(dynptr, offset, ipv6, proto, is_fragment)) + return -1; + + return 0; +} + +/* Global metrics, per CPU + */ +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 1); + __type(key, unsigned int); + __type(value, metrics_t); +} metrics_map SEC(".maps"); + +static metrics_t *get_global_metrics(void) +{ + uint64_t key = 0; + return bpf_map_lookup_elem(&metrics_map, &key); +} + +static ret_t accept_locally(struct __sk_buff *skb, encap_headers_t *encap) +{ + const int payload_off = + sizeof(*encap) + + sizeof(struct in_addr) * encap->unigue.hop_count; + int32_t encap_overhead = payload_off - sizeof(struct ethhdr); + + /* Changing the ethertype if the encapsulated packet is ipv6 */ + if (encap->gue.proto_ctype == IPPROTO_IPV6) + encap->eth.h_proto = bpf_htons(ETH_P_IPV6); + + if (bpf_skb_adjust_room(skb, -encap_overhead, BPF_ADJ_ROOM_MAC, + BPF_F_ADJ_ROOM_FIXED_GSO | + BPF_F_ADJ_ROOM_NO_CSUM_RESET) || + bpf_csum_level(skb, BPF_CSUM_LEVEL_DEC)) + return TC_ACT_SHOT; + + return bpf_redirect(skb->ifindex, BPF_F_INGRESS); +} + +static ret_t forward_with_gre(struct __sk_buff *skb, struct bpf_dynptr *dynptr, + encap_headers_t *encap, struct in_addr *next_hop, + metrics_t *metrics) +{ + const int payload_off = + sizeof(*encap) + + sizeof(struct in_addr) * encap->unigue.hop_count; + int32_t encap_overhead = + payload_off - sizeof(struct ethhdr) - sizeof(struct iphdr); + int32_t delta = sizeof(struct gre_base_hdr) - encap_overhead; + __u8 encap_buffer[sizeof(encap_gre_t)] = {}; + uint16_t proto = ETH_P_IP; + uint32_t mtu_len = 0; + encap_gre_t *encap_gre; + + metrics->forwarded_packets_total_gre++; + + /* Loop protection: the inner packet's TTL is decremented as a safeguard + * against any forwarding loop. As the only interesting field is the TTL + * hop limit for IPv6, it is easier to use bpf_skb_load_bytes/bpf_skb_store_bytes + * as they handle the split packets if needed (no need for the data to be + * in the linear section). + */ + if (encap->gue.proto_ctype == IPPROTO_IPV6) { + proto = ETH_P_IPV6; + uint8_t ttl; + int rc; + + rc = bpf_skb_load_bytes( + skb, payload_off + offsetof(struct ipv6hdr, hop_limit), + &ttl, 1); + if (rc != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + if (ttl == 0) { + metrics->errors_total_redirect_loop++; + return TC_ACT_SHOT; + } + + ttl--; + rc = bpf_skb_store_bytes( + skb, payload_off + offsetof(struct ipv6hdr, hop_limit), + &ttl, 1, 0); + if (rc != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + } else { + uint8_t ttl; + int rc; + + rc = bpf_skb_load_bytes( + skb, payload_off + offsetof(struct iphdr, ttl), &ttl, + 1); + if (rc != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + if (ttl == 0) { + metrics->errors_total_redirect_loop++; + return TC_ACT_SHOT; + } + + /* IPv4 also has a checksum to patch. While the TTL is only one byte, + * this function only works for 2 and 4 bytes arguments (the result is + * the same). 
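+ * (bpf_l3_csum_replace() only re-folds the IPv4 header checksum at the given offset for the old->new value change; the TTL byte itself is rewritten separately with bpf_skb_store_bytes() below.)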
+ */ + rc = bpf_l3_csum_replace( + skb, payload_off + offsetof(struct iphdr, check), ttl, + ttl - 1, 2); + if (rc != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + ttl--; + rc = bpf_skb_store_bytes( + skb, payload_off + offsetof(struct iphdr, ttl), &ttl, 1, + 0); + if (rc != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + } + + if (bpf_check_mtu(skb, skb->ifindex, &mtu_len, delta, 0)) { + metrics->errors_total_encap_mtu_violate++; + return TC_ACT_SHOT; + } + + if (bpf_skb_adjust_room(skb, delta, BPF_ADJ_ROOM_NET, + BPF_F_ADJ_ROOM_FIXED_GSO | + BPF_F_ADJ_ROOM_NO_CSUM_RESET) || + bpf_csum_level(skb, BPF_CSUM_LEVEL_INC)) { + metrics->errors_total_encap_adjust_failed++; + return TC_ACT_SHOT; + } + + if (bpf_skb_pull_data(skb, sizeof(encap_gre_t))) { + metrics->errors_total_encap_buffer_too_small++; + return TC_ACT_SHOT; + } + + encap_gre = bpf_dynptr_slice_rdwr(dynptr, 0, encap_buffer, sizeof(encap_buffer)); + if (!encap_gre) { + metrics->errors_total_encap_buffer_too_small++; + return TC_ACT_SHOT; + } + + encap_gre->ip.protocol = IPPROTO_GRE; + encap_gre->ip.daddr = next_hop->s_addr; + encap_gre->ip.saddr = ENCAPSULATION_IP; + encap_gre->ip.tot_len = + bpf_htons(bpf_ntohs(encap_gre->ip.tot_len) + delta); + encap_gre->gre.flags = 0; + encap_gre->gre.protocol = bpf_htons(proto); + pkt_ipv4_checksum((void *)&encap_gre->ip); + + if (encap_gre == encap_buffer) + bpf_dynptr_write(dynptr, 0, encap_buffer, sizeof(encap_buffer), 0); + + return bpf_redirect(skb->ifindex, 0); +} + +static ret_t forward_to_next_hop(struct __sk_buff *skb, struct bpf_dynptr *dynptr, + encap_headers_t *encap, struct in_addr *next_hop, + metrics_t *metrics) +{ + /* swap L2 addresses */ + /* This assumes that packets are received from a router. + * So just swapping the MAC addresses here will make the packet go back to + * the router, which will send it to the appropriate machine. + */ + unsigned char temp[ETH_ALEN]; + memcpy(temp, encap->eth.h_dest, sizeof(temp)); + memcpy(encap->eth.h_dest, encap->eth.h_source, + sizeof(encap->eth.h_dest)); + memcpy(encap->eth.h_source, temp, sizeof(encap->eth.h_source)); + + if (encap->unigue.next_hop == encap->unigue.hop_count - 1 && + encap->unigue.last_hop_gre) { + return forward_with_gre(skb, dynptr, encap, next_hop, metrics); + } + + metrics->forwarded_packets_total_gue++; + uint32_t old_saddr = encap->ip.saddr; + encap->ip.saddr = encap->ip.daddr; + encap->ip.daddr = next_hop->s_addr; + if (encap->unigue.next_hop < encap->unigue.hop_count) { + encap->unigue.next_hop++; + } + + /* Remove ip->saddr, add next_hop->s_addr */ + const uint64_t off = offsetof(typeof(*encap), ip.check); + int ret = bpf_l3_csum_replace(skb, off, old_saddr, next_hop->s_addr, 4); + if (ret < 0) { + return TC_ACT_SHOT; + } + + return bpf_redirect(skb->ifindex, 0); +} + +static ret_t skip_next_hops(__u64 *offset, int n) +{ + __u32 res; + switch (n) { + case 1: + *offset += sizeof(struct in_addr); + case 0: + return CONTINUE_PROCESSING; + + default: + return TC_ACT_SHOT; + } +} + +/* Get the next hop from the GLB header. + * + * Sets next_hop->s_addr to 0 if there are no more hops left. + * pkt is positioned just after the variable length GLB header + * iff the call is successful. + */ +static ret_t get_next_hop(struct bpf_dynptr *dynptr, __u64 *offset, encap_headers_t *encap, + struct in_addr *next_hop) +{ + if (encap->unigue.next_hop > encap->unigue.hop_count) + return TC_ACT_SHOT; + + /* Skip "used" next hops. 
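+ * (With next_hop == 1 the offset advances past one struct in_addr so the read below lands on the first unused hop; anything above 1 is rejected by skip_next_hops() with TC_ACT_SHOT.)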
*/ + MAYBE_RETURN(skip_next_hops(offset, encap->unigue.next_hop)); + + if (encap->unigue.next_hop == encap->unigue.hop_count) { + /* No more next hops, we are at the end of the GLB header. */ + next_hop->s_addr = 0; + return CONTINUE_PROCESSING; + } + + if (bpf_dynptr_read(next_hop, sizeof(*next_hop), dynptr, *offset, 0)) + return TC_ACT_SHOT; + + *offset += sizeof(*next_hop); + + /* Skip the remainig next hops (may be zero). */ + return skip_next_hops(offset, encap->unigue.hop_count - encap->unigue.next_hop - 1); +} + +/* Fill a bpf_sock_tuple to be used with the socket lookup functions. + * This is a kludge that let's us work around verifier limitations: + * + * fill_tuple(&t, foo, sizeof(struct iphdr), 123, 321) + * + * clang will substitue a costant for sizeof, which allows the verifier + * to track it's value. Based on this, it can figure out the constant + * return value, and calling code works while still being "generic" to + * IPv4 and IPv6. + */ +static uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph, + uint64_t iphlen, uint16_t sport, uint16_t dport) +{ + switch (iphlen) { + case sizeof(struct iphdr): { + struct iphdr *ipv4 = (struct iphdr *)iph; + tuple->ipv4.daddr = ipv4->daddr; + tuple->ipv4.saddr = ipv4->saddr; + tuple->ipv4.sport = sport; + tuple->ipv4.dport = dport; + return sizeof(tuple->ipv4); + } + + case sizeof(struct ipv6hdr): { + struct ipv6hdr *ipv6 = (struct ipv6hdr *)iph; + memcpy(&tuple->ipv6.daddr, &ipv6->daddr, + sizeof(tuple->ipv6.daddr)); + memcpy(&tuple->ipv6.saddr, &ipv6->saddr, + sizeof(tuple->ipv6.saddr)); + tuple->ipv6.sport = sport; + tuple->ipv6.dport = dport; + return sizeof(tuple->ipv6); + } + + default: + return 0; + } +} + +static verdict_t classify_tcp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, + uint64_t tuplen, void *iph, struct tcphdr *tcp) +{ + struct bpf_sock *sk = + bpf_skc_lookup_tcp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0); + + if (sk == NULL) + return UNKNOWN; + + if (sk->state != BPF_TCP_LISTEN) { + bpf_sk_release(sk); + return ESTABLISHED; + } + + if (iph != NULL && tcp != NULL) { + /* Kludge: we've run out of arguments, but need the length of the ip header. 
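+ * Here tuplen doubles as that hint: sizeof(tuple->ipv6) implies an IPv6 header, anything else is assumed to be a 20-byte IPv4 header without options.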
*/ + uint64_t iphlen = sizeof(struct iphdr); + + if (tuplen == sizeof(tuple->ipv6)) + iphlen = sizeof(struct ipv6hdr); + + if (bpf_tcp_check_syncookie(sk, iph, iphlen, tcp, + sizeof(*tcp)) == 0) { + bpf_sk_release(sk); + return SYN_COOKIE; + } + } + + bpf_sk_release(sk); + return UNKNOWN; +} + +static verdict_t classify_udp(struct __sk_buff *skb, struct bpf_sock_tuple *tuple, uint64_t tuplen) +{ + struct bpf_sock *sk = + bpf_sk_lookup_udp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0); + + if (sk == NULL) + return UNKNOWN; + + if (sk->state == BPF_TCP_ESTABLISHED) { + bpf_sk_release(sk); + return ESTABLISHED; + } + + bpf_sk_release(sk); + return UNKNOWN; +} + +static verdict_t classify_icmp(struct __sk_buff *skb, uint8_t proto, struct bpf_sock_tuple *tuple, + uint64_t tuplen, metrics_t *metrics) +{ + switch (proto) { + case IPPROTO_TCP: + return classify_tcp(skb, tuple, tuplen, NULL, NULL); + + case IPPROTO_UDP: + return classify_udp(skb, tuple, tuplen); + + default: + metrics->errors_total_malformed_icmp++; + return INVALID; + } +} + +static verdict_t process_icmpv4(struct __sk_buff *skb, struct bpf_dynptr *dynptr, __u64 *offset, + metrics_t *metrics) +{ + struct icmphdr icmp; + struct iphdr ipv4; + + if (bpf_dynptr_read(&icmp, sizeof(icmp), dynptr, *offset, 0)) { + metrics->errors_total_malformed_icmp++; + return INVALID; + } + + *offset += sizeof(icmp); + + /* We should never receive encapsulated echo replies. */ + if (icmp.type == ICMP_ECHOREPLY) { + metrics->errors_total_icmp_echo_replies++; + return INVALID; + } + + if (icmp.type == ICMP_ECHO) + return ECHO_REQUEST; + + if (icmp.type != ICMP_DEST_UNREACH || icmp.code != ICMP_FRAG_NEEDED) { + metrics->errors_total_unwanted_icmp++; + return INVALID; + } + + if (pkt_parse_ipv4(dynptr, offset, &ipv4)) { + metrics->errors_total_malformed_icmp_pkt_too_big++; + return INVALID; + } + + /* The source address in the outer IP header is from the entity that + * originated the ICMP message. Use the original IP header to restore + * the correct flow tuple. + */ + struct bpf_sock_tuple tuple; + tuple.ipv4.saddr = ipv4.daddr; + tuple.ipv4.daddr = ipv4.saddr; + + if (!pkt_parse_icmp_l4_ports(dynptr, offset, (flow_ports_t *)&tuple.ipv4.sport)) { + metrics->errors_total_malformed_icmp_pkt_too_big++; + return INVALID; + } + + return classify_icmp(skb, ipv4.protocol, &tuple, + sizeof(tuple.ipv4), metrics); +} + +static verdict_t process_icmpv6(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb, + metrics_t *metrics) +{ + struct bpf_sock_tuple tuple; + struct ipv6hdr ipv6; + struct icmp6hdr icmp6; + bool is_fragment; + uint8_t l4_proto; + + if (bpf_dynptr_read(&icmp6, sizeof(icmp6), dynptr, *offset, 0)) { + metrics->errors_total_malformed_icmp++; + return INVALID; + } + + /* We should never receive encapsulated echo replies. */ + if (icmp6.icmp6_type == ICMPV6_ECHO_REPLY) { + metrics->errors_total_icmp_echo_replies++; + return INVALID; + } + + if (icmp6.icmp6_type == ICMPV6_ECHO_REQUEST) { + return ECHO_REQUEST; + } + + if (icmp6.icmp6_type != ICMPV6_PKT_TOOBIG) { + metrics->errors_total_unwanted_icmp++; + return INVALID; + } + + if (pkt_parse_ipv6(dynptr, offset, &ipv6, &l4_proto, &is_fragment)) { + metrics->errors_total_malformed_icmp_pkt_too_big++; + return INVALID; + } + + if (is_fragment) { + metrics->errors_total_fragmented_ip++; + return INVALID; + } + + /* Swap source and dest addresses. 
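+ * As in the IPv4 path, the embedded header describes the packet that triggered the error, so its daddr/saddr become the saddr/daddr of the tuple being looked up.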
*/ + memcpy(&tuple.ipv6.saddr, &ipv6.daddr, sizeof(tuple.ipv6.saddr)); + memcpy(&tuple.ipv6.daddr, &ipv6.saddr, sizeof(tuple.ipv6.daddr)); + + if (!pkt_parse_icmp_l4_ports(dynptr, offset, (flow_ports_t *)&tuple.ipv6.sport)) { + metrics->errors_total_malformed_icmp_pkt_too_big++; + return INVALID; + } + + return classify_icmp(skb, l4_proto, &tuple, sizeof(tuple.ipv6), + metrics); +} + +static verdict_t process_tcp(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb, + struct iphdr_info *info, metrics_t *metrics) +{ + struct bpf_sock_tuple tuple; + struct tcphdr tcp; + uint64_t tuplen; + + metrics->l4_protocol_packets_total_tcp++; + + if (bpf_dynptr_read(&tcp, sizeof(tcp), dynptr, *offset, 0)) { + metrics->errors_total_malformed_tcp++; + return INVALID; + } + + *offset += sizeof(tcp); + + if (tcp.syn) + return SYN; + + tuplen = fill_tuple(&tuple, info->hdr, info->len, tcp.source, tcp.dest); + return classify_tcp(skb, &tuple, tuplen, info->hdr, &tcp); +} + +static verdict_t process_udp(struct bpf_dynptr *dynptr, __u64 *offset, struct __sk_buff *skb, + struct iphdr_info *info, metrics_t *metrics) +{ + struct bpf_sock_tuple tuple; + struct udphdr udph; + uint64_t tuplen; + + metrics->l4_protocol_packets_total_udp++; + + if (bpf_dynptr_read(&udph, sizeof(udph), dynptr, *offset, 0)) { + metrics->errors_total_malformed_udp++; + return INVALID; + } + *offset += sizeof(udph); + + tuplen = fill_tuple(&tuple, info->hdr, info->len, udph.source, udph.dest); + return classify_udp(skb, &tuple, tuplen); +} + +static verdict_t process_ipv4(struct __sk_buff *skb, struct bpf_dynptr *dynptr, + __u64 *offset, metrics_t *metrics) +{ + struct iphdr ipv4; + struct iphdr_info info = { + .hdr = &ipv4, + .len = sizeof(ipv4), + }; + + metrics->l3_protocol_packets_total_ipv4++; + + if (pkt_parse_ipv4(dynptr, offset, &ipv4)) { + metrics->errors_total_malformed_ip++; + return INVALID; + } + + if (ipv4.version != 4) { + metrics->errors_total_malformed_ip++; + return INVALID; + } + + if (ipv4_is_fragment(&ipv4)) { + metrics->errors_total_fragmented_ip++; + return INVALID; + } + + switch (ipv4.protocol) { + case IPPROTO_ICMP: + return process_icmpv4(skb, dynptr, offset, metrics); + + case IPPROTO_TCP: + return process_tcp(dynptr, offset, skb, &info, metrics); + + case IPPROTO_UDP: + return process_udp(dynptr, offset, skb, &info, metrics); + + default: + metrics->errors_total_unknown_l4_proto++; + return INVALID; + } +} + +static verdict_t process_ipv6(struct __sk_buff *skb, struct bpf_dynptr *dynptr, + __u64 *offset, metrics_t *metrics) +{ + struct ipv6hdr ipv6; + struct iphdr_info info = { + .hdr = &ipv6, + .len = sizeof(ipv6), + }; + uint8_t l4_proto; + bool is_fragment; + + metrics->l3_protocol_packets_total_ipv6++; + + if (pkt_parse_ipv6(dynptr, offset, &ipv6, &l4_proto, &is_fragment)) { + metrics->errors_total_malformed_ip++; + return INVALID; + } + + if (ipv6.version != 6) { + metrics->errors_total_malformed_ip++; + return INVALID; + } + + if (is_fragment) { + metrics->errors_total_fragmented_ip++; + return INVALID; + } + + switch (l4_proto) { + case IPPROTO_ICMPV6: + return process_icmpv6(dynptr, offset, skb, metrics); + + case IPPROTO_TCP: + return process_tcp(dynptr, offset, skb, &info, metrics); + + case IPPROTO_UDP: + return process_udp(dynptr, offset, skb, &info, metrics); + + default: + metrics->errors_total_unknown_l4_proto++; + return INVALID; + } +} + +SEC("tc") +int cls_redirect(struct __sk_buff *skb) +{ + __u8 encap_buffer[sizeof(encap_headers_t)] = {}; + struct bpf_dynptr dynptr; + struct 
in_addr next_hop; + /* Tracks offset of the dynptr. This will be unnecessary once + * bpf_dynptr_advance() is available. + */ + __u64 off = 0; + ret_t ret; + + bpf_dynptr_from_skb(skb, 0, &dynptr); + + metrics_t *metrics = get_global_metrics(); + if (metrics == NULL) + return TC_ACT_SHOT; + + metrics->processed_packets_total++; + + /* Pass bogus packets as long as we're not sure they're + * destined for us. + */ + if (skb->protocol != bpf_htons(ETH_P_IP)) + return TC_ACT_OK; + + encap_headers_t *encap; + + /* Make sure that all encapsulation headers are available in + * the linear portion of the skb. This makes it easy to manipulate them. + */ + if (bpf_skb_pull_data(skb, sizeof(*encap))) + return TC_ACT_OK; + + encap = bpf_dynptr_slice_rdwr(&dynptr, 0, encap_buffer, sizeof(encap_buffer)); + if (!encap) + return TC_ACT_OK; + + off += sizeof(*encap); + + if (encap->ip.ihl != 5) + /* We never have any options. */ + return TC_ACT_OK; + + if (encap->ip.daddr != ENCAPSULATION_IP || + encap->ip.protocol != IPPROTO_UDP) + return TC_ACT_OK; + + /* TODO Check UDP length? */ + if (encap->udp.dest != ENCAPSULATION_PORT) + return TC_ACT_OK; + + /* We now know that the packet is destined to us, we can + * drop bogus ones. + */ + if (ipv4_is_fragment((void *)&encap->ip)) { + metrics->errors_total_fragmented_ip++; + return TC_ACT_SHOT; + } + + if (encap->gue.variant != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + if (encap->gue.control != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + if (encap->gue.flags != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + if (encap->gue.hlen != + sizeof(encap->unigue) / 4 + encap->unigue.hop_count) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + if (encap->unigue.version != 0) { + metrics->errors_total_malformed_encapsulation++; + return TC_ACT_SHOT; + } + + if (encap->unigue.reserved != 0) + return TC_ACT_SHOT; + + MAYBE_RETURN(get_next_hop(&dynptr, &off, encap, &next_hop)); + + if (next_hop.s_addr == 0) { + metrics->accepted_packets_total_last_hop++; + return accept_locally(skb, encap); + } + + verdict_t verdict; + switch (encap->gue.proto_ctype) { + case IPPROTO_IPIP: + verdict = process_ipv4(skb, &dynptr, &off, metrics); + break; + + case IPPROTO_IPV6: + verdict = process_ipv6(skb, &dynptr, &off, metrics); + break; + + default: + metrics->errors_total_unknown_l3_proto++; + return TC_ACT_SHOT; + } + + switch (verdict) { + case INVALID: + /* metrics have already been bumped */ + return TC_ACT_SHOT; + + case UNKNOWN: + return forward_to_next_hop(skb, &dynptr, encap, &next_hop, metrics); + + case ECHO_REQUEST: + metrics->accepted_packets_total_icmp_echo_request++; + break; + + case SYN: + if (encap->unigue.forward_syn) { + return forward_to_next_hop(skb, &dynptr, encap, &next_hop, + metrics); + } + + metrics->accepted_packets_total_syn++; + break; + + case SYN_COOKIE: + metrics->accepted_packets_total_syn_cookies++; + break; + + case ESTABLISHED: + metrics->accepted_packets_total_established++; + break; + } + + ret = accept_locally(skb, encap); + + if (encap == encap_buffer) + bpf_dynptr_write(&dynptr, 0, encap_buffer, sizeof(encap_buffer), 0); + + return ret; +} diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c new file mode 100644 index 000000000000..f997f5080748 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c @@ -0,0 +1,487 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017 Facebook +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "test_iptunnel_common.h" +#include + +#include "bpf_kfuncs.h" + +static __always_inline __u32 rol32(__u32 word, unsigned int shift) +{ + return (word << shift) | (word >> ((-shift) & 31)); +} + +/* copy paste of jhash from kernel sources to make sure llvm + * can compile it into valid sequence of bpf instructions + */ +#define __jhash_mix(a, b, c) \ +{ \ + a -= c; a ^= rol32(c, 4); c += b; \ + b -= a; b ^= rol32(a, 6); a += c; \ + c -= b; c ^= rol32(b, 8); b += a; \ + a -= c; a ^= rol32(c, 16); c += b; \ + b -= a; b ^= rol32(a, 19); a += c; \ + c -= b; c ^= rol32(b, 4); b += a; \ +} + +#define __jhash_final(a, b, c) \ +{ \ + c ^= b; c -= rol32(b, 14); \ + a ^= c; a -= rol32(c, 11); \ + b ^= a; b -= rol32(a, 25); \ + c ^= b; c -= rol32(b, 16); \ + a ^= c; a -= rol32(c, 4); \ + b ^= a; b -= rol32(a, 14); \ + c ^= b; c -= rol32(b, 24); \ +} + +#define JHASH_INITVAL 0xdeadbeef + +typedef unsigned int u32; + +static __noinline u32 jhash(const void *key, u32 length, u32 initval) +{ + u32 a, b, c; + const unsigned char *k = key; + + a = b = c = JHASH_INITVAL + length + initval; + + while (length > 12) { + a += *(u32 *)(k); + b += *(u32 *)(k + 4); + c += *(u32 *)(k + 8); + __jhash_mix(a, b, c); + length -= 12; + k += 12; + } + switch (length) { + case 12: c += (u32)k[11]<<24; + case 11: c += (u32)k[10]<<16; + case 10: c += (u32)k[9]<<8; + case 9: c += k[8]; + case 8: b += (u32)k[7]<<24; + case 7: b += (u32)k[6]<<16; + case 6: b += (u32)k[5]<<8; + case 5: b += k[4]; + case 4: a += (u32)k[3]<<24; + case 3: a += (u32)k[2]<<16; + case 2: a += (u32)k[1]<<8; + case 1: a += k[0]; + __jhash_final(a, b, c); + case 0: /* Nothing left to add */ + break; + } + + return c; +} + +static __noinline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) +{ + a += initval; + b += initval; + c += initval; + __jhash_final(a, b, c); + return c; +} + +static __noinline u32 jhash_2words(u32 a, u32 b, u32 initval) +{ + return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2)); +} + +#define PCKT_FRAGMENTED 65343 +#define IPV4_HDR_LEN_NO_OPT 20 +#define IPV4_PLUS_ICMP_HDR 28 +#define IPV6_PLUS_ICMP_HDR 48 +#define RING_SIZE 2 +#define MAX_VIPS 12 +#define MAX_REALS 5 +#define CTL_MAP_SIZE 16 +#define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE) +#define F_IPV6 (1 << 0) +#define F_HASH_NO_SRC_PORT (1 << 0) +#define F_ICMP (1 << 0) +#define F_SYN_SET (1 << 1) + +struct packet_description { + union { + __be32 src; + __be32 srcv6[4]; + }; + union { + __be32 dst; + __be32 dstv6[4]; + }; + union { + __u32 ports; + __u16 port16[2]; + }; + __u8 proto; + __u8 flags; +}; + +struct ctl_value { + union { + __u64 value; + __u32 ifindex; + __u8 mac[6]; + }; +}; + +struct vip_meta { + __u32 flags; + __u32 vip_num; +}; + +struct real_definition { + union { + __be32 dst; + __be32 dstv6[4]; + }; + __u8 flags; +}; + +struct vip_stats { + __u64 bytes; + __u64 pkts; +}; + +struct eth_hdr { + unsigned char eth_dest[ETH_ALEN]; + unsigned char eth_source[ETH_ALEN]; + unsigned short eth_proto; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, MAX_VIPS); + __type(key, struct vip); + __type(value, struct vip_meta); +} vip_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, CH_RINGS_SIZE); + 
__type(key, __u32); + __type(value, __u32); +} ch_rings SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, MAX_REALS); + __type(key, __u32); + __type(value, struct real_definition); +} reals SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, MAX_VIPS); + __type(key, __u32); + __type(value, struct vip_stats); +} stats SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, CTL_MAP_SIZE); + __type(key, __u32); + __type(value, struct ctl_value); +} ctl_array SEC(".maps"); + +static __noinline __u32 get_packet_hash(struct packet_description *pckt, bool ipv6) +{ + if (ipv6) + return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS), + pckt->ports, CH_RINGS_SIZE); + else + return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE); +} + +static __noinline bool get_packet_dst(struct real_definition **real, + struct packet_description *pckt, + struct vip_meta *vip_info, + bool is_ipv6) +{ + __u32 hash = get_packet_hash(pckt, is_ipv6); + __u32 key = RING_SIZE * vip_info->vip_num + hash % RING_SIZE; + __u32 *real_pos; + + if (hash != 0x358459b7 /* jhash of ipv4 packet */ && + hash != 0x2f4bc6bb /* jhash of ipv6 packet */) + return false; + + real_pos = bpf_map_lookup_elem(&ch_rings, &key); + if (!real_pos) + return false; + key = *real_pos; + *real = bpf_map_lookup_elem(&reals, &key); + if (!(*real)) + return false; + return true; +} + +static __noinline int parse_icmpv6(struct bpf_dynptr *skb_ptr, __u64 off, + struct packet_description *pckt) +{ + __u8 buffer[sizeof(struct ipv6hdr)] = {}; + struct icmp6hdr *icmp_hdr; + struct ipv6hdr *ip6h; + + icmp_hdr = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer)); + if (!icmp_hdr) + return TC_ACT_SHOT; + + if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG) + return TC_ACT_OK; + off += sizeof(struct icmp6hdr); + ip6h = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer)); + if (!ip6h) + return TC_ACT_SHOT; + pckt->proto = ip6h->nexthdr; + pckt->flags |= F_ICMP; + memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16); + memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16); + return TC_ACT_UNSPEC; +} + +static __noinline int parse_icmp(struct bpf_dynptr *skb_ptr, __u64 off, + struct packet_description *pckt) +{ + __u8 buffer_icmp[sizeof(struct iphdr)] = {}; + __u8 buffer_ip[sizeof(struct iphdr)] = {}; + struct icmphdr *icmp_hdr; + struct iphdr *iph; + + icmp_hdr = bpf_dynptr_slice(skb_ptr, off, buffer_icmp, sizeof(buffer_icmp)); + if (!icmp_hdr) + return TC_ACT_SHOT; + if (icmp_hdr->type != ICMP_DEST_UNREACH || + icmp_hdr->code != ICMP_FRAG_NEEDED) + return TC_ACT_OK; + off += sizeof(struct icmphdr); + iph = bpf_dynptr_slice(skb_ptr, off, buffer_ip, sizeof(buffer_ip)); + if (!iph || iph->ihl != 5) + return TC_ACT_SHOT; + pckt->proto = iph->protocol; + pckt->flags |= F_ICMP; + pckt->src = iph->daddr; + pckt->dst = iph->saddr; + return TC_ACT_UNSPEC; +} + +static __noinline bool parse_udp(struct bpf_dynptr *skb_ptr, __u64 off, + struct packet_description *pckt) +{ + __u8 buffer[sizeof(struct udphdr)] = {}; + struct udphdr *udp; + + udp = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer)); + if (!udp) + return false; + + if (!(pckt->flags & F_ICMP)) { + pckt->port16[0] = udp->source; + pckt->port16[1] = udp->dest; + } else { + pckt->port16[0] = udp->dest; + pckt->port16[1] = udp->source; + } + return true; +} + +static __noinline bool parse_tcp(struct bpf_dynptr *skb_ptr, __u64 off, + struct packet_description *pckt) +{ + __u8 buffer[sizeof(struct tcphdr)] = {}; + struct 
tcphdr *tcp; + + tcp = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer)); + if (!tcp) + return false; + + if (tcp->syn) + pckt->flags |= F_SYN_SET; + + if (!(pckt->flags & F_ICMP)) { + pckt->port16[0] = tcp->source; + pckt->port16[1] = tcp->dest; + } else { + pckt->port16[0] = tcp->dest; + pckt->port16[1] = tcp->source; + } + return true; +} + +static __noinline int process_packet(struct bpf_dynptr *skb_ptr, + struct eth_hdr *eth, __u64 off, + bool is_ipv6, struct __sk_buff *skb) +{ + struct packet_description pckt = {}; + struct bpf_tunnel_key tkey = {}; + struct vip_stats *data_stats; + struct real_definition *dst; + struct vip_meta *vip_info; + struct ctl_value *cval; + __u32 v4_intf_pos = 1; + __u32 v6_intf_pos = 2; + struct ipv6hdr *ip6h; + struct vip vip = {}; + struct iphdr *iph; + int tun_flag = 0; + __u16 pkt_bytes; + __u64 iph_len; + __u32 ifindex; + __u8 protocol; + __u32 vip_num; + int action; + + tkey.tunnel_ttl = 64; + if (is_ipv6) { + __u8 buffer[sizeof(struct ipv6hdr)] = {}; + + ip6h = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer)); + if (!ip6h) + return TC_ACT_SHOT; + + iph_len = sizeof(struct ipv6hdr); + protocol = ip6h->nexthdr; + pckt.proto = protocol; + pkt_bytes = bpf_ntohs(ip6h->payload_len); + off += iph_len; + if (protocol == IPPROTO_FRAGMENT) { + return TC_ACT_SHOT; + } else if (protocol == IPPROTO_ICMPV6) { + action = parse_icmpv6(skb_ptr, off, &pckt); + if (action >= 0) + return action; + off += IPV6_PLUS_ICMP_HDR; + } else { + memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16); + memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16); + } + } else { + __u8 buffer[sizeof(struct iphdr)] = {}; + + iph = bpf_dynptr_slice(skb_ptr, off, buffer, sizeof(buffer)); + if (!iph || iph->ihl != 5) + return TC_ACT_SHOT; + + protocol = iph->protocol; + pckt.proto = protocol; + pkt_bytes = bpf_ntohs(iph->tot_len); + off += IPV4_HDR_LEN_NO_OPT; + + if (iph->frag_off & PCKT_FRAGMENTED) + return TC_ACT_SHOT; + if (protocol == IPPROTO_ICMP) { + action = parse_icmp(skb_ptr, off, &pckt); + if (action >= 0) + return action; + off += IPV4_PLUS_ICMP_HDR; + } else { + pckt.src = iph->saddr; + pckt.dst = iph->daddr; + } + } + protocol = pckt.proto; + + if (protocol == IPPROTO_TCP) { + if (!parse_tcp(skb_ptr, off, &pckt)) + return TC_ACT_SHOT; + } else if (protocol == IPPROTO_UDP) { + if (!parse_udp(skb_ptr, off, &pckt)) + return TC_ACT_SHOT; + } else { + return TC_ACT_SHOT; + } + + if (is_ipv6) + memcpy(vip.daddr.v6, pckt.dstv6, 16); + else + vip.daddr.v4 = pckt.dst; + + vip.dport = pckt.port16[1]; + vip.protocol = pckt.proto; + vip_info = bpf_map_lookup_elem(&vip_map, &vip); + if (!vip_info) { + vip.dport = 0; + vip_info = bpf_map_lookup_elem(&vip_map, &vip); + if (!vip_info) + return TC_ACT_SHOT; + pckt.port16[1] = 0; + } + + if (vip_info->flags & F_HASH_NO_SRC_PORT) + pckt.port16[0] = 0; + + if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6)) + return TC_ACT_SHOT; + + if (dst->flags & F_IPV6) { + cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos); + if (!cval) + return TC_ACT_SHOT; + ifindex = cval->ifindex; + memcpy(tkey.remote_ipv6, dst->dstv6, 16); + tun_flag = BPF_F_TUNINFO_IPV6; + } else { + cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos); + if (!cval) + return TC_ACT_SHOT; + ifindex = cval->ifindex; + tkey.remote_ipv4 = dst->dst; + } + vip_num = vip_info->vip_num; + data_stats = bpf_map_lookup_elem(&stats, &vip_num); + if (!data_stats) + return TC_ACT_SHOT; + data_stats->pkts++; + data_stats->bytes += pkt_bytes; + bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag); 
+ *(u32 *)eth->eth_dest = tkey.remote_ipv4; + return bpf_redirect(ifindex, 0); +} + +SEC("tc") +int balancer_ingress(struct __sk_buff *ctx) +{ + __u8 buffer[sizeof(struct eth_hdr)] = {}; + struct bpf_dynptr ptr; + struct eth_hdr *eth; + __u32 eth_proto; + __u32 nh_off; + int err; + + nh_off = sizeof(struct eth_hdr); + + bpf_dynptr_from_skb(ctx, 0, &ptr); + eth = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer)); + if (!eth) + return TC_ACT_SHOT; + eth_proto = eth->eth_proto; + if (eth_proto == bpf_htons(ETH_P_IP)) + err = process_packet(&ptr, eth, nh_off, false, ctx); + else if (eth_proto == bpf_htons(ETH_P_IPV6)) + err = process_packet(&ptr, eth, nh_off, true, ctx); + else + return TC_ACT_SHOT; + + if (eth == buffer) + bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0); + + return err; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c new file mode 100644 index 000000000000..79bab9b50e9e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* This parsing logic is taken from the open source library katran, a layer 4 + * load balancer. + * + * This code logic using dynptrs can be found in test_parse_tcp_hdr_opt_dynptr.c + * + * https://github.com/facebookincubator/katran/blob/main/katran/lib/bpf/pckt_parsing.h + */ + +#include +#include +#include +#include +#include +#include +#include "test_tcp_hdr_options.h" + +char _license[] SEC("license") = "GPL"; + +/* Kind number used for experiments */ +const __u32 tcp_hdr_opt_kind_tpr = 0xFD; +/* Length of the tcp header option */ +const __u32 tcp_hdr_opt_len_tpr = 6; +/* maximum number of header options to check to lookup server_id */ +const __u32 tcp_hdr_opt_max_opt_checks = 15; + +__u32 server_id; + +struct hdr_opt_state { + __u32 server_id; + __u8 byte_offset; + __u8 hdr_bytes_remaining; +}; + +static int parse_hdr_opt(const struct xdp_md *xdp, struct hdr_opt_state *state) +{ + const void *data = (void *)(long)xdp->data; + const void *data_end = (void *)(long)xdp->data_end; + __u8 *tcp_opt, kind, hdr_len; + + tcp_opt = (__u8 *)(data + state->byte_offset); + if (tcp_opt + 1 > data_end) + return -1; + + kind = tcp_opt[0]; + + if (kind == TCPOPT_EOL) + return -1; + + if (kind == TCPOPT_NOP) { + state->hdr_bytes_remaining--; + state->byte_offset++; + return 0; + } + + if (state->hdr_bytes_remaining < 2 || + tcp_opt + sizeof(__u8) + sizeof(__u8) > data_end) + return -1; + + hdr_len = tcp_opt[1]; + if (hdr_len > state->hdr_bytes_remaining) + return -1; + + if (kind == tcp_hdr_opt_kind_tpr) { + if (hdr_len != tcp_hdr_opt_len_tpr) + return -1; + + if (tcp_opt + tcp_hdr_opt_len_tpr > data_end) + return -1; + + state->server_id = *(__u32 *)&tcp_opt[2]; + return 1; + } + + state->hdr_bytes_remaining -= hdr_len; + state->byte_offset += hdr_len; + return 0; +} + +SEC("xdp") +int xdp_ingress_v6(struct xdp_md *xdp) +{ + const void *data = (void *)(long)xdp->data; + const void *data_end = (void *)(long)xdp->data_end; + struct hdr_opt_state opt_state = {}; + __u8 tcp_hdr_opt_len = 0; + struct tcphdr *tcp_hdr; + __u64 tcp_offset = 0; + __u32 off; + int err; + + tcp_offset = sizeof(struct ethhdr) + sizeof(struct ipv6hdr); + tcp_hdr = (struct tcphdr *)(data + tcp_offset); + if (tcp_hdr + 1 > data_end) + return XDP_DROP; + + tcp_hdr_opt_len = (tcp_hdr->doff * 4) - sizeof(struct tcphdr); + if (tcp_hdr_opt_len < tcp_hdr_opt_len_tpr) + return 
XDP_DROP; + + opt_state.hdr_bytes_remaining = tcp_hdr_opt_len; + opt_state.byte_offset = sizeof(struct tcphdr) + tcp_offset; + + /* max number of bytes of options in tcp header is 40 bytes */ + for (int i = 0; i < tcp_hdr_opt_max_opt_checks; i++) { + err = parse_hdr_opt(xdp, &opt_state); + + if (err || !opt_state.hdr_bytes_remaining) + break; + } + + if (!opt_state.server_id) + return XDP_DROP; + + server_id = opt_state.server_id; + + return XDP_PASS; +} diff --git a/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c new file mode 100644 index 000000000000..d3b319722e30 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* This logic is lifted from a real-world use case of packet parsing, used in + * the open source library katran, a layer 4 load balancer. + * + * This test demonstrates how to parse packet contents using dynptrs. The + * original code (parsing without dynptrs) can be found in test_parse_tcp_hdr_opt.c + */ + +#include +#include +#include +#include +#include +#include +#include "test_tcp_hdr_options.h" +#include "bpf_kfuncs.h" + +char _license[] SEC("license") = "GPL"; + +/* Kind number used for experiments */ +const __u32 tcp_hdr_opt_kind_tpr = 0xFD; +/* Length of the tcp header option */ +const __u32 tcp_hdr_opt_len_tpr = 6; +/* maximum number of header options to check to lookup server_id */ +const __u32 tcp_hdr_opt_max_opt_checks = 15; + +__u32 server_id; + +static int parse_hdr_opt(struct bpf_dynptr *ptr, __u32 *off, __u8 *hdr_bytes_remaining, + __u32 *server_id) +{ + __u8 *tcp_opt, kind, hdr_len; + __u8 buffer[sizeof(kind) + sizeof(hdr_len) + sizeof(*server_id)]; + __u8 *data; + + __builtin_memset(buffer, 0, sizeof(buffer)); + + data = bpf_dynptr_slice(ptr, *off, buffer, sizeof(buffer)); + if (!data) + return -1; + + kind = data[0]; + + if (kind == TCPOPT_EOL) + return -1; + + if (kind == TCPOPT_NOP) { + *off += 1; + *hdr_bytes_remaining -= 1; + return 0; + } + + if (*hdr_bytes_remaining < 2) + return -1; + + hdr_len = data[1]; + if (hdr_len > *hdr_bytes_remaining) + return -1; + + if (kind == tcp_hdr_opt_kind_tpr) { + if (hdr_len != tcp_hdr_opt_len_tpr) + return -1; + + __builtin_memcpy(server_id, (__u32 *)(data + 2), sizeof(*server_id)); + return 1; + } + + *off += hdr_len; + *hdr_bytes_remaining -= hdr_len; + return 0; +} + +SEC("xdp") +int xdp_ingress_v6(struct xdp_md *xdp) +{ + __u8 buffer[sizeof(struct tcphdr)] = {}; + __u8 hdr_bytes_remaining; + struct tcphdr *tcp_hdr; + __u8 tcp_hdr_opt_len; + int err = 0; + __u32 off; + + struct bpf_dynptr ptr; + + bpf_dynptr_from_xdp(xdp, 0, &ptr); + + off = sizeof(struct ethhdr) + sizeof(struct ipv6hdr); + + tcp_hdr = bpf_dynptr_slice(&ptr, off, buffer, sizeof(buffer)); + if (!tcp_hdr) + return XDP_DROP; + + tcp_hdr_opt_len = (tcp_hdr->doff * 4) - sizeof(struct tcphdr); + if (tcp_hdr_opt_len < tcp_hdr_opt_len_tpr) + return XDP_DROP; + + hdr_bytes_remaining = tcp_hdr_opt_len; + + off += sizeof(struct tcphdr); + + /* max number of bytes of options in tcp header is 40 bytes */ + for (int i = 0; i < tcp_hdr_opt_max_opt_checks; i++) { + err = parse_hdr_opt(&ptr, &off, &hdr_bytes_remaining, &server_id); + + if (err || !hdr_bytes_remaining) + break; + } + + if (!server_id) + return XDP_DROP; + + return XDP_PASS; +} diff --git a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c new file mode 100644 index 
000000000000..7521a805b506 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "test_iptunnel_common.h" +#include "bpf_kfuncs.h" + +const size_t tcphdr_sz = sizeof(struct tcphdr); +const size_t udphdr_sz = sizeof(struct udphdr); +const size_t ethhdr_sz = sizeof(struct ethhdr); +const size_t iphdr_sz = sizeof(struct iphdr); +const size_t ipv6hdr_sz = sizeof(struct ipv6hdr); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 256); + __type(key, __u32); + __type(value, __u64); +} rxcnt SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, MAX_IPTNL_ENTRIES); + __type(key, struct vip); + __type(value, struct iptnl_info); +} vip2tnl SEC(".maps"); + +static __always_inline void count_tx(__u32 protocol) +{ + __u64 *rxcnt_count; + + rxcnt_count = bpf_map_lookup_elem(&rxcnt, &protocol); + if (rxcnt_count) + *rxcnt_count += 1; +} + +static __always_inline int get_dport(void *trans_data, __u8 protocol) +{ + struct tcphdr *th; + struct udphdr *uh; + + switch (protocol) { + case IPPROTO_TCP: + th = (struct tcphdr *)trans_data; + return th->dest; + case IPPROTO_UDP: + uh = (struct udphdr *)trans_data; + return uh->dest; + default: + return 0; + } +} + +static __always_inline void set_ethhdr(struct ethhdr *new_eth, + const struct ethhdr *old_eth, + const struct iptnl_info *tnl, + __be16 h_proto) +{ + memcpy(new_eth->h_source, old_eth->h_dest, sizeof(new_eth->h_source)); + memcpy(new_eth->h_dest, tnl->dmac, sizeof(new_eth->h_dest)); + new_eth->h_proto = h_proto; +} + +static __always_inline int handle_ipv4(struct xdp_md *xdp, struct bpf_dynptr *xdp_ptr) +{ + __u8 eth_buffer[ethhdr_sz + iphdr_sz + ethhdr_sz]; + __u8 iph_buffer_tcp[iphdr_sz + tcphdr_sz]; + __u8 iph_buffer_udp[iphdr_sz + udphdr_sz]; + struct bpf_dynptr new_xdp_ptr; + struct iptnl_info *tnl; + struct ethhdr *new_eth; + struct ethhdr *old_eth; + __u32 transport_hdr_sz; + struct iphdr *iph; + __u16 *next_iph; + __u16 payload_len; + struct vip vip = {}; + int dport; + __u32 csum = 0; + int i; + + __builtin_memset(eth_buffer, 0, sizeof(eth_buffer)); + __builtin_memset(iph_buffer_tcp, 0, sizeof(iph_buffer_tcp)); + __builtin_memset(iph_buffer_udp, 0, sizeof(iph_buffer_udp)); + + if (ethhdr_sz + iphdr_sz + tcphdr_sz > xdp->data_end - xdp->data) + iph = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, iph_buffer_udp, sizeof(iph_buffer_udp)); + else + iph = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, iph_buffer_tcp, sizeof(iph_buffer_tcp)); + + if (!iph) + return XDP_DROP; + + dport = get_dport(iph + 1, iph->protocol); + if (dport == -1) + return XDP_DROP; + + vip.protocol = iph->protocol; + vip.family = AF_INET; + vip.daddr.v4 = iph->daddr; + vip.dport = dport; + payload_len = bpf_ntohs(iph->tot_len); + + tnl = bpf_map_lookup_elem(&vip2tnl, &vip); + /* It only does v4-in-v4 */ + if (!tnl || tnl->family != AF_INET) + return XDP_PASS; + + if (bpf_xdp_adjust_head(xdp, 0 - (int)iphdr_sz)) + return XDP_DROP; + + bpf_dynptr_from_xdp(xdp, 0, &new_xdp_ptr); + new_eth = bpf_dynptr_slice_rdwr(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer)); + if (!new_eth) + return XDP_DROP; + + iph = (struct iphdr *)(new_eth + 1); + old_eth = (struct ethhdr *)(iph + 1); + + set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IP)); + + if (new_eth == eth_buffer) + 
bpf_dynptr_write(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer), 0); + + iph->version = 4; + iph->ihl = iphdr_sz >> 2; + iph->frag_off = 0; + iph->protocol = IPPROTO_IPIP; + iph->check = 0; + iph->tos = 0; + iph->tot_len = bpf_htons(payload_len + iphdr_sz); + iph->daddr = tnl->daddr.v4; + iph->saddr = tnl->saddr.v4; + iph->ttl = 8; + + next_iph = (__u16 *)iph; + for (i = 0; i < iphdr_sz >> 1; i++) + csum += *next_iph++; + + iph->check = ~((csum & 0xffff) + (csum >> 16)); + + count_tx(vip.protocol); + + return XDP_TX; +} + +static __always_inline int handle_ipv6(struct xdp_md *xdp, struct bpf_dynptr *xdp_ptr) +{ + __u8 eth_buffer[ethhdr_sz + ipv6hdr_sz + ethhdr_sz]; + __u8 ip6h_buffer_tcp[ipv6hdr_sz + tcphdr_sz]; + __u8 ip6h_buffer_udp[ipv6hdr_sz + udphdr_sz]; + struct bpf_dynptr new_xdp_ptr; + struct iptnl_info *tnl; + struct ethhdr *new_eth; + struct ethhdr *old_eth; + __u32 transport_hdr_sz; + struct ipv6hdr *ip6h; + __u16 payload_len; + struct vip vip = {}; + int dport; + + __builtin_memset(eth_buffer, 0, sizeof(eth_buffer)); + __builtin_memset(ip6h_buffer_tcp, 0, sizeof(ip6h_buffer_tcp)); + __builtin_memset(ip6h_buffer_udp, 0, sizeof(ip6h_buffer_udp)); + + if (ethhdr_sz + iphdr_sz + tcphdr_sz > xdp->data_end - xdp->data) + ip6h = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, ip6h_buffer_udp, sizeof(ip6h_buffer_udp)); + else + ip6h = bpf_dynptr_slice(xdp_ptr, ethhdr_sz, ip6h_buffer_tcp, sizeof(ip6h_buffer_tcp)); + + if (!ip6h) + return XDP_DROP; + + dport = get_dport(ip6h + 1, ip6h->nexthdr); + if (dport == -1) + return XDP_DROP; + + vip.protocol = ip6h->nexthdr; + vip.family = AF_INET6; + memcpy(vip.daddr.v6, ip6h->daddr.s6_addr32, sizeof(vip.daddr)); + vip.dport = dport; + payload_len = ip6h->payload_len; + + tnl = bpf_map_lookup_elem(&vip2tnl, &vip); + /* It only does v6-in-v6 */ + if (!tnl || tnl->family != AF_INET6) + return XDP_PASS; + + if (bpf_xdp_adjust_head(xdp, 0 - (int)ipv6hdr_sz)) + return XDP_DROP; + + bpf_dynptr_from_xdp(xdp, 0, &new_xdp_ptr); + new_eth = bpf_dynptr_slice_rdwr(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer)); + if (!new_eth) + return XDP_DROP; + + ip6h = (struct ipv6hdr *)(new_eth + 1); + old_eth = (struct ethhdr *)(ip6h + 1); + + set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IPV6)); + + if (new_eth == eth_buffer) + bpf_dynptr_write(&new_xdp_ptr, 0, eth_buffer, sizeof(eth_buffer), 0); + + ip6h->version = 6; + ip6h->priority = 0; + memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl)); + ip6h->payload_len = bpf_htons(bpf_ntohs(payload_len) + ipv6hdr_sz); + ip6h->nexthdr = IPPROTO_IPV6; + ip6h->hop_limit = 8; + memcpy(ip6h->saddr.s6_addr32, tnl->saddr.v6, sizeof(tnl->saddr.v6)); + memcpy(ip6h->daddr.s6_addr32, tnl->daddr.v6, sizeof(tnl->daddr.v6)); + + count_tx(vip.protocol); + + return XDP_TX; +} + +SEC("xdp") +int _xdp_tx_iptunnel(struct xdp_md *xdp) +{ + __u8 buffer[ethhdr_sz]; + struct bpf_dynptr ptr; + struct ethhdr *eth; + __u16 h_proto; + + __builtin_memset(buffer, 0, sizeof(buffer)); + + bpf_dynptr_from_xdp(xdp, 0, &ptr); + eth = bpf_dynptr_slice(&ptr, 0, buffer, sizeof(buffer)); + if (!eth) + return XDP_DROP; + + h_proto = eth->h_proto; + + if (h_proto == bpf_htons(ETH_P_IP)) + return handle_ipv4(xdp, &ptr); + else if (h_proto == bpf_htons(ETH_P_IPV6)) + + return handle_ipv6(xdp, &ptr); + else + return XDP_DROP; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_tcp_hdr_options.h b/tools/testing/selftests/bpf/test_tcp_hdr_options.h index 6118e3ab61fc..56c9f8a3ad3d 100644 --- 
a/tools/testing/selftests/bpf/test_tcp_hdr_options.h +++ b/tools/testing/selftests/bpf/test_tcp_hdr_options.h @@ -50,6 +50,7 @@ struct linum_err { #define TCPOPT_EOL 0 #define TCPOPT_NOP 1 +#define TCPOPT_MSS 2 #define TCPOPT_WINDOW 3 #define TCPOPT_EXP 254 -- cgit v1.2.3-70-g09d2 From 85521e1ea4d0d7d8e62bbb0999f91e31ae421d76 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Sat, 25 Feb 2023 16:40:10 +0100 Subject: selftests/bpf: Add more tests for kptrs in maps Firstly, ensure programs successfully load when using all of the supported maps. Then, extend the existing tests to cover more cases at runtime. We currently test both the synchronous freeing of items and asynchronous destruction when the map is freed, but the code needs to be adjusted a bit to also accommodate percpu maps. We now do a delete on the item (and an update for array maps, which has a similar effect for kptrs) to perform a synchronous free of the kptr, and test destruction both for synchronous and asynchronous deletion. The next time the program runs, it should observe the refcount as 1, since all existing references should have been released by then. By running the program after both possible kptr-freeing paths, we establish that they correctly release resources. Next, we augment the existing test to also exercise the code path shared by all local storage maps, using a task local storage map. Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20230225154010.391965-4-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/map_kptr.c | 136 ++++++-- tools/testing/selftests/bpf/progs/map_kptr.c | 344 ++++++++++++++++++--- .../selftests/bpf/progs/rcu_tasks_trace_gp.c | 36 +++ 3 files changed, 451 insertions(+), 65 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c index 3533a4ecad01..8743df599567 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_kptr.c +++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c @@ -4,70 +4,160 @@ #include "map_kptr.skel.h" #include "map_kptr_fail.skel.h" +#include "rcu_tasks_trace_gp.skel.h" static void test_map_kptr_success(bool test_run) { + LIBBPF_OPTS(bpf_test_run_opts, lopts); LIBBPF_OPTS(bpf_test_run_opts, opts, .data_in = &pkt_v4, .data_size_in = sizeof(pkt_v4), .repeat = 1, ); + int key = 0, ret, cpu; struct map_kptr *skel; - int key = 0, ret; - char buf[16]; + char buf[16], *pbuf; skel = map_kptr__open_and_load(); if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load")) return; - ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref), &opts); - ASSERT_OK(ret, "test_map_kptr_ref refcount"); - ASSERT_OK(opts.retval, "test_map_kptr_ref retval"); + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref1), &opts); + ASSERT_OK(ret, "test_map_kptr_ref1 refcount"); + ASSERT_OK(opts.retval, "test_map_kptr_ref1 retval"); ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref2), &opts); ASSERT_OK(ret, "test_map_kptr_ref2 refcount"); ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval"); + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref1), &lopts); + ASSERT_OK(ret, "test_ls_map_kptr_ref1 refcount"); + ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref1 retval"); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref2), 
&lopts); + ASSERT_OK(ret, "test_ls_map_kptr_ref2 refcount"); + ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref2 retval"); + if (test_run) goto exit; + cpu = libbpf_num_possible_cpus(); + if (!ASSERT_GT(cpu, 0, "libbpf_num_possible_cpus")) + goto exit; + + pbuf = calloc(cpu, sizeof(buf)); + if (!ASSERT_OK_PTR(pbuf, "calloc(pbuf)")) + goto exit; + ret = bpf_map__update_elem(skel->maps.array_map, &key, sizeof(key), buf, sizeof(buf), 0); ASSERT_OK(ret, "array_map update"); - ret = bpf_map__update_elem(skel->maps.array_map, - &key, sizeof(key), buf, sizeof(buf), 0); - ASSERT_OK(ret, "array_map update2"); + skel->data->ref--; + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts); + ASSERT_OK(ret, "test_map_kptr_ref3 refcount"); + ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval"); + + ret = bpf_map__update_elem(skel->maps.pcpu_array_map, + &key, sizeof(key), pbuf, cpu * sizeof(buf), 0); + ASSERT_OK(ret, "pcpu_array_map update"); + skel->data->ref--; + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts); + ASSERT_OK(ret, "test_map_kptr_ref3 refcount"); + ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval"); - ret = bpf_map__update_elem(skel->maps.hash_map, - &key, sizeof(key), buf, sizeof(buf), 0); - ASSERT_OK(ret, "hash_map update"); ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0); ASSERT_OK(ret, "hash_map delete"); + skel->data->ref--; + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts); + ASSERT_OK(ret, "test_map_kptr_ref3 refcount"); + ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval"); + + ret = bpf_map__delete_elem(skel->maps.pcpu_hash_map, &key, sizeof(key), 0); + ASSERT_OK(ret, "pcpu_hash_map delete"); + skel->data->ref--; + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts); + ASSERT_OK(ret, "test_map_kptr_ref3 refcount"); + ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval"); - ret = bpf_map__update_elem(skel->maps.hash_malloc_map, - &key, sizeof(key), buf, sizeof(buf), 0); - ASSERT_OK(ret, "hash_malloc_map update"); ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0); ASSERT_OK(ret, "hash_malloc_map delete"); + skel->data->ref--; + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts); + ASSERT_OK(ret, "test_map_kptr_ref3 refcount"); + ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval"); + + ret = bpf_map__delete_elem(skel->maps.pcpu_hash_malloc_map, &key, sizeof(key), 0); + ASSERT_OK(ret, "pcpu_hash_malloc_map delete"); + skel->data->ref--; + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts); + ASSERT_OK(ret, "test_map_kptr_ref3 refcount"); + ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval"); - ret = bpf_map__update_elem(skel->maps.lru_hash_map, - &key, sizeof(key), buf, sizeof(buf), 0); - ASSERT_OK(ret, "lru_hash_map update"); ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0); ASSERT_OK(ret, "lru_hash_map delete"); + skel->data->ref--; + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts); + ASSERT_OK(ret, "test_map_kptr_ref3 refcount"); + ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval"); + + ret = bpf_map__delete_elem(skel->maps.lru_pcpu_hash_map, &key, sizeof(key), 0); + ASSERT_OK(ret, "lru_pcpu_hash_map delete"); + skel->data->ref--; + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref3), &opts); + ASSERT_OK(ret, "test_map_kptr_ref3 refcount"); + 
ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval"); + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_ls_map_kptr_ref_del), &lopts); + ASSERT_OK(ret, "test_ls_map_kptr_ref_del delete"); + skel->data->ref--; + ASSERT_OK(lopts.retval, "test_ls_map_kptr_ref_del retval"); + + free(pbuf); exit: map_kptr__destroy(skel); } -void test_map_kptr(void) +static int kern_sync_rcu_tasks_trace(struct rcu_tasks_trace_gp *rcu) { - if (test__start_subtest("success")) { + long gp_seq = READ_ONCE(rcu->bss->gp_seq); + LIBBPF_OPTS(bpf_test_run_opts, opts); + + if (!ASSERT_OK(bpf_prog_test_run_opts(bpf_program__fd(rcu->progs.do_call_rcu_tasks_trace), + &opts), "do_call_rcu_tasks_trace")) + return -EFAULT; + if (!ASSERT_OK(opts.retval, "opts.retval == 0")) + return -EFAULT; + while (gp_seq == READ_ONCE(rcu->bss->gp_seq)) + sched_yield(); + return 0; +} + +void serial_test_map_kptr(void) +{ + struct rcu_tasks_trace_gp *skel; + + RUN_TESTS(map_kptr_fail); + + skel = rcu_tasks_trace_gp__open_and_load(); + if (!ASSERT_OK_PTR(skel, "rcu_tasks_trace_gp__open_and_load")) + return; + if (!ASSERT_OK(rcu_tasks_trace_gp__attach(skel), "rcu_tasks_trace_gp__attach")) + goto end; + + if (test__start_subtest("success-map")) { + test_map_kptr_success(true); + + ASSERT_OK(kern_sync_rcu_tasks_trace(skel), "sync rcu_tasks_trace"); + ASSERT_OK(kern_sync_rcu(), "sync rcu"); + /* Observe refcount dropping to 1 on bpf_map_free_deferred */ test_map_kptr_success(false); - /* Do test_run twice, so that we see refcount going back to 1 - * after we leave it in map from first iteration. - */ + + ASSERT_OK(kern_sync_rcu_tasks_trace(skel), "sync rcu_tasks_trace"); + ASSERT_OK(kern_sync_rcu(), "sync rcu"); + /* Observe refcount dropping to 1 on synchronous delete elem */ test_map_kptr_success(true); } - RUN_TESTS(map_kptr_fail); +end: + rcu_tasks_trace_gp__destroy(skel); + return; } diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c index 228ec45365a8..a24d17bc17eb 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr.c +++ b/tools/testing/selftests/bpf/progs/map_kptr.c @@ -15,6 +15,13 @@ struct array_map { __uint(max_entries, 1); } array_map SEC(".maps"); +struct pcpu_array_map { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 1); +} pcpu_array_map SEC(".maps"); + struct hash_map { __uint(type, BPF_MAP_TYPE_HASH); __type(key, int); @@ -22,6 +29,13 @@ struct hash_map { __uint(max_entries, 1); } hash_map SEC(".maps"); +struct pcpu_hash_map { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 1); +} pcpu_hash_map SEC(".maps"); + struct hash_malloc_map { __uint(type, BPF_MAP_TYPE_HASH); __type(key, int); @@ -30,6 +44,14 @@ struct hash_malloc_map { __uint(map_flags, BPF_F_NO_PREALLOC); } hash_malloc_map SEC(".maps"); +struct pcpu_hash_malloc_map { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 1); + __uint(map_flags, BPF_F_NO_PREALLOC); +} pcpu_hash_malloc_map SEC(".maps"); + struct lru_hash_map { __uint(type, BPF_MAP_TYPE_LRU_HASH); __type(key, int); @@ -37,6 +59,41 @@ struct lru_hash_map { __uint(max_entries, 1); } lru_hash_map SEC(".maps"); +struct lru_pcpu_hash_map { + __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 1); +} lru_pcpu_hash_map SEC(".maps"); + +struct cgrp_ls_map { + __uint(type, 
BPF_MAP_TYPE_CGRP_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct map_value); +} cgrp_ls_map SEC(".maps"); + +struct task_ls_map { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct map_value); +} task_ls_map SEC(".maps"); + +struct inode_ls_map { + __uint(type, BPF_MAP_TYPE_INODE_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct map_value); +} inode_ls_map SEC(".maps"); + +struct sk_ls_map { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct map_value); +} sk_ls_map SEC(".maps"); + #define DEFINE_MAP_OF_MAP(map_type, inner_map_type, name) \ struct { \ __uint(type, map_type); \ @@ -160,6 +217,58 @@ int test_map_kptr(struct __sk_buff *ctx) return 0; } +SEC("tp_btf/cgroup_mkdir") +int BPF_PROG(test_cgrp_map_kptr, struct cgroup *cgrp, const char *path) +{ + struct map_value *v; + + v = bpf_cgrp_storage_get(&cgrp_ls_map, cgrp, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE); + if (v) + test_kptr(v); + return 0; +} + +SEC("lsm/inode_unlink") +int BPF_PROG(test_task_map_kptr, struct inode *inode, struct dentry *victim) +{ + struct task_struct *task; + struct map_value *v; + + task = bpf_get_current_task_btf(); + if (!task) + return 0; + v = bpf_task_storage_get(&task_ls_map, task, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE); + if (v) + test_kptr(v); + return 0; +} + +SEC("lsm/inode_unlink") +int BPF_PROG(test_inode_map_kptr, struct inode *inode, struct dentry *victim) +{ + struct map_value *v; + + v = bpf_inode_storage_get(&inode_ls_map, inode, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE); + if (v) + test_kptr(v); + return 0; +} + +SEC("tc") +int test_sk_map_kptr(struct __sk_buff *ctx) +{ + struct map_value *v; + struct bpf_sock *sk; + + sk = ctx->sk; + if (!sk) + return 0; + v = bpf_sk_storage_get(&sk_ls_map, sk, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE); + if (v) + test_kptr(v); + return 0; +} + SEC("tc") int test_map_in_map_kptr(struct __sk_buff *ctx) { @@ -189,106 +298,257 @@ int test_map_in_map_kptr(struct __sk_buff *ctx) return 0; } -SEC("tc") -int test_map_kptr_ref(struct __sk_buff *ctx) +int ref = 1; + +static __always_inline +int test_map_kptr_ref_pre(struct map_value *v) { struct prog_test_ref_kfunc *p, *p_st; unsigned long arg = 0; - struct map_value *v; - int key = 0, ret; + int ret; p = bpf_kfunc_call_test_acquire(&arg); if (!p) return 1; + ref++; p_st = p->next; - if (p_st->cnt.refs.counter != 2) { + if (p_st->cnt.refs.counter != ref) { ret = 2; goto end; } - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) { - ret = 3; - goto end; - } - p = bpf_kptr_xchg(&v->ref_ptr, p); if (p) { - ret = 4; + ret = 3; goto end; } - if (p_st->cnt.refs.counter != 2) - return 5; + if (p_st->cnt.refs.counter != ref) + return 4; p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0); if (!p) - return 6; - if (p_st->cnt.refs.counter != 3) { - ret = 7; + return 5; + ref++; + if (p_st->cnt.refs.counter != ref) { + ret = 6; goto end; } bpf_kfunc_call_test_release(p); - if (p_st->cnt.refs.counter != 2) - return 8; + ref--; + if (p_st->cnt.refs.counter != ref) + return 7; p = bpf_kptr_xchg(&v->ref_ptr, NULL); if (!p) - return 9; + return 8; bpf_kfunc_call_test_release(p); - if (p_st->cnt.refs.counter != 1) - return 10; + ref--; + if (p_st->cnt.refs.counter != ref) + return 9; p = bpf_kfunc_call_test_acquire(&arg); if (!p) - return 11; + return 10; + ref++; p = bpf_kptr_xchg(&v->ref_ptr, p); if (p) { - 
ret = 12; + ret = 11; goto end; } - if (p_st->cnt.refs.counter != 2) - return 13; + if (p_st->cnt.refs.counter != ref) + return 12; /* Leave in map */ return 0; end: + ref--; bpf_kfunc_call_test_release(p); return ret; } -SEC("tc") -int test_map_kptr_ref2(struct __sk_buff *ctx) +static __always_inline +int test_map_kptr_ref_post(struct map_value *v) { struct prog_test_ref_kfunc *p, *p_st; - struct map_value *v; - int key = 0; - - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) - return 1; p_st = v->ref_ptr; - if (!p_st || p_st->cnt.refs.counter != 2) - return 2; + if (!p_st || p_st->cnt.refs.counter != ref) + return 1; p = bpf_kptr_xchg(&v->ref_ptr, NULL); if (!p) - return 3; - if (p_st->cnt.refs.counter != 2) { + return 2; + if (p_st->cnt.refs.counter != ref) { bpf_kfunc_call_test_release(p); - return 4; + return 3; } p = bpf_kptr_xchg(&v->ref_ptr, p); if (p) { bpf_kfunc_call_test_release(p); - return 5; + return 4; } - if (p_st->cnt.refs.counter != 2) - return 6; + if (p_st->cnt.refs.counter != ref) + return 5; + + return 0; +} + +#define TEST(map) \ + v = bpf_map_lookup_elem(&map, &key); \ + if (!v) \ + return -1; \ + ret = test_map_kptr_ref_pre(v); \ + if (ret) \ + return ret; + +#define TEST_PCPU(map) \ + v = bpf_map_lookup_percpu_elem(&map, &key, 0); \ + if (!v) \ + return -1; \ + ret = test_map_kptr_ref_pre(v); \ + if (ret) \ + return ret; + +SEC("tc") +int test_map_kptr_ref1(struct __sk_buff *ctx) +{ + struct map_value *v, val = {}; + int key = 0, ret; + + bpf_map_update_elem(&hash_map, &key, &val, 0); + bpf_map_update_elem(&hash_malloc_map, &key, &val, 0); + bpf_map_update_elem(&lru_hash_map, &key, &val, 0); + + bpf_map_update_elem(&pcpu_hash_map, &key, &val, 0); + bpf_map_update_elem(&pcpu_hash_malloc_map, &key, &val, 0); + bpf_map_update_elem(&lru_pcpu_hash_map, &key, &val, 0); + + TEST(array_map); + TEST(hash_map); + TEST(hash_malloc_map); + TEST(lru_hash_map); + + TEST_PCPU(pcpu_array_map); + TEST_PCPU(pcpu_hash_map); + TEST_PCPU(pcpu_hash_malloc_map); + TEST_PCPU(lru_pcpu_hash_map); + + return 0; +} + +#undef TEST +#undef TEST_PCPU + +#define TEST(map) \ + v = bpf_map_lookup_elem(&map, &key); \ + if (!v) \ + return -1; \ + ret = test_map_kptr_ref_post(v); \ + if (ret) \ + return ret; + +#define TEST_PCPU(map) \ + v = bpf_map_lookup_percpu_elem(&map, &key, 0); \ + if (!v) \ + return -1; \ + ret = test_map_kptr_ref_post(v); \ + if (ret) \ + return ret; + +SEC("tc") +int test_map_kptr_ref2(struct __sk_buff *ctx) +{ + struct map_value *v; + int key = 0, ret; + + TEST(array_map); + TEST(hash_map); + TEST(hash_malloc_map); + TEST(lru_hash_map); + + TEST_PCPU(pcpu_array_map); + TEST_PCPU(pcpu_hash_map); + TEST_PCPU(pcpu_hash_malloc_map); + TEST_PCPU(lru_pcpu_hash_map); return 0; } +#undef TEST +#undef TEST_PCPU + +SEC("tc") +int test_map_kptr_ref3(struct __sk_buff *ctx) +{ + struct prog_test_ref_kfunc *p; + unsigned long sp = 0; + + p = bpf_kfunc_call_test_acquire(&sp); + if (!p) + return 1; + ref++; + if (p->cnt.refs.counter != ref) { + bpf_kfunc_call_test_release(p); + return 2; + } + bpf_kfunc_call_test_release(p); + ref--; + return 0; +} + +SEC("syscall") +int test_ls_map_kptr_ref1(void *ctx) +{ + struct task_struct *current; + struct map_value *v; + int ret; + + current = bpf_get_current_task_btf(); + if (!current) + return 100; + v = bpf_task_storage_get(&task_ls_map, current, NULL, 0); + if (v) + return 150; + v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!v) + return 200; + return test_map_kptr_ref_pre(v); +} + 
+SEC("syscall") +int test_ls_map_kptr_ref2(void *ctx) +{ + struct task_struct *current; + struct map_value *v; + int ret; + + current = bpf_get_current_task_btf(); + if (!current) + return 100; + v = bpf_task_storage_get(&task_ls_map, current, NULL, 0); + if (!v) + return 200; + return test_map_kptr_ref_post(v); +} + +SEC("syscall") +int test_ls_map_kptr_ref_del(void *ctx) +{ + struct task_struct *current; + struct map_value *v; + int ret; + + current = bpf_get_current_task_btf(); + if (!current) + return 100; + v = bpf_task_storage_get(&task_ls_map, current, NULL, 0); + if (!v) + return 200; + if (!v->ref_ptr) + return 300; + return bpf_task_storage_delete(&task_ls_map, current); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c b/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c new file mode 100644 index 000000000000..df4873558634 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +struct task_ls_map { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, int); +} task_ls_map SEC(".maps"); + +long gp_seq; + +SEC("syscall") +int do_call_rcu_tasks_trace(void *ctx) +{ + struct task_struct *current; + int *v; + + current = bpf_get_current_task_btf(); + v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!v) + return 1; + /* Invoke call_rcu_tasks_trace */ + return bpf_task_storage_delete(&task_ls_map, current); +} + +SEC("kprobe/rcu_tasks_trace_postgp") +int rcu_tasks_trace_postgp(void *ctx) +{ + __sync_add_and_fetch(&gp_seq, 1); + return 0; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From be35f4af719c94df137cd611bf497d658eb3adc2 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Tue, 28 Feb 2023 20:03:01 +0800 Subject: selftests/bpf: Set __BITS_PER_LONG if target is bpf for LoongArch If target is bpf, there is no __loongarch__ definition, __BITS_PER_LONG defaults to 32, __NR_nanosleep is not defined: #if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 #define __NR_nanosleep 101 __SC_3264(__NR_nanosleep, sys_nanosleep_time32, sys_nanosleep) #endif Work around this problem, by explicitly setting __BITS_PER_LONG to __loongarch_grlen which is defined by compiler as 64 for LA64. This is similar with commit 36e70b9b06bf ("selftests, bpf: Fix broken riscv build"). 
Signed-off-by: Tiezhu Yang Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/1677585781-21628-1-git-send-email-yangtiezhu@loongson.cn --- tools/testing/selftests/bpf/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index b677dcd0b77a..f40606a85a0f 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -338,7 +338,8 @@ $(RESOLVE_BTFIDS): $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/resolve_btfids \ define get_sys_includes $(shell $(1) $(2) -v -E - &1 \ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \ -$(shell $(1) $(2) -dM -E - Date: Wed, 1 Mar 2023 19:54:17 +0200 Subject: selftests/bpf: Support custom per-test flags and multiple expected messages Extend the __flag attribute to allow specifying one of the following: * BPF_F_STRICT_ALIGNMENT * BPF_F_ANY_ALIGNMENT * BPF_F_TEST_RND_HI32 * BPF_F_TEST_STATE_FREQ * BPF_F_SLEEPABLE * BPF_F_XDP_HAS_FRAGS * Some numeric value Extend the __msg attribute to allow specifying multiple expected messages. All messages are expected to be present in the verifier log, in the order in which they were specified. Signed-off-by: Andrii Nakryiko Signed-off-by: Eduard Zingerman Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230301175417.3146070-2-eddyz87@gmail.com [ Eduard: added commit message, formatting, comments ] --- tools/testing/selftests/bpf/progs/bpf_misc.h | 23 ++++++++++ tools/testing/selftests/bpf/test_loader.c | 69 ++++++++++++++++++++++++---- tools/testing/selftests/bpf/test_progs.h | 1 + 3 files changed, 84 insertions(+), 9 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 14e28f991451..f704885aa534 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -2,10 +2,33 @@ #ifndef __BPF_MISC_H__ #define __BPF_MISC_H__ +/* This set of attributes controls behavior of the + * test_loader.c:test_loader__run_subtests(). + * + * __msg Message expected to be found in the verifier log. + * Multiple __msg attributes could be specified. + * + * __success Expect program load success in privileged mode. + * + * __failure Expect program load failure in privileged mode. + * + * __log_level Log level to use for the program, numeric value expected. + * + * __flag Adds one flag use for the program, the following values are valid: + * - BPF_F_STRICT_ALIGNMENT; + * - BPF_F_TEST_RND_HI32; + * - BPF_F_TEST_STATE_FREQ; + * - BPF_F_SLEEPABLE; + * - BPF_F_XDP_HAS_FRAGS; + * - A numeric value. + * Multiple __flag attributes could be specified, the final flags + * value is derived by applying binary "or" to all specified values. 
+ */ #define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg))) #define __failure __attribute__((btf_decl_tag("comment:test_expect_failure"))) #define __success __attribute__((btf_decl_tag("comment:test_expect_success"))) #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl))) +#define __flag(flag) __attribute__((btf_decl_tag("comment:test_prog_flags="#flag))) /* Convenience macro for use with 'asm volatile' blocks */ #define __naked __attribute__((naked)) diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index 679efb3aa785..bf41390157bf 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -13,12 +13,15 @@ #define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success" #define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg=" #define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level=" +#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags=" struct test_spec { const char *name; bool expect_failure; - const char *expect_msg; + const char **expect_msgs; + size_t expect_msg_cnt; int log_level; + int prog_flags; }; static int tester_init(struct test_loader *tester) @@ -67,7 +70,8 @@ static int parse_test_spec(struct test_loader *tester, for (i = 1; i < btf__type_cnt(btf); i++) { const struct btf_type *t; - const char *s; + const char *s, *val; + char *e; t = btf__type_by_id(btf, i); if (!btf_is_decl_tag(t)) @@ -82,14 +86,48 @@ static int parse_test_spec(struct test_loader *tester, } else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS) == 0) { spec->expect_failure = false; } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) { - spec->expect_msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1; + void *tmp; + const char **msg; + + tmp = realloc(spec->expect_msgs, + (1 + spec->expect_msg_cnt) * sizeof(void *)); + if (!tmp) { + ASSERT_FAIL("failed to realloc memory for messages\n"); + return -ENOMEM; + } + spec->expect_msgs = tmp; + msg = &spec->expect_msgs[spec->expect_msg_cnt++]; + *msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1; } else if (str_has_pfx(s, TEST_TAG_LOG_LEVEL_PFX)) { + val = s + sizeof(TEST_TAG_LOG_LEVEL_PFX) - 1; errno = 0; - spec->log_level = strtol(s + sizeof(TEST_TAG_LOG_LEVEL_PFX) - 1, NULL, 0); - if (errno) { + spec->log_level = strtol(val, &e, 0); + if (errno || e[0] != '\0') { ASSERT_FAIL("failed to parse test log level from '%s'", s); return -EINVAL; } + } else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) { + val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1; + if (strcmp(val, "BPF_F_STRICT_ALIGNMENT") == 0) { + spec->prog_flags |= BPF_F_STRICT_ALIGNMENT; + } else if (strcmp(val, "BPF_F_ANY_ALIGNMENT") == 0) { + spec->prog_flags |= BPF_F_ANY_ALIGNMENT; + } else if (strcmp(val, "BPF_F_TEST_RND_HI32") == 0) { + spec->prog_flags |= BPF_F_TEST_RND_HI32; + } else if (strcmp(val, "BPF_F_TEST_STATE_FREQ") == 0) { + spec->prog_flags |= BPF_F_TEST_STATE_FREQ; + } else if (strcmp(val, "BPF_F_SLEEPABLE") == 0) { + spec->prog_flags |= BPF_F_SLEEPABLE; + } else if (strcmp(val, "BPF_F_XDP_HAS_FRAGS") == 0) { + spec->prog_flags |= BPF_F_XDP_HAS_FRAGS; + } else /* assume numeric value */ { + errno = 0; + spec->prog_flags |= strtol(val, &e, 0); + if (errno || e[0] != '\0') { + ASSERT_FAIL("failed to parse test prog flags from '%s'", s); + return -EINVAL; + } + } } } @@ -101,7 +139,7 @@ static void prepare_case(struct test_loader *tester, struct bpf_object *obj, struct bpf_program *prog) { - int min_log_level = 0; + int min_log_level = 0, prog_flags; if 
(env.verbosity > VERBOSE_NONE) min_log_level = 1; @@ -119,7 +157,11 @@ static void prepare_case(struct test_loader *tester, else bpf_program__set_log_level(prog, spec->log_level); + prog_flags = bpf_program__flags(prog); + bpf_program__set_flags(prog, prog_flags | spec->prog_flags); + tester->log_buf[0] = '\0'; + tester->next_match_pos = 0; } static void emit_verifier_log(const char *log_buf, bool force) @@ -135,17 +177,26 @@ static void validate_case(struct test_loader *tester, struct bpf_program *prog, int load_err) { - if (spec->expect_msg) { + int i, j; + + for (i = 0; i < spec->expect_msg_cnt; i++) { char *match; + const char *expect_msg; + + expect_msg = spec->expect_msgs[i]; - match = strstr(tester->log_buf, spec->expect_msg); + match = strstr(tester->log_buf + tester->next_match_pos, expect_msg); if (!ASSERT_OK_PTR(match, "expect_msg")) { /* if we are in verbose mode, we've already emitted log */ if (env.verbosity == VERBOSE_NONE) emit_verifier_log(tester->log_buf, true /*force*/); - fprintf(stderr, "EXPECTED MSG: '%s'\n", spec->expect_msg); + for (j = 0; j < i; j++) + fprintf(stderr, "MATCHED MSG: '%s'\n", spec->expect_msgs[j]); + fprintf(stderr, "EXPECTED MSG: '%s'\n", expect_msg); return; } + + tester->next_match_pos = match - tester->log_buf + strlen(expect_msg); } } diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 9fbdc57c5b57..3cbf005747ed 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -427,6 +427,7 @@ int get_bpf_max_tramp_links(void); struct test_loader { char *log_buf; size_t log_buf_sz; + size_t next_match_pos; struct bpf_object *obj; }; -- cgit v1.2.3-70-g09d2 From ec97a76f113ee0d8fd17178b4e4ffbf0ab9e5452 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Thu, 2 Mar 2023 16:55:00 -0800 Subject: selftests/bpf: Add -Wuninitialized flag to bpf prog flags Per C99 standard [0], Section 6.7.8, Paragraph 10: If an object that has automatic storage duration is not initialized explicitly, its value is indeterminate. And in the same document, in appendix "J.2 Undefined behavior": The behavior is undefined in the following circumstances: [...] The value of an object with automatic storage duration is used while it is indeterminate (6.2.4, 6.7.8, 6.8). This means that use of an uninitialized stack variable is undefined behavior, and therefore that clang can choose to do a variety of scary things, such as not generating bytecode for "bunch of useful code" in the below example: void some_func() { int i; if (!i) return; // bunch of useful code } To add insult to injury, if some_func above is a helper function for some BPF program, clang can choose to not generate an "exit" insn, causing verifier to fail with "last insn is not an exit or jmp". Going from that verification failure to the root cause of uninitialized use is certain to be frustrating. This patch adds -Wuninitialized to the cflags for selftest BPF progs and fixes up existing instances of uninitialized use. 
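A minimal sketch of the fix pattern the hunks below apply (it reuses the illustrative some_func example from above; the names are not taken from the changed files):

void some_func(void)
{
	int i = 0;	/* was "int i;" -- reading an indeterminate value is UB */

	if (!i)
		return;
	/* bunch of useful code -- now guaranteed to be generated */
}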
[0]: https://www.open-std.org/jtc1/sc22/WG14/www/docs/n1256.pdf Signed-off-by: Dave Marchevsky Cc: David Vernet Cc: Tejun Heo Acked-by: David Vernet Link: https://lore.kernel.org/r/20230303005500.1614874-1-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/Makefile | 2 +- tools/testing/selftests/bpf/progs/rbtree.c | 2 +- tools/testing/selftests/bpf/progs/rbtree_fail.c | 7 +++++-- tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c | 2 +- tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c | 2 +- tools/testing/selftests/bpf/progs/test_tunnel_kern.c | 10 +++++----- 6 files changed, 14 insertions(+), 11 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index f40606a85a0f..eab3cf5399f5 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -357,7 +357,7 @@ BPF_CFLAGS = -g -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \ -I$(abspath $(OUTPUT)/../usr/include) CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \ - -Wno-compare-distinct-pointer-types + -Wno-compare-distinct-pointer-types -Wuninitialized $(OUTPUT)/test_l4lb_noinline.o: BPF_CFLAGS += -fno-inline $(OUTPUT)/test_xdp_noinline.o: BPF_CFLAGS += -fno-inline diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c index e5db1a4287e5..4c90aa6abddd 100644 --- a/tools/testing/selftests/bpf/progs/rbtree.c +++ b/tools/testing/selftests/bpf/progs/rbtree.c @@ -75,7 +75,7 @@ SEC("tc") long rbtree_add_and_remove(void *ctx) { struct bpf_rb_node *res = NULL; - struct node_data *n, *m; + struct node_data *n, *m = NULL; n = bpf_obj_new(typeof(*n)); if (!n) diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c index bf3cba115897..1ced900f3fce 100644 --- a/tools/testing/selftests/bpf/progs/rbtree_fail.c +++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c @@ -232,8 +232,11 @@ long rbtree_api_first_release_unlock_escape(void *ctx) bpf_spin_lock(&glock); res = bpf_rbtree_first(&groot); - if (res) - n = container_of(res, struct node_data, node); + if (!res) { + bpf_spin_unlock(&glock); + return 1; + } + n = container_of(res, struct node_data, node); bpf_spin_unlock(&glock); bpf_spin_lock(&glock); diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c index 2fbef3cc7ad8..2dde8e3fe4c9 100644 --- a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c +++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c @@ -48,7 +48,7 @@ SEC("?lsm.s/bpf") __failure __msg("arg#0 expected pointer to stack or dynptr_ptr") int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size) { - unsigned long val; + unsigned long val = 0; return bpf_verify_pkcs7_signature((struct bpf_dynptr *)val, (struct bpf_dynptr *)val, NULL); diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c index b502e5c92e33..6ccf6d546074 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c @@ -23,8 +23,8 @@ static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off, bool *ipv4) { struct bpf_sock_tuple *result; + __u64 ihl_len = 0; __u8 proto = 0; - __u64 ihl_len; if (eth_proto == bpf_htons(ETH_P_IP)) { struct iphdr *iph = (struct iphdr *)(data + nh_off); diff --git 
a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c index 508da4a23c4f..95b4aa0928ba 100644 --- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c @@ -324,11 +324,11 @@ int ip4ip6erspan_get_tunnel(struct __sk_buff *skb) SEC("tc") int vxlan_set_tunnel_dst(struct __sk_buff *skb) { - int ret; struct bpf_tunnel_key key; struct vxlan_metadata md; __u32 index = 0; __u32 *local_ip = NULL; + int ret = 0; local_ip = bpf_map_lookup_elem(&local_ip_map, &index); if (!local_ip) { @@ -363,11 +363,11 @@ int vxlan_set_tunnel_dst(struct __sk_buff *skb) SEC("tc") int vxlan_set_tunnel_src(struct __sk_buff *skb) { - int ret; struct bpf_tunnel_key key; struct vxlan_metadata md; __u32 index = 0; __u32 *local_ip = NULL; + int ret = 0; local_ip = bpf_map_lookup_elem(&local_ip_map, &index); if (!local_ip) { @@ -494,9 +494,9 @@ SEC("tc") int ip6vxlan_set_tunnel_dst(struct __sk_buff *skb) { struct bpf_tunnel_key key; - int ret; __u32 index = 0; __u32 *local_ip; + int ret = 0; local_ip = bpf_map_lookup_elem(&local_ip_map, &index); if (!local_ip) { @@ -525,9 +525,9 @@ SEC("tc") int ip6vxlan_set_tunnel_src(struct __sk_buff *skb) { struct bpf_tunnel_key key; - int ret; __u32 index = 0; __u32 *local_ip; + int ret = 0; local_ip = bpf_map_lookup_elem(&local_ip_map, &index); if (!local_ip) { @@ -556,9 +556,9 @@ SEC("tc") int ip6vxlan_get_tunnel_src(struct __sk_buff *skb) { struct bpf_tunnel_key key; - int ret; __u32 index = 0; __u32 *local_ip; + int ret = 0; local_ip = bpf_map_lookup_elem(&local_ip_map, &index); if (!local_ip) { -- cgit v1.2.3-70-g09d2 From 944459e88b4f5c71683b56710f96e39756afae31 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Thu, 2 Mar 2023 13:46:14 +0200 Subject: selftests/bpf: Add absolute timer test Add test for the absolute BPF timer under the existing timer tests. This will run the timer two times with 1us expiration time, and then re-arm the timer at ~35s in the future. At the end, it is verified that the absolute timer expired exactly two times. 
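For reference, a tiny user-space sketch of the arithmetic behind those expectations (constants copied from the BPF program added below; the snippet is illustrative and not part of the patch):

#include <stdio.h>

int main(void)
{
	/* timer_cb3() bumps abs_data by 6 per expiration; two expirations -> 12 */
	printf("expected abs_data: %d\n", 2 * 6);
	/* the final re-arm uses an absolute expiry of now + (1ULL << 35) ns */
	printf("final re-arm delay: %.2f s\n", (1ULL << 35) / 1e9); /* ~34.36 s, i.e. "~35s" */
	return 0;
}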
Signed-off-by: Tero Kristo Link: https://lore.kernel.org/r/20230302114614.2985072-3-tero.kristo@linux.intel.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/timer.c | 3 ++ tools/testing/selftests/bpf/progs/timer.c | 45 ++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c index 7eb049214859..290c21dbe65a 100644 --- a/tools/testing/selftests/bpf/prog_tests/timer.c +++ b/tools/testing/selftests/bpf/prog_tests/timer.c @@ -29,6 +29,9 @@ static int timer(struct timer *timer_skel) /* check that timer_cb2() was executed twice */ ASSERT_EQ(timer_skel->bss->bss_data, 10, "bss_data"); + /* check that timer_cb3() was executed twice */ + ASSERT_EQ(timer_skel->bss->abs_data, 12, "abs_data"); + /* check that there were no errors in timer execution */ ASSERT_EQ(timer_skel->bss->err, 0, "err"); diff --git a/tools/testing/selftests/bpf/progs/timer.c b/tools/testing/selftests/bpf/progs/timer.c index acda5c9cea93..9a16d95213e1 100644 --- a/tools/testing/selftests/bpf/progs/timer.c +++ b/tools/testing/selftests/bpf/progs/timer.c @@ -46,7 +46,15 @@ struct { __type(value, struct elem); } lru SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct elem); +} abs_timer SEC(".maps"); + __u64 bss_data; +__u64 abs_data; __u64 err; __u64 ok; __u64 callback_check = 52; @@ -284,3 +292,40 @@ int BPF_PROG2(test2, int, a, int, b) return bpf_timer_test(); } + +/* callback for absolute timer */ +static int timer_cb3(void *map, int *key, struct bpf_timer *timer) +{ + abs_data += 6; + + if (abs_data < 12) { + bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000, + BPF_F_TIMER_ABS); + } else { + /* Re-arm timer ~35 seconds in future */ + bpf_timer_start(timer, bpf_ktime_get_boot_ns() + (1ull << 35), + BPF_F_TIMER_ABS); + } + + return 0; +} + +SEC("fentry/bpf_fentry_test3") +int BPF_PROG2(test3, int, a) +{ + int key = 0; + struct bpf_timer *timer; + + bpf_printk("test3"); + + timer = bpf_map_lookup_elem(&abs_timer, &key); + if (timer) { + if (bpf_timer_init(timer, &abs_timer, CLOCK_BOOTTIME) != 0) + err |= 2048; + bpf_timer_set_callback(timer, timer_cb3); + bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000, + BPF_F_TIMER_ABS); + } + + return 0; +} -- cgit v1.2.3-70-g09d2 From 03b77e17aeb22a5935ea20d585ca6a1f2947e62b Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 2 Mar 2023 20:14:41 -0800 Subject: bpf: Rename __kptr_ref -> __kptr and __kptr -> __kptr_untrusted. __kptr meant to store PTR_UNTRUSTED kernel pointers inside bpf maps. The concept felt useful, but didn't get much traction, since bpf_rdonly_cast() was added soon after and bpf programs received a simpler way to access PTR_UNTRUSTED kernel pointers without going through restrictive __kptr usage. Rename __kptr_ref -> __kptr and __kptr -> __kptr_untrusted to indicate its intended usage. The main goal of __kptr_untrusted was to read/write such pointers directly while bpf_kptr_xchg was a mechanism to access refcnted kernel pointers. The next patch will allow RCU protected __kptr access with direct read. At that point __kptr_untrusted will be deprecated. 
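A hedged sketch of what a map value declaration looks like after this rename (the struct, field names, and the choice of task_struct are illustrative assumptions; __kptr and __kptr_untrusted come from bpf_helpers.h as changed below):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

struct map_value {
	/* old direct-access, PTR_UNTRUSTED semantics (was spelled __kptr) */
	struct task_struct __kptr_untrusted *legacy_ptr;
	/* refcounted pointer, moved in and out with bpf_kptr_xchg() (was __kptr_ref) */
	struct task_struct __kptr *ref_ptr;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_value);
} demo_map SEC(".maps");

char _license[] SEC("license") = "GPL";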
Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: David Vernet Link: https://lore.kernel.org/bpf/20230303041446.3630-2-alexei.starovoitov@gmail.com --- Documentation/bpf/bpf_design_QA.rst | 4 ++-- Documentation/bpf/cpumasks.rst | 4 ++-- Documentation/bpf/kfuncs.rst | 2 +- kernel/bpf/btf.c | 4 ++-- tools/lib/bpf/bpf_helpers.h | 2 +- tools/testing/selftests/bpf/progs/cb_refs.c | 2 +- .../selftests/bpf/progs/cgrp_kfunc_common.h | 2 +- tools/testing/selftests/bpf/progs/cpumask_common.h | 2 +- tools/testing/selftests/bpf/progs/jit_probe_mem.c | 2 +- tools/testing/selftests/bpf/progs/lru_bug.c | 2 +- tools/testing/selftests/bpf/progs/map_kptr.c | 4 ++-- tools/testing/selftests/bpf/progs/map_kptr_fail.c | 6 +++--- .../selftests/bpf/progs/task_kfunc_common.h | 2 +- tools/testing/selftests/bpf/test_verifier.c | 22 +++++++++++----------- 14 files changed, 30 insertions(+), 30 deletions(-) (limited to 'tools/testing') diff --git a/Documentation/bpf/bpf_design_QA.rst b/Documentation/bpf/bpf_design_QA.rst index bfff0e7e37c2..38372a956d65 100644 --- a/Documentation/bpf/bpf_design_QA.rst +++ b/Documentation/bpf/bpf_design_QA.rst @@ -314,7 +314,7 @@ Q: What is the compatibility story for special BPF types in map values? Q: Users are allowed to embed bpf_spin_lock, bpf_timer fields in their BPF map values (when using BTF support for BPF maps). This allows to use helpers for such objects on these fields inside map values. Users are also allowed to embed -pointers to some kernel types (with __kptr and __kptr_ref BTF tags). Will the +pointers to some kernel types (with __kptr_untrusted and __kptr BTF tags). Will the kernel preserve backwards compatibility for these features? A: It depends. For bpf_spin_lock, bpf_timer: YES, for kptr and everything else: @@ -324,7 +324,7 @@ For struct types that have been added already, like bpf_spin_lock and bpf_timer, the kernel will preserve backwards compatibility, as they are part of UAPI. For kptrs, they are also part of UAPI, but only with respect to the kptr -mechanism. The types that you can use with a __kptr and __kptr_ref tagged +mechanism. The types that you can use with a __kptr_untrusted and __kptr tagged pointer in your struct are NOT part of the UAPI contract. The supported types can and will change across kernel releases. However, operations like accessing kptr fields and bpf_kptr_xchg() helper will continue to be supported across kernel diff --git a/Documentation/bpf/cpumasks.rst b/Documentation/bpf/cpumasks.rst index 24bef9cbbeee..75344cd230e5 100644 --- a/Documentation/bpf/cpumasks.rst +++ b/Documentation/bpf/cpumasks.rst @@ -51,7 +51,7 @@ For example: .. code-block:: c struct cpumask_map_value { - struct bpf_cpumask __kptr_ref * cpumask; + struct bpf_cpumask __kptr * cpumask; }; struct array_map { @@ -128,7 +128,7 @@ Here is an example of a ``struct bpf_cpumask *`` being retrieved from a map: /* struct containing the struct bpf_cpumask kptr which is stored in the map. */ struct cpumasks_kfunc_map_value { - struct bpf_cpumask __kptr_ref * bpf_cpumask; + struct bpf_cpumask __kptr * bpf_cpumask; }; /* The map containing struct cpumasks_kfunc_map_value entries. */ diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst index 9d85bbc3b771..b5d9b0d446bc 100644 --- a/Documentation/bpf/kfuncs.rst +++ b/Documentation/bpf/kfuncs.rst @@ -544,7 +544,7 @@ Here's an example of how it can be used: /* struct containing the struct task_struct kptr which is actually stored in the map. 
*/ struct __cgroups_kfunc_map_value { - struct cgroup __kptr_ref * cgroup; + struct cgroup __kptr * cgroup; }; /* The map containing struct __cgroups_kfunc_map_value entries. */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index ef2d8969ed1f..c5e1d6955491 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3288,9 +3288,9 @@ static int btf_find_kptr(const struct btf *btf, const struct btf_type *t, /* Reject extra tags */ if (btf_type_is_type_tag(btf_type_by_id(btf, t->type))) return -EINVAL; - if (!strcmp("kptr", __btf_name_by_offset(btf, t->name_off))) + if (!strcmp("kptr_untrusted", __btf_name_by_offset(btf, t->name_off))) type = BPF_KPTR_UNREF; - else if (!strcmp("kptr_ref", __btf_name_by_offset(btf, t->name_off))) + else if (!strcmp("kptr", __btf_name_by_offset(btf, t->name_off))) type = BPF_KPTR_REF; else return -EINVAL; diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index 5ec1871acb2f..7d12d3e620cc 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -174,8 +174,8 @@ enum libbpf_tristate { #define __kconfig __attribute__((section(".kconfig"))) #define __ksym __attribute__((section(".ksyms"))) +#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted"))) #define __kptr __attribute__((btf_type_tag("kptr"))) -#define __kptr_ref __attribute__((btf_type_tag("kptr_ref"))) #ifndef ___bpf_concat #define ___bpf_concat(a, b) a ## b diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c index 7653df1bc787..ce96b33e38d6 100644 --- a/tools/testing/selftests/bpf/progs/cb_refs.c +++ b/tools/testing/selftests/bpf/progs/cb_refs.c @@ -4,7 +4,7 @@ #include struct map_value { - struct prog_test_ref_kfunc __kptr_ref *ptr; + struct prog_test_ref_kfunc __kptr *ptr; }; struct { diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h index 2f8de933b957..d0b7cd0d09d7 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h @@ -10,7 +10,7 @@ #include struct __cgrps_kfunc_map_value { - struct cgroup __kptr_ref * cgrp; + struct cgroup __kptr * cgrp; }; struct hash_map { diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h index ad34f3b602be..65e5496ca1b2 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_common.h +++ b/tools/testing/selftests/bpf/progs/cpumask_common.h @@ -10,7 +10,7 @@ int err; struct __cpumask_map_value { - struct bpf_cpumask __kptr_ref * cpumask; + struct bpf_cpumask __kptr * cpumask; }; struct array_map { diff --git a/tools/testing/selftests/bpf/progs/jit_probe_mem.c b/tools/testing/selftests/bpf/progs/jit_probe_mem.c index 2d2e61470794..13f00ca2ed0a 100644 --- a/tools/testing/selftests/bpf/progs/jit_probe_mem.c +++ b/tools/testing/selftests/bpf/progs/jit_probe_mem.c @@ -4,7 +4,7 @@ #include #include -static struct prog_test_ref_kfunc __kptr_ref *v; +static struct prog_test_ref_kfunc __kptr *v; long total_sum = -1; extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; diff --git a/tools/testing/selftests/bpf/progs/lru_bug.c b/tools/testing/selftests/bpf/progs/lru_bug.c index 687081a724b3..ad73029cb1e3 100644 --- a/tools/testing/selftests/bpf/progs/lru_bug.c +++ b/tools/testing/selftests/bpf/progs/lru_bug.c @@ -4,7 +4,7 @@ #include struct map_value { - struct task_struct __kptr *ptr; + struct task_struct __kptr_untrusted *ptr; }; struct { 
diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c index a24d17bc17eb..3fe7cde4cbfd 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr.c +++ b/tools/testing/selftests/bpf/progs/map_kptr.c @@ -4,8 +4,8 @@ #include struct map_value { - struct prog_test_ref_kfunc __kptr *unref_ptr; - struct prog_test_ref_kfunc __kptr_ref *ref_ptr; + struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr; + struct prog_test_ref_kfunc __kptr *ref_ptr; }; struct array_map { diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c index 760e41e1a632..e19e2a5f38cf 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c @@ -7,9 +7,9 @@ struct map_value { char buf[8]; - struct prog_test_ref_kfunc __kptr *unref_ptr; - struct prog_test_ref_kfunc __kptr_ref *ref_ptr; - struct prog_test_member __kptr_ref *ref_memb_ptr; + struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr; + struct prog_test_ref_kfunc __kptr *ref_ptr; + struct prog_test_member __kptr *ref_memb_ptr; }; struct array_map { diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_common.h b/tools/testing/selftests/bpf/progs/task_kfunc_common.h index c0ffd171743e..4c2a4b0e3a25 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_common.h +++ b/tools/testing/selftests/bpf/progs/task_kfunc_common.h @@ -10,7 +10,7 @@ #include struct __tasks_kfunc_map_value { - struct task_struct __kptr_ref * task; + struct task_struct __kptr * task; }; struct hash_map { diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 8b9949bb833d..49a70d9beb0b 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -699,13 +699,13 @@ static int create_cgroup_storage(bool percpu) * struct bpf_timer t; * }; * struct btf_ptr { + * struct prog_test_ref_kfunc __kptr_untrusted *ptr; * struct prog_test_ref_kfunc __kptr *ptr; - * struct prog_test_ref_kfunc __kptr_ref *ptr; - * struct prog_test_member __kptr_ref *ptr; + * struct prog_test_member __kptr *ptr; * } */ static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t" - "\0btf_ptr\0prog_test_ref_kfunc\0ptr\0kptr\0kptr_ref" + "\0btf_ptr\0prog_test_ref_kfunc\0ptr\0kptr\0kptr_untrusted" "\0prog_test_member"; static __u32 btf_raw_types[] = { /* int */ @@ -724,20 +724,20 @@ static __u32 btf_raw_types[] = { BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */ /* struct prog_test_ref_kfunc */ /* [6] */ BTF_STRUCT_ENC(51, 0, 0), - BTF_STRUCT_ENC(89, 0, 0), /* [7] */ + BTF_STRUCT_ENC(95, 0, 0), /* [7] */ + /* type tag "kptr_untrusted" */ + BTF_TYPE_TAG_ENC(80, 6), /* [8] */ /* type tag "kptr" */ - BTF_TYPE_TAG_ENC(75, 6), /* [8] */ - /* type tag "kptr_ref" */ - BTF_TYPE_TAG_ENC(80, 6), /* [9] */ - BTF_TYPE_TAG_ENC(80, 7), /* [10] */ + BTF_TYPE_TAG_ENC(75, 6), /* [9] */ + BTF_TYPE_TAG_ENC(75, 7), /* [10] */ BTF_PTR_ENC(8), /* [11] */ BTF_PTR_ENC(9), /* [12] */ BTF_PTR_ENC(10), /* [13] */ /* struct btf_ptr */ /* [14] */ BTF_STRUCT_ENC(43, 3, 24), - BTF_MEMBER_ENC(71, 11, 0), /* struct prog_test_ref_kfunc __kptr *ptr; */ - BTF_MEMBER_ENC(71, 12, 64), /* struct prog_test_ref_kfunc __kptr_ref *ptr; */ - BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr_ref *ptr; */ + BTF_MEMBER_ENC(71, 11, 0), /* struct prog_test_ref_kfunc __kptr_untrusted *ptr; */ + BTF_MEMBER_ENC(71, 12, 64), /* struct prog_test_ref_kfunc __kptr *ptr; */ + 
BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr *ptr; */ }; static char bpf_vlog[UINT_MAX >> 8]; -- cgit v1.2.3-70-g09d2 From 20c09d92faeefb8536f705d3a4629e0dc314c8a1 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 2 Mar 2023 20:14:43 -0800 Subject: bpf: Introduce kptr_rcu. The lifetime of certain kernel structures like 'struct cgroup' is protected by RCU. Hence it's safe to dereference them directly from __kptr tagged pointers in bpf maps. The resulting pointer is MEM_RCU and can be passed to kfuncs that expect KF_RCU. Dereferencing other kptrs returns PTR_UNTRUSTED. For example: struct map_value { struct cgroup __kptr *cgrp; }; SEC("tp_btf/cgroup_mkdir") int BPF_PROG(test_cgrp_get_ancestors, struct cgroup *cgrp_arg, const char *path) { struct cgroup *cg, *cg2; cg = bpf_cgroup_acquire(cgrp_arg); // cg is PTR_TRUSTED and ref_obj_id > 0 bpf_kptr_xchg(&v->cgrp, cg); cg2 = v->cgrp; // This is the new feature introduced by this patch. // cg2 is PTR_MAYBE_NULL | MEM_RCU. // When cg2 != NULL, it's a valid cgroup, but its percpu_ref could be zero if (cg2) bpf_cgroup_ancestor(cg2, level); // safe to do. } Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: Tejun Heo Acked-by: David Vernet Link: https://lore.kernel.org/bpf/20230303041446.3630-4-alexei.starovoitov@gmail.com --- Documentation/bpf/kfuncs.rst | 12 +++-- include/linux/btf.h | 2 +- kernel/bpf/helpers.c | 6 ++- kernel/bpf/verifier.c | 55 ++++++++++++++++++---- net/bpf/test_run.c | 3 +- .../selftests/bpf/progs/cgrp_kfunc_failure.c | 2 +- tools/testing/selftests/bpf/progs/map_kptr_fail.c | 4 +- tools/testing/selftests/bpf/verifier/calls.c | 2 +- tools/testing/selftests/bpf/verifier/map_kptr.c | 2 +- 9 files changed, 65 insertions(+), 23 deletions(-) (limited to 'tools/testing') diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst index b5d9b0d446bc..69eccf6f98ef 100644 --- a/Documentation/bpf/kfuncs.rst +++ b/Documentation/bpf/kfuncs.rst @@ -249,11 +249,13 @@ added later. 2.4.8 KF_RCU flag ----------------- -The KF_RCU flag is used for kfuncs which have a rcu ptr as its argument. -When used together with KF_ACQUIRE, it indicates the kfunc should have a -single argument which must be a trusted argument or a MEM_RCU pointer. -The argument may have reference count of 0 and the kfunc must take this -into consideration. +The KF_RCU flag is a weaker version of KF_TRUSTED_ARGS. The kfuncs marked with +KF_RCU expect either PTR_TRUSTED or MEM_RCU arguments. The verifier guarantees +that the objects are valid and there is no use-after-free. The pointers are not +NULL, but the object's refcount could have reached zero. The kfuncs need to +consider doing refcnt != 0 check, especially when returning a KF_ACQUIRE +pointer. Note as well that a KF_ACQUIRE kfunc that is KF_RCU should very likely +also be KF_RET_NULL. .. _KF_deprecated_flag: diff --git a/include/linux/btf.h b/include/linux/btf.h index 49e0fe6d8274..556b3e2e7471 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -70,7 +70,7 @@ #define KF_TRUSTED_ARGS (1 << 4) /* kfunc only takes trusted pointer arguments */ #define KF_SLEEPABLE (1 << 5) /* kfunc may sleep */ #define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */ -#define KF_RCU (1 << 7) /* kfunc only takes rcu pointer arguments */ +#define KF_RCU (1 << 7) /* kfunc takes either rcu or trusted pointer arguments */ /* * Tag marking a kernel function as a kfunc. 
This is meant to minimize the diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 12f12e879bcf..637ac4e92e75 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2163,8 +2163,10 @@ __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) if (level > cgrp->level || level < 0) return NULL; + /* cgrp's refcnt could be 0 here, but ancestors can still be accessed */ ancestor = cgrp->ancestors[level]; - cgroup_get(ancestor); + if (!cgroup_tryget(ancestor)) + return NULL; return ancestor; } @@ -2382,7 +2384,7 @@ BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL) BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_cgroup_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE) -BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_TRUSTED_ARGS | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL) #endif BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index b834f3d2d81a..a095055d7ef4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4218,7 +4218,7 @@ static int map_kptr_match_type(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno) { const char *targ_name = kernel_type_name(kptr_field->kptr.btf, kptr_field->kptr.btf_id); - int perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED; + int perm_flags = PTR_MAYBE_NULL | PTR_TRUSTED | MEM_RCU; const char *reg_name = ""; /* Only unreferenced case accepts untrusted pointers */ @@ -4285,6 +4285,34 @@ bad_type: return -EINVAL; } +/* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock() + * can dereference RCU protected pointers and result is PTR_TRUSTED. + */ +static bool in_rcu_cs(struct bpf_verifier_env *env) +{ + return env->cur_state->active_rcu_lock || !env->prog->aux->sleepable; +} + +/* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */ +BTF_SET_START(rcu_protected_types) +BTF_ID(struct, prog_test_ref_kfunc) +BTF_ID(struct, cgroup) +BTF_SET_END(rcu_protected_types) + +static bool rcu_protected_object(const struct btf *btf, u32 btf_id) +{ + if (!btf_is_kernel(btf)) + return false; + return btf_id_set_contains(&rcu_protected_types, btf_id); +} + +static bool rcu_safe_kptr(const struct btf_field *field) +{ + const struct btf_field_kptr *kptr = &field->kptr; + + return field->type == BPF_KPTR_REF && rcu_protected_object(kptr->btf, kptr->btf_id); +} + static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno, int value_regno, int insn_idx, struct btf_field *kptr_field) @@ -4319,7 +4347,10 @@ static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno, * value from map as PTR_TO_BTF_ID, with the correct type. */ mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf, - kptr_field->kptr.btf_id, PTR_MAYBE_NULL | PTR_UNTRUSTED); + kptr_field->kptr.btf_id, + rcu_safe_kptr(kptr_field) && in_rcu_cs(env) ? + PTR_MAYBE_NULL | MEM_RCU : + PTR_MAYBE_NULL | PTR_UNTRUSTED); /* For mark_ptr_or_null_reg */ val_reg->id = ++env->id_gen; } else if (class == BPF_STX) { @@ -5163,10 +5194,17 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, * An RCU-protected pointer can also be deemed trusted if we are in an * RCU read region. This case is handled below. 
*/ - if (nested_ptr_is_trusted(env, reg, off)) + if (nested_ptr_is_trusted(env, reg, off)) { flag |= PTR_TRUSTED; - else + /* + * task->cgroups is trusted. It provides a stronger guarantee + * than __rcu tag on 'cgroups' field in 'struct task_struct'. + * Clear MEM_RCU in such case. + */ + flag &= ~MEM_RCU; + } else { flag &= ~PTR_TRUSTED; + } if (flag & MEM_RCU) { /* Mark value register as MEM_RCU only if it is protected by @@ -5175,11 +5213,10 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, * read lock region. Also mark rcu pointer as PTR_MAYBE_NULL since * it could be null in some cases. */ - if (!env->cur_state->active_rcu_lock || - !(is_trusted_reg(reg) || is_rcu_reg(reg))) - flag &= ~MEM_RCU; - else + if (in_rcu_cs(env) && (is_trusted_reg(reg) || is_rcu_reg(reg))) flag |= PTR_MAYBE_NULL; + else + flag &= ~MEM_RCU; } else if (reg->type & MEM_RCU) { /* ptr (reg) is marked as MEM_RCU, but the struct field is not tagged * with __rcu. Mark the flag as PTR_UNTRUSTED conservatively. @@ -9676,7 +9713,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ return -EINVAL; } - if (is_kfunc_trusted_args(meta) && + if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) && (register_is_null(reg) || type_may_be_null(reg->type))) { verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i); return -EACCES; diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 6f3d654b3339..6a8b33a103a4 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -737,6 +737,7 @@ __bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len) __bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) { + /* p != NULL, but p->cnt could be 0 */ } __bpf_kfunc void bpf_kfunc_call_test_destructive(void) @@ -784,7 +785,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3) BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1) BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1) BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU) BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE) BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg) BTF_SET8_END(test_sk_check_kfunc_ids) diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c index 4ad7fe24966d..b42291ed9586 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c @@ -205,7 +205,7 @@ int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path) } SEC("tp_btf/cgroup_mkdir") -__failure __msg("arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket") +__failure __msg("expects refcounted") int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path) { struct __cgrps_kfunc_map_value *v; diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c index e19e2a5f38cf..08f9ec18c345 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c @@ -281,7 +281,7 @@ int reject_kptr_get_bad_type_match(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_") +__failure __msg("R1 type=rcu_ptr_or_null_ expected=percpu_ptr_") int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx) { struct map_value *v; @@ -316,7 +316,7 @@ 
int reject_untrusted_store_to_ref(struct __sk_buff *ctx) } SEC("?tc") -__failure __msg("R2 type=untrusted_ptr_ expected=ptr_") +__failure __msg("R2 must be referenced") int reject_untrusted_xchg(struct __sk_buff *ctx) { struct prog_test_ref_kfunc *p; diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index 289ed202ec66..9a326a800e5c 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -243,7 +243,7 @@ }, .result_unpriv = REJECT, .result = REJECT, - .errstr = "R1 must be referenced", + .errstr = "R1 must be", }, { "calls: valid kfunc call: referenced arg needs refcounted PTR_TO_BTF_ID", diff --git a/tools/testing/selftests/bpf/verifier/map_kptr.c b/tools/testing/selftests/bpf/verifier/map_kptr.c index 6914904344c0..d775ccb01989 100644 --- a/tools/testing/selftests/bpf/verifier/map_kptr.c +++ b/tools/testing/selftests/bpf/verifier/map_kptr.c @@ -336,7 +336,7 @@ .prog_type = BPF_PROG_TYPE_SCHED_CLS, .fixup_map_kptr = { 1 }, .result = REJECT, - .errstr = "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_", + .errstr = "R1 type=rcu_ptr_or_null_ expected=percpu_ptr_", }, { "map_kptr: ref: reject off != 0", -- cgit v1.2.3-70-g09d2 From 838bd4ac9aa35bdf43bf0199fa8eef9d3a004611 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 2 Mar 2023 20:14:44 -0800 Subject: selftests/bpf: Add a test case for kptr_rcu. Tweak existing map_kptr test to check kptr_rcu. Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: David Vernet Link: https://lore.kernel.org/bpf/20230303041446.3630-5-alexei.starovoitov@gmail.com --- tools/testing/selftests/bpf/progs/map_kptr.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c index 3fe7cde4cbfd..3903d30217b8 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr.c +++ b/tools/testing/selftests/bpf/progs/map_kptr.c @@ -118,6 +118,7 @@ extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp extern struct prog_test_ref_kfunc * bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym; extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; +void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) __ksym; #define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val)) @@ -147,12 +148,23 @@ static void test_kptr_ref(struct map_value *v) WRITE_ONCE(v->unref_ptr, p); if (!p) return; + /* + * p is rcu_ptr_prog_test_ref_kfunc, + * because bpf prog is non-sleepable and runs in RCU CS. + * p can be passed to kfunc that requires KF_RCU. + */ + bpf_kfunc_call_test_ref(p); if (p->a + p->b > 100) return; /* store NULL */ p = bpf_kptr_xchg(&v->ref_ptr, NULL); if (!p) return; + /* + * p is trusted_ptr_prog_test_ref_kfunc. + * p can be passed to kfunc that requires KF_RCU. + */ + bpf_kfunc_call_test_ref(p); if (p->a + p->b > 100) { bpf_kfunc_call_test_release(p); return; -- cgit v1.2.3-70-g09d2 From 0047d8343f6042c4feea24072ef254d47b8a33b3 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 2 Mar 2023 20:14:45 -0800 Subject: selftests/bpf: Tweak cgroup kfunc test. Adjust cgroup kfunc test to dereference RCU protected cgroup pointer as PTR_TRUSTED and pass into KF_TRUSTED_ARGS kfunc. 
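For reference, a condensed sketch of the pattern the adjusted test exercises (prog name is illustrative; the map value type and lookup helper are the ones from cgrp_kfunc_common.h, error handling trimmed):

  SEC("tp_btf/cgroup_mkdir")
  int BPF_PROG(sketch_rcu_kptr_deref, struct cgroup *cgrp, const char *path)
  {
          struct __cgrps_kfunc_map_value *v;
          struct cgroup *kptr, *cg;

          v = cgrps_kfunc_map_value_lookup(cgrp);
          if (!v)
                  return 0;

          /* In a non-sleepable prog this load of a __kptr field is an
           * RCU protected cgroup pointer, so the verifier accepts it as
           * an argument to bpf_cgroup_ancestor().
           */
          kptr = v->cgrp;
          if (!kptr)
                  return 0;

          cg = bpf_cgroup_ancestor(kptr, 1);
          if (cg) /* verifier only check */
                  bpf_cgroup_release(cg);
          return 0;
  }
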
Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: David Vernet Link: https://lore.kernel.org/bpf/20230303041446.3630-6-alexei.starovoitov@gmail.com --- tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c index 42e13aebdd62..030aff700084 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c @@ -61,7 +61,7 @@ int BPF_PROG(test_cgrp_acquire_leave_in_map, struct cgroup *cgrp, const char *pa SEC("tp_btf/cgroup_mkdir") int BPF_PROG(test_cgrp_xchg_release, struct cgroup *cgrp, const char *path) { - struct cgroup *kptr; + struct cgroup *kptr, *cg; struct __cgrps_kfunc_map_value *v; long status; @@ -80,6 +80,16 @@ int BPF_PROG(test_cgrp_xchg_release, struct cgroup *cgrp, const char *path) return 0; } + kptr = v->cgrp; + if (!kptr) { + err = 4; + return 0; + } + + cg = bpf_cgroup_ancestor(kptr, 1); + if (cg) /* verifier only check */ + bpf_cgroup_release(cg); + kptr = bpf_kptr_xchg(&v->cgrp, NULL); if (!kptr) { err = 3; -- cgit v1.2.3-70-g09d2 From 6fcd486b3a0a628c41f12b3a7329a18a2c74b351 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 2 Mar 2023 20:14:46 -0800 Subject: bpf: Refactor RCU enforcement in the verifier. bpf_rcu_read_lock/unlock() are only available in clang compiled kernels. Lack of such key mechanism makes it impossible for sleepable bpf programs to use RCU pointers. Allow bpf_rcu_read_lock/unlock() in GCC compiled kernels (though GCC doesn't support btf_type_tag yet) and allowlist certain field dereferences in important data structures like tast_struct, cgroup, socket that are used by sleepable programs either as RCU pointer or full trusted pointer (which is valid outside of RCU CS). Use BTF_TYPE_SAFE_RCU and BTF_TYPE_SAFE_TRUSTED macros for such tagging. They will be removed once GCC supports btf_type_tag. With that refactor check_ptr_to_btf_access(). Make it strict in enforcing PTR_TRUSTED and PTR_UNTRUSTED while deprecating old PTR_TO_BTF_ID without modifier flags. There is a chance that this strict enforcement might break existing programs (especially on GCC compiled kernels), but this cleanup has to start sooner than later. Note PTR_TO_CTX access still yields old deprecated PTR_TO_BTF_ID. Once it's converted to strict PTR_TRUSTED or PTR_UNTRUSTED the kfuncs and helpers will be able to default to KF_TRUSTED_ARGS. KF_RCU will remain as a weaker version of KF_TRUSTED_ARGS where obj refcnt could be 0. Adjust rcu_read_lock selftest to run on gcc and clang compiled kernels. 
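From the program side, the effect can be summarized by this condensed sketch of what the adjusted rcu_read_lock selftest relies on (prog name is illustrative; map_a and SYS_PREFIX are the ones defined in progs/rcu_read_lock.c):

  SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
  int sketch_safe_rcu_fields(void *ctx)
  {
          struct task_struct *task, *group_leader;

          task = bpf_get_current_task_btf();
          bpf_rcu_read_lock();
          /* real_parent and group_leader are allowlisted via
           * BTF_TYPE_SAFE_RCU(struct task_struct), so the walk stays
           * trusted inside the RCU CS and the resulting pointer can be
           * passed to bpf_task_storage_get().
           */
          group_leader = task->real_parent->group_leader;
          (void)bpf_task_storage_get(&map_a, group_leader, 0, 0);
          bpf_rcu_read_unlock();
          return 0;
  }
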
Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: David Vernet Link: https://lore.kernel.org/bpf/20230303041446.3630-7-alexei.starovoitov@gmail.com --- include/linux/bpf.h | 2 +- include/linux/bpf_verifier.h | 1 - kernel/bpf/btf.c | 16 +- kernel/bpf/cpumask.c | 40 ++--- kernel/bpf/verifier.c | 178 ++++++++++++++------- .../selftests/bpf/prog_tests/cgrp_local_storage.c | 14 +- .../selftests/bpf/prog_tests/rcu_read_lock.c | 16 +- .../selftests/bpf/progs/cgrp_ls_sleepable.c | 4 +- .../testing/selftests/bpf/progs/cpumask_failure.c | 2 +- .../selftests/bpf/progs/nested_trust_failure.c | 2 +- tools/testing/selftests/bpf/progs/rcu_read_lock.c | 6 +- tools/testing/selftests/bpf/verifier/calls.c | 2 +- 12 files changed, 173 insertions(+), 110 deletions(-) (limited to 'tools/testing') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 23ec684e660d..d3456804f7aa 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2279,7 +2279,7 @@ struct bpf_core_ctx { bool btf_nested_type_is_trusted(struct bpf_verifier_log *log, const struct bpf_reg_state *reg, - int off); + int off, const char *suffix); bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log, const struct btf *reg_btf, u32 reg_id, diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index b26ff2a8f63b..18538bad2b8c 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -537,7 +537,6 @@ struct bpf_verifier_env { bool bypass_spec_v1; bool bypass_spec_v4; bool seen_direct_write; - bool rcu_tag_supported; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ const struct bpf_line_info *prev_linfo; struct bpf_verifier_log log; diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index c5e1d6955491..a8cb09e5973b 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6163,6 +6163,7 @@ static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf, const char *tname, *mname, *tag_value; u32 vlen, elem_id, mid; + *flag = 0; again: tname = __btf_name_by_offset(btf, t->name_off); if (!btf_type_is_struct(t)) { @@ -6329,6 +6330,15 @@ error: * of this field or inside of this struct */ if (btf_type_is_struct(mtype)) { + if (BTF_INFO_KIND(mtype->info) == BTF_KIND_UNION && + btf_type_vlen(mtype) != 1) + /* + * walking unions yields untrusted pointers + * with exception of __bpf_md_ptr and other + * unions with a single member + */ + *flag |= PTR_UNTRUSTED; + /* our field must be inside that union or struct */ t = mtype; @@ -6373,7 +6383,7 @@ error: stype = btf_type_skip_modifiers(btf, mtype->type, &id); if (btf_type_is_struct(stype)) { *next_btf_id = id; - *flag = tmp_flag; + *flag |= tmp_flag; return WALK_PTR; } } @@ -8357,7 +8367,7 @@ out: bool btf_nested_type_is_trusted(struct bpf_verifier_log *log, const struct bpf_reg_state *reg, - int off) + int off, const char *suffix) { struct btf *btf = reg->btf; const struct btf_type *walk_type, *safe_type; @@ -8374,7 +8384,7 @@ bool btf_nested_type_is_trusted(struct bpf_verifier_log *log, tname = btf_name_by_offset(btf, walk_type->name_off); - ret = snprintf(safe_tname, sizeof(safe_tname), "%s__safe_fields", tname); + ret = snprintf(safe_tname, sizeof(safe_tname), "%s%s", tname, suffix); if (ret < 0) return false; diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c index 2b3fbbfebdc5..b6587ec40f1b 100644 --- a/kernel/bpf/cpumask.c +++ b/kernel/bpf/cpumask.c @@ -427,26 +427,26 @@ BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE 
| KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_cpumask_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL) -BTF_ID_FLAGS(func, bpf_cpumask_first, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_first_zero, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_set_cpu, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_clear_cpu, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_test_cpu, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_test_and_set_cpu, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_test_and_clear_cpu, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_setall, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_clear, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_and, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_or, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_xor, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_equal, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_intersects, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_subset, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_empty, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_full, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_any, KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_any_and, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_first, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_first_zero, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_set_cpu, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_clear_cpu, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_test_cpu, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_test_and_set_cpu, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_test_and_clear_cpu, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_setall, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_clear, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_and, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_or, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_xor, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_equal, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_intersects, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_subset, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_empty, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_full, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_any, KF_RCU) +BTF_ID_FLAGS(func, bpf_cpumask_any_and, KF_RCU) BTF_SET8_END(cpumask_kfunc_btf_ids) static const struct btf_kfunc_id_set cpumask_kfunc_set = { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a095055d7ef4..c2adf3c24c64 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5073,29 +5073,76 @@ static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val) return 0; } -#define BTF_TYPE_SAFE_NESTED(__type) __PASTE(__type, __safe_fields) +#define BTF_TYPE_SAFE_RCU(__type) __PASTE(__type, __safe_rcu) +#define BTF_TYPE_SAFE_TRUSTED(__type) __PASTE(__type, __safe_trusted) -BTF_TYPE_SAFE_NESTED(struct task_struct) { +/* + * Allow list few fields as RCU trusted or full trusted. + * This logic doesn't allow mix tagging and will be removed once GCC supports + * btf_type_tag. 
+ */ + +/* RCU trusted: these fields are trusted in RCU CS and never NULL */ +BTF_TYPE_SAFE_RCU(struct task_struct) { const cpumask_t *cpus_ptr; struct css_set __rcu *cgroups; + struct task_struct __rcu *real_parent; + struct task_struct *group_leader; }; -BTF_TYPE_SAFE_NESTED(struct css_set) { +BTF_TYPE_SAFE_RCU(struct css_set) { struct cgroup *dfl_cgrp; }; -static bool nested_ptr_is_trusted(struct bpf_verifier_env *env, - struct bpf_reg_state *reg, - int off) +/* full trusted: these fields are trusted even outside of RCU CS and never NULL */ +BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta) { + __bpf_md_ptr(struct seq_file *, seq); +}; + +BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task) { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct task_struct *, task); +}; + +BTF_TYPE_SAFE_TRUSTED(struct linux_binprm) { + struct file *file; +}; + +BTF_TYPE_SAFE_TRUSTED(struct file) { + struct inode *f_inode; +}; + +BTF_TYPE_SAFE_TRUSTED(struct dentry) { + /* no negative dentry-s in places where bpf can see it */ + struct inode *d_inode; +}; + +BTF_TYPE_SAFE_TRUSTED(struct socket) { + struct sock *sk; +}; + +static bool type_is_rcu(struct bpf_verifier_env *env, + struct bpf_reg_state *reg, + int off) { - /* If its parent is not trusted, it can't regain its trusted status. */ - if (!is_trusted_reg(reg)) - return false; + BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct task_struct)); + BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct css_set)); - BTF_TYPE_EMIT(BTF_TYPE_SAFE_NESTED(struct task_struct)); - BTF_TYPE_EMIT(BTF_TYPE_SAFE_NESTED(struct css_set)); + return btf_nested_type_is_trusted(&env->log, reg, off, "__safe_rcu"); +} - return btf_nested_type_is_trusted(&env->log, reg, off); +static bool type_is_trusted(struct bpf_verifier_env *env, + struct bpf_reg_state *reg, + int off) +{ + BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta)); + BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task)); + BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm)); + BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file)); + BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct dentry)); + BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct socket)); + + return btf_nested_type_is_trusted(&env->log, reg, off, "__safe_trusted"); } static int check_ptr_to_btf_access(struct bpf_verifier_env *env, @@ -5181,49 +5228,58 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, if (ret < 0) return ret; - /* If this is an untrusted pointer, all pointers formed by walking it - * also inherit the untrusted flag. - */ - if (type_flag(reg->type) & PTR_UNTRUSTED) - flag |= PTR_UNTRUSTED; + if (ret != PTR_TO_BTF_ID) { + /* just mark; */ - /* By default any pointer obtained from walking a trusted pointer is no - * longer trusted, unless the field being accessed has explicitly been - * marked as inheriting its parent's state of trust. - * - * An RCU-protected pointer can also be deemed trusted if we are in an - * RCU read region. This case is handled below. - */ - if (nested_ptr_is_trusted(env, reg, off)) { - flag |= PTR_TRUSTED; - /* - * task->cgroups is trusted. It provides a stronger guarantee - * than __rcu tag on 'cgroups' field in 'struct task_struct'. - * Clear MEM_RCU in such case. + } else if (type_flag(reg->type) & PTR_UNTRUSTED) { + /* If this is an untrusted pointer, all pointers formed by walking it + * also inherit the untrusted flag. 
+ */ + flag = PTR_UNTRUSTED; + + } else if (is_trusted_reg(reg) || is_rcu_reg(reg)) { + /* By default any pointer obtained from walking a trusted pointer is no + * longer trusted, unless the field being accessed has explicitly been + * marked as inheriting its parent's state of trust (either full or RCU). + * For example: + * 'cgroups' pointer is untrusted if task->cgroups dereference + * happened in a sleepable program outside of bpf_rcu_read_lock() + * section. In a non-sleepable program it's trusted while in RCU CS (aka MEM_RCU). + * Note bpf_rcu_read_unlock() converts MEM_RCU pointers to PTR_UNTRUSTED. + * + * A regular RCU-protected pointer with __rcu tag can also be deemed + * trusted if we are in an RCU CS. Such pointer can be NULL. */ - flag &= ~MEM_RCU; + if (type_is_trusted(env, reg, off)) { + flag |= PTR_TRUSTED; + } else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) { + if (type_is_rcu(env, reg, off)) { + /* ignore __rcu tag and mark it MEM_RCU */ + flag |= MEM_RCU; + } else if (flag & MEM_RCU) { + /* __rcu tagged pointers can be NULL */ + flag |= PTR_MAYBE_NULL; + } else if (flag & (MEM_PERCPU | MEM_USER)) { + /* keep as-is */ + } else { + /* walking unknown pointers yields untrusted pointer */ + flag = PTR_UNTRUSTED; + } + } else { + /* + * If not in RCU CS or MEM_RCU pointer can be NULL then + * aggressively mark as untrusted otherwise such + * pointers will be plain PTR_TO_BTF_ID without flags + * and will be allowed to be passed into helpers for + * compat reasons. + */ + flag = PTR_UNTRUSTED; + } } else { + /* Old compat. Deprecated */ flag &= ~PTR_TRUSTED; } - if (flag & MEM_RCU) { - /* Mark value register as MEM_RCU only if it is protected by - * bpf_rcu_read_lock() and the ptr reg is rcu or trusted. MEM_RCU - * itself can already indicate trustedness inside the rcu - * read lock region. Also mark rcu pointer as PTR_MAYBE_NULL since - * it could be null in some cases. - */ - if (in_rcu_cs(env) && (is_trusted_reg(reg) || is_rcu_reg(reg))) - flag |= PTR_MAYBE_NULL; - else - flag &= ~MEM_RCU; - } else if (reg->type & MEM_RCU) { - /* ptr (reg) is marked as MEM_RCU, but the struct field is not tagged - * with __rcu. Mark the flag as PTR_UNTRUSTED conservatively. - */ - flag |= PTR_UNTRUSTED; - } - if (atype == BPF_READ && value_regno >= 0) mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag); @@ -10049,10 +10105,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta); rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta); - if ((rcu_lock || rcu_unlock) && !env->rcu_tag_supported) { - verbose(env, "no vmlinux btf rcu tag support for kfunc %s\n", func_name); - return -EACCES; - } if (env->cur_state->active_rcu_lock) { struct bpf_func_state *state; @@ -14911,8 +14963,22 @@ static int do_check(struct bpf_verifier_env *env) * src_reg == stack|map in some other branch. * Reject it. */ - verbose(env, "same insn cannot be used with different pointers\n"); - return -EINVAL; + if (base_type(src_reg_type) == PTR_TO_BTF_ID && + base_type(*prev_src_type) == PTR_TO_BTF_ID) { + /* + * Have to support a use case when one path through + * the program yields TRUSTED pointer while another + * is UNTRUSTED. Fallback to UNTRUSTED to generate + * BPF_PROBE_MEM. 
+ */ + *prev_src_type = PTR_TO_BTF_ID | PTR_UNTRUSTED; + } else { + verbose(env, + "The same insn cannot be used with different pointers: %s", + reg_type_str(env, src_reg_type)); + verbose(env, " != %s\n", reg_type_str(env, *prev_src_type)); + return -EINVAL; + } } } else if (class == BPF_STX) { @@ -17984,8 +18050,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr) env->bypass_spec_v1 = bpf_bypass_spec_v1(); env->bypass_spec_v4 = bpf_bypass_spec_v4(); env->bpf_capable = bpf_capable(); - env->rcu_tag_supported = btf_vmlinux && - btf_find_by_name_kind(btf_vmlinux, "rcu", BTF_KIND_TYPE_TAG) > 0; if (is_priv) env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c index 2cc759956e3b..63e776f4176e 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c +++ b/tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c @@ -193,7 +193,7 @@ out: cgrp_ls_sleepable__destroy(skel); } -static void test_no_rcu_lock(__u64 cgroup_id) +static void test_yes_rcu_lock(__u64 cgroup_id) { struct cgrp_ls_sleepable *skel; int err; @@ -204,7 +204,7 @@ static void test_no_rcu_lock(__u64 cgroup_id) skel->bss->target_pid = syscall(SYS_gettid); - bpf_program__set_autoload(skel->progs.no_rcu_lock, true); + bpf_program__set_autoload(skel->progs.yes_rcu_lock, true); err = cgrp_ls_sleepable__load(skel); if (!ASSERT_OK(err, "skel_load")) goto out; @@ -220,7 +220,7 @@ out: cgrp_ls_sleepable__destroy(skel); } -static void test_rcu_lock(void) +static void test_no_rcu_lock(void) { struct cgrp_ls_sleepable *skel; int err; @@ -229,7 +229,7 @@ static void test_rcu_lock(void) if (!ASSERT_OK_PTR(skel, "skel_open")) return; - bpf_program__set_autoload(skel->progs.yes_rcu_lock, true); + bpf_program__set_autoload(skel->progs.no_rcu_lock, true); err = cgrp_ls_sleepable__load(skel); ASSERT_ERR(err, "skel_load"); @@ -256,10 +256,10 @@ void test_cgrp_local_storage(void) test_negative(); if (test__start_subtest("cgroup_iter_sleepable")) test_cgroup_iter_sleepable(cgroup_fd, cgroup_id); + if (test__start_subtest("yes_rcu_lock")) + test_yes_rcu_lock(cgroup_id); if (test__start_subtest("no_rcu_lock")) - test_no_rcu_lock(cgroup_id); - if (test__start_subtest("rcu_lock")) - test_rcu_lock(); + test_no_rcu_lock(); close(cgroup_fd); } diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c index 447d8560ecb6..3f1f58d3a729 100644 --- a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c @@ -25,10 +25,10 @@ static void test_success(void) bpf_program__set_autoload(skel->progs.get_cgroup_id, true); bpf_program__set_autoload(skel->progs.task_succ, true); - bpf_program__set_autoload(skel->progs.no_lock, true); bpf_program__set_autoload(skel->progs.two_regions, true); bpf_program__set_autoload(skel->progs.non_sleepable_1, true); bpf_program__set_autoload(skel->progs.non_sleepable_2, true); + bpf_program__set_autoload(skel->progs.task_trusted_non_rcuptr, true); err = rcu_read_lock__load(skel); if (!ASSERT_OK(err, "skel_load")) goto out; @@ -69,6 +69,7 @@ out: static const char * const inproper_region_tests[] = { "miss_lock", + "no_lock", "miss_unlock", "non_sleepable_rcu_mismatch", "inproper_sleepable_helper", @@ -99,7 +100,6 @@ out: } static const char * const rcuptr_misuse_tests[] = { - "task_untrusted_non_rcuptr", 
"task_untrusted_rcuptr", "cross_rcu_region", }; @@ -128,17 +128,8 @@ out: void test_rcu_read_lock(void) { - struct btf *vmlinux_btf; int cgroup_fd; - vmlinux_btf = btf__load_vmlinux_btf(); - if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF")) - return; - if (btf__find_by_name_kind(vmlinux_btf, "rcu", BTF_KIND_TYPE_TAG) < 0) { - test__skip(); - goto out; - } - cgroup_fd = test__join_cgroup("/rcu_read_lock"); if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /rcu_read_lock")) goto out; @@ -153,6 +144,5 @@ void test_rcu_read_lock(void) if (test__start_subtest("negative_tests_rcuptr_misuse")) test_rcuptr_misuse(); close(cgroup_fd); -out: - btf__free(vmlinux_btf); +out:; } diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c index 2d11ed528b6f..7615dc23d301 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c +++ b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c @@ -49,7 +49,7 @@ int no_rcu_lock(void *ctx) if (task->pid != target_pid) return 0; - /* ptr_to_btf_id semantics. should work. */ + /* task->cgroups is untrusted in sleepable prog outside of RCU CS */ cgrp = task->cgroups->dfl_cgrp; ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE); @@ -71,7 +71,7 @@ int yes_rcu_lock(void *ctx) bpf_rcu_read_lock(); cgrp = task->cgroups->dfl_cgrp; - /* cgrp is untrusted and cannot pass to bpf_cgrp_storage_get() helper. */ + /* cgrp is trusted under RCU CS */ ptr = bpf_cgrp_storage_get(&map_a, cgrp, 0, BPF_LOCAL_STORAGE_GET_F_CREATE); if (ptr) cgroup_id = cgrp->kn->id; diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c index 33e8e86dd090..c16f7563b84e 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_failure.c +++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c @@ -44,7 +44,7 @@ int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flag } SEC("tp_btf/task_newtask") -__failure __msg("bpf_cpumask_acquire args#0 expected pointer to STRUCT bpf_cpumask") +__failure __msg("must be referenced") int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_flags) { struct bpf_cpumask *cpumask; diff --git a/tools/testing/selftests/bpf/progs/nested_trust_failure.c b/tools/testing/selftests/bpf/progs/nested_trust_failure.c index 14aff7676436..0d1aa6bbace4 100644 --- a/tools/testing/selftests/bpf/progs/nested_trust_failure.c +++ b/tools/testing/selftests/bpf/progs/nested_trust_failure.c @@ -17,7 +17,7 @@ char _license[] SEC("license") = "GPL"; */ SEC("tp_btf/task_newtask") -__failure __msg("R2 must be referenced or trusted") +__failure __msg("R2 must be") int BPF_PROG(test_invalid_nested_user_cpus, struct task_struct *task, u64 clone_flags) { bpf_cpumask_test_cpu(0, task->user_cpus_ptr); diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c index 5cecbdbbb16e..7250bb76d18a 100644 --- a/tools/testing/selftests/bpf/progs/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c @@ -81,7 +81,7 @@ int no_lock(void *ctx) { struct task_struct *task, *real_parent; - /* no bpf_rcu_read_lock(), old code still works */ + /* old style ptr_to_btf_id is not allowed in sleepable */ task = bpf_get_current_task_btf(); real_parent = task->real_parent; (void)bpf_task_storage_get(&map_a, real_parent, 0, 0); @@ -286,13 +286,13 @@ out: } SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") -int task_untrusted_non_rcuptr(void *ctx) +int 
task_trusted_non_rcuptr(void *ctx) { struct task_struct *task, *group_leader; task = bpf_get_current_task_btf(); bpf_rcu_read_lock(); - /* the pointer group_leader marked as untrusted */ + /* the pointer group_leader is explicitly marked as trusted */ group_leader = task->real_parent->group_leader; (void)bpf_task_storage_get(&map_a, group_leader, 0, 0); bpf_rcu_read_unlock(); diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index 9a326a800e5c..5702fc9761ef 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -181,7 +181,7 @@ }, .result_unpriv = REJECT, .result = REJECT, - .errstr = "negative offset ptr_ ptr R1 off=-4 disallowed", + .errstr = "ptr R1 off=-4 disallowed", }, { "calls: invalid kfunc call: PTR_TO_BTF_ID with variable offset", -- cgit v1.2.3-70-g09d2 From 0d80a619c113d0e216dbffa56b2d5ccc079ee520 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 4 Mar 2023 03:12:45 +0200 Subject: bpf: allow ctx writes using BPF_ST_MEM instruction Lift verifier restriction to use BPF_ST_MEM instructions to write to context data structures. This requires the following changes: - verifier.c:do_check() for BPF_ST updated to: - no longer forbid writes to registers of type PTR_TO_CTX; - track dst_reg type in the env->insn_aux_data[...].ptr_type field (same way it is done for BPF_STX and BPF_LDX instructions). - verifier.c:convert_ctx_access() and various callbacks invoked by it are updated to handled BPF_ST instruction alongside BPF_STX. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230304011247.566040-2-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/cgroup.c | 49 ++++++++----- kernel/bpf/verifier.c | 110 ++++++++++++++--------------- net/core/filter.c | 79 ++++++++++++--------- tools/testing/selftests/bpf/verifier/ctx.c | 11 --- 4 files changed, 126 insertions(+), 123 deletions(-) (limited to 'tools/testing') diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index a4ae422b8f12..53edb8ad2471 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -2223,10 +2223,12 @@ static u32 sysctl_convert_ctx_access(enum bpf_access_type type, BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos), treg, si->dst_reg, offsetof(struct bpf_sysctl_kern, ppos)); - *insn++ = BPF_STX_MEM( - BPF_SIZEOF(u32), treg, si->src_reg, + *insn++ = BPF_RAW_INSN( + BPF_CLASS(si->code) | BPF_MEM | BPF_SIZEOF(u32), + treg, si->src_reg, bpf_ctx_narrow_access_offset( - 0, sizeof(u32), sizeof(loff_t))); + 0, sizeof(u32), sizeof(loff_t)), + si->imm); *insn++ = BPF_LDX_MEM( BPF_DW, treg, si->dst_reg, offsetof(struct bpf_sysctl_kern, tmp_reg)); @@ -2376,10 +2378,17 @@ static bool cg_sockopt_is_valid_access(int off, int size, return true; } -#define CG_SOCKOPT_ACCESS_FIELD(T, F) \ - T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F), \ - si->dst_reg, si->src_reg, \ - offsetof(struct bpf_sockopt_kern, F)) +#define CG_SOCKOPT_READ_FIELD(F) \ + BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F), \ + si->dst_reg, si->src_reg, \ + offsetof(struct bpf_sockopt_kern, F)) + +#define CG_SOCKOPT_WRITE_FIELD(F) \ + BPF_RAW_INSN((BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F) | \ + BPF_MEM | BPF_CLASS(si->code)), \ + si->dst_reg, si->src_reg, \ + offsetof(struct bpf_sockopt_kern, F), \ + si->imm) static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, @@ -2391,25 +2400,25 @@ static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type, switch 
(si->off) { case offsetof(struct bpf_sockopt, sk): - *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk); + *insn++ = CG_SOCKOPT_READ_FIELD(sk); break; case offsetof(struct bpf_sockopt, level): if (type == BPF_WRITE) - *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level); + *insn++ = CG_SOCKOPT_WRITE_FIELD(level); else - *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level); + *insn++ = CG_SOCKOPT_READ_FIELD(level); break; case offsetof(struct bpf_sockopt, optname): if (type == BPF_WRITE) - *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname); + *insn++ = CG_SOCKOPT_WRITE_FIELD(optname); else - *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname); + *insn++ = CG_SOCKOPT_READ_FIELD(optname); break; case offsetof(struct bpf_sockopt, optlen): if (type == BPF_WRITE) - *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen); + *insn++ = CG_SOCKOPT_WRITE_FIELD(optlen); else - *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen); + *insn++ = CG_SOCKOPT_READ_FIELD(optlen); break; case offsetof(struct bpf_sockopt, retval): BUILD_BUG_ON(offsetof(struct bpf_cg_run_ctx, run_ctx) != 0); @@ -2429,9 +2438,11 @@ static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type, *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx), treg, treg, offsetof(struct task_struct, bpf_ctx)); - *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval), - treg, si->src_reg, - offsetof(struct bpf_cg_run_ctx, retval)); + *insn++ = BPF_RAW_INSN(BPF_CLASS(si->code) | BPF_MEM | + BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval), + treg, si->src_reg, + offsetof(struct bpf_cg_run_ctx, retval), + si->imm); *insn++ = BPF_LDX_MEM(BPF_DW, treg, si->dst_reg, offsetof(struct bpf_sockopt_kern, tmp_reg)); } else { @@ -2447,10 +2458,10 @@ static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type, } break; case offsetof(struct bpf_sockopt, optval): - *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval); + *insn++ = CG_SOCKOPT_READ_FIELD(optval); break; case offsetof(struct bpf_sockopt, optval_end): - *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end); + *insn++ = CG_SOCKOPT_READ_FIELD(optval_end); break; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c2adf3c24c64..4c5d2b5e25c8 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -14813,6 +14813,44 @@ static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) !reg_type_mismatch_ok(prev)); } +static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type, + bool allow_trust_missmatch) +{ + enum bpf_reg_type *prev_type = &env->insn_aux_data[env->insn_idx].ptr_type; + + if (*prev_type == NOT_INIT) { + /* Saw a valid insn + * dst_reg = *(u32 *)(src_reg + off) + * save type to validate intersecting paths + */ + *prev_type = type; + } else if (reg_type_mismatch(type, *prev_type)) { + /* Abuser program is trying to use the same insn + * dst_reg = *(u32*) (src_reg + off) + * with different pointer types: + * src_reg == ctx in one branch and + * src_reg == stack|map in some other branch. + * Reject it. + */ + if (allow_trust_missmatch && + base_type(type) == PTR_TO_BTF_ID && + base_type(*prev_type) == PTR_TO_BTF_ID) { + /* + * Have to support a use case when one path through + * the program yields TRUSTED pointer while another + * is UNTRUSTED. Fallback to UNTRUSTED to generate + * BPF_PROBE_MEM. 
+ */ + *prev_type = PTR_TO_BTF_ID | PTR_UNTRUSTED; + } else { + verbose(env, "same insn cannot be used with different pointers\n"); + return -EINVAL; + } + } + + return 0; +} + static int do_check(struct bpf_verifier_env *env) { bool pop_log = !(env->log.level & BPF_LOG_LEVEL2); @@ -14922,7 +14960,7 @@ static int do_check(struct bpf_verifier_env *env) return err; } else if (class == BPF_LDX) { - enum bpf_reg_type *prev_src_type, src_reg_type; + enum bpf_reg_type src_reg_type; /* check for reserved fields is already done */ @@ -14946,43 +14984,11 @@ static int do_check(struct bpf_verifier_env *env) if (err) return err; - prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type; - - if (*prev_src_type == NOT_INIT) { - /* saw a valid insn - * dst_reg = *(u32 *)(src_reg + off) - * save type to validate intersecting paths - */ - *prev_src_type = src_reg_type; - - } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) { - /* ABuser program is trying to use the same insn - * dst_reg = *(u32*) (src_reg + off) - * with different pointer types: - * src_reg == ctx in one branch and - * src_reg == stack|map in some other branch. - * Reject it. - */ - if (base_type(src_reg_type) == PTR_TO_BTF_ID && - base_type(*prev_src_type) == PTR_TO_BTF_ID) { - /* - * Have to support a use case when one path through - * the program yields TRUSTED pointer while another - * is UNTRUSTED. Fallback to UNTRUSTED to generate - * BPF_PROBE_MEM. - */ - *prev_src_type = PTR_TO_BTF_ID | PTR_UNTRUSTED; - } else { - verbose(env, - "The same insn cannot be used with different pointers: %s", - reg_type_str(env, src_reg_type)); - verbose(env, " != %s\n", reg_type_str(env, *prev_src_type)); - return -EINVAL; - } - } - + err = save_aux_ptr_type(env, src_reg_type, true); + if (err) + return err; } else if (class == BPF_STX) { - enum bpf_reg_type *prev_dst_type, dst_reg_type; + enum bpf_reg_type dst_reg_type; if (BPF_MODE(insn->code) == BPF_ATOMIC) { err = check_atomic(env, env->insn_idx, insn); @@ -15015,16 +15021,12 @@ static int do_check(struct bpf_verifier_env *env) if (err) return err; - prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; - - if (*prev_dst_type == NOT_INIT) { - *prev_dst_type = dst_reg_type; - } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) { - verbose(env, "same insn cannot be used with different pointers\n"); - return -EINVAL; - } - + err = save_aux_ptr_type(env, dst_reg_type, false); + if (err) + return err; } else if (class == BPF_ST) { + enum bpf_reg_type dst_reg_type; + if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) { verbose(env, "BPF_ST uses reserved fields\n"); @@ -15035,12 +15037,7 @@ static int do_check(struct bpf_verifier_env *env) if (err) return err; - if (is_ctx_reg(env, insn->dst_reg)) { - verbose(env, "BPF_ST stores into R%d %s is not allowed\n", - insn->dst_reg, - reg_type_str(env, reg_state(env, insn->dst_reg)->type)); - return -EACCES; - } + dst_reg_type = regs[insn->dst_reg].type; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, env->insn_idx, insn->dst_reg, @@ -15049,6 +15046,9 @@ static int do_check(struct bpf_verifier_env *env) if (err) return err; + err = save_aux_ptr_type(env, dst_reg_type, false); + if (err) + return err; } else if (class == BPF_JMP || class == BPF_JMP32) { u8 opcode = BPF_OP(insn->code); @@ -16157,14 +16157,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) for (i = 0; i < insn_cnt; i++, insn++) { bpf_convert_ctx_access_t convert_ctx_access; - bool ctx_access; if 
(insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) { type = BPF_READ; - ctx_access = true; } else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || insn->code == (BPF_STX | BPF_MEM | BPF_H) || insn->code == (BPF_STX | BPF_MEM | BPF_W) || @@ -16174,7 +16172,6 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) insn->code == (BPF_ST | BPF_MEM | BPF_W) || insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { type = BPF_WRITE; - ctx_access = BPF_CLASS(insn->code) == BPF_STX; } else { continue; } @@ -16197,9 +16194,6 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) continue; } - if (!ctx_access) - continue; - switch ((int)env->insn_aux_data[i + delta].ptr_type) { case PTR_TO_CTX: if (!ops->convert_ctx_access) diff --git a/net/core/filter.c b/net/core/filter.c index a2dc44e70ea0..50f649f1b4a9 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9279,11 +9279,15 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, #endif /* : skb->tstamp = tstamp */ - *insn++ = BPF_STX_MEM(BPF_DW, skb_reg, value_reg, - offsetof(struct sk_buff, tstamp)); + *insn++ = BPF_RAW_INSN(BPF_CLASS(si->code) | BPF_DW | BPF_MEM, + skb_reg, value_reg, offsetof(struct sk_buff, tstamp), si->imm); return insn; } +#define BPF_EMIT_STORE(size, si, off) \ + BPF_RAW_INSN(BPF_CLASS((si)->code) | (size) | BPF_MEM, \ + (si)->dst_reg, (si)->src_reg, (off), (si)->imm) + static u32 bpf_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, @@ -9313,9 +9317,9 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, case offsetof(struct __sk_buff, priority): if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, - bpf_target_off(struct sk_buff, priority, 4, - target_size)); + *insn++ = BPF_EMIT_STORE(BPF_W, si, + bpf_target_off(struct sk_buff, priority, 4, + target_size)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, priority, 4, @@ -9346,9 +9350,9 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, case offsetof(struct __sk_buff, mark): if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, - bpf_target_off(struct sk_buff, mark, 4, - target_size)); + *insn++ = BPF_EMIT_STORE(BPF_W, si, + bpf_target_off(struct sk_buff, mark, 4, + target_size)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, mark, 4, @@ -9367,11 +9371,16 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, case offsetof(struct __sk_buff, queue_mapping): if (type == BPF_WRITE) { - *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1); - *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, - bpf_target_off(struct sk_buff, - queue_mapping, - 2, target_size)); + u32 off = bpf_target_off(struct sk_buff, queue_mapping, 2, target_size); + + if (BPF_CLASS(si->code) == BPF_ST && si->imm >= NO_QUEUE_MAPPING) { + *insn++ = BPF_JMP_A(0); /* noop */ + break; + } + + if (BPF_CLASS(si->code) == BPF_STX) + *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1); + *insn++ = BPF_EMIT_STORE(BPF_H, si, off); } else { *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, @@ -9407,8 +9416,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, off += offsetof(struct sk_buff, cb); off += offsetof(struct qdisc_skb_cb, data); if (type == 
BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg, - si->src_reg, off); + *insn++ = BPF_EMIT_STORE(BPF_SIZE(si->code), si, off); else *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg, si->src_reg, off); @@ -9423,8 +9431,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, off += offsetof(struct qdisc_skb_cb, tc_classid); *target_size = 2; if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, - si->src_reg, off); + *insn++ = BPF_EMIT_STORE(BPF_H, si, off); else *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, off); @@ -9457,9 +9464,9 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, case offsetof(struct __sk_buff, tc_index): #ifdef CONFIG_NET_SCHED if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, - bpf_target_off(struct sk_buff, tc_index, 2, - target_size)); + *insn++ = BPF_EMIT_STORE(BPF_H, si, + bpf_target_off(struct sk_buff, tc_index, 2, + target_size)); else *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, tc_index, 2, @@ -9660,8 +9667,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4); if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, - offsetof(struct sock, sk_bound_dev_if)); + *insn++ = BPF_EMIT_STORE(BPF_W, si, + offsetof(struct sock, sk_bound_dev_if)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_bound_dev_if)); @@ -9671,8 +9678,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4); if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, - offsetof(struct sock, sk_mark)); + *insn++ = BPF_EMIT_STORE(BPF_W, si, + offsetof(struct sock, sk_mark)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_mark)); @@ -9682,8 +9689,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4); if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, - offsetof(struct sock, sk_priority)); + *insn++ = BPF_EMIT_STORE(BPF_W, si, + offsetof(struct sock, sk_priority)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_priority)); @@ -9948,10 +9955,12 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, offsetof(S, TF)); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \ si->dst_reg, offsetof(S, F)); \ - *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg, \ + *insn++ = BPF_RAW_INSN(SIZE | BPF_MEM | BPF_CLASS(si->code), \ + tmp_reg, si->src_reg, \ bpf_target_off(NS, NF, sizeof_field(NS, NF), \ target_size) \ - + OFF); \ + + OFF, \ + si->imm); \ *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \ offsetof(S, TF)); \ } while (0) @@ -10186,9 +10195,11 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, struct bpf_sock_ops_kern, sk),\ reg, si->dst_reg, \ offsetof(struct bpf_sock_ops_kern, sk));\ - *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \ - reg, si->src_reg, \ - offsetof(OBJ, OBJ_FIELD)); \ + *insn++ = BPF_RAW_INSN(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD) | \ + BPF_MEM | BPF_CLASS(si->code), \ + reg, si->src_reg, \ + offsetof(OBJ, OBJ_FIELD), \ + si->imm); \ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \ offsetof(struct bpf_sock_ops_kern, \ temp)); \ @@ -10220,8 +10231,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, off -= 
offsetof(struct bpf_sock_ops, replylong[0]); off += offsetof(struct bpf_sock_ops_kern, replylong[0]); if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, - off); + *insn++ = BPF_EMIT_STORE(BPF_W, si, off); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off); @@ -10578,8 +10588,7 @@ static u32 sk_skb_convert_ctx_access(enum bpf_access_type type, off += offsetof(struct sk_buff, cb); off += offsetof(struct sk_skb_cb, data); if (type == BPF_WRITE) - *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg, - si->src_reg, off); + *insn++ = BPF_EMIT_STORE(BPF_SIZE(si->code), si, off); else *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg, si->src_reg, off); diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c index c8eaf0536c24..2fd31612c0b8 100644 --- a/tools/testing/selftests/bpf/verifier/ctx.c +++ b/tools/testing/selftests/bpf/verifier/ctx.c @@ -1,14 +1,3 @@ -{ - "context stores via ST", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0), - BPF_EXIT_INSN(), - }, - .errstr = "BPF_ST stores into R1 ctx is not allowed", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, { "context stores via BPF_ATOMIC", .insns = { -- cgit v1.2.3-70-g09d2 From 806f81cd1ee30c66a3d2a4cd18b13c97429397a0 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 4 Mar 2023 03:12:46 +0200 Subject: selftests/bpf: test if pointer type is tracked for BPF_ST_MEM Check that verifier tracks pointer types for BPF_ST_MEM instructions and reports error if pointer types do not match for different execution branches. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230304011247.566040-3-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/verifier/unpriv.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c index 878ca26c3f0a..af0c0f336625 100644 --- a/tools/testing/selftests/bpf/verifier/unpriv.c +++ b/tools/testing/selftests/bpf/verifier/unpriv.c @@ -239,6 +239,29 @@ .errstr = "same insn cannot be used with different pointers", .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, +{ + /* Same as above, but use BPF_ST_MEM to save 42 + * instead of BPF_STX_MEM. 
+ */ + "unpriv: spill/fill of different pointers st", + .insns = { + BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), + BPF_ST_MEM(BPF_W, BPF_REG_1, offsetof(struct __sk_buff, mark), 42), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "same insn cannot be used with different pointers", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, { "unpriv: spill/fill of different pointers stx - ctx and sock", .insns = { -- cgit v1.2.3-70-g09d2 From 71cf4d027ad53a1e2847191ac14e50132d35a6a7 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 4 Mar 2023 03:12:47 +0200 Subject: selftests/bpf: Disassembler tests for verifier.c:convert_ctx_access() Function verifier.c:convert_ctx_access() applies some rewrites to BPF instructions that read or write BPF program context. This commit adds machinery to allow test cases that inspect BPF program after these rewrites are applied. An example of a test case: { // Shorthand for field offset and size specification N(CGROUP_SOCKOPT, struct bpf_sockopt, retval), // Pattern generated for field read .read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::current_task);" "$dst = *(u64 *)($dst + task_struct::bpf_ctx);" "$dst = *(u32 *)($dst + bpf_cg_run_ctx::retval);", // Pattern generated for field write .write = "*(u64 *)($ctx + bpf_sockopt_kern::tmp_reg) = r9;" "r9 = *(u64 *)($ctx + bpf_sockopt_kern::current_task);" "r9 = *(u64 *)(r9 + task_struct::bpf_ctx);" "*(u32 *)(r9 + bpf_cg_run_ctx::retval) = $src;" "r9 = *(u64 *)($ctx + bpf_sockopt_kern::tmp_reg);" , }, For each test case, up to three programs are created: - One that uses BPF_LDX_MEM to read the context field. - One that uses BPF_STX_MEM to write to the context field. - One that uses BPF_ST_MEM to write to the context field. The disassembly of each program is compared with the pattern specified in the test case. Kernel code for disassembly is reused (as is in the bpftool). To keep Makefile changes to the minimum, symbolic links to `kernel/bpf/disasm.c` and `kernel/bpf/disasm.h ` are added. 
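The three generated probe programs are plain instruction sequences; roughly (illustrative shapes only, using the writable SCHED_CLS mark field as an example -- the actual generation code lives in ctx_rewrite.c and may pick different registers):

  /* read: $dst = ctx->mark */
  BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct __sk_buff, mark)),
  BPF_EXIT_INSN(),

  /* write via BPF_STX: ctx->mark = $src */
  BPF_MOV64_IMM(BPF_REG_2, 42),
  BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2, offsetof(struct __sk_buff, mark)),
  BPF_MOV64_IMM(BPF_REG_0, 0),
  BPF_EXIT_INSN(),

  /* write via BPF_ST (newly allowed for ctx by the previous patch):
   * ctx->mark = 42
   */
  BPF_ST_MEM(BPF_W, BPF_REG_1, offsetof(struct __sk_buff, mark), 42),
  BPF_MOV64_IMM(BPF_REG_0, 0),
  BPF_EXIT_INSN(),
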
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230304011247.566040-4-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/Makefile | 2 +- tools/testing/selftests/bpf/disasm.c | 1 + tools/testing/selftests/bpf/disasm.h | 1 + .../testing/selftests/bpf/prog_tests/ctx_rewrite.c | 917 +++++++++++++++++++++ 4 files changed, 920 insertions(+), 1 deletion(-) create mode 120000 tools/testing/selftests/bpf/disasm.c create mode 120000 tools/testing/selftests/bpf/disasm.h create mode 100644 tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index eab3cf5399f5..16f404aa1b23 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -559,7 +559,7 @@ TRUNNER_BPF_PROGS_DIR := progs TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \ network_helpers.c testing_helpers.c \ btf_helpers.c flow_dissector_load.h \ - cap_helpers.c test_loader.c xsk.c + cap_helpers.c test_loader.c xsk.c disasm.c TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ $(OUTPUT)/liburandom_read.so \ $(OUTPUT)/xdp_synproxy \ diff --git a/tools/testing/selftests/bpf/disasm.c b/tools/testing/selftests/bpf/disasm.c new file mode 120000 index 000000000000..b1571927bd54 --- /dev/null +++ b/tools/testing/selftests/bpf/disasm.c @@ -0,0 +1 @@ +../../../../kernel/bpf/disasm.c \ No newline at end of file diff --git a/tools/testing/selftests/bpf/disasm.h b/tools/testing/selftests/bpf/disasm.h new file mode 120000 index 000000000000..8054fd497340 --- /dev/null +++ b/tools/testing/selftests/bpf/disasm.h @@ -0,0 +1 @@ +../../../../kernel/bpf/disasm.h \ No newline at end of file diff --git a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c new file mode 100644 index 000000000000..d5fe3d4b936c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c @@ -0,0 +1,917 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +#include "bpf/btf.h" +#include "bpf_util.h" +#include "linux/filter.h" +#include "disasm.h" + +#define MAX_PROG_TEXT_SZ (32 * 1024) + +/* The code in this file serves the sole purpose of executing test cases + * specified in the test_cases array. Each test case specifies a program + * type, context field offset, and disassembly patterns that correspond + * to read and write instructions generated by + * verifier.c:convert_ctx_access() for accessing that field. + * + * For each test case, up to three programs are created: + * - One that uses BPF_LDX_MEM to read the context field. + * - One that uses BPF_STX_MEM to write to the context field. + * - One that uses BPF_ST_MEM to write to the context field. + * + * The disassembly of each program is then compared with the pattern + * specified in the test case. + */ +struct test_case { + char *name; + enum bpf_prog_type prog_type; + enum bpf_attach_type expected_attach_type; + int field_offset; + int field_sz; + /* Program generated for BPF_ST_MEM uses value 42 by default, + * this field allows to specify custom value. 
+ */ + struct { + bool use; + int value; + } st_value; + /* Pattern for BPF_LDX_MEM(field_sz, dst, ctx, field_offset) */ + char *read; + /* Pattern for BPF_STX_MEM(field_sz, ctx, src, field_offset) and + * BPF_ST_MEM (field_sz, ctx, src, field_offset) + */ + char *write; + /* Pattern for BPF_ST_MEM(field_sz, ctx, src, field_offset), + * takes priority over `write`. + */ + char *write_st; + /* Pattern for BPF_STX_MEM (field_sz, ctx, src, field_offset), + * takes priority over `write`. + */ + char *write_stx; +}; + +#define N(_prog_type, type, field, name_extra...) \ + .name = #_prog_type "." #field name_extra, \ + .prog_type = BPF_PROG_TYPE_##_prog_type, \ + .field_offset = offsetof(type, field), \ + .field_sz = sizeof(typeof(((type *)NULL)->field)) + +static struct test_case test_cases[] = { +/* Sign extension on s390 changes the pattern */ +#if defined(__x86_64__) || defined(__aarch64__) + { + N(SCHED_CLS, struct __sk_buff, tstamp), + .read = "r11 = *(u8 *)($ctx + sk_buff::__pkt_vlan_present_offset);" + "w11 &= 160;" + "if w11 != 0xa0 goto pc+2;" + "$dst = 0;" + "goto pc+1;" + "$dst = *(u64 *)($ctx + sk_buff::tstamp);", + .write = "r11 = *(u8 *)($ctx + sk_buff::__pkt_vlan_present_offset);" + "if w11 & 0x80 goto pc+1;" + "goto pc+2;" + "w11 &= -33;" + "*(u8 *)($ctx + sk_buff::__pkt_vlan_present_offset) = r11;" + "*(u64 *)($ctx + sk_buff::tstamp) = $src;", + }, +#endif + { + N(SCHED_CLS, struct __sk_buff, priority), + .read = "$dst = *(u32 *)($ctx + sk_buff::priority);", + .write = "*(u32 *)($ctx + sk_buff::priority) = $src;", + }, + { + N(SCHED_CLS, struct __sk_buff, mark), + .read = "$dst = *(u32 *)($ctx + sk_buff::mark);", + .write = "*(u32 *)($ctx + sk_buff::mark) = $src;", + }, + { + N(SCHED_CLS, struct __sk_buff, cb[0]), + .read = "$dst = *(u32 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::data));", + .write = "*(u32 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::data)) = $src;", + }, + { + N(SCHED_CLS, struct __sk_buff, tc_classid), + .read = "$dst = *(u16 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::tc_classid));", + .write = "*(u16 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::tc_classid)) = $src;", + }, + { + N(SCHED_CLS, struct __sk_buff, tc_index), + .read = "$dst = *(u16 *)($ctx + sk_buff::tc_index);", + .write = "*(u16 *)($ctx + sk_buff::tc_index) = $src;", + }, + { + N(SCHED_CLS, struct __sk_buff, queue_mapping), + .read = "$dst = *(u16 *)($ctx + sk_buff::queue_mapping);", + .write_stx = "if $src >= 0xffff goto pc+1;" + "*(u16 *)($ctx + sk_buff::queue_mapping) = $src;", + .write_st = "*(u16 *)($ctx + sk_buff::queue_mapping) = $src;", + }, + { + /* This is a corner case in filter.c:bpf_convert_ctx_access() */ + N(SCHED_CLS, struct __sk_buff, queue_mapping, ".ushrt_max"), + .st_value = { true, USHRT_MAX }, + .write_st = "goto pc+0;", + }, + { + N(CGROUP_SOCK, struct bpf_sock, bound_dev_if), + .read = "$dst = *(u32 *)($ctx + sock_common::skc_bound_dev_if);", + .write = "*(u32 *)($ctx + sock_common::skc_bound_dev_if) = $src;", + }, + { + N(CGROUP_SOCK, struct bpf_sock, mark), + .read = "$dst = *(u32 *)($ctx + sock::sk_mark);", + .write = "*(u32 *)($ctx + sock::sk_mark) = $src;", + }, + { + N(CGROUP_SOCK, struct bpf_sock, priority), + .read = "$dst = *(u32 *)($ctx + sock::sk_priority);", + .write = "*(u32 *)($ctx + sock::sk_priority) = $src;", + }, + { + N(SOCK_OPS, struct bpf_sock_ops, replylong[0]), + .read = "$dst = *(u32 *)($ctx + bpf_sock_ops_kern::replylong);", + .write = "*(u32 *)($ctx + bpf_sock_ops_kern::replylong) = $src;", + }, + { + N(CGROUP_SYSCTL, struct bpf_sysctl, file_pos), +#if 
__BYTE_ORDER == __LITTLE_ENDIAN + .read = "$dst = *(u64 *)($ctx + bpf_sysctl_kern::ppos);" + "$dst = *(u32 *)($dst +0);", + .write = "*(u64 *)($ctx + bpf_sysctl_kern::tmp_reg) = r9;" + "r9 = *(u64 *)($ctx + bpf_sysctl_kern::ppos);" + "*(u32 *)(r9 +0) = $src;" + "r9 = *(u64 *)($ctx + bpf_sysctl_kern::tmp_reg);", +#else + .read = "$dst = *(u64 *)($ctx + bpf_sysctl_kern::ppos);" + "$dst = *(u32 *)($dst +4);", + .write = "*(u64 *)($ctx + bpf_sysctl_kern::tmp_reg) = r9;" + "r9 = *(u64 *)($ctx + bpf_sysctl_kern::ppos);" + "*(u32 *)(r9 +4) = $src;" + "r9 = *(u64 *)($ctx + bpf_sysctl_kern::tmp_reg);", +#endif + }, + { + N(CGROUP_SOCKOPT, struct bpf_sockopt, sk), + .read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::sk);", + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + }, + { + N(CGROUP_SOCKOPT, struct bpf_sockopt, level), + .read = "$dst = *(u32 *)($ctx + bpf_sockopt_kern::level);", + .write = "*(u32 *)($ctx + bpf_sockopt_kern::level) = $src;", + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + }, + { + N(CGROUP_SOCKOPT, struct bpf_sockopt, optname), + .read = "$dst = *(u32 *)($ctx + bpf_sockopt_kern::optname);", + .write = "*(u32 *)($ctx + bpf_sockopt_kern::optname) = $src;", + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + }, + { + N(CGROUP_SOCKOPT, struct bpf_sockopt, optlen), + .read = "$dst = *(u32 *)($ctx + bpf_sockopt_kern::optlen);", + .write = "*(u32 *)($ctx + bpf_sockopt_kern::optlen) = $src;", + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + }, + { + N(CGROUP_SOCKOPT, struct bpf_sockopt, retval), + .read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::current_task);" + "$dst = *(u64 *)($dst + task_struct::bpf_ctx);" + "$dst = *(u32 *)($dst + bpf_cg_run_ctx::retval);", + .write = "*(u64 *)($ctx + bpf_sockopt_kern::tmp_reg) = r9;" + "r9 = *(u64 *)($ctx + bpf_sockopt_kern::current_task);" + "r9 = *(u64 *)(r9 + task_struct::bpf_ctx);" + "*(u32 *)(r9 + bpf_cg_run_ctx::retval) = $src;" + "r9 = *(u64 *)($ctx + bpf_sockopt_kern::tmp_reg);", + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + }, + { + N(CGROUP_SOCKOPT, struct bpf_sockopt, optval), + .read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::optval);", + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + }, + { + N(CGROUP_SOCKOPT, struct bpf_sockopt, optval_end), + .read = "$dst = *(u64 *)($ctx + bpf_sockopt_kern::optval_end);", + .expected_attach_type = BPF_CGROUP_GETSOCKOPT, + }, +}; + +#undef N + +static regex_t *ident_regex; +static regex_t *field_regex; + +static char *skip_space(char *str) +{ + while (*str && isspace(*str)) + ++str; + return str; +} + +static char *skip_space_and_semi(char *str) +{ + while (*str && (isspace(*str) || *str == ';')) + ++str; + return str; +} + +static char *match_str(char *str, char *prefix) +{ + while (*str && *prefix && *str == *prefix) { + ++str; + ++prefix; + } + if (*prefix) + return NULL; + return str; +} + +static char *match_number(char *str, int num) +{ + char *next; + int snum = strtol(str, &next, 10); + + if (next - str == 0 || num != snum) + return NULL; + + return next; +} + +static int find_field_offset_aux(struct btf *btf, int btf_id, char *field_name, int off) +{ + const struct btf_type *type = btf__type_by_id(btf, btf_id); + const struct btf_member *m; + __u16 mnum; + int i; + + if (!type) { + PRINT_FAIL("Can't find btf_type for id %d\n", btf_id); + return -1; + } + + if (!btf_is_struct(type) && !btf_is_union(type)) { + PRINT_FAIL("BTF id %d is not struct or union\n", btf_id); + return -1; + } + + m = btf_members(type); + mnum = btf_vlen(type); + + for (i = 0; i < mnum; ++i, ++m) { + 
const char *mname = btf__name_by_offset(btf, m->name_off); + + if (strcmp(mname, "") == 0) { + int msize = find_field_offset_aux(btf, m->type, field_name, + off + m->offset); + if (msize >= 0) + return msize; + } + + if (strcmp(mname, field_name)) + continue; + + return (off + m->offset) / 8; + } + + return -1; +} + +static int find_field_offset(struct btf *btf, char *pattern, regmatch_t *matches) +{ + int type_sz = matches[1].rm_eo - matches[1].rm_so; + int field_sz = matches[2].rm_eo - matches[2].rm_so; + char *type = pattern + matches[1].rm_so; + char *field = pattern + matches[2].rm_so; + char field_str[128] = {}; + char type_str[128] = {}; + int btf_id, field_offset; + + if (type_sz >= sizeof(type_str)) { + PRINT_FAIL("Malformed pattern: type ident is too long: %d\n", type_sz); + return -1; + } + + if (field_sz >= sizeof(field_str)) { + PRINT_FAIL("Malformed pattern: field ident is too long: %d\n", field_sz); + return -1; + } + + strncpy(type_str, type, type_sz); + strncpy(field_str, field, field_sz); + btf_id = btf__find_by_name(btf, type_str); + if (btf_id < 0) { + PRINT_FAIL("No BTF info for type %s\n", type_str); + return -1; + } + + field_offset = find_field_offset_aux(btf, btf_id, field_str, 0); + if (field_offset < 0) { + PRINT_FAIL("No BTF info for field %s::%s\n", type_str, field_str); + return -1; + } + + return field_offset; +} + +static regex_t *compile_regex(char *pat) +{ + regex_t *re; + int err; + + re = malloc(sizeof(regex_t)); + if (!re) { + PRINT_FAIL("Can't alloc regex\n"); + return NULL; + } + + err = regcomp(re, pat, REG_EXTENDED); + if (err) { + char errbuf[512]; + + regerror(err, re, errbuf, sizeof(errbuf)); + PRINT_FAIL("Can't compile regex: %s\n", errbuf); + free(re); + return NULL; + } + + return re; +} + +static void free_regex(regex_t *re) +{ + if (!re) + return; + + regfree(re); + free(re); +} + +static u32 max_line_len(char *str) +{ + u32 max_line = 0; + char *next = str; + + while (next) { + next = strchr(str, '\n'); + if (next) { + max_line = max_t(u32, max_line, (next - str)); + str = next + 1; + } else { + max_line = max_t(u32, max_line, strlen(str)); + } + } + + return min(max_line, 60u); +} + +/* Print strings `pattern_origin` and `text_origin` side by side, + * assume `pattern_pos` and `text_pos` designate location within + * corresponding origin string where match diverges. 
+ * The output should look like: + * + * Can't match disassembly(left) with pattern(right): + * r2 = *(u64 *)(r1 +0) ; $dst = *(u64 *)($ctx + bpf_sockopt_kern::sk1) + * ^ ^ + * r0 = 0 ; + * exit ; + */ +static void print_match_error(FILE *out, + char *pattern_origin, char *text_origin, + char *pattern_pos, char *text_pos) +{ + char *pattern = pattern_origin; + char *text = text_origin; + int middle = max_line_len(text) + 2; + + fprintf(out, "Can't match disassembly(left) with pattern(right):\n"); + while (*pattern || *text) { + int column = 0; + int mark1 = -1; + int mark2 = -1; + + /* Print one line from text */ + while (*text && *text != '\n') { + if (text == text_pos) + mark1 = column; + fputc(*text, out); + ++text; + ++column; + } + if (text == text_pos) + mark1 = column; + + /* Pad to the middle */ + while (column < middle) { + fputc(' ', out); + ++column; + } + fputs("; ", out); + column += 3; + + /* Print one line from pattern, pattern lines are terminated by ';' */ + while (*pattern && *pattern != ';') { + if (pattern == pattern_pos) + mark2 = column; + fputc(*pattern, out); + ++pattern; + ++column; + } + if (pattern == pattern_pos) + mark2 = column; + + fputc('\n', out); + if (*pattern) + ++pattern; + if (*text) + ++text; + + /* If pattern and text diverge at this line, print an + * additional line with '^' marks, highlighting + * positions where match fails. + */ + if (mark1 > 0 || mark2 > 0) { + for (column = 0; column <= max(mark1, mark2); ++column) { + if (column == mark1 || column == mark2) + fputc('^', out); + else + fputc(' ', out); + } + fputc('\n', out); + } + } +} + +/* Test if `text` matches `pattern`. Pattern consists of the following elements: + * + * - Field offset references: + * + * :: + * + * When such reference is encountered BTF is used to compute numerical + * value for the offset of in . The `text` is expected to + * contain matching numerical value. + * + * - Field groups: + * + * $(:: [+ ::]*) + * + * Allows to specify an offset that is a sum of multiple field offsets. + * The `text` is expected to contain matching numerical value. + * + * - Variable references, e.g. `$src`, `$dst`, `$ctx`. + * These are substitutions specified in `reg_map` array. + * If a substring of pattern is equal to `reg_map[i][0]` the `text` is + * expected to contain `reg_map[i][1]` in the matching position. + * + * - Whitespace is ignored, ';' counts as whitespace for `pattern`. + * + * - Any other characters, `pattern` and `text` should match one-to-one. 
+ * + * Example of a pattern: + * + * __________ fields group ________________ + * ' ' + * *(u16 *)($ctx + $(sk_buff::cb + qdisc_skb_cb::tc_classid)) = $src; + * ^^^^ '______________________' + * variable reference field offset reference + */ +static bool match_pattern(struct btf *btf, char *pattern, char *text, char *reg_map[][2]) +{ + char *pattern_origin = pattern; + char *text_origin = text; + regmatch_t matches[3]; + +_continue: + while (*pattern) { + if (!*text) + goto err; + + /* Skip whitespace */ + if (isspace(*pattern) || *pattern == ';') { + if (!isspace(*text) && text != text_origin && isalnum(text[-1])) + goto err; + pattern = skip_space_and_semi(pattern); + text = skip_space(text); + continue; + } + + /* Check for variable references */ + for (int i = 0; reg_map[i][0]; ++i) { + char *pattern_next, *text_next; + + pattern_next = match_str(pattern, reg_map[i][0]); + if (!pattern_next) + continue; + + text_next = match_str(text, reg_map[i][1]); + if (!text_next) + goto err; + + pattern = pattern_next; + text = text_next; + goto _continue; + } + + /* Match field group: + * $(sk_buff::cb + qdisc_skb_cb::tc_classid) + */ + if (strncmp(pattern, "$(", 2) == 0) { + char *group_start = pattern, *text_next; + int acc_offset = 0; + + pattern += 2; + + for (;;) { + int field_offset; + + pattern = skip_space(pattern); + if (!*pattern) { + PRINT_FAIL("Unexpected end of pattern\n"); + goto err; + } + + if (*pattern == ')') { + ++pattern; + break; + } + + if (*pattern == '+') { + ++pattern; + continue; + } + + printf("pattern: %s\n", pattern); + if (regexec(field_regex, pattern, 3, matches, 0) != 0) { + PRINT_FAIL("Field reference expected\n"); + goto err; + } + + field_offset = find_field_offset(btf, pattern, matches); + if (field_offset < 0) + goto err; + + pattern += matches[0].rm_eo; + acc_offset += field_offset; + } + + text_next = match_number(text, acc_offset); + if (!text_next) { + PRINT_FAIL("No match for group offset %.*s (%d)\n", + (int)(pattern - group_start), + group_start, + acc_offset); + goto err; + } + text = text_next; + } + + /* Match field reference: + * sk_buff::cb + */ + if (regexec(field_regex, pattern, 3, matches, 0) == 0) { + int field_offset; + char *text_next; + + field_offset = find_field_offset(btf, pattern, matches); + if (field_offset < 0) + goto err; + + text_next = match_number(text, field_offset); + if (!text_next) { + PRINT_FAIL("No match for field offset %.*s (%d)\n", + (int)matches[0].rm_eo, pattern, field_offset); + goto err; + } + + pattern += matches[0].rm_eo; + text = text_next; + continue; + } + + /* If pattern points to identifier not followed by '::' + * skip the identifier to avoid n^2 application of the + * field reference rule. + */ + if (regexec(ident_regex, pattern, 1, matches, 0) == 0) { + if (strncmp(pattern, text, matches[0].rm_eo) != 0) + goto err; + + pattern += matches[0].rm_eo; + text += matches[0].rm_eo; + continue; + } + + /* Match literally */ + if (*pattern != *text) + goto err; + + ++pattern; + ++text; + } + + return true; + +err: + test__fail(); + print_match_error(stdout, pattern_origin, text_origin, pattern, text); + return false; +} + +/* Request BPF program instructions after all rewrites are applied, + * e.g. verifier.c:convert_ctx_access() is done. 
+ */ +static int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt) +{ + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + __u32 xlated_prog_len; + __u32 buf_element_size = sizeof(struct bpf_insn); + + if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { + perror("bpf_prog_get_info_by_fd failed"); + return -1; + } + + xlated_prog_len = info.xlated_prog_len; + if (xlated_prog_len % buf_element_size) { + printf("Program length %d is not multiple of %d\n", + xlated_prog_len, buf_element_size); + return -1; + } + + *cnt = xlated_prog_len / buf_element_size; + *buf = calloc(*cnt, buf_element_size); + if (!buf) { + perror("can't allocate xlated program buffer"); + return -ENOMEM; + } + + bzero(&info, sizeof(info)); + info.xlated_prog_len = xlated_prog_len; + info.xlated_prog_insns = (__u64)(unsigned long)*buf; + if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { + perror("second bpf_prog_get_info_by_fd failed"); + goto out_free_buf; + } + + return 0; + +out_free_buf: + free(*buf); + return -1; +} + +static void print_insn(void *private_data, const char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + vfprintf((FILE *)private_data, fmt, args); + va_end(args); +} + +/* Disassemble instructions to a stream */ +static void print_xlated(FILE *out, struct bpf_insn *insn, __u32 len) +{ + const struct bpf_insn_cbs cbs = { + .cb_print = print_insn, + .cb_call = NULL, + .cb_imm = NULL, + .private_data = out, + }; + bool double_insn = false; + int i; + + for (i = 0; i < len; i++) { + if (double_insn) { + double_insn = false; + continue; + } + + double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW); + print_bpf_insn(&cbs, insn + i, true); + } +} + +/* We share code with kernel BPF disassembler, it adds '(FF) ' prefix + * for each instruction (FF stands for instruction `code` byte). + * This function removes the prefix inplace for each line in `str`. 
+ */ +static void remove_insn_prefix(char *str, int size) +{ + const int prefix_size = 5; + + int write_pos = 0, read_pos = prefix_size; + int len = strlen(str); + char c; + + size = min(size, len); + + while (read_pos < size) { + c = str[read_pos++]; + if (c == 0) + break; + str[write_pos++] = c; + if (c == '\n') + read_pos += prefix_size; + } + str[write_pos] = 0; +} + +struct prog_info { + char *prog_kind; + enum bpf_prog_type prog_type; + enum bpf_attach_type expected_attach_type; + struct bpf_insn *prog; + u32 prog_len; +}; + +static void match_program(struct btf *btf, + struct prog_info *pinfo, + char *pattern, + char *reg_map[][2], + bool skip_first_insn) +{ + struct bpf_insn *buf = NULL; + int err = 0, prog_fd = 0; + FILE *prog_out = NULL; + char *text = NULL; + __u32 cnt = 0; + + text = calloc(MAX_PROG_TEXT_SZ, 1); + if (!text) { + PRINT_FAIL("Can't allocate %d bytes\n", MAX_PROG_TEXT_SZ); + goto out; + } + + // TODO: log level + LIBBPF_OPTS(bpf_prog_load_opts, opts); + opts.log_buf = text; + opts.log_size = MAX_PROG_TEXT_SZ; + opts.log_level = 1 | 2 | 4; + opts.expected_attach_type = pinfo->expected_attach_type; + + prog_fd = bpf_prog_load(pinfo->prog_type, NULL, "GPL", + pinfo->prog, pinfo->prog_len, &opts); + if (prog_fd < 0) { + PRINT_FAIL("Can't load program, errno %d (%s), verifier log:\n%s\n", + errno, strerror(errno), text); + goto out; + } + + memset(text, 0, MAX_PROG_TEXT_SZ); + + err = get_xlated_program(prog_fd, &buf, &cnt); + if (err) { + PRINT_FAIL("Can't load back BPF program\n"); + goto out; + } + + prog_out = fmemopen(text, MAX_PROG_TEXT_SZ - 1, "w"); + if (!prog_out) { + PRINT_FAIL("Can't open memory stream\n"); + goto out; + } + if (skip_first_insn) + print_xlated(prog_out, buf + 1, cnt - 1); + else + print_xlated(prog_out, buf, cnt); + fclose(prog_out); + remove_insn_prefix(text, MAX_PROG_TEXT_SZ); + + ASSERT_TRUE(match_pattern(btf, pattern, text, reg_map), + pinfo->prog_kind); + +out: + if (prog_fd) + close(prog_fd); + free(buf); + free(text); +} + +static void run_one_testcase(struct btf *btf, struct test_case *test) +{ + struct prog_info pinfo = {}; + int bpf_sz; + + if (!test__start_subtest(test->name)) + return; + + switch (test->field_sz) { + case 8: + bpf_sz = BPF_DW; + break; + case 4: + bpf_sz = BPF_W; + break; + case 2: + bpf_sz = BPF_H; + break; + case 1: + bpf_sz = BPF_B; + break; + default: + PRINT_FAIL("Unexpected field size: %d, want 8,4,2 or 1\n", test->field_sz); + return; + } + + pinfo.prog_type = test->prog_type; + pinfo.expected_attach_type = test->expected_attach_type; + + if (test->read) { + struct bpf_insn ldx_prog[] = { + BPF_LDX_MEM(bpf_sz, BPF_REG_2, BPF_REG_1, test->field_offset), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + char *reg_map[][2] = { + { "$ctx", "r1" }, + { "$dst", "r2" }, + {} + }; + + pinfo.prog_kind = "LDX"; + pinfo.prog = ldx_prog; + pinfo.prog_len = ARRAY_SIZE(ldx_prog); + match_program(btf, &pinfo, test->read, reg_map, false); + } + + if (test->write || test->write_st || test->write_stx) { + struct bpf_insn stx_prog[] = { + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_STX_MEM(bpf_sz, BPF_REG_1, BPF_REG_2, test->field_offset), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + char *stx_reg_map[][2] = { + { "$ctx", "r1" }, + { "$src", "r2" }, + {} + }; + struct bpf_insn st_prog[] = { + BPF_ST_MEM(bpf_sz, BPF_REG_1, test->field_offset, + test->st_value.use ? 
test->st_value.value : 42), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + char *st_reg_map[][2] = { + { "$ctx", "r1" }, + { "$src", "42" }, + {} + }; + + if (test->write || test->write_stx) { + char *pattern = test->write_stx ? test->write_stx : test->write; + + pinfo.prog_kind = "STX"; + pinfo.prog = stx_prog; + pinfo.prog_len = ARRAY_SIZE(stx_prog); + match_program(btf, &pinfo, pattern, stx_reg_map, true); + } + + if (test->write || test->write_st) { + char *pattern = test->write_st ? test->write_st : test->write; + + pinfo.prog_kind = "ST"; + pinfo.prog = st_prog; + pinfo.prog_len = ARRAY_SIZE(st_prog); + match_program(btf, &pinfo, pattern, st_reg_map, false); + } + } + + test__end_subtest(); +} + +void test_ctx_rewrite(void) +{ + struct btf *btf; + int i; + + field_regex = compile_regex("^([[:alpha:]_][[:alnum:]_]+)::([[:alpha:]_][[:alnum:]_]+)"); + ident_regex = compile_regex("^[[:alpha:]_][[:alnum:]_]+"); + if (!field_regex || !ident_regex) + return; + + btf = btf__load_vmlinux_btf(); + if (!btf) { + PRINT_FAIL("Can't load vmlinux BTF, errno %d (%s)\n", errno, strerror(errno)); + goto out; + } + + for (i = 0; i < ARRAY_SIZE(test_cases); ++i) + run_one_testcase(btf, &test_cases[i]); + +out: + btf__free(btf); + free_regex(field_regex); + free_regex(ident_regex); +} -- cgit v1.2.3-70-g09d2 From 6f876e75d316a75957f3d43c3a8c2a6fe9bc18b2 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 2 Mar 2023 15:50:01 -0800 Subject: selftests/bpf: enhance align selftest's expected log matching Allow to search for expected register state in all the verifier log output that's related to specified instruction number. See added comment for an example of possible situation that is happening due to a simple enhancement done in the next patch, which fixes handling of env->test_state_freq flag in state checkpointing logic. Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230302235015.2044271-4-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/align.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c index 4666f88f2bb4..c94fa8d6c4f6 100644 --- a/tools/testing/selftests/bpf/prog_tests/align.c +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -660,16 +660,22 @@ static int do_test_single(struct bpf_align_test *test) * func#0 @0 * 0: R1=ctx(off=0,imm=0) R10=fp0 * 0: (b7) r3 = 2 ; R3_w=2 + * + * Sometimes it's actually two lines below, e.g. 
when + * searching for "6: R3_w=scalar(umax=255,var_off=(0x0; 0xff))": + * from 4 to 6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0 + * 6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0 + * 6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(off=0,r=8,imm=0) R3_w=scalar(umax=255,var_off=(0x0; 0xff)) */ - if (!strstr(line_ptr, m.match)) { + while (!strstr(line_ptr, m.match)) { cur_line = -1; line_ptr = strtok(NULL, "\n"); - sscanf(line_ptr, "%u: ", &cur_line); + sscanf(line_ptr ?: "", "%u: ", &cur_line); + if (!line_ptr || cur_line != m.line) + break; } - if (cur_line != m.line || !line_ptr || - !strstr(line_ptr, m.match)) { - printf("Failed to find match %u: %s\n", - m.line, m.match); + if (cur_line != m.line || !line_ptr || !strstr(line_ptr, m.match)) { + printf("Failed to find match %u: %s\n", m.line, m.match); ret = 1; printf("%s", bpf_vlog); break; -- cgit v1.2.3-70-g09d2 From fffc893b6bf29162aca76842238868b131fcb477 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 2 Mar 2023 15:50:03 -0800 Subject: selftests/bpf: adjust log_fixup's buffer size for proper truncation Adjust log_fixup's expected buffer length to fix the test. It's pretty finicky in its length expectation, but it doesn't break often. So just adjust the length to work on current kernel and with follow up iterator changes as well. Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230302235015.2044271-6-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/log_fixup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c index f4ffdcabf4e4..239e1c5753b0 100644 --- a/tools/testing/selftests/bpf/prog_tests/log_fixup.c +++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c @@ -141,7 +141,7 @@ void test_log_fixup(void) if (test__start_subtest("bad_core_relo_trunc_partial")) bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */); if (test__start_subtest("bad_core_relo_trunc_full")) - bad_core_relo(250, TRUNC_FULL /* truncate also libbpf's message patch */); + bad_core_relo(210, TRUNC_FULL /* truncate also libbpf's message patch */); if (test__start_subtest("bad_core_relo_subprog")) bad_core_relo_subprog(); if (test__start_subtest("missing_map")) -- cgit v1.2.3-70-g09d2 From 7391ec6391e2b129aeaee5462487c404f61157aa Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Mon, 6 Mar 2023 14:48:32 +0800 Subject: selftests/bpf: Split test_attach_probe into multi subtests In order to adapt to the older kernel, now we split the "attach_probe" testing into multi subtests: manual // manual attach tests for kprobe/uprobe auto // auto-attach tests for kprobe and uprobe kprobe-sleepable // kprobe sleepable test uprobe-lib // uprobe tests for library function by name uprobe-sleepable // uprobe sleepable test uprobe-ref_ctr // uprobe ref_ctr test As sleepable kprobe needs to set BPF_F_SLEEPABLE flag before loading, we need to move it to a stand alone skel file, in case of it is not supported by kernel and make the whole loading fail. Therefore, we can only enable part of the subtests for older kernel. 
Signed-off-by: Menglong Dong Signed-off-by: Andrii Nakryiko Reviewed-by: Biao Jiang Link: https://lore.kernel.org/bpf/20230306064833.7932-3-imagedong@tencent.com --- .../selftests/bpf/prog_tests/attach_probe.c | 260 ++++++++++++++------- .../bpf/progs/test_attach_kprobe_sleepable.c | 23 ++ .../selftests/bpf/progs/test_attach_probe.c | 23 +- 3 files changed, 205 insertions(+), 101 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index 56374c8b5436..c374759f39ce 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include "test_attach_kprobe_sleepable.skel.h" #include "test_attach_probe.skel.h" /* this is how USDT semaphore is actually defined, except volatile modifier */ @@ -23,52 +24,24 @@ static noinline void trigger_func3(void) asm volatile (""); } +/* attach point for ref_ctr */ +static noinline void trigger_func4(void) +{ + asm volatile (""); +} + static char test_data[] = "test_data"; -void test_attach_probe(void) +/* manual attach kprobe/kretprobe/uprobe/uretprobe testings */ +static void test_attach_probe_manual(struct test_attach_probe *skel) { DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); struct bpf_link *kprobe_link, *kretprobe_link; struct bpf_link *uprobe_link, *uretprobe_link; - struct test_attach_probe* skel; - ssize_t uprobe_offset, ref_ctr_offset; - struct bpf_link *uprobe_err_link; - FILE *devnull; - bool legacy; - - /* Check if new-style kprobe/uprobe API is supported. - * Kernels that support new FD-based kprobe and uprobe BPF attachment - * through perf_event_open() syscall expose - * /sys/bus/event_source/devices/kprobe/type and - * /sys/bus/event_source/devices/uprobe/type files, respectively. They - * contain magic numbers that are passed as "type" field of - * perf_event_attr. Lack of such file in the system indicates legacy - * kernel with old-style kprobe/uprobe attach interface through - * creating per-probe event through tracefs. For such cases - * ref_ctr_offset feature is not supported, so we don't test it. 
- */ - legacy = access("/sys/bus/event_source/devices/kprobe/type", F_OK) != 0; + ssize_t uprobe_offset; uprobe_offset = get_uprobe_offset(&trigger_func); if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset")) - return; - - ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr); - if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset")) - return; - - skel = test_attach_probe__open(); - if (!ASSERT_OK_PTR(skel, "skel_open")) - return; - - /* sleepable kprobe test case needs flags set before loading */ - if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable, - BPF_F_SLEEPABLE), "kprobe_sleepable_flags")) - goto cleanup; - - if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load")) - goto cleanup; - if (!ASSERT_OK_PTR(skel->bss, "check_bss")) goto cleanup; /* manual-attach kprobe/kretprobe */ @@ -86,18 +59,9 @@ void test_attach_probe(void) goto cleanup; skel->links.handle_kretprobe = kretprobe_link; - /* auto-attachable kprobe and kretprobe */ - skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto); - ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto"); - - skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto); - ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto"); - - if (!legacy) - ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before"); - + /* manual-attach uprobe/uretprobe */ + uprobe_opts.ref_ctr_offset = 0; uprobe_opts.retprobe = false; - uprobe_opts.ref_ctr_offset = legacy ? 0 : ref_ctr_offset; uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 0 /* self pid */, "/proc/self/exe", @@ -107,12 +71,7 @@ void test_attach_probe(void) goto cleanup; skel->links.handle_uprobe = uprobe_link; - if (!legacy) - ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after"); - - /* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */ uprobe_opts.retprobe = true; - uprobe_opts.ref_ctr_offset = legacy ? 
0 : ref_ctr_offset; uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, -1 /* any pid */, "/proc/self/exe", @@ -121,12 +80,7 @@ void test_attach_probe(void) goto cleanup; skel->links.handle_uretprobe = uretprobe_link; - /* verify auto-attach fails for old-style uprobe definition */ - uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname); - if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP, - "auto-attach should fail for old-style name")) - goto cleanup; - + /* attach uprobe by function name manually */ uprobe_opts.func_name = "trigger_func2"; uprobe_opts.retprobe = false; uprobe_opts.ref_ctr_offset = 0; @@ -138,11 +92,62 @@ void test_attach_probe(void) if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname")) goto cleanup; + /* trigger & validate kprobe && kretprobe */ + usleep(1); + + /* trigger & validate uprobe & uretprobe */ + trigger_func(); + + /* trigger & validate uprobe attached by name */ + trigger_func2(); + + ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res"); + ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res"); + ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res"); + ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res"); + ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res"); + +cleanup: +} + +static void test_attach_probe_auto(struct test_attach_probe *skel) +{ + struct bpf_link *uprobe_err_link; + + /* auto-attachable kprobe and kretprobe */ + skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto); + ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto"); + + skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto); + ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto"); + + /* verify auto-attach fails for old-style uprobe definition */ + uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname); + if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP, + "auto-attach should fail for old-style name")) + return; + /* verify auto-attach works */ skel->links.handle_uretprobe_byname = bpf_program__attach(skel->progs.handle_uretprobe_byname); if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname")) - goto cleanup; + return; + + /* trigger & validate kprobe && kretprobe */ + usleep(1); + + /* trigger & validate uprobe attached by name */ + trigger_func2(); + + ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res"); + ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res"); + ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res"); +} + +static void test_uprobe_lib(struct test_attach_probe *skel) +{ + DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); + FILE *devnull; /* test attach by name for a library function, using the library * as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo(). 
@@ -155,7 +160,7 @@ void test_attach_probe(void) "libc.so.6", 0, &uprobe_opts); if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2")) - goto cleanup; + return; uprobe_opts.func_name = "fclose"; uprobe_opts.retprobe = true; @@ -165,62 +170,137 @@ void test_attach_probe(void) "libc.so.6", 0, &uprobe_opts); if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2")) + return; + + /* trigger & validate shared library u[ret]probes attached by name */ + devnull = fopen("/dev/null", "r"); + fclose(devnull); + + ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res"); + ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res"); +} + +static void test_uprobe_ref_ctr(struct test_attach_probe *skel) +{ + DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); + struct bpf_link *uprobe_link, *uretprobe_link; + ssize_t uprobe_offset, ref_ctr_offset; + + uprobe_offset = get_uprobe_offset(&trigger_func4); + if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset_ref_ctr")) + return; + + ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr); + if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset")) + return; + + ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before"); + + uprobe_opts.retprobe = false; + uprobe_opts.ref_ctr_offset = ref_ctr_offset; + uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_ref_ctr, + 0 /* self pid */, + "/proc/self/exe", + uprobe_offset, + &uprobe_opts); + if (!ASSERT_OK_PTR(uprobe_link, "attach_uprobe_ref_ctr")) + return; + skel->links.handle_uprobe_ref_ctr = uprobe_link; + + ASSERT_GT(uprobe_ref_ctr, 0, "uprobe_ref_ctr_after"); + + /* if uprobe uses ref_ctr, uretprobe has to use ref_ctr as well */ + uprobe_opts.retprobe = true; + uprobe_opts.ref_ctr_offset = ref_ctr_offset; + uretprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_ref_ctr, + -1 /* any pid */, + "/proc/self/exe", + uprobe_offset, &uprobe_opts); + if (!ASSERT_OK_PTR(uretprobe_link, "attach_uretprobe_ref_ctr")) + return; + skel->links.handle_uretprobe_ref_ctr = uretprobe_link; +} + +static void test_kprobe_sleepable(void) +{ + struct test_attach_kprobe_sleepable *skel; + + skel = test_attach_kprobe_sleepable__open(); + if (!ASSERT_OK_PTR(skel, "skel_kprobe_sleepable_open")) + return; + + /* sleepable kprobe test case needs flags set before loading */ + if (!ASSERT_OK(bpf_program__set_flags(skel->progs.handle_kprobe_sleepable, + BPF_F_SLEEPABLE), "kprobe_sleepable_flags")) + goto cleanup; + + if (!ASSERT_OK(test_attach_kprobe_sleepable__load(skel), + "skel_kprobe_sleepable_load")) goto cleanup; /* sleepable kprobes should not attach successfully */ skel->links.handle_kprobe_sleepable = bpf_program__attach(skel->progs.handle_kprobe_sleepable); - if (!ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable")) - goto cleanup; + ASSERT_ERR_PTR(skel->links.handle_kprobe_sleepable, "attach_kprobe_sleepable"); +cleanup: + test_attach_kprobe_sleepable__destroy(skel); +} + +static void test_uprobe_sleepable(struct test_attach_probe *skel) +{ /* test sleepable uprobe and uretprobe variants */ skel->links.handle_uprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uprobe_byname3_sleepable); if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3_sleepable, "attach_uprobe_byname3_sleepable")) - goto cleanup; + return; skel->links.handle_uprobe_byname3 = bpf_program__attach(skel->progs.handle_uprobe_byname3); if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname3, 
"attach_uprobe_byname3")) - goto cleanup; + return; skel->links.handle_uretprobe_byname3_sleepable = bpf_program__attach(skel->progs.handle_uretprobe_byname3_sleepable); if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3_sleepable, "attach_uretprobe_byname3_sleepable")) - goto cleanup; + return; skel->links.handle_uretprobe_byname3 = bpf_program__attach(skel->progs.handle_uretprobe_byname3); if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname3, "attach_uretprobe_byname3")) - goto cleanup; + return; skel->bss->user_ptr = test_data; - /* trigger & validate kprobe && kretprobe */ - usleep(1); - - /* trigger & validate shared library u[ret]probes attached by name */ - devnull = fopen("/dev/null", "r"); - fclose(devnull); - - /* trigger & validate uprobe & uretprobe */ - trigger_func(); - - /* trigger & validate uprobe attached by name */ - trigger_func2(); - /* trigger & validate sleepable uprobe attached by name */ trigger_func3(); - ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res"); - ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res"); - ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res"); - ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res"); - ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res"); - ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res"); - ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res"); - ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res"); - ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res"); - ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res"); ASSERT_EQ(skel->bss->uprobe_byname3_sleepable_res, 9, "check_uprobe_byname3_sleepable_res"); ASSERT_EQ(skel->bss->uprobe_byname3_res, 10, "check_uprobe_byname3_res"); ASSERT_EQ(skel->bss->uretprobe_byname3_sleepable_res, 11, "check_uretprobe_byname3_sleepable_res"); ASSERT_EQ(skel->bss->uretprobe_byname3_res, 12, "check_uretprobe_byname3_res"); +} + +void test_attach_probe(void) +{ + struct test_attach_probe *skel; + + skel = test_attach_probe__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + if (!ASSERT_OK(test_attach_probe__load(skel), "skel_load")) + goto cleanup; + if (!ASSERT_OK_PTR(skel->bss, "check_bss")) + goto cleanup; + + if (test__start_subtest("manual")) + test_attach_probe_manual(skel); + if (test__start_subtest("auto")) + test_attach_probe_auto(skel); + if (test__start_subtest("kprobe-sleepable")) + test_kprobe_sleepable(); + if (test__start_subtest("uprobe-lib")) + test_uprobe_lib(skel); + if (test__start_subtest("uprobe-sleepable")) + test_uprobe_sleepable(skel); + if (test__start_subtest("uprobe-ref_ctr")) + test_uprobe_ref_ctr(skel); cleanup: test_attach_probe__destroy(skel); diff --git a/tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c b/tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c new file mode 100644 index 000000000000..f548b7446218 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017 Facebook + +#include "vmlinux.h" +#include +#include +#include +#include "bpf_misc.h" + +int kprobe_res = 0; + +/** + * This program will be manually made sleepable on the userspace side + * and should thus be unattachable. 
+ */ +SEC("kprobe/" SYS_PREFIX "sys_nanosleep") +int handle_kprobe_sleepable(struct pt_regs *ctx) +{ + kprobe_res = 1; + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c index 3b5dc34d23e9..9e1e7163bb67 100644 --- a/tools/testing/selftests/bpf/progs/test_attach_probe.c +++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c @@ -37,17 +37,6 @@ int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __ker return 0; } -/** - * This program will be manually made sleepable on the userspace side - * and should thus be unattachable. - */ -SEC("kprobe/" SYS_PREFIX "sys_nanosleep") -int handle_kprobe_sleepable(struct pt_regs *ctx) -{ - kprobe_res = 2; - return 0; -} - SEC("kretprobe") int handle_kretprobe(struct pt_regs *ctx) { @@ -76,6 +65,18 @@ int handle_uretprobe(struct pt_regs *ctx) return 0; } +SEC("uprobe") +int handle_uprobe_ref_ctr(struct pt_regs *ctx) +{ + return 0; +} + +SEC("uretprobe") +int handle_uretprobe_ref_ctr(struct pt_regs *ctx) +{ + return 0; +} + SEC("uprobe") int handle_uprobe_byname(struct pt_regs *ctx) { -- cgit v1.2.3-70-g09d2 From c7aec81b31e43a0aa94ee55d9bb33d70b1046f76 Mon Sep 17 00:00:00 2001 From: Menglong Dong Date: Mon, 6 Mar 2023 14:48:33 +0800 Subject: selftests/bpf: Add test for legacy/perf kprobe/uprobe attach mode Add the testing for kprobe/uprobe attaching in default, legacy, perf and link mode. And the testing passed: ./test_progs -t attach_probe $5/1 attach_probe/manual-default:OK $5/2 attach_probe/manual-legacy:OK $5/3 attach_probe/manual-perf:OK $5/4 attach_probe/manual-link:OK $5/5 attach_probe/auto:OK $5/6 attach_probe/kprobe-sleepable:OK $5/7 attach_probe/uprobe-lib:OK $5/8 attach_probe/uprobe-sleepable:OK $5/9 attach_probe/uprobe-ref_ctr:OK $5 attach_probe:OK Summary: 1/9 PASSED, 0 SKIPPED, 0 FAILED Signed-off-by: Menglong Dong Signed-off-by: Andrii Nakryiko Reviewed-by: Biao Jiang Link: https://lore.kernel.org/bpf/20230306064833.7932-4-imagedong@tencent.com --- .../selftests/bpf/prog_tests/attach_probe.c | 37 +++++++++++---- .../selftests/bpf/progs/test_attach_probe.c | 32 ------------- .../selftests/bpf/progs/test_attach_probe_manual.c | 53 ++++++++++++++++++++++ 3 files changed, 81 insertions(+), 41 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/test_attach_probe_manual.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index c374759f39ce..7175af39134f 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include "test_attach_kprobe_sleepable.skel.h" +#include "test_attach_probe_manual.skel.h" #include "test_attach_probe.skel.h" /* this is how USDT semaphore is actually defined, except volatile modifier */ @@ -33,33 +34,43 @@ static noinline void trigger_func4(void) static char test_data[] = "test_data"; /* manual attach kprobe/kretprobe/uprobe/uretprobe testings */ -static void test_attach_probe_manual(struct test_attach_probe *skel) +static void test_attach_probe_manual(enum probe_attach_mode attach_mode) { DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); + DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts); struct bpf_link *kprobe_link, *kretprobe_link; struct bpf_link *uprobe_link, *uretprobe_link; + struct test_attach_probe_manual *skel; 
ssize_t uprobe_offset; + skel = test_attach_probe_manual__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_kprobe_manual_open_and_load")) + return; + uprobe_offset = get_uprobe_offset(&trigger_func); if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset")) goto cleanup; /* manual-attach kprobe/kretprobe */ - kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe, - false /* retprobe */, - SYS_NANOSLEEP_KPROBE_NAME); + kprobe_opts.attach_mode = attach_mode; + kprobe_opts.retprobe = false; + kprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe, + SYS_NANOSLEEP_KPROBE_NAME, + &kprobe_opts); if (!ASSERT_OK_PTR(kprobe_link, "attach_kprobe")) goto cleanup; skel->links.handle_kprobe = kprobe_link; - kretprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kretprobe, - true /* retprobe */, - SYS_NANOSLEEP_KPROBE_NAME); + kprobe_opts.retprobe = true; + kretprobe_link = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe, + SYS_NANOSLEEP_KPROBE_NAME, + &kprobe_opts); if (!ASSERT_OK_PTR(kretprobe_link, "attach_kretprobe")) goto cleanup; skel->links.handle_kretprobe = kretprobe_link; /* manual-attach uprobe/uretprobe */ + uprobe_opts.attach_mode = attach_mode; uprobe_opts.ref_ctr_offset = 0; uprobe_opts.retprobe = false; uprobe_link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, @@ -108,6 +119,7 @@ static void test_attach_probe_manual(struct test_attach_probe *skel) ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res"); cleanup: + test_attach_probe_manual__destroy(skel); } static void test_attach_probe_auto(struct test_attach_probe *skel) @@ -289,8 +301,15 @@ void test_attach_probe(void) if (!ASSERT_OK_PTR(skel->bss, "check_bss")) goto cleanup; - if (test__start_subtest("manual")) - test_attach_probe_manual(skel); + if (test__start_subtest("manual-default")) + test_attach_probe_manual(PROBE_ATTACH_MODE_DEFAULT); + if (test__start_subtest("manual-legacy")) + test_attach_probe_manual(PROBE_ATTACH_MODE_LEGACY); + if (test__start_subtest("manual-perf")) + test_attach_probe_manual(PROBE_ATTACH_MODE_PERF); + if (test__start_subtest("manual-link")) + test_attach_probe_manual(PROBE_ATTACH_MODE_LINK); + if (test__start_subtest("auto")) test_attach_probe_auto(skel); if (test__start_subtest("kprobe-sleepable")) diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe.c b/tools/testing/selftests/bpf/progs/test_attach_probe.c index 9e1e7163bb67..68466a6ad18c 100644 --- a/tools/testing/selftests/bpf/progs/test_attach_probe.c +++ b/tools/testing/selftests/bpf/progs/test_attach_probe.c @@ -7,12 +7,8 @@ #include #include "bpf_misc.h" -int kprobe_res = 0; int kprobe2_res = 0; -int kretprobe_res = 0; int kretprobe2_res = 0; -int uprobe_res = 0; -int uretprobe_res = 0; int uprobe_byname_res = 0; int uretprobe_byname_res = 0; int uprobe_byname2_res = 0; @@ -23,13 +19,6 @@ int uretprobe_byname3_sleepable_res = 0; int uretprobe_byname3_res = 0; void *user_ptr = 0; -SEC("kprobe") -int handle_kprobe(struct pt_regs *ctx) -{ - kprobe_res = 1; - return 0; -} - SEC("ksyscall/nanosleep") int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __kernel_timespec *rem) { @@ -37,13 +26,6 @@ int BPF_KSYSCALL(handle_kprobe_auto, struct __kernel_timespec *req, struct __ker return 0; } -SEC("kretprobe") -int handle_kretprobe(struct pt_regs *ctx) -{ - kretprobe_res = 2; - return 0; -} - SEC("kretsyscall/nanosleep") int BPF_KRETPROBE(handle_kretprobe_auto, int ret) { @@ -51,20 +33,6 @@ int BPF_KRETPROBE(handle_kretprobe_auto, int ret) return 
ret; } -SEC("uprobe") -int handle_uprobe(struct pt_regs *ctx) -{ - uprobe_res = 3; - return 0; -} - -SEC("uretprobe") -int handle_uretprobe(struct pt_regs *ctx) -{ - uretprobe_res = 4; - return 0; -} - SEC("uprobe") int handle_uprobe_ref_ctr(struct pt_regs *ctx) { diff --git a/tools/testing/selftests/bpf/progs/test_attach_probe_manual.c b/tools/testing/selftests/bpf/progs/test_attach_probe_manual.c new file mode 100644 index 000000000000..7f08bce94596 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_attach_probe_manual.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2017 Facebook + +#include "vmlinux.h" +#include +#include +#include +#include "bpf_misc.h" + +int kprobe_res = 0; +int kretprobe_res = 0; +int uprobe_res = 0; +int uretprobe_res = 0; +int uprobe_byname_res = 0; +void *user_ptr = 0; + +SEC("kprobe") +int handle_kprobe(struct pt_regs *ctx) +{ + kprobe_res = 1; + return 0; +} + +SEC("kretprobe") +int handle_kretprobe(struct pt_regs *ctx) +{ + kretprobe_res = 2; + return 0; +} + +SEC("uprobe") +int handle_uprobe(struct pt_regs *ctx) +{ + uprobe_res = 3; + return 0; +} + +SEC("uretprobe") +int handle_uretprobe(struct pt_regs *ctx) +{ + uretprobe_res = 4; + return 0; +} + +SEC("uprobe") +int handle_uprobe_byname(struct pt_regs *ctx) +{ + uprobe_byname_res = 5; + return 0; +} + + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From 6bb382bcf742de5c17209848325653059c995a04 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 7 Mar 2023 16:31:32 -0500 Subject: selftests: add a selftest for big tcp This test runs on the client-router-server topo, and monitors the traffic on the RX devices of router and server while sending BIG TCP packets with netperf from client to server. Meanwhile, it changes 'tso' on the TX devs and 'gro' on the RX devs. Then it checks if any BIG TCP packets appears on the RX devs with 'ip/ip6tables -m length ! --length 0:65535' for each case. Note that we also add tc action ct in link1 ingress to cover the ipv6 jumbo packets process in nf_ct_skb_network_trim() of nf_conntrack_ovs. Signed-off-by: Xin Long Reviewed-by: Aaron Conole Reviewed-by: Nikolay Aleksandrov Signed-off-by: Florian Westphal --- tools/testing/selftests/net/Makefile | 1 + tools/testing/selftests/net/big_tcp.sh | 180 +++++++++++++++++++++++++++++++++ 2 files changed, 181 insertions(+) create mode 100755 tools/testing/selftests/net/big_tcp.sh (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 6cd8993454d7..099741290184 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -48,6 +48,7 @@ TEST_PROGS += l2_tos_ttl_inherit.sh TEST_PROGS += bind_bhash.sh TEST_PROGS += ip_local_port_range.sh TEST_PROGS += rps_default_mask.sh +TEST_PROGS += big_tcp.sh TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh TEST_GEN_FILES = socket nettest diff --git a/tools/testing/selftests/net/big_tcp.sh b/tools/testing/selftests/net/big_tcp.sh new file mode 100755 index 000000000000..cde9a91c4797 --- /dev/null +++ b/tools/testing/selftests/net/big_tcp.sh @@ -0,0 +1,180 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Testing For IPv4 and IPv6 BIG TCP. 
+# TOPO: CLIENT_NS (link0)<--->(link1) ROUTER_NS (link2)<--->(link3) SERVER_NS + +CLIENT_NS=$(mktemp -u client-XXXXXXXX) +CLIENT_IP4="198.51.100.1" +CLIENT_IP6="2001:db8:1::1" + +SERVER_NS=$(mktemp -u server-XXXXXXXX) +SERVER_IP4="203.0.113.1" +SERVER_IP6="2001:db8:2::1" + +ROUTER_NS=$(mktemp -u router-XXXXXXXX) +SERVER_GW4="203.0.113.2" +CLIENT_GW4="198.51.100.2" +SERVER_GW6="2001:db8:2::2" +CLIENT_GW6="2001:db8:1::2" + +MAX_SIZE=128000 +CHK_SIZE=65535 + +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +setup() { + ip netns add $CLIENT_NS + ip netns add $SERVER_NS + ip netns add $ROUTER_NS + ip -net $ROUTER_NS link add link1 type veth peer name link0 netns $CLIENT_NS + ip -net $ROUTER_NS link add link2 type veth peer name link3 netns $SERVER_NS + + ip -net $CLIENT_NS link set link0 up + ip -net $CLIENT_NS link set link0 mtu 1442 + ip -net $CLIENT_NS addr add $CLIENT_IP4/24 dev link0 + ip -net $CLIENT_NS addr add $CLIENT_IP6/64 dev link0 nodad + ip -net $CLIENT_NS route add $SERVER_IP4 dev link0 via $CLIENT_GW4 + ip -net $CLIENT_NS route add $SERVER_IP6 dev link0 via $CLIENT_GW6 + ip -net $CLIENT_NS link set dev link0 \ + gro_ipv4_max_size $MAX_SIZE gso_ipv4_max_size $MAX_SIZE + ip -net $CLIENT_NS link set dev link0 \ + gro_max_size $MAX_SIZE gso_max_size $MAX_SIZE + ip net exec $CLIENT_NS sysctl -wq net.ipv4.tcp_window_scaling=10 + + ip -net $ROUTER_NS link set link1 up + ip -net $ROUTER_NS link set link2 up + ip -net $ROUTER_NS addr add $CLIENT_GW4/24 dev link1 + ip -net $ROUTER_NS addr add $CLIENT_GW6/64 dev link1 nodad + ip -net $ROUTER_NS addr add $SERVER_GW4/24 dev link2 + ip -net $ROUTER_NS addr add $SERVER_GW6/64 dev link2 nodad + ip -net $ROUTER_NS link set dev link1 \ + gro_ipv4_max_size $MAX_SIZE gso_ipv4_max_size $MAX_SIZE + ip -net $ROUTER_NS link set dev link2 \ + gro_ipv4_max_size $MAX_SIZE gso_ipv4_max_size $MAX_SIZE + ip -net $ROUTER_NS link set dev link1 \ + gro_max_size $MAX_SIZE gso_max_size $MAX_SIZE + ip -net $ROUTER_NS link set dev link2 \ + gro_max_size $MAX_SIZE gso_max_size $MAX_SIZE + # test for nf_ct_skb_network_trim in nf_conntrack_ovs used by TC ct action. + ip net exec $ROUTER_NS tc qdisc add dev link1 ingress + ip net exec $ROUTER_NS tc filter add dev link1 ingress \ + proto ip flower ip_proto tcp action ct + ip net exec $ROUTER_NS tc filter add dev link1 ingress \ + proto ipv6 flower ip_proto tcp action ct + ip net exec $ROUTER_NS sysctl -wq net.ipv4.ip_forward=1 + ip net exec $ROUTER_NS sysctl -wq net.ipv6.conf.all.forwarding=1 + + ip -net $SERVER_NS link set link3 up + ip -net $SERVER_NS addr add $SERVER_IP4/24 dev link3 + ip -net $SERVER_NS addr add $SERVER_IP6/64 dev link3 nodad + ip -net $SERVER_NS route add $CLIENT_IP4 dev link3 via $SERVER_GW4 + ip -net $SERVER_NS route add $CLIENT_IP6 dev link3 via $SERVER_GW6 + ip -net $SERVER_NS link set dev link3 \ + gro_ipv4_max_size $MAX_SIZE gso_ipv4_max_size $MAX_SIZE + ip -net $SERVER_NS link set dev link3 \ + gro_max_size $MAX_SIZE gso_max_size $MAX_SIZE + ip net exec $SERVER_NS sysctl -wq net.ipv4.tcp_window_scaling=10 + ip net exec $SERVER_NS netserver 2>&1 >/dev/null +} + +cleanup() { + ip net exec $SERVER_NS pkill netserver + ip -net $ROUTER_NS link del link1 + ip -net $ROUTER_NS link del link2 + ip netns del "$CLIENT_NS" + ip netns del "$SERVER_NS" + ip netns del "$ROUTER_NS" +} + +start_counter() { + local ipt="iptables" + local iface=$1 + local netns=$2 + + [ "$NF" = "6" ] && ipt="ip6tables" + ip net exec $netns $ipt -t raw -A PREROUTING -i $iface \ + -m length ! 
--length 0:$CHK_SIZE -j ACCEPT +} + +check_counter() { + local ipt="iptables" + local iface=$1 + local netns=$2 + + [ "$NF" = "6" ] && ipt="ip6tables" + test `ip net exec $netns $ipt -t raw -L -v |grep $iface | awk '{print $1}'` != "0" +} + +stop_counter() { + local ipt="iptables" + local iface=$1 + local netns=$2 + + [ "$NF" = "6" ] && ipt="ip6tables" + ip net exec $netns $ipt -t raw -D PREROUTING -i $iface \ + -m length ! --length 0:$CHK_SIZE -j ACCEPT +} + +do_netperf() { + local serip=$SERVER_IP4 + local netns=$1 + + [ "$NF" = "6" ] && serip=$SERVER_IP6 + ip net exec $netns netperf -$NF -t TCP_STREAM -H $serip 2>&1 >/dev/null +} + +do_test() { + local cli_tso=$1 + local gw_gro=$2 + local gw_tso=$3 + local ser_gro=$4 + local ret="PASS" + + ip net exec $CLIENT_NS ethtool -K link0 tso $cli_tso + ip net exec $ROUTER_NS ethtool -K link1 gro $gw_gro + ip net exec $ROUTER_NS ethtool -K link2 tso $gw_tso + ip net exec $SERVER_NS ethtool -K link3 gro $ser_gro + + start_counter link1 $ROUTER_NS + start_counter link3 $SERVER_NS + do_netperf $CLIENT_NS + + if check_counter link1 $ROUTER_NS; then + check_counter link3 $SERVER_NS || ret="FAIL_on_link3" + else + ret="FAIL_on_link1" + fi + + stop_counter link1 $ROUTER_NS + stop_counter link3 $SERVER_NS + printf "%-9s %-8s %-8s %-8s: [%s]\n" \ + $cli_tso $gw_gro $gw_tso $ser_gro $ret + test $ret = "PASS" +} + +testup() { + echo "CLI GSO | GW GRO | GW GSO | SER GRO" && \ + do_test "on" "on" "on" "on" && \ + do_test "on" "off" "on" "off" && \ + do_test "off" "on" "on" "on" && \ + do_test "on" "on" "off" "on" && \ + do_test "off" "on" "off" "on" +} + +if ! netperf -V &> /dev/null; then + echo "SKIP: Could not run test without netperf tool" + exit $ksft_skip +fi + +if ! ip link help 2>&1 | grep gso_ipv4_max_size &> /dev/null; then + echo "SKIP: Could not run test without gso/gro_ipv4_max_size supported in ip-link" + exit $ksft_skip +fi + +trap cleanup EXIT +setup && echo "Testing for BIG TCP:" && \ +NF=4 testup && echo "***v4 Tests Done***" && \ +NF=6 testup && echo "***v6 Tests Done***" +exit $? -- cgit v1.2.3-70-g09d2 From 12fabae03ca6474fd571bf6ddb37d009533305d6 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 8 Mar 2023 11:37:13 +0100 Subject: selftests/bpf: Fix IMA test Commit 62622dab0a28 ("ima: return IMA digest value only when IMA_COLLECTED flag is set") caused bpf_ima_inode_hash() to refuse to give non-fresh digests. IMA test #3 assumed the old behavior, that bpf_ima_inode_hash() still returned also non-fresh digests. Correct the test by accepting both cases. If the samples returned are 1, assume that the commit above is applied and that the returned digest is fresh. If the samples returned are 2, assume that the commit above is not applied, and check both the non-fresh and fresh digest. 
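In pseudo-code, the acceptance logic becomes roughly the following
(sketch only, mirroring the hunk below):

	err = ring_buffer__consume(ringbuf);
	ASSERT_GE(err, 1, "num_samples_or_err");
	if (err == 2) {
		/* old behavior: sample 0 is the non-fresh digest */
		ASSERT_EQ(ima_hash_from_bpf[0], bin_true_sample, ...);
		fresh_digest_idx = 1;
	}
	/* in both cases the last sample must be a fresh digest */
	ASSERT_NEQ(ima_hash_from_bpf[fresh_digest_idx], bin_true_sample, ...);
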
Fixes: 62622dab0a28 ("ima: return IMA digest value only when IMA_COLLECTED flag is set") Reported-by: David Vernet Signed-off-by: Roberto Sassu Signed-off-by: Andrii Nakryiko Reviewed-by: Matt Bobrowski Link: https://lore.kernel.org/bpf/20230308103713.1681200-1-roberto.sassu@huaweicloud.com --- tools/testing/selftests/bpf/prog_tests/test_ima.c | 29 ++++++++++++++++------- 1 file changed, 21 insertions(+), 8 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c index b13feceb38f1..810b14981c2e 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_ima.c +++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c @@ -70,7 +70,7 @@ void test_test_ima(void) u64 bin_true_sample; char cmd[256]; - int err, duration = 0; + int err, duration = 0, fresh_digest_idx = 0; struct ima *skel = NULL; skel = ima__open_and_load(); @@ -129,7 +129,15 @@ void test_test_ima(void) /* * Test #3 * - Goal: confirm that bpf_ima_inode_hash() returns a non-fresh digest - * - Expected result: 2 samples (/bin/true: non-fresh, fresh) + * - Expected result: + * 1 sample (/bin/true: fresh) if commit 62622dab0a28 applied + * 2 samples (/bin/true: non-fresh, fresh) if commit 62622dab0a28 is + * not applied + * + * If commit 62622dab0a28 ("ima: return IMA digest value only when + * IMA_COLLECTED flag is set") is applied, bpf_ima_inode_hash() refuses + * to give a non-fresh digest, hence the correct result is 1 instead of + * 2. */ test_init(skel->bss); @@ -144,13 +152,18 @@ void test_test_ima(void) goto close_clean; err = ring_buffer__consume(ringbuf); - ASSERT_EQ(err, 2, "num_samples_or_err"); - ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); - ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); - ASSERT_EQ(ima_hash_from_bpf[0], bin_true_sample, "sample_equal_or_err"); + ASSERT_GE(err, 1, "num_samples_or_err"); + if (err == 2) { + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + ASSERT_EQ(ima_hash_from_bpf[0], bin_true_sample, + "sample_equal_or_err"); + fresh_digest_idx = 1; + } + + ASSERT_NEQ(ima_hash_from_bpf[fresh_digest_idx], 0, "ima_hash"); /* IMA refreshed the digest. */ - ASSERT_NEQ(ima_hash_from_bpf[1], bin_true_sample, - "sample_different_or_err"); + ASSERT_NEQ(ima_hash_from_bpf[fresh_digest_idx], bin_true_sample, + "sample_equal_or_err"); /* * Test #4 -- cgit v1.2.3-70-g09d2 From 8c2b5e90505e474f36ecc3b7f3f8298b59d72e91 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 8 Mar 2023 10:41:18 -0800 Subject: selftests/bpf: add bpf_for_each(), bpf_for(), and bpf_repeat() macros Add bpf_for_each(), bpf_for(), and bpf_repeat() macros that make writing open-coded iterator-based loops much more convenient and natural. These macros utilize cleanup attribute to ensure proper destruction of the iterator and thanks to that manage to provide the ergonomics that is very close to C language's for() construct. Typical loop would look like: int i; int arr[N]; bpf_for(i, 0, N) { /* verifier will know that i >= 0 && i < N, so could be used to * directly access array elements with no extra checks */ arr[i] = i; } bpf_repeat() is very similar, but it doesn't expose iteration number and is meant as a simple "repeat action N times" loop: bpf_repeat(N) { /* whatever, N times */ } Note that `break` and `continue` statements inside the {} block work as expected. 
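[ Editor's illustration, not part of the patch: a complete minimal BPF object
  built around these two macros might look roughly like the sketch below. The
  program, its names and the chosen tracepoint are made up; it assumes the
  bpf_misc.h additions from this patch plus a vmlinux.h (or UAPI linux/bpf.h)
  that already defines struct bpf_iter_num, and a kernel exposing the
  bpf_iter_num_{new,next,destroy}() kfuncs.

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include "bpf_misc.h"

    char _license[] SEC("license") = "GPL";

    int squares[16];

    SEC("raw_tp/sys_enter")
    int compute_squares(const void *ctx)
    {
            int i, sum = 0;

            /* verifier proves 0 <= i < 16, so squares[i] needs no extra bounds check */
            bpf_for(i, 0, 16) {
                    squares[i] = i * i;
                    sum += squares[i];
            }

            /* fixed number of repetitions, no iteration variable needed */
            bpf_repeat(2) {
                    bpf_printk("sum of squares 0..15 = %d", sum);
            }

            return 0;
    }

  Spelled out by hand, each of these loops would need an explicit
  bpf_iter_num_new(), a NULL-checked bpf_iter_num_next() and a
  bpf_iter_num_destroy() on every exit path; the cleanup attribute inside the
  macros is what guarantees the destroy call even when the body breaks out
  early. ]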
bpf_for_each() is a generalization over any kind of BPF open-coded iterator allowing to use for-each-like approach instead of calling low-level bpf_iter__{new,next,destroy}() APIs explicitly. E.g.: struct cgroup *cg; bpf_for_each(cgroup, cg, some, input, args) { /* do something with each cg */ } would call (not-yet-implemented) bpf_iter_cgroup_{new,next,destroy}() functions to form a loop over cgroups, where `some, input, args` are passed verbatim into constructor as bpf_iter_cgroup_new(&it, some, input, args). As a first demonstration, add pyperf variant based on the bpf_for() loop. Also clean up a few tests that either included bpf_misc.h header unnecessarily from the user-space, which is unsupported, or included it before any common types are defined (and thus leading to unnecessary compilation warnings, potentially). Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230308184121.1165081-6-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/bpf_verif_scale.c | 6 ++ .../selftests/bpf/prog_tests/uprobe_autoattach.c | 1 - tools/testing/selftests/bpf/progs/bpf_misc.h | 99 ++++++++++++++++++++++ tools/testing/selftests/bpf/progs/lsm.c | 4 +- tools/testing/selftests/bpf/progs/pyperf.h | 14 ++- tools/testing/selftests/bpf/progs/pyperf600_iter.c | 7 ++ .../selftests/bpf/progs/pyperf600_nounroll.c | 3 - 7 files changed, 124 insertions(+), 10 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/pyperf600_iter.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index 5ca252823294..731c343897d8 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -144,6 +144,12 @@ void test_verif_scale_pyperf600_nounroll() scale_test("pyperf600_nounroll.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); } +void test_verif_scale_pyperf600_iter() +{ + /* open-coded BPF iterator version */ + scale_test("pyperf600_iter.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); +} + void test_verif_scale_loop1() { scale_test("loop1.bpf.o", BPF_PROG_TYPE_RAW_TRACEPOINT, false); diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c index 6558c857e620..d5b3377aa33c 100644 --- a/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c @@ -3,7 +3,6 @@ #include #include "test_uprobe_autoattach.skel.h" -#include "progs/bpf_misc.h" /* uprobe attach point */ static noinline int autoattach_trigger_func(int arg1, int arg2, int arg3, diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index f704885aa534..597688a188ae 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -75,5 +75,104 @@ #define FUNC_REG_ARG_CNT 5 #endif +struct bpf_iter_num; + +extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym; +extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym; +extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym; + +#ifndef bpf_for_each +/* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for + * using BPF open-coded iterators without having to write mundane explicit + * low-level loop logic. Instead, it provides for()-like generic construct + * that can be used pretty naturally. 
E.g., for some hypothetical cgroup + * iterator, you'd write: + * + * struct cgroup *cg, *parent_cg = <...>; + * + * bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) { + * bpf_printk("Child cgroup id = %d", cg->cgroup_id); + * if (cg->cgroup_id == 123) + * break; + * } + * + * I.e., it looks almost like high-level for each loop in other languages, + * supports continue/break, and is verifiable by BPF verifier. + * + * For iterating integers, the difference betwen bpf_for_each(num, i, N, M) + * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to + * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int + * *`, not just `int`. So for integers bpf_for() is more convenient. + * + * Note: this macro relies on C99 feature of allowing to declare variables + * inside for() loop, bound to for() loop lifetime. It also utilizes GCC + * extension: __attribute__((cleanup())), supported by both GCC and + * Clang. + */ +#define bpf_for_each(type, cur, args...) for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \ + cleanup(bpf_iter_##type##_destroy))), \ + /* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \ + *___p = (bpf_iter_##type##_new(&___it, ##args), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_##type##_destroy, (void *)0); \ + /* iteration and termination check */ \ + (((cur) = bpf_iter_##type##_next(&___it))); \ +) +#endif /* bpf_for_each */ + +#ifndef bpf_for +/* bpf_for(i, start, end) implements a for()-like looping construct that sets + * provided integer variable *i* to values starting from *start* through, + * but not including, *end*. It also proves to BPF verifier that *i* belongs + * to range [start, end), so this can be used for accessing arrays without + * extra checks. + * + * Note: *start* and *end* are assumed to be expressions with no side effects + * and whose values do not change throughout bpf_for() loop execution. They do + * not have to be statically known or constant, though. + * + * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() + * loop bound variables and cleanup attribute, supported by GCC and Clang. + */ +#define bpf_for(i, start, end) for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ + cleanup(bpf_iter_num_destroy))), \ + /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ + *___p = (bpf_iter_num_new(&___it, (start), (end)), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_num_destroy, (void *)0); \ + ({ \ + /* iteration step */ \ + int *___t = bpf_iter_num_next(&___it); \ + /* termination and bounds check */ \ + (___t && ((i) = *___t, (i) >= (start) && (i) < (end))); \ + }); \ +) +#endif /* bpf_for */ + +#ifndef bpf_repeat +/* bpf_repeat(N) performs N iterations without exposing iteration number + * + * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() + * loop bound variables and cleanup attribute, supported by GCC and Clang. 
+ */ +#define bpf_repeat(N) for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ + cleanup(bpf_iter_num_destroy))), \ + /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ + *___p = (bpf_iter_num_new(&___it, 0, (N)), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_num_destroy, (void *)0); \ + bpf_iter_num_next(&___it); \ + /* nothing here */ \ +) +#endif /* bpf_repeat */ #endif diff --git a/tools/testing/selftests/bpf/progs/lsm.c b/tools/testing/selftests/bpf/progs/lsm.c index dc93887ed34c..fadfdd98707c 100644 --- a/tools/testing/selftests/bpf/progs/lsm.c +++ b/tools/testing/selftests/bpf/progs/lsm.c @@ -4,12 +4,12 @@ * Copyright 2020 Google LLC. */ -#include "bpf_misc.h" #include "vmlinux.h" +#include #include #include #include -#include +#include "bpf_misc.h" struct { __uint(type, BPF_MAP_TYPE_ARRAY); diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h index 6c7b1fb268d6..f2e7a31c8d75 100644 --- a/tools/testing/selftests/bpf/progs/pyperf.h +++ b/tools/testing/selftests/bpf/progs/pyperf.h @@ -7,6 +7,7 @@ #include #include #include +#include "bpf_misc.h" #define FUNCTION_NAME_LEN 64 #define FILE_NAME_LEN 128 @@ -294,17 +295,22 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx) if (ctx.done) return 0; #else -#ifdef NO_UNROLL +#if defined(USE_ITER) +/* no for loop, no unrolling */ +#elif defined(NO_UNROLL) #pragma clang loop unroll(disable) -#else -#ifdef UNROLL_COUNT +#elif defined(UNROLL_COUNT) #pragma clang loop unroll_count(UNROLL_COUNT) #else #pragma clang loop unroll(full) -#endif #endif /* NO_UNROLL */ /* Unwind python stack */ +#ifdef USE_ITER + int i; + bpf_for(i, 0, STACK_MAX_LEN) { +#else /* !USE_ITER */ for (int i = 0; i < STACK_MAX_LEN; ++i) { +#endif if (frame_ptr && get_frame_data(frame_ptr, pidData, &frame, &sym)) { int32_t new_symbol_id = *symbol_counter * 64 + cur_cpu; int32_t *symbol_id = bpf_map_lookup_elem(&symbolmap, &sym); diff --git a/tools/testing/selftests/bpf/progs/pyperf600_iter.c b/tools/testing/selftests/bpf/progs/pyperf600_iter.c new file mode 100644 index 000000000000..d62e1b200c30 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/pyperf600_iter.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2023 Meta Platforms, Inc. and affiliates. +#define STACK_MAX_LEN 600 +#define SUBPROGS +#define NO_UNROLL +#define USE_ITER +#include "pyperf.h" diff --git a/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c b/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c index 6beff7502f4d..520b58c4f8db 100644 --- a/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c +++ b/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c @@ -2,7 +2,4 @@ // Copyright (c) 2019 Facebook #define STACK_MAX_LEN 600 #define NO_UNROLL -/* clang will not unroll at all. - * Total program size is around 2k insns - */ #include "pyperf.h" -- cgit v1.2.3-70-g09d2 From 57400dcce6c2cf3985120c4ee28b37a1f4238dbb Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 8 Mar 2023 10:41:19 -0800 Subject: selftests/bpf: add iterators tests Add various tests for open-coded iterators. Some of them excercise various possible coding patterns in C, some go down to low-level assembly for more control over various conditions, especially invalid ones. 
We also make use of bpf_for(), bpf_for_each(), bpf_repeat() macros in some of these tests. Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230308184121.1165081-7-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/iters.c | 15 + tools/testing/selftests/bpf/progs/bpf_misc.h | 1 + tools/testing/selftests/bpf/progs/iters.c | 720 +++++++++++++++++++++ tools/testing/selftests/bpf/progs/iters_looping.c | 163 +++++ .../selftests/bpf/progs/iters_state_safety.c | 426 ++++++++++++ 5 files changed, 1325 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/iters.c create mode 100644 tools/testing/selftests/bpf/progs/iters.c create mode 100644 tools/testing/selftests/bpf/progs/iters_looping.c create mode 100644 tools/testing/selftests/bpf/progs/iters_state_safety.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/iters.c b/tools/testing/selftests/bpf/prog_tests/iters.c new file mode 100644 index 000000000000..414fb8d82145 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/iters.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include + +#include "iters.skel.h" +#include "iters_state_safety.skel.h" +#include "iters_looping.skel.h" + +void test_iters(void) +{ + RUN_TESTS(iters_state_safety); + RUN_TESTS(iters_looping); + RUN_TESTS(iters); +} diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 597688a188ae..43b154a639e7 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -36,6 +36,7 @@ #define __clobber_common "r0", "r1", "r2", "r3", "r4", "r5", "memory" #define __imm(name) [name]"i"(name) #define __imm_addr(name) [name]"i"(&name) +#define __imm_ptr(name) [name]"p"(&name) #if defined(__TARGET_ARCH_x86) #define SYSCALL_WRAPPER 1 diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c new file mode 100644 index 000000000000..84e5dc10243c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -0,0 +1,720 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include +#include +#include "bpf_misc.h" + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +static volatile int zero = 0; + +int my_pid; +int arr[256]; +int small_arr[16] SEC(".data.small_arr"); + +#ifdef REAL_TEST +#define MY_PID_GUARD() if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0 +#else +#define MY_PID_GUARD() ({ }) +#endif + +SEC("?raw_tp") +__failure __msg("math between map_value pointer and register with unbounded min value is not allowed") +int iter_err_unsafe_c_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v, i = zero; /* obscure initial value of i */ + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 1000); + while ((v = bpf_iter_num_next(&it))) { + i++; + } + bpf_iter_num_destroy(&it); + + small_arr[i] = 123; /* invalid */ + + return 0; +} + +SEC("?raw_tp") +__failure __msg("unbounded memory access") +int iter_err_unsafe_asm_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v, i = 0; + + MY_PID_GUARD(); + + asm volatile ( + "r6 = %[zero];" /* iteration counter */ + "r1 = %[it];" /* iterator state */ + "r2 = 0;" + "r3 = 1000;" + "r4 = 1;" + "call %[bpf_iter_num_new];" + "loop:" + "r1 = %[it];" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto out;" + "r6 += 1;" + "goto loop;" + "out:" + "r1 = %[it];" + "call %[bpf_iter_num_destroy];" + "r1 = %[small_arr];" + "r2 = r6;" + "r2 <<= 2;" + "r1 += r2;" + "*(u32 *)(r1 + 0) = r6;" /* invalid */ + : + : [it]"r"(&it), + [small_arr]"p"(small_arr), + [zero]"p"(zero), + __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy) + : __clobber_common, "r6" + ); + + return 0; +} + +SEC("raw_tp") +__success +int iter_while_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v, i; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 3); + while ((v = bpf_iter_num_next(&it))) { + bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v); + } + bpf_iter_num_destroy(&it); + + return 0; +} + +SEC("raw_tp") +__success +int iter_while_loop_auto_cleanup(const void *ctx) +{ + __attribute__((cleanup(bpf_iter_num_destroy))) struct bpf_iter_num it; + int *v, i; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 3); + while ((v = bpf_iter_num_next(&it))) { + bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v); + } + /* (!) no explicit bpf_iter_num_destroy() */ + + return 0; +} + +SEC("raw_tp") +__success +int iter_for_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v, i; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 5, 10); + for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) { + bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v); + } + bpf_iter_num_destroy(&it); + + return 0; +} + +SEC("raw_tp") +__success +int iter_bpf_for_each_macro(const void *ctx) +{ + int *v; + + MY_PID_GUARD(); + + bpf_for_each(num, v, 5, 10) { + bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v); + } + + return 0; +} + +SEC("raw_tp") +__success +int iter_bpf_for_macro(const void *ctx) +{ + int i; + + MY_PID_GUARD(); + + bpf_for(i, 5, 10) { + bpf_printk("ITER_BASIC: E2 VAL: v=%d", i); + } + + return 0; +} + +SEC("raw_tp") +__success +int iter_pragma_unroll_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v, i; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 2); +#pragma nounroll + for (i = 0; i < 3; i++) { + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? 
*v : -1); + } + bpf_iter_num_destroy(&it); + + return 0; +} + +SEC("raw_tp") +__success +int iter_manual_unroll_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v, i; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 100, 200); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1); + bpf_iter_num_destroy(&it); + + return 0; +} + +SEC("raw_tp") +__success +int iter_multiple_sequential_loops(const void *ctx) +{ + struct bpf_iter_num it; + int *v, i; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 3); + while ((v = bpf_iter_num_next(&it))) { + bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v); + } + bpf_iter_num_destroy(&it); + + bpf_iter_num_new(&it, 5, 10); + for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) { + bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v); + } + bpf_iter_num_destroy(&it); + + bpf_iter_num_new(&it, 0, 2); +#pragma nounroll + for (i = 0; i < 3; i++) { + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1); + } + bpf_iter_num_destroy(&it); + + bpf_iter_num_new(&it, 100, 200); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1); + v = bpf_iter_num_next(&it); + bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1); + bpf_iter_num_destroy(&it); + + return 0; +} + +SEC("raw_tp") +__success +int iter_limit_cond_break_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v, i = 0, sum = 0; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 10); + while ((v = bpf_iter_num_next(&it))) { + bpf_printk("ITER_SIMPLE: i=%d v=%d", i, *v); + sum += *v; + + i++; + if (i > 3) + break; + } + bpf_iter_num_destroy(&it); + + bpf_printk("ITER_SIMPLE: sum=%d\n", sum); + + return 0; +} + +SEC("raw_tp") +__success +int iter_obfuscate_counter(const void *ctx) +{ + struct bpf_iter_num it; + int *v, sum = 0; + /* Make i's initial value unknowable for verifier to prevent it from + * pruning if/else branch inside the loop body and marking i as precise. + */ + int i = zero; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 10); + while ((v = bpf_iter_num_next(&it))) { + int x; + + i += 1; + + /* If we initialized i as `int i = 0;` above, verifier would + * track that i becomes 1 on first iteration after increment + * above, and here verifier would eagerly prune else branch + * and mark i as precise, ruining open-coded iterator logic + * completely, as each next iteration would have a different + * *precise* value of i, and thus there would be no + * convergence of state. This would result in reaching maximum + * instruction limit, no matter what the limit is. 
+ */ + if (i == 1) + x = 123; + else + x = i * 3 + 1; + + bpf_printk("ITER_OBFUSCATE_COUNTER: i=%d v=%d x=%d", i, *v, x); + + sum += x; + } + bpf_iter_num_destroy(&it); + + bpf_printk("ITER_OBFUSCATE_COUNTER: sum=%d\n", sum); + + return 0; +} + +SEC("raw_tp") +__success +int iter_search_loop(const void *ctx) +{ + struct bpf_iter_num it; + int *v, *elem = NULL; + bool found = false; + + MY_PID_GUARD(); + + bpf_iter_num_new(&it, 0, 10); + + while ((v = bpf_iter_num_next(&it))) { + bpf_printk("ITER_SEARCH_LOOP: v=%d", *v); + + if (*v == 2) { + found = true; + elem = v; + barrier_var(elem); + } + } + + /* should fail to verify if bpf_iter_num_destroy() is here */ + + if (found) + /* here found element will be wrong, we should have copied + * value to a variable, but here we want to make sure we can + * access memory after the loop anyways + */ + bpf_printk("ITER_SEARCH_LOOP: FOUND IT = %d!\n", *elem); + else + bpf_printk("ITER_SEARCH_LOOP: NOT FOUND IT!\n"); + + bpf_iter_num_destroy(&it); + + return 0; +} + +SEC("raw_tp") +__success +int iter_array_fill(const void *ctx) +{ + int sum, i; + + MY_PID_GUARD(); + + bpf_for(i, 0, ARRAY_SIZE(arr)) { + arr[i] = i * 2; + } + + sum = 0; + bpf_for(i, 0, ARRAY_SIZE(arr)) { + sum += arr[i]; + } + + bpf_printk("ITER_ARRAY_FILL: sum=%d (should be %d)\n", sum, 255 * 256); + + return 0; +} + +static int arr2d[4][5]; +static int arr2d_row_sums[4]; +static int arr2d_col_sums[5]; + +SEC("raw_tp") +__success +int iter_nested_iters(const void *ctx) +{ + int sum, row, col; + + MY_PID_GUARD(); + + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + bpf_for( col, 0, ARRAY_SIZE(arr2d[0])) { + arr2d[row][col] = row * col; + } + } + + /* zero-initialize sums */ + sum = 0; + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + arr2d_row_sums[row] = 0; + } + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { + arr2d_col_sums[col] = 0; + } + + /* calculate sums */ + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { + sum += arr2d[row][col]; + arr2d_row_sums[row] += arr2d[row][col]; + arr2d_col_sums[col] += arr2d[row][col]; + } + } + + bpf_printk("ITER_NESTED_ITERS: total sum=%d", sum); + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + bpf_printk("ITER_NESTED_ITERS: row #%d sum=%d", row, arr2d_row_sums[row]); + } + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { + bpf_printk("ITER_NESTED_ITERS: col #%d sum=%d%s", + col, arr2d_col_sums[col], + col == ARRAY_SIZE(arr2d[0]) - 1 ? 
"\n" : ""); + } + + return 0; +} + +SEC("raw_tp") +__success +int iter_nested_deeply_iters(const void *ctx) +{ + int sum = 0; + + MY_PID_GUARD(); + + bpf_repeat(10) { + bpf_repeat(10) { + bpf_repeat(10) { + bpf_repeat(10) { + bpf_repeat(10) { + sum += 1; + } + } + } + } + /* validate that we can break from inside bpf_repeat() */ + break; + } + + return sum; +} + +static __noinline void fill_inner_dimension(int row) +{ + int col; + + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { + arr2d[row][col] = row * col; + } +} + +static __noinline int sum_inner_dimension(int row) +{ + int sum = 0, col; + + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { + sum += arr2d[row][col]; + arr2d_row_sums[row] += arr2d[row][col]; + arr2d_col_sums[col] += arr2d[row][col]; + } + + return sum; +} + +SEC("raw_tp") +__success +int iter_subprog_iters(const void *ctx) +{ + int sum, row, col; + + MY_PID_GUARD(); + + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + fill_inner_dimension(row); + } + + /* zero-initialize sums */ + sum = 0; + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + arr2d_row_sums[row] = 0; + } + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { + arr2d_col_sums[col] = 0; + } + + /* calculate sums */ + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + sum += sum_inner_dimension(row); + } + + bpf_printk("ITER_SUBPROG_ITERS: total sum=%d", sum); + bpf_for(row, 0, ARRAY_SIZE(arr2d)) { + bpf_printk("ITER_SUBPROG_ITERS: row #%d sum=%d", + row, arr2d_row_sums[row]); + } + bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) { + bpf_printk("ITER_SUBPROG_ITERS: col #%d sum=%d%s", + col, arr2d_col_sums[col], + col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : ""); + } + + return 0; +} + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, int); + __uint(max_entries, 1000); +} arr_map SEC(".maps"); + +SEC("?raw_tp") +__failure __msg("invalid mem access 'scalar'") +int iter_err_too_permissive1(const void *ctx) +{ + int *map_val = NULL; + int key = 0; + + MY_PID_GUARD(); + + map_val = bpf_map_lookup_elem(&arr_map, &key); + if (!map_val) + return 0; + + bpf_repeat(1000000) { + map_val = NULL; + } + + *map_val = 123; + + return 0; +} + +SEC("?raw_tp") +__failure __msg("invalid mem access 'map_value_or_null'") +int iter_err_too_permissive2(const void *ctx) +{ + int *map_val = NULL; + int key = 0; + + MY_PID_GUARD(); + + map_val = bpf_map_lookup_elem(&arr_map, &key); + if (!map_val) + return 0; + + bpf_repeat(1000000) { + map_val = bpf_map_lookup_elem(&arr_map, &key); + } + + *map_val = 123; + + return 0; +} + +SEC("?raw_tp") +__failure __msg("invalid mem access 'map_value_or_null'") +int iter_err_too_permissive3(const void *ctx) +{ + int *map_val = NULL; + int key = 0; + bool found = false; + + MY_PID_GUARD(); + + bpf_repeat(1000000) { + map_val = bpf_map_lookup_elem(&arr_map, &key); + found = true; + } + + if (found) + *map_val = 123; + + return 0; +} + +SEC("raw_tp") +__success +int iter_tricky_but_fine(const void *ctx) +{ + int *map_val = NULL; + int key = 0; + bool found = false; + + MY_PID_GUARD(); + + bpf_repeat(1000000) { + map_val = bpf_map_lookup_elem(&arr_map, &key); + if (map_val) { + found = true; + break; + } + } + + if (found) + *map_val = 123; + + return 0; +} + +#define __bpf_memzero(p, sz) bpf_probe_read_kernel((p), (sz), 0) + +SEC("raw_tp") +__success +int iter_stack_array_loop(const void *ctx) +{ + long arr1[16], arr2[16], sum = 0; + int *v, i; + + MY_PID_GUARD(); + + /* zero-init arr1 and arr2 in such a way that verifier doesn't know + * it's all zeros; if we don't do that, we'll make BPF verifier track + * all combination of zero/non-zero 
stack slots for arr1/arr2, which + * will lead to O(2^(ARRAY_SIZE(arr1)+ARRAY_SIZE(arr2))) different + * states + */ + __bpf_memzero(arr1, sizeof(arr1)); + __bpf_memzero(arr2, sizeof(arr1)); + + /* validate that we can break and continue when using bpf_for() */ + bpf_for(i, 0, ARRAY_SIZE(arr1)) { + if (i & 1) { + arr1[i] = i; + continue; + } else { + arr2[i] = i; + break; + } + } + + bpf_for(i, 0, ARRAY_SIZE(arr1)) { + sum += arr1[i] + arr2[i]; + } + + return sum; +} + +static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul) +{ + int *t, i; + + while ((t = bpf_iter_num_next(it))) { + i = *t; + if (i >= n) + break; + arr[i] = i * mul; + } +} + +static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n) +{ + int *t, i, sum = 0;; + + while ((t = bpf_iter_num_next(it))) { + i = *t; + if (i >= n) + break; + sum += arr[i]; + } + + return sum; +} + +SEC("raw_tp") +__success +int iter_pass_iter_ptr_to_subprog(const void *ctx) +{ + int arr1[16], arr2[32]; + struct bpf_iter_num it; + int n, sum1, sum2; + + MY_PID_GUARD(); + + /* fill arr1 */ + n = ARRAY_SIZE(arr1); + bpf_iter_num_new(&it, 0, n); + fill(&it, arr1, n, 2); + bpf_iter_num_destroy(&it); + + /* fill arr2 */ + n = ARRAY_SIZE(arr2); + bpf_iter_num_new(&it, 0, n); + fill(&it, arr2, n, 10); + bpf_iter_num_destroy(&it); + + /* sum arr1 */ + n = ARRAY_SIZE(arr1); + bpf_iter_num_new(&it, 0, n); + sum1 = sum(&it, arr1, n); + bpf_iter_num_destroy(&it); + + /* sum arr2 */ + n = ARRAY_SIZE(arr2); + bpf_iter_num_new(&it, 0, n); + sum2 = sum(&it, arr2, n); + bpf_iter_num_destroy(&it); + + bpf_printk("sum1=%d, sum2=%d", sum1, sum2); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/iters_looping.c b/tools/testing/selftests/bpf/progs/iters_looping.c new file mode 100644 index 000000000000..05fa5ce7fc59 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_looping.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include +#include +#include +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +#define ITER_HELPERS \ + __imm(bpf_iter_num_new), \ + __imm(bpf_iter_num_next), \ + __imm(bpf_iter_num_destroy) + +SEC("?raw_tp") +__success +int force_clang_to_emit_btf_for_externs(void *ctx) +{ + /* we need this as a workaround to enforce compiler emitting BTF + * information for bpf_iter_num_{new,next,destroy}() kfuncs, + * as, apparently, it doesn't emit it for symbols only referenced from + * assembly (or cleanup attribute, for that matter, as well) + */ + bpf_repeat(0); + + return 0; +} + +SEC("?raw_tp") +__success +int consume_first_item_only(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* consume first item */ + "r1 = %[iter];" + "call %[bpf_iter_num_next];" + + "if r0 == 0 goto +1;" + "r0 = *(u32 *)(r0 + 0);" + + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("R0 invalid mem access 'scalar'") +int missing_null_check_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* consume first element */ + "r1 = %[iter];" + "call %[bpf_iter_num_next];" + + /* FAIL: deref with no NULL check */ + "r1 = *(u32 *)(r0 + 0);" + + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure +__msg("invalid access to memory, mem_size=4 off=0 size=8") +__msg("R0 min value is outside of the allowed memory range") +int wrong_sized_read_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* consume first element */ + "r1 = %[iter];" + "call %[bpf_iter_num_next];" + + "if r0 == 0 goto +1;" + /* FAIL: deref more than available 4 bytes */ + "r0 = *(u64 *)(r0 + 0);" + + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__success __log_level(2) +__flag(BPF_F_TEST_STATE_FREQ) +int simplest_loop(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + "r6 = 0;" /* init sum */ + + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + + "1:" + /* consume next item */ + "r1 = %[iter];" + "call %[bpf_iter_num_next];" + + "if r0 == 0 goto 2f;" + "r0 = *(u32 *)(r0 + 0);" + "r6 += r0;" /* accumulate sum */ + "goto 1b;" + + "2:" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common, "r6" + ); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/iters_state_safety.c b/tools/testing/selftests/bpf/progs/iters_state_safety.c new file mode 100644 index 000000000000..d47e59aba6de --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_state_safety.c @@ -0,0 +1,426 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Facebook */ + +#include +#include +#include +#include +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +#define ITER_HELPERS \ + __imm(bpf_iter_num_new), \ + __imm(bpf_iter_num_next), \ + 
__imm(bpf_iter_num_destroy) + +SEC("?raw_tp") +__success +int force_clang_to_emit_btf_for_externs(void *ctx) +{ + /* we need this as a workaround to enforce compiler emitting BTF + * information for bpf_iter_num_{new,next,destroy}() kfuncs, + * as, apparently, it doesn't emit it for symbols only referenced from + * assembly (or cleanup attribute, for that matter, as well) + */ + bpf_repeat(0); + + return 0; +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("fp-8_w=iter_num(ref_id=1,state=active,depth=0)") +int create_and_destroy(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("Unreleased reference id=1") +int create_and_forget_to_destroy_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("expected an initialized iter_num as arg #1") +int destroy_without_creating_fail(void *ctx) +{ + /* init with zeros to stop verifier complaining about uninit stack */ + struct bpf_iter_num iter; + + asm volatile ( + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("expected an initialized iter_num as arg #1") +int compromise_iter_w_direct_write_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* directly write over first half of iter state */ + "*(u64 *)(%[iter] + 0) = r0;" + + /* (attempt to) destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("Unreleased reference id=1") +int compromise_iter_w_direct_write_and_skip_destroy_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* directly write over first half of iter state */ + "*(u64 *)(%[iter] + 0) = r0;" + + /* don't destroy iter, leaking ref, which should fail */ + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("expected an initialized iter_num as arg #1") +int compromise_iter_w_helper_write_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* overwrite 8th byte with bpf_probe_read_kernel() */ + "r1 = %[iter];" + "r1 += 7;" + "r2 = 1;" + "r3 = 0;" /* NULL */ + "call %[bpf_probe_read_kernel];" + + /* (attempt to) destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS, __imm(bpf_probe_read_kernel) + : __clobber_common + ); + + return 0; +} + +static __noinline void subprog_with_iter(void) +{ + struct bpf_iter_num iter; + + bpf_iter_num_new(&iter, 0, 1); + + return; +} + +SEC("?raw_tp") +__failure +/* ensure there was a call to subprog, which might happen without __noinline */ +__msg("returning from callee:") 
+__msg("Unreleased reference id=1") +int leak_iter_from_subprog_fail(void *ctx) +{ + subprog_with_iter(); + + return 0; +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("fp-8_w=iter_num(ref_id=1,state=active,depth=0)") +int valid_stack_reuse(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + + /* now reuse same stack slots */ + + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("expected uninitialized iter_num as arg #1") +int double_create_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + /* (attempt to) create iterator again */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("expected an initialized iter_num as arg #1") +int double_destroy_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + /* (attempt to) destroy iterator again */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("expected an initialized iter_num as arg #1") +int next_without_new_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* don't create iterator and try to iterate*/ + "r1 = %[iter];" + "call %[bpf_iter_num_next];" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("expected an initialized iter_num as arg #1") +int next_after_destroy_fail(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* create iterator */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + /* destroy iterator */ + "r1 = %[iter];" + "call %[bpf_iter_num_destroy];" + /* don't create iterator and try to iterate*/ + "r1 = %[iter];" + "call %[bpf_iter_num_next];" + : + : __imm_ptr(iter), ITER_HELPERS + : __clobber_common + ); + + return 0; +} + +SEC("?raw_tp") +__failure __msg("invalid read from stack") +int __naked read_from_iter_slot_fail(void) +{ + asm volatile ( + /* r6 points to struct bpf_iter_num on the stack */ + "r6 = r10;" + "r6 += -24;" + + /* create iterator */ + "r1 = r6;" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* attemp to leak bpf_iter_num state */ + "r7 = *(u64 *)(r6 + 0);" + "r8 = *(u64 *)(r6 + 8);" + + /* destroy iterator */ + "r1 = r6;" + "call %[bpf_iter_num_destroy];" + + /* leak bpf_iter_num state */ + "r0 = r7;" + "if r7 > r8 goto +1;" + "r0 = r8;" + "exit;" + : + : ITER_HELPERS + : __clobber_common, "r6", "r7", "r8" + ); +} + +int zero; + +SEC("?raw_tp") +__failure +__flag(BPF_F_TEST_STATE_FREQ) +__msg("Unreleased reference") +int 
stacksafe_should_not_conflate_stack_spill_and_iter(void *ctx) +{ + struct bpf_iter_num iter; + + asm volatile ( + /* Create a fork in logic, with general setup as follows: + * - fallthrough (first) path is valid; + * - branch (second) path is invalid. + * Then depending on what we do in fallthrough vs branch path, + * we try to detect bugs in func_states_equal(), regsafe(), + * refsafe(), stack_safe(), and similar by tricking verifier + * into believing that branch state is a valid subset of + * a fallthrough state. Verifier should reject overall + * validation, unless there is a bug somewhere in verifier + * logic. + */ + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "call %[bpf_get_prandom_u32];" + "r7 = r0;" + + "if r6 > r7 goto bad;" /* fork */ + + /* spill r6 into stack slot of bpf_iter_num var */ + "*(u64 *)(%[iter] + 0) = r6;" + + "goto skip_bad;" + + "bad:" + /* create iterator in the same stack slot */ + "r1 = %[iter];" + "r2 = 0;" + "r3 = 1000;" + "call %[bpf_iter_num_new];" + + /* but then forget about it and overwrite it back to r6 spill */ + "*(u64 *)(%[iter] + 0) = r6;" + + "skip_bad:" + "goto +0;" /* force checkpoint */ + + /* corrupt stack slots, if they are really dynptr */ + "*(u64 *)(%[iter] + 0) = r6;" + : + : __imm_ptr(iter), + __imm_addr(zero), + __imm(bpf_get_prandom_u32), + __imm(bpf_dynptr_from_mem), + ITER_HELPERS + : __clobber_common, "r6", "r7" + ); + + return 0; +} -- cgit v1.2.3-70-g09d2 From f59b146092653bcf014ccdc9bd8bc94e79065ce3 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 8 Mar 2023 10:41:20 -0800 Subject: selftests/bpf: add number iterator tests Add number iterator (bpf_iter_num_{new,next,destroy}()) tests, validating the correct handling of various corner and common cases *at runtime*. Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230308184121.1165081-8-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/iters.c | 49 +++++ tools/testing/selftests/bpf/progs/iters_num.c | 242 +++++++++++++++++++++++++ 2 files changed, 291 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/iters_num.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/iters.c b/tools/testing/selftests/bpf/prog_tests/iters.c index 414fb8d82145..2e7caff9523e 100644 --- a/tools/testing/selftests/bpf/prog_tests/iters.c +++ b/tools/testing/selftests/bpf/prog_tests/iters.c @@ -6,10 +6,59 @@ #include "iters.skel.h" #include "iters_state_safety.skel.h" #include "iters_looping.skel.h" +#include "iters_num.skel.h" + +static void subtest_num_iters(void) +{ + struct iters_num *skel; + int err; + + skel = iters_num__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + err = iters_num__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + usleep(1); + iters_num__detach(skel); + +#define VALIDATE_CASE(case_name) \ + ASSERT_EQ(skel->bss->res_##case_name, \ + skel->rodata->exp_##case_name, \ + #case_name) + + VALIDATE_CASE(empty_zero); + VALIDATE_CASE(empty_int_min); + VALIDATE_CASE(empty_int_max); + VALIDATE_CASE(empty_minus_one); + + VALIDATE_CASE(simple_sum); + VALIDATE_CASE(neg_sum); + VALIDATE_CASE(very_neg_sum); + VALIDATE_CASE(neg_pos_sum); + + VALIDATE_CASE(invalid_range); + VALIDATE_CASE(max_range); + VALIDATE_CASE(e2big_range); + + VALIDATE_CASE(succ_elem_cnt); + VALIDATE_CASE(overfetched_elem_cnt); + VALIDATE_CASE(fail_elem_cnt); + +#undef VALIDATE_CASE + +cleanup: + iters_num__destroy(skel); +} void test_iters(void) { 
RUN_TESTS(iters_state_safety); RUN_TESTS(iters_looping); RUN_TESTS(iters); + + if (test__start_subtest("num")) + subtest_num_iters(); } diff --git a/tools/testing/selftests/bpf/progs/iters_num.c b/tools/testing/selftests/bpf/progs/iters_num.c new file mode 100644 index 000000000000..7a77a8daee0d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_num.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include "vmlinux.h" +#include +#include "bpf_misc.h" + +const volatile __s64 exp_empty_zero = 0 + 1; +__s64 res_empty_zero; + +SEC("raw_tp/sys_enter") +int num_empty_zero(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, 0, 0) sum += i; + res_empty_zero = 1 + sum; + + return 0; +} + +const volatile __s64 exp_empty_int_min = 0 + 2; +__s64 res_empty_int_min; + +SEC("raw_tp/sys_enter") +int num_empty_int_min(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, INT_MIN, INT_MIN) sum += i; + res_empty_int_min = 2 + sum; + + return 0; +} + +const volatile __s64 exp_empty_int_max = 0 + 3; +__s64 res_empty_int_max; + +SEC("raw_tp/sys_enter") +int num_empty_int_max(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, INT_MAX, INT_MAX) sum += i; + res_empty_int_max = 3 + sum; + + return 0; +} + +const volatile __s64 exp_empty_minus_one = 0 + 4; +__s64 res_empty_minus_one; + +SEC("raw_tp/sys_enter") +int num_empty_minus_one(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, -1, -1) sum += i; + res_empty_minus_one = 4 + sum; + + return 0; +} + +const volatile __s64 exp_simple_sum = 9 * 10 / 2; +__s64 res_simple_sum; + +SEC("raw_tp/sys_enter") +int num_simple_sum(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, 0, 10) sum += i; + res_simple_sum = sum; + + return 0; +} + +const volatile __s64 exp_neg_sum = -11 * 10 / 2; +__s64 res_neg_sum; + +SEC("raw_tp/sys_enter") +int num_neg_sum(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, -10, 0) sum += i; + res_neg_sum = sum; + + return 0; +} + +const volatile __s64 exp_very_neg_sum = INT_MIN + (__s64)(INT_MIN + 1); +__s64 res_very_neg_sum; + +SEC("raw_tp/sys_enter") +int num_very_neg_sum(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, INT_MIN, INT_MIN + 2) sum += i; + res_very_neg_sum = sum; + + return 0; +} + +const volatile __s64 exp_very_big_sum = (__s64)(INT_MAX - 1) + (__s64)(INT_MAX - 2); +__s64 res_very_big_sum; + +SEC("raw_tp/sys_enter") +int num_very_big_sum(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, INT_MAX - 2, INT_MAX) sum += i; + res_very_big_sum = sum; + + return 0; +} + +const volatile __s64 exp_neg_pos_sum = -3; +__s64 res_neg_pos_sum; + +SEC("raw_tp/sys_enter") +int num_neg_pos_sum(const void *ctx) +{ + __s64 sum = 0, i; + + bpf_for(i, -3, 3) sum += i; + res_neg_pos_sum = sum; + + return 0; +} + +const volatile __s64 exp_invalid_range = -EINVAL; +__s64 res_invalid_range; + +SEC("raw_tp/sys_enter") +int num_invalid_range(const void *ctx) +{ + struct bpf_iter_num it; + + res_invalid_range = bpf_iter_num_new(&it, 1, 0); + bpf_iter_num_destroy(&it); + + return 0; +} + +const volatile __s64 exp_max_range = 0 + 10; +__s64 res_max_range; + +SEC("raw_tp/sys_enter") +int num_max_range(const void *ctx) +{ + struct bpf_iter_num it; + + res_max_range = 10 + bpf_iter_num_new(&it, 0, BPF_MAX_LOOPS); + bpf_iter_num_destroy(&it); + + return 0; +} + +const volatile __s64 exp_e2big_range = -E2BIG; +__s64 res_e2big_range; + +SEC("raw_tp/sys_enter") +int num_e2big_range(const void *ctx) +{ + struct bpf_iter_num it; + + 
res_e2big_range = bpf_iter_num_new(&it, -1, BPF_MAX_LOOPS); + bpf_iter_num_destroy(&it); + + return 0; +} + +const volatile __s64 exp_succ_elem_cnt = 10; +__s64 res_succ_elem_cnt; + +SEC("raw_tp/sys_enter") +int num_succ_elem_cnt(const void *ctx) +{ + struct bpf_iter_num it; + int cnt = 0, *v; + + bpf_iter_num_new(&it, 0, 10); + while ((v = bpf_iter_num_next(&it))) { + cnt++; + } + bpf_iter_num_destroy(&it); + + res_succ_elem_cnt = cnt; + + return 0; +} + +const volatile __s64 exp_overfetched_elem_cnt = 5; +__s64 res_overfetched_elem_cnt; + +SEC("raw_tp/sys_enter") +int num_overfetched_elem_cnt(const void *ctx) +{ + struct bpf_iter_num it; + int cnt = 0, *v, i; + + bpf_iter_num_new(&it, 0, 5); + for (i = 0; i < 10; i++) { + v = bpf_iter_num_next(&it); + if (v) + cnt++; + } + bpf_iter_num_destroy(&it); + + res_overfetched_elem_cnt = cnt; + + return 0; +} + +const volatile __s64 exp_fail_elem_cnt = 20 + 0; +__s64 res_fail_elem_cnt; + +SEC("raw_tp/sys_enter") +int num_fail_elem_cnt(const void *ctx) +{ + struct bpf_iter_num it; + int cnt = 0, *v, i; + + bpf_iter_num_new(&it, 100, 10); + for (i = 0; i < 10; i++) { + v = bpf_iter_num_next(&it); + if (v) + cnt++; + } + bpf_iter_num_destroy(&it); + + res_fail_elem_cnt = 20 + cnt; + + return 0; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From 7e86a8c4ac8d5dcf7dd58f5a4779d1a6ff0a827d Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 8 Mar 2023 10:41:21 -0800 Subject: selftests/bpf: implement and test custom testmod_seq iterator Implement a trivial iterator returning same specified integer value N times as part of bpf_testmod kernel module. Add selftests to validate everything works end to end. We also reuse these tests as "verification-only" tests to validate that kernel prints the state of custom kernel module-defined iterator correctly: fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0) "testmod_seq" part is an iterator type, and is coming from module's BTF data dynamically at runtime. Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230308184121.1165081-9-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/DENYLIST.s390x | 1 + .../selftests/bpf/bpf_testmod/bpf_testmod.c | 42 +++++++++++- .../selftests/bpf/bpf_testmod/bpf_testmod.h | 6 ++ tools/testing/selftests/bpf/prog_tests/iters.c | 42 ++++++++++++ .../selftests/bpf/progs/iters_testmod_seq.c | 79 ++++++++++++++++++++++ 5 files changed, 169 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/bpf/progs/iters_testmod_seq.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x index a02a085e7f32..34cb8b2de8ca 100644 --- a/tools/testing/selftests/bpf/DENYLIST.s390x +++ b/tools/testing/selftests/bpf/DENYLIST.s390x @@ -8,6 +8,7 @@ dynptr/test_dynptr_skb_data dynptr/test_skb_readonly fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline) get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace) +iters/testmod_seq* # s390x doesn't support kfuncs in modules yet kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95 kprobe_multi_test # relies on fentry ksyms_module # test_ksyms_module__open_and_load unexpected error: -9 (?) 
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 46500636d8cd..5e6e85c8d77d 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -65,6 +65,34 @@ bpf_testmod_test_mod_kfunc(int i) *(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i; } +__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt) +{ + if (cnt < 0) { + it->cnt = 0; + return -EINVAL; + } + + it->value = value; + it->cnt = cnt; + + return 0; +} + +__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq* it) +{ + if (it->cnt <= 0) + return NULL; + + it->cnt--; + + return &it->value; +} + +__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it) +{ + it->cnt = 0; +} + struct bpf_testmod_btf_type_tag_1 { int a; }; @@ -220,6 +248,17 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { .write = bpf_testmod_test_write, }; +BTF_SET8_START(bpf_testmod_common_kfunc_ids) +BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW) +BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY) +BTF_SET8_END(bpf_testmod_common_kfunc_ids) + +static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = { + .owner = THIS_MODULE, + .set = &bpf_testmod_common_kfunc_ids, +}; + BTF_SET8_START(bpf_testmod_check_kfunc_ids) BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc) BTF_SET8_END(bpf_testmod_check_kfunc_ids) @@ -235,7 +274,8 @@ static int bpf_testmod_init(void) { int ret; - ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set); + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set); if (ret < 0) return ret; if (bpf_fentry_test1(0) < 0) diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h index 0d71e2607832..f32793efe095 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h @@ -22,4 +22,10 @@ struct bpf_testmod_test_writable_ctx { int val; }; +/* BPF iter that returns *value* *n* times in a row */ +struct bpf_iter_testmod_seq { + s64 value; + int cnt; +}; + #endif /* _BPF_TESTMOD_H */ diff --git a/tools/testing/selftests/bpf/prog_tests/iters.c b/tools/testing/selftests/bpf/prog_tests/iters.c index 2e7caff9523e..10804ae5ae97 100644 --- a/tools/testing/selftests/bpf/prog_tests/iters.c +++ b/tools/testing/selftests/bpf/prog_tests/iters.c @@ -7,6 +7,7 @@ #include "iters_state_safety.skel.h" #include "iters_looping.skel.h" #include "iters_num.skel.h" +#include "iters_testmod_seq.skel.h" static void subtest_num_iters(void) { @@ -53,12 +54,53 @@ cleanup: iters_num__destroy(skel); } +static void subtest_testmod_seq_iters(void) +{ + struct iters_testmod_seq *skel; + int err; + + if (!env.has_testmod) { + test__skip(); + return; + } + + skel = iters_testmod_seq__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open_and_load")) + return; + + err = iters_testmod_seq__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + usleep(1); + iters_testmod_seq__detach(skel); + +#define VALIDATE_CASE(case_name) \ + ASSERT_EQ(skel->bss->res_##case_name, \ + skel->rodata->exp_##case_name, \ + #case_name) + + VALIDATE_CASE(empty); + 
VALIDATE_CASE(full); + VALIDATE_CASE(truncated); + +#undef VALIDATE_CASE + +cleanup: + iters_testmod_seq__destroy(skel); +} + void test_iters(void) { RUN_TESTS(iters_state_safety); RUN_TESTS(iters_looping); RUN_TESTS(iters); + if (env.has_testmod) + RUN_TESTS(iters_testmod_seq); + if (test__start_subtest("num")) subtest_num_iters(); + if (test__start_subtest("testmod_seq")) + subtest_testmod_seq_iters(); } diff --git a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c new file mode 100644 index 000000000000..3873fb6c292a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include +#include "bpf_misc.h" + +struct bpf_iter_testmod_seq { + u64 :64; + u64 :64; +}; + +extern int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt) __ksym; +extern s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it) __ksym; +extern void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it) __ksym; + +const volatile __s64 exp_empty = 0 + 1; +__s64 res_empty; + +SEC("raw_tp/sys_enter") +__success __log_level(2) +__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)") +__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)") +__msg("call bpf_iter_testmod_seq_destroy") +int testmod_seq_empty(const void *ctx) +{ + __s64 sum = 0, *i; + + bpf_for_each(testmod_seq, i, 1000, 0) sum += *i; + res_empty = 1 + sum; + + return 0; +} + +const volatile __s64 exp_full = 1000000; +__s64 res_full; + +SEC("raw_tp/sys_enter") +__success __log_level(2) +__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)") +__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)") +__msg("call bpf_iter_testmod_seq_destroy") +int testmod_seq_full(const void *ctx) +{ + __s64 sum = 0, *i; + + bpf_for_each(testmod_seq, i, 1000, 1000) sum += *i; + res_full = sum; + + return 0; +} + +const volatile __s64 exp_truncated = 10 * 1000000; +__s64 res_truncated; + +static volatile int zero = 0; + +SEC("raw_tp/sys_enter") +__success __log_level(2) +__msg("fp-16_w=iter_testmod_seq(ref_id=1,state=active,depth=0)") +__msg("fp-16=iter_testmod_seq(ref_id=1,state=drained,depth=0)") +__msg("call bpf_iter_testmod_seq_destroy") +int testmod_seq_truncated(const void *ctx) +{ + __s64 sum = 0, *i; + int cnt = zero; + + bpf_for_each(testmod_seq, i, 10, 2000000) { + sum += *i; + cnt++; + if (cnt >= 1000000) + break; + } + res_truncated = sum; + + return 0; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From a6865576317f6249f3f83cf4c10ab56e627ee153 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 8 Mar 2023 22:02:44 -0800 Subject: selftests/bpf: Fix flaky fib_lookup test There is a report that fib_lookup test is flaky when running in parallel. A symptom of slowness or delay. An example: Testing IPv6 stale neigh set_lookup_params:PASS:inet_pton(IPV6_IFACE_ADDR) 0 nsec test_fib_lookup:PASS:bpf_prog_test_run_opts 0 nsec test_fib_lookup:FAIL:fib_lookup_ret unexpected fib_lookup_ret: actual 0 != expected 7 test_fib_lookup:FAIL:dmac not match unexpected dmac not match: actual 1 != expected 0 dmac expected 11:11:11:11:11:11 actual 00:00:00:00:00:00 [ Note that the "fib_lookup_ret unexpected fib_lookup_ret actual 0 ..." is reversed in terms of expected and actual value. Fixing in this patch also. 
] One possibility is the testing stale neigh entry was marked dead by the gc (in neigh_periodic_work). The default gc_stale_time sysctl is 60s. This patch increases it to 15 mins. It also: - fixes the reversed arg (actual vs expected) in one of the ASSERT_EQ test - removes the nodad command arg when adding v4 neigh entry which currently has a warning. Fixes: 168de0233586 ("selftests/bpf: Add bpf_fib_lookup test") Reported-by: Alexei Starovoitov Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230309060244.3242491-1-martin.lau@linux.dev --- tools/testing/selftests/bpf/prog_tests/fib_lookup.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c index 429393caf612..a1e712105811 100644 --- a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c @@ -54,11 +54,19 @@ static int setup_netns(void) SYS(fail, "ip link add veth1 type veth peer name veth2"); SYS(fail, "ip link set dev veth1 up"); + err = write_sysctl("/proc/sys/net/ipv4/neigh/veth1/gc_stale_time", "900"); + if (!ASSERT_OK(err, "write_sysctl(net.ipv4.neigh.veth1.gc_stale_time)")) + goto fail; + + err = write_sysctl("/proc/sys/net/ipv6/neigh/veth1/gc_stale_time", "900"); + if (!ASSERT_OK(err, "write_sysctl(net.ipv6.neigh.veth1.gc_stale_time)")) + goto fail; + SYS(fail, "ip addr add %s/64 dev veth1 nodad", IPV6_IFACE_ADDR); SYS(fail, "ip neigh add %s dev veth1 nud failed", IPV6_NUD_FAILED_ADDR); SYS(fail, "ip neigh add %s dev veth1 lladdr %s nud stale", IPV6_NUD_STALE_ADDR, DMAC); - SYS(fail, "ip addr add %s/24 dev veth1 nodad", IPV4_IFACE_ADDR); + SYS(fail, "ip addr add %s/24 dev veth1", IPV4_IFACE_ADDR); SYS(fail, "ip neigh add %s dev veth1 nud failed", IPV4_NUD_FAILED_ADDR); SYS(fail, "ip neigh add %s dev veth1 lladdr %s nud stale", IPV4_NUD_STALE_ADDR, DMAC); @@ -158,7 +166,7 @@ void test_fib_lookup(void) if (!ASSERT_OK(err, "bpf_prog_test_run_opts")) continue; - ASSERT_EQ(tests[i].expected_ret, skel->bss->fib_lookup_ret, + ASSERT_EQ(skel->bss->fib_lookup_ret, tests[i].expected_ret, "fib_lookup_ret"); ret = memcmp(tests[i].dmac, fib_params->dmac, sizeof(tests[i].dmac)); -- cgit v1.2.3-70-g09d2 From 27a36bc3cdd5e0420eea90762d69bea34daf97e1 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Thu, 9 Mar 2023 18:32:40 +0100 Subject: selftests/bpf: Use ifname instead of ifindex in XDP compliance test tool Rely on interface name instead of interface index in error messages or logs from XDP compliance test tool. 
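For context on the diff below: the argument parser already accepts either an interface name or a numeric ifindex, and the change additionally resolves the index back to a name via if_indextoname() so that later messages can print env.ifname. A minimal, self-contained sketch of that resolution pattern using only the standard <net/if.h> API (the struct and messages here are illustrative, not the tool's actual code):

  #include <net/if.h>     /* if_nametoindex(), if_indextoname(), IF_NAMESIZE */
  #include <stdio.h>
  #include <stdlib.h>

  /* Illustrative stand-in for the tool's env: keep both the index (what the
   * XDP/BPF APIs want) and the name (what humans want to see in errors).
   */
  struct iface {
          char name[IF_NAMESIZE];
          int index;
  };

  /* Accept either an interface name ("veth1") or a numeric ifindex ("2") and
   * fill in both fields, failing if neither interpretation resolves.
   */
  static int iface_resolve(const char *arg, struct iface *ifc)
  {
          ifc->index = if_nametoindex(arg);
          if (!ifc->index)
                  ifc->index = strtoul(arg, NULL, 0);
          if (!ifc->index || !if_indextoname(ifc->index, ifc->name))
                  return -1;
          return 0;
  }

  int main(int argc, char **argv)
  {
          struct iface ifc;

          if (argc < 2 || iface_resolve(argv[1], &ifc)) {
                  fprintf(stderr, "Bad interface index or name: %s\n",
                          argc < 2 ? "(none)" : argv[1]);
                  return 1;
          }
          printf("using device %s (ifindex %d)\n", ifc.name, ifc.index);
          return 0;
  }
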
Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/7dc5a8ff56c252b1a7ae29b059d0b2b1543c8b5d.1678382940.git.lorenzo@kernel.org --- tools/testing/selftests/bpf/xdp_features.c | 44 ++++++++++++++++++------------ 1 file changed, 27 insertions(+), 17 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/xdp_features.c b/tools/testing/selftests/bpf/xdp_features.c index fce12165213b..b060a0d24e44 100644 --- a/tools/testing/selftests/bpf/xdp_features.c +++ b/tools/testing/selftests/bpf/xdp_features.c @@ -25,6 +25,7 @@ static struct env { bool verbosity; + char ifname[IF_NAMESIZE]; int ifindex; bool is_tester; struct { @@ -179,7 +180,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) env.ifindex = if_nametoindex(arg); if (!env.ifindex) env.ifindex = strtoul(arg, NULL, 0); - if (!env.ifindex) { + if (!env.ifindex || !if_indextoname(env.ifindex, env.ifname)) { fprintf(stderr, "Bad interface index or name (%d): %s\n", errno, strerror(errno)); @@ -205,6 +206,7 @@ static void set_env_default(void) env.feature.drv_feature = NETDEV_XDP_ACT_NDO_XMIT; env.feature.action = -EINVAL; env.ifindex = -ENODEV; + strcpy(env.ifname, "unknown"); make_sockaddr(AF_INET6, "::ffff:127.0.0.1", DUT_CTRL_PORT, &env.dut_ctrl_addr, NULL); make_sockaddr(AF_INET6, "::ffff:127.0.0.1", DUT_ECHO_PORT, @@ -248,15 +250,18 @@ static int dut_run_echo_thread(pthread_t *t, int *sockfd) sockfd = start_reuseport_server(AF_INET6, SOCK_DGRAM, NULL, DUT_ECHO_PORT, 0, 1); if (!sockfd) { - fprintf(stderr, "Failed to create echo socket\n"); + fprintf(stderr, + "Failed creating data UDP socket on device %s\n", + env.ifname); return -errno; } /* start echo channel */ err = pthread_create(t, NULL, dut_echo_thread, sockfd); if (err) { - fprintf(stderr, "Failed creating dut_echo thread: %s\n", - strerror(-err)); + fprintf(stderr, + "Failed creating data UDP thread on device %s: %s\n", + env.ifname, strerror(-err)); free_fds(sockfd, 1); return -EINVAL; } @@ -320,9 +325,8 @@ static int dut_attach_xdp_prog(struct xdp_features *skel, int flags) err = bpf_xdp_attach(env.ifindex, bpf_program__fd(prog), flags, NULL); if (err) - fprintf(stderr, - "Failed to attach XDP program to ifindex %d\n", - env.ifindex); + fprintf(stderr, "Failed attaching XDP program to device %s\n", + env.ifname); return err; } @@ -358,13 +362,16 @@ static int dut_run(struct xdp_features *skel) sockfd = start_reuseport_server(AF_INET6, SOCK_STREAM, NULL, DUT_CTRL_PORT, 0, 1); if (!sockfd) { - fprintf(stderr, "Failed to create DUT socket\n"); + fprintf(stderr, + "Failed creating control socket on device %s\n", env.ifname); return -errno; } ctrl_sockfd = accept(*sockfd, (struct sockaddr *)&ctrl_addr, &addrlen); if (ctrl_sockfd < 0) { - fprintf(stderr, "Failed to accept connection on DUT socket\n"); + fprintf(stderr, + "Failed accepting connections on device %s control socket\n", + env.ifname); free_fds(sockfd, 1); return -errno; } @@ -422,8 +429,8 @@ static int dut_run(struct xdp_features *skel) &opts); if (err) { fprintf(stderr, - "Failed to query XDP cap for ifindex %d\n", - env.ifindex); + "Failed querying XDP cap for device %s\n", + env.ifname); goto end_thread; } @@ -540,7 +547,9 @@ static int send_echo_msg(void) sockfd = socket(AF_INET6, SOCK_DGRAM, 0); if (sockfd < 0) { - fprintf(stderr, "Failed to create echo socket\n"); + fprintf(stderr, + "Failed creating data UDP socket on device %s\n", + env.ifname); return -errno; } @@ -596,8 +605,8 @@ static int tester_run(struct 
xdp_features *skel) err = bpf_xdp_attach(env.ifindex, bpf_program__fd(prog), flags, NULL); if (err) { - fprintf(stderr, "Failed to attach XDP program to ifindex %d\n", - env.ifindex); + fprintf(stderr, "Failed attaching XDP program to device %s\n", + env.ifname); goto out; } @@ -653,7 +662,7 @@ int main(int argc, char **argv) return err; if (env.ifindex < 0) { - fprintf(stderr, "Invalid ifindex\n"); + fprintf(stderr, "Invalid device name %s\n", env.ifname); return -ENODEV; } @@ -684,11 +693,12 @@ int main(int argc, char **argv) if (env.is_tester) { /* Tester */ - fprintf(stdout, "Starting tester on device %d\n", env.ifindex); + fprintf(stdout, "Starting tester service on device %s\n", + env.ifname); err = tester_run(skel); } else { /* DUT */ - fprintf(stdout, "Starting DUT on device %d\n", env.ifindex); + fprintf(stdout, "Starting test on device %s\n", env.ifname); err = dut_run(skel); } -- cgit v1.2.3-70-g09d2 From c1cd734c1bb3f4d9db75c51c23306e29d8749783 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Thu, 9 Mar 2023 18:32:41 +0100 Subject: selftests/bpf: Improve error logs in XDP compliance test tool Improve some error logs reported in the XDP compliance test tool. Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/212fc5bd214ff706f6ef1acbe7272cf4d803ca9c.1678382940.git.lorenzo@kernel.org --- tools/testing/selftests/bpf/xdp_features.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/xdp_features.c b/tools/testing/selftests/bpf/xdp_features.c index b060a0d24e44..b449788fbd39 100644 --- a/tools/testing/selftests/bpf/xdp_features.c +++ b/tools/testing/selftests/bpf/xdp_features.c @@ -152,20 +152,26 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) case 'D': if (make_sockaddr(AF_INET6, arg, DUT_ECHO_PORT, &env.dut_addr, NULL)) { - fprintf(stderr, "Invalid DUT address: %s\n", arg); + fprintf(stderr, + "Invalid address assigned to the Device Under Test: %s\n", + arg); return ARGP_ERR_UNKNOWN; } break; case 'C': if (make_sockaddr(AF_INET6, arg, DUT_CTRL_PORT, &env.dut_ctrl_addr, NULL)) { - fprintf(stderr, "Invalid DUT CTRL address: %s\n", arg); + fprintf(stderr, + "Invalid address assigned to the Device Under Test: %s\n", + arg); return ARGP_ERR_UNKNOWN; } break; case 'T': if (make_sockaddr(AF_INET6, arg, 0, &env.tester_addr, NULL)) { - fprintf(stderr, "Invalid Tester address: %s\n", arg); + fprintf(stderr, + "Invalid address assigned to the Tester device: %s\n", + arg); return ARGP_ERR_UNKNOWN; } break; @@ -454,7 +460,8 @@ static int dut_run(struct xdp_features *skel) &key, sizeof(key), &val, sizeof(val), 0); if (err) { - fprintf(stderr, "bpf_map_lookup_elem failed\n"); + fprintf(stderr, + "bpf_map_lookup_elem failed (%d)\n", err); goto end_thread; } @@ -496,7 +503,7 @@ static bool tester_collect_detected_cap(struct xdp_features *skel, err = bpf_map__lookup_elem(skel->maps.stats, &key, sizeof(key), &val, sizeof(val), 0); if (err) { - fprintf(stderr, "bpf_map_lookup_elem failed\n"); + fprintf(stderr, "bpf_map_lookup_elem failed (%d)\n", err); return false; } @@ -574,7 +581,8 @@ static int tester_run(struct xdp_features *skel) sockfd = socket(AF_INET6, SOCK_STREAM, 0); if (sockfd < 0) { - fprintf(stderr, "Failed to create tester socket\n"); + fprintf(stderr, + "Failed creating tester service control socket\n"); return -errno; } @@ -584,7 +592,8 @@ static int tester_run(struct xdp_features *skel) err = connect(sockfd, 
(struct sockaddr *)&env.dut_ctrl_addr, sizeof(env.dut_ctrl_addr)); if (err) { - fprintf(stderr, "Failed to connect to the DUT\n"); + fprintf(stderr, + "Failed connecting to the Device Under Test control socket\n"); return -errno; } -- cgit v1.2.3-70-g09d2 From 63d78b7e8ca2d0eb8c687a355fa19d01b6fcc723 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 9 Mar 2023 17:24:10 -0800 Subject: selftests/bpf: Workaround verification failure for fexit_bpf2bpf/func_replace_return_code With latest llvm17, selftest fexit_bpf2bpf/func_replace_return_code has the following verification failure: 0: R1=ctx(off=0,imm=0) R10=fp0 ; int connect_v4_prog(struct bpf_sock_addr *ctx) 0: (bf) r7 = r1 ; R1=ctx(off=0,imm=0) R7_w=ctx(off=0,imm=0) 1: (b4) w6 = 0 ; R6_w=0 ; memset(&tuple.ipv4.saddr, 0, sizeof(tuple.ipv4.saddr)); ... ; return do_bind(ctx) ? 1 : 0; 179: (bf) r1 = r7 ; R1=ctx(off=0,imm=0) R7=ctx(off=0,imm=0) 180: (85) call pc+147 Func#3 is global and valid. Skipping. 181: R0_w=scalar() 181: (bc) w6 = w0 ; R0_w=scalar() R6_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) 182: (05) goto pc-129 ; } 54: (bc) w0 = w6 ; R0_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) R6_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) 55: (95) exit At program exit the register R0 has value (0x0; 0xffffffff) should have been in (0x0; 0x1) processed 281 insns (limit 1000000) max_states_per_insn 1 total_states 26 peak_states 26 mark_read 13 -- END PROG LOAD LOG -- libbpf: prog 'connect_v4_prog': failed to load: -22 The corresponding source code: __attribute__ ((noinline)) int do_bind(struct bpf_sock_addr *ctx) { struct sockaddr_in sa = {}; sa.sin_family = AF_INET; sa.sin_port = bpf_htons(0); sa.sin_addr.s_addr = bpf_htonl(SRC_REWRITE_IP4); if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0) return 0; return 1; } ... SEC("cgroup/connect4") int connect_v4_prog(struct bpf_sock_addr *ctx) { ... return do_bind(ctx) ? 1 : 0; } Insn 180 is a call to 'do_bind'. The call's return value is also the return value for the program. Since do_bind() returns 0/1, so it is legitimate for compiler to optimize 'return do_bind(ctx) ? 1 : 0' to 'return do_bind(ctx)'. However, such optimization breaks verifier as the return value of 'do_bind()' is marked as any scalar which violates the requirement of prog return value 0/1. There are two ways to fix this problem, (1) changing 'return 1' in do_bind() to e.g. 'return 10' so the compiler has to do 'do_bind(ctx) ? 1 :0', or (2) suggested by Andrii, marking do_bind() with __weak attribute so the compiler cannot make any assumption on do_bind() return value. This patch adopted adding __weak approach which is simpler and more resistant to potential compiler optimizations. 
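A minimal, self-contained sketch of the resulting shape (the program below is simplified and illustrative, not the actual connect4_prog.c):

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  /* Marking the callee __weak means the compiler must assume it can be
   * overridden at link time, so it cannot conclude "returns only 0 or 1"
   * and fold the caller's "helper(ctx) ? 1 : 0" down to "helper(ctx)".
   */
  __attribute__((noinline)) __weak
  int helper(struct bpf_sock_addr *ctx)
  {
          return ctx->user_family == 2 /* AF_INET */ ? 1 : 0;
  }

  SEC("cgroup/connect4")
  int caller(struct bpf_sock_addr *ctx)
  {
          /* The explicit ?: survives, so the verifier still sees a return
           * value bounded to [0, 1], as cgroup/connect4 programs require.
           */
          return helper(ctx) ? 1 : 0;
  }

  char _license[] SEC("license") = "GPL";
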
Suggested-by: Andrii Nakryiko Signed-off-by: Yonghong Song Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230310012410.2920570-1-yhs@fb.com --- tools/testing/selftests/bpf/progs/connect4_prog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c index ec25371de789..7ef49ec04838 100644 --- a/tools/testing/selftests/bpf/progs/connect4_prog.c +++ b/tools/testing/selftests/bpf/progs/connect4_prog.c @@ -32,7 +32,7 @@ #define IFNAMSIZ 16 #endif -__attribute__ ((noinline)) +__attribute__ ((noinline)) __weak int do_bind(struct bpf_sock_addr *ctx) { struct sockaddr_in sa = {}; -- cgit v1.2.3-70-g09d2 From 2498e6231bfd44f8f85afbc838b37441551a4028 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 8 Mar 2023 21:40:12 -0800 Subject: selftests/bpf: prevent unused variable warning in bpf_for() Add __attribute__((unused)) to inner __p variable inside bpf_for(), bpf_for_each(), and bpf_repeat() macros to avoid compiler warnings about unused variable. Reported-by: Tejun Heo Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230309054015.4068562-2-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/bpf_misc.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 43b154a639e7..c95eb603403c 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -115,7 +115,8 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym; struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \ cleanup(bpf_iter_##type##_destroy))), \ /* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \ - *___p = (bpf_iter_##type##_new(&___it, ##args), \ + *___p __attribute__((unused)) = ( \ + bpf_iter_##type##_new(&___it, ##args), \ /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ /* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \ (void)bpf_iter_##type##_destroy, (void *)0); \ @@ -143,7 +144,8 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym; struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ cleanup(bpf_iter_num_destroy))), \ /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ - *___p = (bpf_iter_num_new(&___it, (start), (end)), \ + *___p __attribute__((unused)) = ( \ + bpf_iter_num_new(&___it, (start), (end)), \ /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ (void)bpf_iter_num_destroy, (void *)0); \ @@ -167,7 +169,8 @@ extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym; struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ cleanup(bpf_iter_num_destroy))), \ /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ - *___p = (bpf_iter_num_new(&___it, 0, (N)), \ + *___p __attribute__((unused)) = ( \ + bpf_iter_num_new(&___it, 0, (N)), \ /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ (void)bpf_iter_num_destroy, (void *)0); \ -- cgit v1.2.3-70-g09d2 From 
713461b895ef958ef444b00cc2d979f3ca3a82e2 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 8 Mar 2023 21:40:13 -0800 Subject: selftests/bpf: add __sink() macro to fake variable consumption Add __sink(expr) macro that forces compiler to believe that passed in expression is both read and written. It used a simple embedded asm for this. This is useful in a lot of tests where we assign value to some variable to trigger some action, but later don't read variable, causing compiler to complain (if corresponding compiler warnings are turned on, which we'll do in the next patch). Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230309054015.4068562-3-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/bpf_misc.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index c95eb603403c..3c03ec8056ce 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -76,6 +76,9 @@ #define FUNC_REG_ARG_CNT 5 #endif +/* make it look to compiler like value is read and written */ +#define __sink(expr) asm volatile("" : "+g"(expr)) + struct bpf_iter_num; extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym; -- cgit v1.2.3-70-g09d2 From c8ed66859397237c649998c58a68a86b8ea5f417 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 8 Mar 2023 21:40:14 -0800 Subject: selftests/bpf: fix lots of silly mistakes pointed out by compiler Once we enable -Wall for BPF sources, compiler will complain about lots of unused variables, variables that are set but never read, etc. Fix all these issues first before enabling -Wall in Makefile. 
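Many of the hunks below use the __sink() macro from the previous patch to keep values that exist only for their side effects (an otherwise-unused acquire, a stack buffer sized for the test) from tripping the new warnings. A minimal sketch of that usage pattern, mirroring the tailcall and global-func hunks further down (the surrounding program is illustrative):

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  /* Same definition as the one added to bpf_misc.h: an empty asm with a
   * "+g" constraint tells the compiler the value is both read and written,
   * so it counts as consumed once -Wall is enabled.
   */
  #define __sink(expr) asm volatile("" : "+g"(expr))

  SEC("tc")
  int keep_stack_buffer(struct __sk_buff *skb)
  {
          /* Buffer exists only to consume stack space in the test. */
          volatile char buf[128] = {};

          __sink(buf[sizeof(buf) - 1]);

          return skb->len;
  }

  char _license[] SEC("license") = "GPL";
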
Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230309054015.4068562-4-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/bpf_iter_ksym.c | 1 - .../selftests/bpf/progs/bpf_iter_setsockopt.c | 1 - tools/testing/selftests/bpf/progs/bpf_loop.c | 2 - tools/testing/selftests/bpf/progs/cb_refs.c | 1 - .../bpf/progs/cgroup_skb_sk_lookup_kern.c | 1 - .../selftests/bpf/progs/cgrp_kfunc_failure.c | 1 + .../selftests/bpf/progs/cgrp_ls_attach_cgroup.c | 1 - .../selftests/bpf/progs/cgrp_ls_sleepable.c | 1 - tools/testing/selftests/bpf/progs/core_kern.c | 2 +- .../testing/selftests/bpf/progs/cpumask_failure.c | 3 ++ .../testing/selftests/bpf/progs/cpumask_success.c | 1 - tools/testing/selftests/bpf/progs/dynptr_fail.c | 5 ++- tools/testing/selftests/bpf/progs/dynptr_success.c | 5 +-- tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c | 2 - .../selftests/bpf/progs/freplace_attach_probe.c | 2 +- tools/testing/selftests/bpf/progs/iters.c | 11 +++--- tools/testing/selftests/bpf/progs/linked_funcs1.c | 3 ++ tools/testing/selftests/bpf/progs/linked_funcs2.c | 3 ++ tools/testing/selftests/bpf/progs/linked_list.c | 4 -- .../testing/selftests/bpf/progs/linked_list_fail.c | 1 - tools/testing/selftests/bpf/progs/local_storage.c | 1 - tools/testing/selftests/bpf/progs/map_kptr.c | 3 -- tools/testing/selftests/bpf/progs/netcnt_prog.c | 1 - .../selftests/bpf/progs/netif_receive_skb.c | 1 - tools/testing/selftests/bpf/progs/perfbuf_bench.c | 1 - tools/testing/selftests/bpf/progs/pyperf.h | 2 +- .../bpf/progs/rbtree_btf_fail__wrong_node_type.c | 11 ------ tools/testing/selftests/bpf/progs/rbtree_fail.c | 3 +- tools/testing/selftests/bpf/progs/rcu_read_lock.c | 4 -- .../bpf/progs/read_bpf_task_storage_busy.c | 1 - tools/testing/selftests/bpf/progs/recvmsg4_prog.c | 2 - tools/testing/selftests/bpf/progs/recvmsg6_prog.c | 2 - tools/testing/selftests/bpf/progs/sendmsg4_prog.c | 2 - .../selftests/bpf/progs/sockmap_verdict_prog.c | 4 ++ tools/testing/selftests/bpf/progs/strobemeta.h | 1 - .../selftests/bpf/progs/tailcall_bpf2bpf3.c | 11 ++++++ .../selftests/bpf/progs/tailcall_bpf2bpf6.c | 3 ++ .../selftests/bpf/progs/task_kfunc_failure.c | 1 + .../selftests/bpf/progs/task_kfunc_success.c | 6 --- tools/testing/selftests/bpf/progs/test_bpf_nf.c | 1 - .../selftests/bpf/progs/test_cls_redirect_dynptr.c | 1 - .../bpf/progs/test_core_reloc_bitfields_probed.c | 1 - .../selftests/bpf/progs/test_global_func1.c | 4 ++ .../selftests/bpf/progs/test_global_func2.c | 4 ++ .../selftests/bpf/progs/test_hash_large_key.c | 2 +- .../bpf/progs/test_ksyms_btf_write_check.c | 1 - .../selftests/bpf/progs/test_legacy_printk.c | 2 +- tools/testing/selftests/bpf/progs/test_map_lock.c | 2 +- tools/testing/selftests/bpf/progs/test_obj_id.c | 2 + .../selftests/bpf/progs/test_parse_tcp_hdr_opt.c | 1 - .../bpf/progs/test_parse_tcp_hdr_opt_dynptr.c | 2 +- .../testing/selftests/bpf/progs/test_pkt_access.c | 5 +++ tools/testing/selftests/bpf/progs/test_ringbuf.c | 1 - .../selftests/bpf/progs/test_ringbuf_map_key.c | 1 + .../selftests/bpf/progs/test_ringbuf_multi.c | 1 - .../bpf/progs/test_select_reuseport_kern.c | 2 +- tools/testing/selftests/bpf/progs/test_sk_assign.c | 4 +- tools/testing/selftests/bpf/progs/test_sk_lookup.c | 9 +---- .../selftests/bpf/progs/test_sk_lookup_kern.c | 2 - .../testing/selftests/bpf/progs/test_sock_fields.c | 2 +- .../selftests/bpf/progs/test_sockmap_kern.h | 14 +++++-- tools/testing/selftests/bpf/progs/test_spin_lock.c | 3 ++ 
tools/testing/selftests/bpf/progs/test_tc_dtime.c | 4 +- tools/testing/selftests/bpf/progs/test_tc_neigh.c | 4 +- .../testing/selftests/bpf/progs/test_tcpbpf_kern.c | 2 - .../testing/selftests/bpf/progs/test_tunnel_kern.c | 6 --- .../selftests/bpf/progs/test_usdt_multispec.c | 2 - .../selftests/bpf/progs/test_verif_scale1.c | 2 +- .../selftests/bpf/progs/test_verif_scale2.c | 2 +- .../selftests/bpf/progs/test_verif_scale3.c | 2 +- .../bpf/progs/test_xdp_adjust_tail_grow.c | 2 - .../testing/selftests/bpf/progs/test_xdp_bpf2bpf.c | 2 - .../testing/selftests/bpf/progs/test_xdp_dynptr.c | 2 - .../selftests/bpf/progs/test_xdp_noinline.c | 43 ---------------------- tools/testing/selftests/bpf/progs/test_xdp_vlan.c | 13 ------- tools/testing/selftests/bpf/progs/type_cast.c | 1 - tools/testing/selftests/bpf/progs/udp_limit.c | 2 - .../selftests/bpf/progs/user_ringbuf_success.c | 6 --- tools/testing/selftests/bpf/progs/xdp_features.c | 1 - tools/testing/selftests/bpf/progs/xdping_kern.c | 2 - tools/testing/selftests/bpf/progs/xdpwall.c | 1 - 81 files changed, 90 insertions(+), 187 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c index 9ba14c37bbcc..5ddcc46fd886 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_ksym.c @@ -33,7 +33,6 @@ int dump_ksym(struct bpf_iter__ksym *ctx) __u32 seq_num = ctx->meta->seq_num; unsigned long value; char type; - int ret; if (!iter) return 0; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c index b77adfd55d73..ec7f91850dec 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c @@ -42,7 +42,6 @@ int change_tcp_cc(struct bpf_iter__tcp *ctx) char cur_cc[TCP_CA_NAME_MAX]; struct tcp_sock *tp; struct sock *sk; - int ret; if (!bpf_tcp_sk(ctx->sk_common)) return 0; diff --git a/tools/testing/selftests/bpf/progs/bpf_loop.c b/tools/testing/selftests/bpf/progs/bpf_loop.c index de1fc82d2710..1d194455b109 100644 --- a/tools/testing/selftests/bpf/progs/bpf_loop.c +++ b/tools/testing/selftests/bpf/progs/bpf_loop.c @@ -138,8 +138,6 @@ static int callback_set_0f(int i, void *ctx) SEC("fentry/" SYS_PREFIX "sys_nanosleep") int prog_non_constant_callback(void *ctx) { - struct callback_ctx data = {}; - if (bpf_get_current_pid_tgid() >> 32 != pid) return 0; diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c index ce96b33e38d6..50f95ec61165 100644 --- a/tools/testing/selftests/bpf/progs/cb_refs.c +++ b/tools/testing/selftests/bpf/progs/cb_refs.c @@ -52,7 +52,6 @@ int leak_prog(void *ctx) { struct prog_test_ref_kfunc *p; struct map_value *v; - unsigned long sl; v = bpf_map_lookup_elem(&array_map, &(int){0}); if (!v) diff --git a/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c index 88638315c582..ac86a8a61605 100644 --- a/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c +++ b/tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c @@ -66,7 +66,6 @@ static inline int is_allowed_peer_cg(struct __sk_buff *skb, SEC("cgroup_skb/ingress") int ingress_lookup(struct __sk_buff *skb) { - __u32 serv_port_key = 0; struct ipv6hdr ip6h; struct tcphdr tcph; diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c 
b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c index b42291ed9586..807fb0ac41e9 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c @@ -109,6 +109,7 @@ int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *pat acquired = bpf_cgroup_acquire(cgrp); /* Acquired cgroup is never released. */ + __sink(acquired); return 0; } diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c b/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c index 6652d18465b2..8aeba1b75c83 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c +++ b/tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c @@ -84,7 +84,6 @@ int BPF_PROG(update_cookie_tracing, struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct socket_cookie *p; - struct tcp_sock *tcp_sk; if (uaddr->sa_family != AF_INET6) return 0; diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c index 7615dc23d301..4c7844e1dbfa 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c +++ b/tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c @@ -24,7 +24,6 @@ void bpf_rcu_read_unlock(void) __ksym; SEC("?iter.s/cgroup") int cgroup_iter(struct bpf_iter__cgroup *ctx) { - struct seq_file *seq = ctx->meta->seq; struct cgroup *cgrp = ctx->cgroup; long *ptr; diff --git a/tools/testing/selftests/bpf/progs/core_kern.c b/tools/testing/selftests/bpf/progs/core_kern.c index 2715fe27d4cf..004f2acef2eb 100644 --- a/tools/testing/selftests/bpf/progs/core_kern.c +++ b/tools/testing/selftests/bpf/progs/core_kern.c @@ -77,7 +77,7 @@ int balancer_ingress(struct __sk_buff *ctx) void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; void *ptr; - int ret = 0, nh_off, i = 0; + int nh_off, i = 0; nh_off = 14; diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c index c16f7563b84e..cfe83f0ef9e2 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_failure.c +++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c @@ -23,6 +23,7 @@ int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags) struct bpf_cpumask *cpumask; cpumask = create_cpumask(); + __sink(cpumask); /* cpumask is never released. */ return 0; @@ -51,6 +52,7 @@ int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_fla /* Can't acquire a non-struct bpf_cpumask. */ cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr); + __sink(cpumask); return 0; } @@ -63,6 +65,7 @@ int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags) /* Can't set the CPU of a non-struct bpf_cpumask. 
*/ bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr); + __sink(cpumask); return 0; } diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c index 1d38bc65d4b0..97ed08c4ff03 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_success.c +++ b/tools/testing/selftests/bpf/progs/cpumask_success.c @@ -353,7 +353,6 @@ SEC("tp_btf/task_newtask") int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags) { struct bpf_cpumask *cpumask; - struct __cpumask_map_value *v; cpumask = create_cpumask(); if (!cpumask) diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 20ce920d891d..759eb5c245cd 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -271,7 +271,7 @@ SEC("?raw_tp") __failure __msg("value is outside of the allowed memory range") int data_slice_out_of_bounds_map_value(void *ctx) { - __u32 key = 0, map_val; + __u32 map_val; struct bpf_dynptr ptr; void *data; @@ -388,7 +388,6 @@ int data_slice_missing_null_check2(void *ctx) /* this should fail */ *data2 = 3; -done: bpf_ringbuf_discard_dynptr(&ptr, 0); return 0; } @@ -440,6 +439,7 @@ int invalid_write1(void *ctx) /* this should fail */ data = bpf_dynptr_data(&ptr, 0, 1); + __sink(data); return 0; } @@ -1374,6 +1374,7 @@ int invalid_slice_rdwr_rdonly(struct __sk_buff *skb) * changing packet data */ hdr = bpf_dynptr_slice_rdwr(&ptr, 0, buffer, sizeof(buffer)); + __sink(hdr); return 0; } diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c index c8358a7c7924..b2fa6c47ecc0 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_success.c +++ b/tools/testing/selftests/bpf/progs/dynptr_success.c @@ -35,7 +35,7 @@ SEC("?tp/syscalls/sys_enter_nanosleep") int test_read_write(void *ctx) { char write_data[64] = "hello there, world!!"; - char read_data[64] = {}, buf[64] = {}; + char read_data[64] = {}; struct bpf_dynptr ptr; int i; @@ -170,7 +170,6 @@ int test_skb_readonly(struct __sk_buff *skb) { __u8 write_data[2] = {1, 2}; struct bpf_dynptr ptr; - __u64 *data; int ret; if (bpf_dynptr_from_skb(skb, 0, &ptr)) { @@ -191,10 +190,8 @@ int test_skb_readonly(struct __sk_buff *skb) SEC("?cgroup_skb/egress") int test_dynptr_skb_data(struct __sk_buff *skb) { - __u8 write_data[2] = {1, 2}; struct bpf_dynptr ptr; __u64 *data; - int ret; if (bpf_dynptr_from_skb(skb, 0, &ptr)) { err = 1; diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c index 4547b059d487..983b7c233382 100644 --- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c @@ -120,8 +120,6 @@ int new_get_skb_ifindex(int val, struct __sk_buff *skb, int var) void *data = (void *)(long)skb->data; struct ipv6hdr ip6, *ip6p; int ifindex = skb->ifindex; - __u32 eth_proto; - __u32 nh_off; /* check that BPF extension can read packet via direct packet access */ if (data + 14 + sizeof(ip6) > data_end) diff --git a/tools/testing/selftests/bpf/progs/freplace_attach_probe.c b/tools/testing/selftests/bpf/progs/freplace_attach_probe.c index bb2a77c5b62b..370a0e1922e0 100644 --- a/tools/testing/selftests/bpf/progs/freplace_attach_probe.c +++ b/tools/testing/selftests/bpf/progs/freplace_attach_probe.c @@ -23,7 +23,7 @@ struct { SEC("freplace/handle_kprobe") int new_handle_kprobe(struct pt_regs *ctx) { - struct hmap_elem zero = {}, 
*val; + struct hmap_elem *val; int key = 0; val = bpf_map_lookup_elem(&hash_map, &key); diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index 84e5dc10243c..6b9b3c56f009 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -45,7 +45,6 @@ __failure __msg("unbounded memory access") int iter_err_unsafe_asm_loop(const void *ctx) { struct bpf_iter_num it; - int *v, i = 0; MY_PID_GUARD(); @@ -88,7 +87,7 @@ __success int iter_while_loop(const void *ctx) { struct bpf_iter_num it; - int *v, i; + int *v; MY_PID_GUARD(); @@ -106,7 +105,7 @@ __success int iter_while_loop_auto_cleanup(const void *ctx) { __attribute__((cleanup(bpf_iter_num_destroy))) struct bpf_iter_num it; - int *v, i; + int *v; MY_PID_GUARD(); @@ -124,7 +123,7 @@ __success int iter_for_loop(const void *ctx) { struct bpf_iter_num it; - int *v, i; + int *v; MY_PID_GUARD(); @@ -192,7 +191,7 @@ __success int iter_manual_unroll_loop(const void *ctx) { struct bpf_iter_num it; - int *v, i; + int *v; MY_PID_GUARD(); @@ -621,7 +620,7 @@ __success int iter_stack_array_loop(const void *ctx) { long arr1[16], arr2[16], sum = 0; - int *v, i; + int i; MY_PID_GUARD(); diff --git a/tools/testing/selftests/bpf/progs/linked_funcs1.c b/tools/testing/selftests/bpf/progs/linked_funcs1.c index b05571bc67d5..c4b49ceea967 100644 --- a/tools/testing/selftests/bpf/progs/linked_funcs1.c +++ b/tools/testing/selftests/bpf/progs/linked_funcs1.c @@ -5,6 +5,7 @@ #include #include #include +#include "bpf_misc.h" /* weak and shared between two files */ const volatile int my_tid __weak; @@ -51,6 +52,7 @@ __weak int set_output_weak(int x) * cause problems for BPF static linker */ whatever = bpf_core_type_size(struct task_struct); + __sink(whatever); output_weak1 = x; return x; @@ -71,6 +73,7 @@ int BPF_PROG(handler1, struct pt_regs *regs, long id) /* make sure we have CO-RE relocations in main program */ whatever = bpf_core_type_size(struct task_struct); + __sink(whatever); set_output_val2(1000); set_output_ctx2(ctx); /* ctx definition is hidden in BPF_PROG macro */ diff --git a/tools/testing/selftests/bpf/progs/linked_funcs2.c b/tools/testing/selftests/bpf/progs/linked_funcs2.c index ee7e3848ee4f..013ff0645f0c 100644 --- a/tools/testing/selftests/bpf/progs/linked_funcs2.c +++ b/tools/testing/selftests/bpf/progs/linked_funcs2.c @@ -5,6 +5,7 @@ #include #include #include +#include "bpf_misc.h" /* weak and shared between both files */ const volatile int my_tid __weak; @@ -51,6 +52,7 @@ __weak int set_output_weak(int x) * cause problems for BPF static linker */ whatever = 2 * bpf_core_type_size(struct task_struct); + __sink(whatever); output_weak2 = x; return 2 * x; @@ -71,6 +73,7 @@ int BPF_PROG(handler2, struct pt_regs *regs, long id) /* make sure we have CO-RE relocations in main program */ whatever = bpf_core_type_size(struct task_struct); + __sink(whatever); set_output_val1(2000); set_output_ctx1(ctx); /* ctx definition is hidden in BPF_PROG macro */ diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c index 4fa4a9b01bde..53ded51a3abb 100644 --- a/tools/testing/selftests/bpf/progs/linked_list.c +++ b/tools/testing/selftests/bpf/progs/linked_list.c @@ -313,7 +313,6 @@ SEC("tc") int map_list_push_pop_multiple(void *ctx) { struct map_value *v; - int ret; v = bpf_map_lookup_elem(&array_map, &(int){0}); if (!v) @@ -326,7 +325,6 @@ int inner_map_list_push_pop_multiple(void *ctx) { struct map_value *v; void *map; - 
int ret; map = bpf_map_lookup_elem(&map_of_maps, &(int){0}); if (!map) @@ -352,7 +350,6 @@ SEC("tc") int map_list_in_list(void *ctx) { struct map_value *v; - int ret; v = bpf_map_lookup_elem(&array_map, &(int){0}); if (!v) @@ -365,7 +362,6 @@ int inner_map_list_in_list(void *ctx) { struct map_value *v; void *map; - int ret; map = bpf_map_lookup_elem(&map_of_maps, &(int){0}); if (!map) diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c index 69cdc07cba13..41978b46f58e 100644 --- a/tools/testing/selftests/bpf/progs/linked_list_fail.c +++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c @@ -557,7 +557,6 @@ SEC("?tc") int incorrect_head_off2(void *ctx) { struct foo *f; - struct bar *b; f = bpf_obj_new(typeof(*f)); if (!f) diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c index 19423ed862e3..01c74bc870ae 100644 --- a/tools/testing/selftests/bpf/progs/local_storage.c +++ b/tools/testing/selftests/bpf/progs/local_storage.c @@ -77,7 +77,6 @@ int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { - __u32 pid = bpf_get_current_pid_tgid() >> 32; struct local_storage *storage; int err; diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c index 3903d30217b8..dae5dab1bbf7 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr.c +++ b/tools/testing/selftests/bpf/progs/map_kptr.c @@ -515,7 +515,6 @@ int test_ls_map_kptr_ref1(void *ctx) { struct task_struct *current; struct map_value *v; - int ret; current = bpf_get_current_task_btf(); if (!current) @@ -534,7 +533,6 @@ int test_ls_map_kptr_ref2(void *ctx) { struct task_struct *current; struct map_value *v; - int ret; current = bpf_get_current_task_btf(); if (!current) @@ -550,7 +548,6 @@ int test_ls_map_kptr_ref_del(void *ctx) { struct task_struct *current; struct map_value *v; - int ret; current = bpf_get_current_task_btf(); if (!current) diff --git a/tools/testing/selftests/bpf/progs/netcnt_prog.c b/tools/testing/selftests/bpf/progs/netcnt_prog.c index f718b2c212dc..f9ef8aee56f1 100644 --- a/tools/testing/selftests/bpf/progs/netcnt_prog.c +++ b/tools/testing/selftests/bpf/progs/netcnt_prog.c @@ -26,7 +26,6 @@ SEC("cgroup/skb") int bpf_nextcnt(struct __sk_buff *skb) { union percpu_net_cnt *percpu_cnt; - char fmt[] = "%d %llu %llu\n"; union net_cnt *cnt; __u64 ts, dt; int ret; diff --git a/tools/testing/selftests/bpf/progs/netif_receive_skb.c b/tools/testing/selftests/bpf/progs/netif_receive_skb.c index 1d8918dfbd3f..c0062645fc68 100644 --- a/tools/testing/selftests/bpf/progs/netif_receive_skb.c +++ b/tools/testing/selftests/bpf/progs/netif_receive_skb.c @@ -53,7 +53,6 @@ static int __strncmp(const void *m1, const void *m2, size_t len) do { \ static const char _expectedval[EXPECTED_STRSIZE] = \ _expected; \ - static const char _ptrtype[64] = #_type; \ __u64 _hflags = _flags | BTF_F_COMPACT; \ static _type _ptrdata = __VA_ARGS__; \ static struct btf_ptr _ptr = { }; \ diff --git a/tools/testing/selftests/bpf/progs/perfbuf_bench.c b/tools/testing/selftests/bpf/progs/perfbuf_bench.c index 45204fe0c570..29c1639fc78a 100644 --- a/tools/testing/selftests/bpf/progs/perfbuf_bench.c +++ b/tools/testing/selftests/bpf/progs/perfbuf_bench.c @@ -22,7 +22,6 @@ long dropped __attribute__((aligned(128))) = 0; SEC("fentry/" SYS_PREFIX "sys_getpgid") int bench_perfbuf(void *ctx) { - __u64 
*sample; int i; for (i = 0; i < batch_cnt; i++) { diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h index f2e7a31c8d75..026d573ce179 100644 --- a/tools/testing/selftests/bpf/progs/pyperf.h +++ b/tools/testing/selftests/bpf/progs/pyperf.h @@ -345,7 +345,7 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx) SEC("raw_tracepoint/kfree_skb") int on_event(struct bpf_raw_tracepoint_args* ctx) { - int i, ret = 0; + int ret = 0; ret |= __on_event(ctx); ret |= __on_event(ctx); ret |= __on_event(ctx); diff --git a/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c index 340f97da1084..7651843f5a80 100644 --- a/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c +++ b/tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c @@ -16,17 +16,6 @@ struct node_data { struct bpf_list_node node; }; -static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) -{ - struct node_data *node_a; - struct node_data *node_b; - - node_a = container_of(a, struct node_data, node); - node_b = container_of(b, struct node_data, node); - - return node_a->key < node_b->key; -} - #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8))) private(A) struct bpf_spin_lock glock; private(A) struct bpf_rb_root groot __contains(node_data, node); diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c index 1ced900f3fce..46d7d18a218f 100644 --- a/tools/testing/selftests/bpf/progs/rbtree_fail.c +++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c @@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx) } SEC("?tc") -__failure __msg("Unreleased reference id=2 alloc_insn=11") +__failure __msg("Unreleased reference id=2 alloc_insn=10") long rbtree_api_remove_no_drop(void *ctx) { struct bpf_rb_node *res; @@ -119,6 +119,7 @@ long rbtree_api_remove_no_drop(void *ctx) res = bpf_rbtree_remove(&groot, res); n = container_of(res, struct node_data, node); + __sink(n); bpf_spin_unlock(&glock); /* bpf_obj_drop(n) is missing here */ diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c index 7250bb76d18a..6a8c88e58df2 100644 --- a/tools/testing/selftests/bpf/progs/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c @@ -179,8 +179,6 @@ SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") int miss_lock(void *ctx) { struct task_struct *task; - struct css_set *cgroups; - struct cgroup *dfl_cgrp; /* missing bpf_rcu_read_lock() */ task = bpf_get_current_task_btf(); @@ -195,8 +193,6 @@ SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") int miss_unlock(void *ctx) { struct task_struct *task; - struct css_set *cgroups; - struct cgroup *dfl_cgrp; /* missing bpf_rcu_read_unlock() */ task = bpf_get_current_task_btf(); diff --git a/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c b/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c index a47bb0120719..76556e0b42b2 100644 --- a/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c +++ b/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c @@ -23,7 +23,6 @@ SEC("raw_tp/sys_enter") int BPF_PROG(read_bpf_task_storage_busy) { int *value; - int key; if (!CONFIG_PREEMPT) return 0; diff --git a/tools/testing/selftests/bpf/progs/recvmsg4_prog.c b/tools/testing/selftests/bpf/progs/recvmsg4_prog.c index 3d1ae8b3402f..59748c95471a 100644 --- 
a/tools/testing/selftests/bpf/progs/recvmsg4_prog.c +++ b/tools/testing/selftests/bpf/progs/recvmsg4_prog.c @@ -17,8 +17,6 @@ SEC("cgroup/recvmsg4") int recvmsg4_prog(struct bpf_sock_addr *ctx) { struct bpf_sock *sk; - __u32 user_ip4; - __u16 user_port; sk = ctx->sk; if (!sk) diff --git a/tools/testing/selftests/bpf/progs/recvmsg6_prog.c b/tools/testing/selftests/bpf/progs/recvmsg6_prog.c index 27dfb21b21b4..d9a4016596d5 100644 --- a/tools/testing/selftests/bpf/progs/recvmsg6_prog.c +++ b/tools/testing/selftests/bpf/progs/recvmsg6_prog.c @@ -20,8 +20,6 @@ SEC("cgroup/recvmsg6") int recvmsg6_prog(struct bpf_sock_addr *ctx) { struct bpf_sock *sk; - __u32 user_ip4; - __u16 user_port; sk = ctx->sk; if (!sk) diff --git a/tools/testing/selftests/bpf/progs/sendmsg4_prog.c b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c index ea75a44cb7fc..351e79aef2fa 100644 --- a/tools/testing/selftests/bpf/progs/sendmsg4_prog.c +++ b/tools/testing/selftests/bpf/progs/sendmsg4_prog.c @@ -21,8 +21,6 @@ SEC("cgroup/sendmsg4") int sendmsg_v4_prog(struct bpf_sock_addr *ctx) { - int prio; - if (ctx->type != SOCK_DGRAM) return 0; diff --git a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c index e2468a6d01a5..0660f29dca95 100644 --- a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c +++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c @@ -1,6 +1,7 @@ #include #include #include +#include "bpf_misc.h" struct { __uint(type, BPF_MAP_TYPE_SOCKMAP); @@ -40,6 +41,9 @@ int bpf_prog2(struct __sk_buff *skb) __u8 *d = data; __u8 sk, map; + __sink(lport); + __sink(rport); + if (data + 8 > data_end) return SK_DROP; diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index e562be6356f3..e02cfd380746 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -391,7 +391,6 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, struct strobe_map_raw map; void *location; uint64_t len; - int i; descr->tag_len = 0; /* presume no tag is set */ descr->cnt = -1; /* presume no value is set */ diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c index 7fab39a3bb12..99c8d1d8a187 100644 --- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c @@ -2,6 +2,7 @@ #include #include #include "bpf_legacy.h" +#include "bpf_misc.h" struct { __uint(type, BPF_MAP_TYPE_PROG_ARRAY); @@ -20,6 +21,8 @@ int subprog_tail2(struct __sk_buff *skb) else bpf_tail_call_static(skb, &jmp_table, 1); + __sink(arr[sizeof(arr) - 1]); + return skb->len; } @@ -30,6 +33,8 @@ int subprog_tail(struct __sk_buff *skb) bpf_tail_call_static(skb, &jmp_table, 0); + __sink(arr[sizeof(arr) - 1]); + return skb->len * 2; } @@ -38,6 +43,8 @@ int classifier_0(struct __sk_buff *skb) { volatile char arr[128] = {}; + __sink(arr[sizeof(arr) - 1]); + return subprog_tail2(skb); } @@ -46,6 +53,8 @@ int classifier_1(struct __sk_buff *skb) { volatile char arr[128] = {}; + __sink(arr[sizeof(arr) - 1]); + return skb->len * 3; } @@ -54,6 +63,8 @@ int entry(struct __sk_buff *skb) { volatile char arr[128] = {}; + __sink(arr[sizeof(arr) - 1]); + return subprog_tail(skb); } diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c index 41ce83da78e8..4a9f63bea66c 100644 --- 
a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include #include +#include "bpf_misc.h" #define __unused __attribute__((unused)) @@ -36,6 +37,8 @@ int entry(struct __sk_buff *skb) /* Have data on stack which size is not a multiple of 8 */ volatile char arr[1] = {}; + __sink(arr[0]); + return subprog_tail(skb); } diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c index f19d54eda4f1..002c7f69e47f 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c @@ -109,6 +109,7 @@ int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_ acquired = bpf_task_acquire(task); /* Acquired task is never released. */ + __sink(acquired); return 0; } diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c index 9f359cfd29e7..aebc4bb14e7d 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_success.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c @@ -171,8 +171,6 @@ static void lookup_compare_pid(const struct task_struct *p) SEC("tp_btf/task_newtask") int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags) { - struct task_struct *acquired; - if (!is_test_kfunc_task()) return 0; @@ -183,8 +181,6 @@ int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags) SEC("tp_btf/task_newtask") int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags) { - struct task_struct *current, *acquired; - if (!is_test_kfunc_task()) return 0; @@ -208,8 +204,6 @@ static int is_pid_lookup_valid(s32 pid) SEC("tp_btf/task_newtask") int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags) { - struct task_struct *acquired; - if (!is_test_kfunc_task()) return 0; diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c index 9fc603c9d673..77ad8adf68da 100644 --- a/tools/testing/selftests/bpf/progs/test_bpf_nf.c +++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c @@ -75,7 +75,6 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32, struct bpf_ct_opts___local opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 }; struct bpf_sock_tuple bpf_tuple; struct nf_conn *ct; - int err; __builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4)); diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c index f45a7095de7a..f41c81212ee9 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c @@ -455,7 +455,6 @@ static ret_t forward_to_next_hop(struct __sk_buff *skb, struct bpf_dynptr *dynpt static ret_t skip_next_hops(__u64 *offset, int n) { - __u32 res; switch (n) { case 1: *offset += sizeof(struct in_addr); diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c index ab1e647aeb31..b86fdda2a6ea 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c @@ -42,7 +42,6 @@ int test_core_bitfields(void *ctx) { struct core_reloc_bitfields *in = (void *)&data.in; 
struct core_reloc_bitfields_output *out = (void *)&data.out; - uint64_t res; out->ub1 = BPF_CORE_READ_BITFIELD_PROBED(in, ub1); out->ub2 = BPF_CORE_READ_BITFIELD_PROBED(in, ub2); diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c index 23970a20b324..b85fc8c423ba 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func1.c +++ b/tools/testing/selftests/bpf/progs/test_global_func1.c @@ -18,6 +18,8 @@ int f1(struct __sk_buff *skb) { volatile char buf[MAX_STACK] = {}; + __sink(buf[MAX_STACK - 1]); + return f0(0, skb) + skb->len; } @@ -34,6 +36,8 @@ int f3(int val, struct __sk_buff *skb, int var) { volatile char buf[MAX_STACK] = {}; + __sink(buf[MAX_STACK - 1]); + return skb->ifindex * val * var; } diff --git a/tools/testing/selftests/bpf/progs/test_global_func2.c b/tools/testing/selftests/bpf/progs/test_global_func2.c index 3dce97fb52a4..2beab9c3b68a 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func2.c +++ b/tools/testing/selftests/bpf/progs/test_global_func2.c @@ -18,6 +18,8 @@ int f1(struct __sk_buff *skb) { volatile char buf[MAX_STACK] = {}; + __sink(buf[MAX_STACK - 1]); + return f0(0, skb) + skb->len; } @@ -34,6 +36,8 @@ int f3(int val, struct __sk_buff *skb, int var) { volatile char buf[MAX_STACK] = {}; + __sink(buf[MAX_STACK - 1]); + return skb->ifindex * val * var; } diff --git a/tools/testing/selftests/bpf/progs/test_hash_large_key.c b/tools/testing/selftests/bpf/progs/test_hash_large_key.c index 473a22794a62..8b438128f46b 100644 --- a/tools/testing/selftests/bpf/progs/test_hash_large_key.c +++ b/tools/testing/selftests/bpf/progs/test_hash_large_key.c @@ -28,7 +28,7 @@ struct bigelement { SEC("raw_tracepoint/sys_enter") int bpf_hash_large_key_test(void *ctx) { - int zero = 0, err = 1, value = 42; + int zero = 0, value = 42; struct bigelement *key; key = bpf_map_lookup_elem(&key_map, &zero); diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c index a72a5bf3812a..27109b877714 100644 --- a/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c +++ b/tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c @@ -35,7 +35,6 @@ SEC("raw_tp/sys_enter") int handler2(const void *ctx) { int *active; - __u32 cpu; active = bpf_this_cpu_ptr(&bpf_prog_active); write_active(active); diff --git a/tools/testing/selftests/bpf/progs/test_legacy_printk.c b/tools/testing/selftests/bpf/progs/test_legacy_printk.c index 64c2d9ced529..42718cd8e6a4 100644 --- a/tools/testing/selftests/bpf/progs/test_legacy_printk.c +++ b/tools/testing/selftests/bpf/progs/test_legacy_printk.c @@ -56,7 +56,7 @@ int handle_legacy(void *ctx) SEC("tp/raw_syscalls/sys_enter") int handle_modern(void *ctx) { - int zero = 0, cur_pid; + int cur_pid; cur_pid = bpf_get_current_pid_tgid() >> 32; if (cur_pid != my_pid_var) diff --git a/tools/testing/selftests/bpf/progs/test_map_lock.c b/tools/testing/selftests/bpf/progs/test_map_lock.c index acf073db9e8b..1c02511b73cd 100644 --- a/tools/testing/selftests/bpf/progs/test_map_lock.c +++ b/tools/testing/selftests/bpf/progs/test_map_lock.c @@ -33,7 +33,7 @@ struct { SEC("cgroup/skb") int bpf_map_lock_test(struct __sk_buff *skb) { - struct hmap_elem zero = {}, *val; + struct hmap_elem *val; int rnd = bpf_get_prandom_u32(); int key = 0, err = 1, i; struct array_elem *q; diff --git a/tools/testing/selftests/bpf/progs/test_obj_id.c b/tools/testing/selftests/bpf/progs/test_obj_id.c index 
ded71b3ff6b4..2850ae788a91 100644 --- a/tools/testing/selftests/bpf/progs/test_obj_id.c +++ b/tools/testing/selftests/bpf/progs/test_obj_id.c @@ -4,6 +4,7 @@ #include #include #include +#include "bpf_misc.h" struct { __uint(type, BPF_MAP_TYPE_ARRAY); @@ -19,6 +20,7 @@ int test_obj_id(void *ctx) __u64 *value; value = bpf_map_lookup_elem(&test_map_id, &key); + __sink(value); return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c index 79bab9b50e9e..d9b2ba7ac340 100644 --- a/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c +++ b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c @@ -87,7 +87,6 @@ int xdp_ingress_v6(struct xdp_md *xdp) __u8 tcp_hdr_opt_len = 0; struct tcphdr *tcp_hdr; __u64 tcp_offset = 0; - __u32 off; int err; tcp_offset = sizeof(struct ethhdr) + sizeof(struct ipv6hdr); diff --git a/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c index d3b319722e30..dc6e43bc6a62 100644 --- a/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c +++ b/tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c @@ -30,7 +30,7 @@ __u32 server_id; static int parse_hdr_opt(struct bpf_dynptr *ptr, __u32 *off, __u8 *hdr_bytes_remaining, __u32 *server_id) { - __u8 *tcp_opt, kind, hdr_len; + __u8 kind, hdr_len; __u8 buffer[sizeof(kind) + sizeof(hdr_len) + sizeof(*server_id)]; __u8 *data; diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c index 5cd7c096f62d..bce7173152c6 100644 --- a/tools/testing/selftests/bpf/progs/test_pkt_access.c +++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c @@ -13,6 +13,7 @@ #include #include #include +#include "bpf_misc.h" /* llvm will optimize both subprograms into exactly the same BPF assembly * @@ -51,6 +52,8 @@ int get_skb_len(struct __sk_buff *skb) { volatile char buf[MAX_STACK] = {}; + __sink(buf[MAX_STACK - 1]); + return skb->len; } @@ -73,6 +76,8 @@ int get_skb_ifindex(int val, struct __sk_buff *skb, int var) { volatile char buf[MAX_STACK] = {}; + __sink(buf[MAX_STACK - 1]); + return skb->ifindex * val * var; } diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf.c b/tools/testing/selftests/bpf/progs/test_ringbuf.c index 5bdc0d38efc0..501cefa97633 100644 --- a/tools/testing/selftests/bpf/progs/test_ringbuf.c +++ b/tools/testing/selftests/bpf/progs/test_ringbuf.c @@ -41,7 +41,6 @@ int test_ringbuf(void *ctx) { int cur_pid = bpf_get_current_pid_tgid() >> 32; struct sample *sample; - int zero = 0; if (cur_pid != pid) return 0; diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c b/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c index 2760bf60d05a..21bb7da90ea5 100644 --- a/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c +++ b/tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c @@ -53,6 +53,7 @@ int test_ringbuf_mem_map_key(void *ctx) /* test using 'sample' (PTR_TO_MEM | MEM_ALLOC) as map key arg */ lookup_val = (int *)bpf_map_lookup_elem(&hash_map, sample); + __sink(lookup_val); /* workaround - memcpy is necessary so that verifier doesn't * complain with: diff --git a/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c b/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c index e416e0ce12b7..9626baa6779c 100644 --- a/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c +++ b/tools/testing/selftests/bpf/progs/test_ringbuf_multi.c @@ 
-59,7 +59,6 @@ int test_ringbuf(void *ctx) int cur_pid = bpf_get_current_pid_tgid() >> 32; struct sample *sample; void *rb; - int zero = 0; if (cur_pid != pid) return 0; diff --git a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c index 7d56ed47cd4d..5eb25c6ad75b 100644 --- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c +++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c @@ -64,7 +64,7 @@ SEC("sk_reuseport") int _select_by_skb_data(struct sk_reuseport_md *reuse_md) { __u32 linum, index = 0, flags = 0, index_zero = 0; - __u32 *result_cnt, *linum_value; + __u32 *result_cnt; struct data_check data_check = {}; struct cmd *cmd, cmd_copy; void *data, *data_end; diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c index 21b19b758c4e..3079244c7f96 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_assign.c +++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c @@ -15,6 +15,7 @@ #include #include #include +#include "bpf_misc.h" #if defined(IPROUTE2_HAVE_LIBBPF) /* Use a new-style map definition. */ @@ -57,7 +58,6 @@ get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp) void *data = (void *)(long)skb->data; struct bpf_sock_tuple *result; struct ethhdr *eth; - __u64 tuple_len; __u8 proto = 0; __u64 ihl_len; @@ -94,6 +94,7 @@ get_tuple(struct __sk_buff *skb, bool *ipv4, bool *tcp) return NULL; *tcp = (proto == IPPROTO_TCP); + __sink(ihl_len); return result; } @@ -173,7 +174,6 @@ int bpf_sk_assign_test(struct __sk_buff *skb) struct bpf_sock_tuple *tuple; bool ipv4 = false; bool tcp = false; - int tuple_len; int ret = 0; tuple = get_tuple(skb, &ipv4, &tcp); diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c index 6058dcb11b36..71f844b9b902 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c @@ -391,7 +391,6 @@ SEC("sk_lookup") int ctx_narrow_access(struct bpf_sk_lookup *ctx) { struct bpf_sock *sk; - int err, family; __u32 val_u32; bool v4; @@ -645,9 +644,7 @@ static __always_inline int select_server_a(struct bpf_sk_lookup *ctx) SEC("sk_lookup") int multi_prog_redir1(struct bpf_sk_lookup *ctx) { - int ret; - - ret = select_server_a(ctx); + (void)select_server_a(ctx); bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY); return SK_PASS; } @@ -655,9 +652,7 @@ int multi_prog_redir1(struct bpf_sk_lookup *ctx) SEC("sk_lookup") int multi_prog_redir2(struct bpf_sk_lookup *ctx) { - int ret; - - ret = select_server_a(ctx); + (void)select_server_a(ctx); bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY); return SK_PASS; } diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c index 6ccf6d546074..e9efc3263022 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c @@ -110,7 +110,6 @@ int err_modify_sk_pointer(struct __sk_buff *skb) { struct bpf_sock_tuple tuple = {}; struct bpf_sock *sk; - __u32 family; sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); if (sk) { @@ -125,7 +124,6 @@ int err_modify_sk_or_null_pointer(struct __sk_buff *skb) { struct bpf_sock_tuple tuple = {}; struct bpf_sock *sk; - __u32 family; sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0); sk += 1; diff --git 
a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c index 9f4b8f9f1181..bbad3c2d9aa5 100644 --- a/tools/testing/selftests/bpf/progs/test_sock_fields.c +++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c @@ -121,7 +121,7 @@ static void tpcpy(struct bpf_tcp_sock *dst, SEC("cgroup_skb/egress") int egress_read_sock_fields(struct __sk_buff *skb) { - struct bpf_spinlock_cnt cli_cnt_init = { .lock = 0, .cnt = 0xeB9F }; + struct bpf_spinlock_cnt cli_cnt_init = { .lock = {}, .cnt = 0xeB9F }; struct bpf_spinlock_cnt *pkt_out_cnt, *pkt_out_cnt10; struct bpf_tcp_sock *tp, *tp_ret; struct bpf_sock *sk, *sk_ret; diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h index 6c85b00f27b2..baf9ebc6d903 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h +++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h @@ -14,6 +14,7 @@ #include #include #include +#include "bpf_misc.h" /* Sockmap sample program connects a client and a backend together * using cgroups. @@ -111,12 +112,15 @@ int bpf_prog2(struct __sk_buff *skb) int len, *f, ret, zero = 0; __u64 flags = 0; + __sink(rport); if (lport == 10000) ret = 10; else ret = 1; len = (__u32)skb->data_end - (__u32)skb->data; + __sink(len); + f = bpf_map_lookup_elem(&sock_skb_opts, &zero); if (f && *f) { ret = 3; @@ -180,7 +184,6 @@ int bpf_prog3(struct __sk_buff *skb) if (err) return SK_DROP; bpf_write_pass(skb, 13); -tls_out: return ret; } @@ -188,8 +191,7 @@ SEC("sockops") int bpf_sockmap(struct bpf_sock_ops *skops) { __u32 lport, rport; - int op, err = 0, index, key, ret; - + int op, err, ret; op = (int) skops->op; @@ -228,6 +230,8 @@ int bpf_sockmap(struct bpf_sock_ops *skops) break; } + __sink(err); + return 0; } @@ -321,6 +325,10 @@ int bpf_prog8(struct sk_msg_md *msg) } else { return SK_DROP; } + + __sink(data_end); + __sink(data); + return SK_PASS; } SEC("sk_msg4") diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c index 5bd10409285b..b2440a0ff422 100644 --- a/tools/testing/selftests/bpf/progs/test_spin_lock.c +++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c @@ -3,6 +3,7 @@ #include #include #include +#include "bpf_misc.h" struct hmap_elem { volatile int cnt; @@ -89,6 +90,8 @@ int bpf_spin_lock_test(struct __sk_buff *skb) credit = q->credit; bpf_spin_unlock(&q->lock); + __sink(credit); + /* spin_lock in cgroup local storage */ cls = bpf_get_local_storage(&cls_map, 0); bpf_spin_lock(&cls->lock); diff --git a/tools/testing/selftests/bpf/progs/test_tc_dtime.c b/tools/testing/selftests/bpf/progs/test_tc_dtime.c index 125beec31834..74ec09f040b7 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_dtime.c +++ b/tools/testing/selftests/bpf/progs/test_tc_dtime.c @@ -163,9 +163,9 @@ static int skb_get_type(struct __sk_buff *skb) ip6h = data + sizeof(struct ethhdr); if (ip6h + 1 > data_end) return -1; - if (v6_equal(ip6h->saddr, (struct in6_addr)ip6_src)) + if (v6_equal(ip6h->saddr, (struct in6_addr){{ip6_src}})) ns = SRC_NS; - else if (v6_equal(ip6h->saddr, (struct in6_addr)ip6_dst)) + else if (v6_equal(ip6h->saddr, (struct in6_addr){{ip6_dst}})) ns = DST_NS; inet_proto = ip6h->nexthdr; trans = ip6h + 1; diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh.c b/tools/testing/selftests/bpf/progs/test_tc_neigh.c index 3e32ea375ab4..de15155f2609 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_neigh.c +++ 
b/tools/testing/selftests/bpf/progs/test_tc_neigh.c @@ -94,7 +94,7 @@ int tc_dst(struct __sk_buff *skb) redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_src)); break; case __bpf_constant_htons(ETH_P_IPV6): - redirect = is_remote_ep_v6(skb, (struct in6_addr)ip6_src); + redirect = is_remote_ep_v6(skb, (struct in6_addr){{ip6_src}}); break; } @@ -119,7 +119,7 @@ int tc_src(struct __sk_buff *skb) redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_dst)); break; case __bpf_constant_htons(ETH_P_IPV6): - redirect = is_remote_ep_v6(skb, (struct in6_addr)ip6_dst); + redirect = is_remote_ep_v6(skb, (struct in6_addr){{ip6_dst}}); break; } diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c index 3ded05280757..cf7ed8cbb1fe 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c @@ -46,8 +46,6 @@ int bpf_testcb(struct bpf_sock_ops *skops) struct bpf_sock_ops *reuse = skops; struct tcphdr *thdr; int window_clamp = 9216; - int good_call_rv = 0; - int bad_call_rv = 0; int save_syn = 1; int rv = -1; int v = 0; diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c index 95b4aa0928ba..9ab2d55ab7c0 100644 --- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c @@ -209,7 +209,6 @@ int erspan_get_tunnel(struct __sk_buff *skb) { struct bpf_tunnel_key key; struct erspan_metadata md; - __u32 index; int ret; ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); @@ -289,7 +288,6 @@ int ip4ip6erspan_get_tunnel(struct __sk_buff *skb) { struct bpf_tunnel_key key; struct erspan_metadata md; - __u32 index; int ret; ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), @@ -405,8 +403,6 @@ int vxlan_get_tunnel_src(struct __sk_buff *skb) int ret; struct bpf_tunnel_key key; struct vxlan_metadata md; - __u32 orig_daddr; - __u32 index = 0; ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), BPF_F_TUNINFO_FLAGS); @@ -443,9 +439,7 @@ int veth_set_outer_dst(struct __sk_buff *skb) void *data_end = (void *)(long)skb->data_end; struct udphdr *udph; struct iphdr *iph; - __u32 index = 0; int ret = 0; - int shrink; __s64 csum; if ((void *)eth + sizeof(*eth) > data_end) { diff --git a/tools/testing/selftests/bpf/progs/test_usdt_multispec.c b/tools/testing/selftests/bpf/progs/test_usdt_multispec.c index aa6de32b50d1..962f3462066a 100644 --- a/tools/testing/selftests/bpf/progs/test_usdt_multispec.c +++ b/tools/testing/selftests/bpf/progs/test_usdt_multispec.c @@ -18,8 +18,6 @@ int usdt_100_sum; SEC("usdt//proc/self/exe:test:usdt_100") int BPF_USDT(usdt_100, int x) { - long tmp; - if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0; diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale1.c b/tools/testing/selftests/bpf/progs/test_verif_scale1.c index ac6135d9374c..323a73fb2e8c 100644 --- a/tools/testing/selftests/bpf/progs/test_verif_scale1.c +++ b/tools/testing/selftests/bpf/progs/test_verif_scale1.c @@ -11,7 +11,7 @@ int balancer_ingress(struct __sk_buff *ctx) void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; void *ptr; - int ret = 0, nh_off, i = 0; + int nh_off, i = 0; nh_off = 14; diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale2.c b/tools/testing/selftests/bpf/progs/test_verif_scale2.c index f90ffcafd1e8..f5318f757084 100644 --- a/tools/testing/selftests/bpf/progs/test_verif_scale2.c +++ 
b/tools/testing/selftests/bpf/progs/test_verif_scale2.c @@ -11,7 +11,7 @@ int balancer_ingress(struct __sk_buff *ctx) void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; void *ptr; - int ret = 0, nh_off, i = 0; + int nh_off, i = 0; nh_off = 14; diff --git a/tools/testing/selftests/bpf/progs/test_verif_scale3.c b/tools/testing/selftests/bpf/progs/test_verif_scale3.c index ca33a9b711c4..2e06dbb1ad5c 100644 --- a/tools/testing/selftests/bpf/progs/test_verif_scale3.c +++ b/tools/testing/selftests/bpf/progs/test_verif_scale3.c @@ -11,7 +11,7 @@ int balancer_ingress(struct __sk_buff *ctx) void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; void *ptr; - int ret = 0, nh_off, i = 0; + int nh_off, i = 0; nh_off = 32; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c index 297c260fc364..81bb38d72ced 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c @@ -5,8 +5,6 @@ SEC("xdp") int _xdp_adjust_tail_grow(struct xdp_md *xdp) { - void *data_end = (void *)(long)xdp->data_end; - void *data = (void *)(long)xdp->data; int data_len = bpf_xdp_get_buff_len(xdp); int offset = 0; /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */ diff --git a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c index 3379d303f41a..ee48c4963971 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c @@ -45,8 +45,6 @@ SEC("fentry/FUNC") int BPF_PROG(trace_on_entry, struct xdp_buff *xdp) { struct meta meta; - void *data_end = (void *)(long)xdp->data_end; - void *data = (void *)(long)xdp->data; meta.ifindex = xdp->rxq->dev->ifindex; meta.pkt_len = bpf_xdp_get_buff_len((struct xdp_md *)xdp); diff --git a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c index 7521a805b506..25ee4a22e48d 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c @@ -82,7 +82,6 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp, struct bpf_dynptr *xd struct iptnl_info *tnl; struct ethhdr *new_eth; struct ethhdr *old_eth; - __u32 transport_hdr_sz; struct iphdr *iph; __u16 *next_iph; __u16 payload_len; @@ -165,7 +164,6 @@ static __always_inline int handle_ipv6(struct xdp_md *xdp, struct bpf_dynptr *xd struct iptnl_info *tnl; struct ethhdr *new_eth; struct ethhdr *old_eth; - __u32 transport_hdr_sz; struct ipv6hdr *ip6h; __u16 payload_len; struct vip vip = {}; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c index ba48fcb98ab2..42c8f6ded0e4 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c @@ -371,45 +371,6 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval, return true; } -static __attribute__ ((noinline)) -bool decap_v6(struct xdp_md *xdp, void **data, void **data_end, bool inner_v4) -{ - struct eth_hdr *new_eth; - struct eth_hdr *old_eth; - - old_eth = *data; - new_eth = *data + sizeof(struct ipv6hdr); - memcpy(new_eth->eth_source, old_eth->eth_source, 6); - memcpy(new_eth->eth_dest, old_eth->eth_dest, 6); - if (inner_v4) - new_eth->eth_proto = 8; - else - new_eth->eth_proto = 56710; - if 
(bpf_xdp_adjust_head(xdp, (int)sizeof(struct ipv6hdr))) - return false; - *data = (void *)(long)xdp->data; - *data_end = (void *)(long)xdp->data_end; - return true; -} - -static __attribute__ ((noinline)) -bool decap_v4(struct xdp_md *xdp, void **data, void **data_end) -{ - struct eth_hdr *new_eth; - struct eth_hdr *old_eth; - - old_eth = *data; - new_eth = *data + sizeof(struct iphdr); - memcpy(new_eth->eth_source, old_eth->eth_source, 6); - memcpy(new_eth->eth_dest, old_eth->eth_dest, 6); - new_eth->eth_proto = 8; - if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr))) - return false; - *data = (void *)(long)xdp->data; - *data_end = (void *)(long)xdp->data_end; - return true; -} - static __attribute__ ((noinline)) int swap_mac_and_send(void *data, void *data_end) { @@ -430,7 +391,6 @@ int send_icmp_reply(void *data, void *data_end) __u16 *next_iph_u16; __u32 tmp_addr = 0; struct iphdr *iph; - __u32 csum1 = 0; __u32 csum = 0; __u64 off = 0; @@ -662,7 +622,6 @@ static int process_l3_headers_v4(struct packet_description *pckt, void *data_end) { struct iphdr *iph; - __u64 iph_len; int action; iph = data + off; @@ -696,7 +655,6 @@ static int process_packet(void *data, __u64 off, void *data_end, struct packet_description pckt = { }; struct vip_definition vip = { }; struct lb_stats *data_stats; - struct eth_hdr *eth = data; void *lru_map = &lru_cache; struct vip_meta *vip_info; __u32 lru_stats_key = 513; @@ -704,7 +662,6 @@ static int process_packet(void *data, __u64 off, void *data_end, __u32 stats_key = 512; struct ctl_value *cval; __u16 pkt_bytes; - __u64 iph_len; __u8 protocol; __u32 vip_num; int action; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c index 4ddcb6dfe500..f3ec8086482d 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_vlan.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_vlan.c @@ -209,19 +209,6 @@ int xdp_prognum2(struct xdp_md *ctx) return XDP_PASS; } -static __always_inline -void shift_mac_4bytes_16bit(void *data) -{ - __u16 *p = data; - - p[7] = p[5]; /* delete p[7] was vlan_hdr->h_vlan_TCI */ - p[6] = p[4]; /* delete p[6] was ethhdr->h_proto */ - p[5] = p[3]; - p[4] = p[2]; - p[3] = p[1]; - p[2] = p[0]; -} - static __always_inline void shift_mac_4bytes_32bit(void *data) { diff --git a/tools/testing/selftests/bpf/progs/type_cast.c b/tools/testing/selftests/bpf/progs/type_cast.c index eb78e6f03129..a9629ac230fd 100644 --- a/tools/testing/selftests/bpf/progs/type_cast.c +++ b/tools/testing/selftests/bpf/progs/type_cast.c @@ -63,7 +63,6 @@ SEC("?tp_btf/sys_enter") int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id) { struct task_struct *task, *task_dup; - long *ptr; task = bpf_get_current_task_btf(); task_dup = bpf_rdonly_cast(task, bpf_core_type_id_kernel(struct task_struct)); diff --git a/tools/testing/selftests/bpf/progs/udp_limit.c b/tools/testing/selftests/bpf/progs/udp_limit.c index 165e3c2dd9a3..4767451b59ac 100644 --- a/tools/testing/selftests/bpf/progs/udp_limit.c +++ b/tools/testing/selftests/bpf/progs/udp_limit.c @@ -17,7 +17,6 @@ SEC("cgroup/sock_create") int sock(struct bpf_sock *ctx) { int *sk_storage; - __u32 key; if (ctx->type != SOCK_DGRAM) return 1; @@ -46,7 +45,6 @@ SEC("cgroup/sock_release") int sock_release(struct bpf_sock *ctx) { int *sk_storage; - __u32 key; if (ctx->type != SOCK_DGRAM) return 1; diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_success.c b/tools/testing/selftests/bpf/progs/user_ringbuf_success.c index 0ade1110613b..dd3bdf672633 100644 --- 
a/tools/testing/selftests/bpf/progs/user_ringbuf_success.c +++ b/tools/testing/selftests/bpf/progs/user_ringbuf_success.c @@ -162,8 +162,6 @@ SEC("fentry/" SYS_PREFIX "sys_prctl") int test_user_ringbuf_protocol(void *ctx) { long status = 0; - struct sample *sample = NULL; - struct bpf_dynptr ptr; if (!is_test_process()) return 0; @@ -183,10 +181,6 @@ int test_user_ringbuf_protocol(void *ctx) SEC("fentry/" SYS_PREFIX "sys_getpgid") int test_user_ringbuf(void *ctx) { - int status = 0; - struct sample *sample = NULL; - struct bpf_dynptr ptr; - if (!is_test_process()) return 0; diff --git a/tools/testing/selftests/bpf/progs/xdp_features.c b/tools/testing/selftests/bpf/progs/xdp_features.c index 87c247d56f72..67424084a38a 100644 --- a/tools/testing/selftests/bpf/progs/xdp_features.c +++ b/tools/testing/selftests/bpf/progs/xdp_features.c @@ -70,7 +70,6 @@ xdp_process_echo_packet(struct xdp_md *xdp, bool dut) struct tlv_hdr *tlv; struct udphdr *uh; __be16 port; - __u8 *cmd; if (eh + 1 > (struct ethhdr *)data_end) return -EINVAL; diff --git a/tools/testing/selftests/bpf/progs/xdping_kern.c b/tools/testing/selftests/bpf/progs/xdping_kern.c index 4ad73847b8a5..54cf1765118b 100644 --- a/tools/testing/selftests/bpf/progs/xdping_kern.c +++ b/tools/testing/selftests/bpf/progs/xdping_kern.c @@ -89,7 +89,6 @@ static __always_inline int icmp_check(struct xdp_md *ctx, int type) SEC("xdp") int xdping_client(struct xdp_md *ctx) { - void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct pinginfo *pinginfo = NULL; struct ethhdr *eth = data; @@ -153,7 +152,6 @@ int xdping_client(struct xdp_md *ctx) SEC("xdp") int xdping_server(struct xdp_md *ctx) { - void *data_end = (void *)(long)ctx->data_end; void *data = (void *)(long)ctx->data; struct ethhdr *eth = data; struct icmphdr *icmph; diff --git a/tools/testing/selftests/bpf/progs/xdpwall.c b/tools/testing/selftests/bpf/progs/xdpwall.c index 7a891a0c3a39..c2dd0c28237a 100644 --- a/tools/testing/selftests/bpf/progs/xdpwall.c +++ b/tools/testing/selftests/bpf/progs/xdpwall.c @@ -321,7 +321,6 @@ int edgewall(struct xdp_md *ctx) void *data = (void *)(long)(ctx->data); struct fw_match_info match_info = {}; struct pkt_info info = {}; - __u8 parse_err = NO_ERR; void *transport_hdr; struct ethhdr *eth; bool filter_res; -- cgit v1.2.3-70-g09d2 From 3d5a55ddc25508fe950991603d0224c0bba60558 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 8 Mar 2023 21:40:15 -0800 Subject: selftests/bpf: make BPF compiler flags stricter We recently added -Wuninitialized, but it's not enough to catch various silly mistakes or omissions. Let's go all the way to -Wall, just like we do for user-space code. 
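As a rough illustration (not part of the patch), this is the class of leftover that -Wall now turns into a hard error under -Werror while -Wuninitialized alone stays silent; the __sink() helper is assumed here to be the asm-barrier macro from progs/bpf_misc.h that the preceding cleanup uses for values that must stay "used":

#include "bpf_misc.h"   /* assumed to provide __sink() */

int example(int in)
{
	int never_read;             /* -Wunused-variable: declared but never read */
	int set_only = in * 2;      /* typically flagged as set-but-unused under -Wall */
	volatile char buf[16] = {};

	__sink(buf[15]);            /* keeps the on-stack buffer observable, so it is not flagged */
	return in;
}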
Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230309054015.4068562-5-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 16f404aa1b23..606e2d738dd8 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -352,12 +352,12 @@ CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%)) endif CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH)) -BPF_CFLAGS = -g -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \ +BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \ -I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \ -I$(abspath $(OUTPUT)/../usr/include) CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \ - -Wno-compare-distinct-pointer-types -Wuninitialized + -Wno-compare-distinct-pointer-types $(OUTPUT)/test_l4lb_noinline.o: BPF_CFLAGS += -fno-inline $(OUTPUT)/test_xdp_noinline.o: BPF_CFLAGS += -fno-inline -- cgit v1.2.3-70-g09d2 From 4a54de65964d37c3929379271ab31355e93ccddf Mon Sep 17 00:00:00 2001 From: David Vernet Date: Fri, 10 Mar 2023 00:19:09 -0600 Subject: bpf/selftests: Fix send_signal tracepoint tests The send_signal tracepoint tests are non-deterministically failing in CI. The test works as follows: 1. Two pairs of file descriptors are created using the pipe() function. One pair is used to communicate between a parent process -> child process, and the other for the reverse direction. 2. A child is fork()'ed. The child process registers a signal handler, notifies its parent that the signal handler is registered, and then waits for its parent to have enabled a BPF program that sends a signal. 3. The parent opens and loads a BPF skeleton with programs that send signals to the child process. The different programs are triggered by different perf events (either NMI or normal perf), or by regular tracepoints. The signal is delivered to the child whenever the child triggers the program. 4. The child's signal handler is invoked, which sets a flag saying that the signal handler was reached. The child then signals to the parent that it received the signal, and the test ends. The perf testcases (send_signal_perf{_thread} and send_signal_nmi{_thread}) work 100% of the time, but the tracepoint testcases fail non-deterministically because the tracepoint is not always being fired for the child. There are two tracepoint programs registered in the test: 'tracepoint/sched/sched_switch', and 'tracepoint/syscalls/sys_enter_nanosleep'. The child never intentionally blocks, nor sleeps, so neither tracepoint is guaranteed to be triggered. To fix this, we can have the child trigger the nanosleep program with a usleep(). Before this patch, the test would fail locally every 2-3 runs. Now, it doesn't fail after more than 1000 runs.
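The fix itself is tiny; as a hedged sketch (names are illustrative, not the exact ones in prog_tests/send_signal.c), the child-side loop now looks roughly like this, where the usleep(1) makes the child enter nanosleep so the sys_enter_nanosleep program fires deterministically:

#include <signal.h>
#include <stdbool.h>
#include <unistd.h>

static volatile sig_atomic_t sigusr1_received;

static void sigusr1_handler(int sig)
{
	sigusr1_received = 1;
}

/* child side: install the handler, tell the parent via the pipe, then spin */
static void child_wait_for_signal(int pipe_wr_fd, bool tracepoint_case)
{
	char c = '1';
	volatile int j = 0;

	signal(SIGUSR1, sigusr1_handler);
	(void)write(pipe_wr_fd, &c, 1);

	for (int i = 0; i < 1000000000 && !sigusr1_received; i++) {
		j /= i + j + 1;         /* busy work that the perf/NMI cases sample */
		if (tracepoint_case)
			usleep(1);      /* enter nanosleep -> tracepoint program fires */
	}
}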
Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230310061909.1420887-1-void@manifault.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/send_signal.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c index d63a20fbed33..b15b343ebb6b 100644 --- a/tools/testing/selftests/bpf/prog_tests/send_signal.c +++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c @@ -64,8 +64,12 @@ static void test_send_signal_common(struct perf_event_attr *attr, ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read"); /* wait a little for signal handler */ - for (int i = 0; i < 1000000000 && !sigusr1_received; i++) + for (int i = 0; i < 1000000000 && !sigusr1_received; i++) { j /= i + j + 1; + if (!attr) + /* trigger the nanosleep tracepoint program. */ + usleep(1); + } buf[0] = sigusr1_received ? '2' : '0'; ASSERT_EQ(sigusr1_received, 1, "sigusr1_received"); -- cgit v1.2.3-70-g09d2 From 57ef77152b58770cbd54d624babd8f5d90805ea7 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Tue, 7 Mar 2023 22:59:34 -0800 Subject: selftests/bpf: Replace CHECK with ASSERT in test_local_storage This patch migrates the CHECK macro to ASSERT macro. Signed-off-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20230308065936.1550103-16-martin.lau@linux.dev Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/test_local_storage.c | 47 ++++++++-------------- 1 file changed, 17 insertions(+), 30 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/test_local_storage.c b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c index 9c77cd6b1eaf..563a9c746b7b 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_local_storage.c +++ b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c @@ -13,8 +13,6 @@ #include "network_helpers.h" #include "task_local_storage_helpers.h" -static unsigned int duration; - #define TEST_STORAGE_VALUE 0xbeefdead struct storage { @@ -60,36 +58,30 @@ static bool check_syscall_operations(int map_fd, int obj_fd) /* Looking up an existing element should fail initially */ err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0); - if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem", - "err:%d errno:%d\n", err, errno)) + if (!ASSERT_EQ(err, -ENOENT, "bpf_map_lookup_elem")) return false; /* Create a new element */ err = bpf_map_update_elem(map_fd, &obj_fd, &val, BPF_NOEXIST); - if (CHECK(err < 0, "bpf_map_update_elem", "err:%d errno:%d\n", err, - errno)) + if (!ASSERT_OK(err, "bpf_map_update_elem")) return false; /* Lookup the newly created element */ err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0); - if (CHECK(err < 0, "bpf_map_lookup_elem", "err:%d errno:%d", err, - errno)) + if (!ASSERT_OK(err, "bpf_map_lookup_elem")) return false; /* Check the value of the newly created element */ - if (CHECK(lookup_val.value != val.value, "bpf_map_lookup_elem", - "value got = %x errno:%d", lookup_val.value, val.value)) + if (!ASSERT_EQ(lookup_val.value, val.value, "bpf_map_lookup_elem")) return false; err = bpf_map_delete_elem(map_fd, &obj_fd); - if (CHECK(err, "bpf_map_delete_elem()", "err:%d errno:%d\n", err, - errno)) + if (!ASSERT_OK(err, "bpf_map_delete_elem()")) return false; /* The lookup should fail, now that the element has been deleted */ err = bpf_map_lookup_elem_flags(map_fd, &obj_fd, &lookup_val, 0); - if (CHECK(!err 
|| errno != ENOENT, "bpf_map_lookup_elem", - "err:%d errno:%d\n", err, errno)) + if (!ASSERT_EQ(err, -ENOENT, "bpf_map_lookup_elem")) return false; return true; @@ -104,35 +96,32 @@ void test_test_local_storage(void) char cmd[256]; skel = local_storage__open_and_load(); - if (CHECK(!skel, "skel_load", "lsm skeleton failed\n")) + if (!ASSERT_OK_PTR(skel, "skel_load")) goto close_prog; err = local_storage__attach(skel); - if (CHECK(err, "attach", "lsm attach failed: %d\n", err)) + if (!ASSERT_OK(err, "attach")) goto close_prog; task_fd = sys_pidfd_open(getpid(), 0); - if (CHECK(task_fd < 0, "pidfd_open", - "failed to get pidfd err:%d, errno:%d", task_fd, errno)) + if (!ASSERT_GE(task_fd, 0, "pidfd_open")) goto close_prog; if (!check_syscall_operations(bpf_map__fd(skel->maps.task_storage_map), task_fd)) goto close_prog; - if (CHECK(!mkdtemp(tmp_dir_path), "mkdtemp", - "unable to create tmpdir: %d\n", errno)) + if (!ASSERT_OK_PTR(mkdtemp(tmp_dir_path), "mkdtemp")) goto close_prog; snprintf(tmp_exec_path, sizeof(tmp_exec_path), "%s/copy_of_rm", tmp_dir_path); snprintf(cmd, sizeof(cmd), "cp /bin/rm %s", tmp_exec_path); - if (CHECK_FAIL(system(cmd))) + if (!ASSERT_OK(system(cmd), "system(cp)")) goto close_prog_rmdir; rm_fd = open(tmp_exec_path, O_RDONLY); - if (CHECK(rm_fd < 0, "open", "failed to open %s err:%d, errno:%d", - tmp_exec_path, rm_fd, errno)) + if (!ASSERT_GE(rm_fd, 0, "open(tmp_exec_path)")) goto close_prog_rmdir; if (!check_syscall_operations(bpf_map__fd(skel->maps.inode_storage_map), @@ -145,7 +134,7 @@ void test_test_local_storage(void) * LSM program. */ err = run_self_unlink(&skel->bss->monitored_pid, tmp_exec_path); - if (CHECK(err != EPERM, "run_self_unlink", "err %d want EPERM\n", err)) + if (!ASSERT_EQ(err, EPERM, "run_self_unlink")) goto close_prog_rmdir; /* Set the process being monitored to be the current process */ @@ -156,18 +145,16 @@ void test_test_local_storage(void) */ snprintf(cmd, sizeof(cmd), "mv %s/copy_of_rm %s/check_null_ptr", tmp_dir_path, tmp_dir_path); - if (CHECK_FAIL(system(cmd))) + if (!ASSERT_OK(system(cmd), "system(mv)")) goto close_prog_rmdir; - CHECK(skel->data->inode_storage_result != 0, "inode_storage_result", - "inode_local_storage not set\n"); + ASSERT_EQ(skel->data->inode_storage_result, 0, "inode_storage_result"); serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); - if (CHECK(serv_sk < 0, "start_server", "failed to start server\n")) + if (!ASSERT_GE(serv_sk, 0, "start_server")) goto close_prog_rmdir; - CHECK(skel->data->sk_storage_result != 0, "sk_storage_result", - "sk_local_storage not set\n"); + ASSERT_EQ(skel->data->sk_storage_result, 0, "sk_storage_result"); if (!check_syscall_operations(bpf_map__fd(skel->maps.sk_storage_map), serv_sk)) -- cgit v1.2.3-70-g09d2 From 1f443d0f2b5702bad5f03aab544858ddd33999b7 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Tue, 7 Mar 2023 22:59:35 -0800 Subject: selftests/bpf: Check freeing sk->sk_local_storage with sk_local_storage->smap is NULL This patch tweaks the socket_bind bpf prog to test the local_storage->smap == NULL case in the bpf_local_storage_free() code path. The idea is to create the local_storage with the sk_storage_map's selem first. Then add the sk_storage_map2's selem and then delete the earlier sk_storage_map's selem.
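As a hedged, stand-alone sketch of that ordering (map and program names below are made up; the real change is to progs/local_storage.c and follows in the diff): the storage is created through the first map's selem, a second map's selem is attached, and deleting the first selem leaves local_storage->smap == NULL for bpf_local_storage_free() to handle when the socket goes away:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, int);
} map_a SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, int);
} map_b SEC(".maps");

SEC("lsm/socket_bind")
int BPF_PROG(sketch_bind, struct socket *sock, struct sockaddr *address, int addrlen)
{
	/* 1. the sk local storage is created through map_a's selem */
	if (!bpf_sk_storage_get(&map_a, sock->sk, 0, BPF_LOCAL_STORAGE_GET_F_CREATE))
		return 0;

	/* 2. a second selem, owned by map_b, is attached to the same storage */
	if (!bpf_sk_storage_get(&map_b, sock->sk, 0, BPF_LOCAL_STORAGE_GET_F_CREATE))
		return 0;

	/* 3. deleting map_a's selem leaves local_storage->smap == NULL; the
	 *    bpf_local_storage_free() path must cope with that when the
	 *    socket is released.
	 */
	bpf_sk_storage_delete(&map_a, sock->sk);
	return 0;
}

char _license[] SEC("license") = "GPL";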
Signed-off-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20230308065936.1550103-17-martin.lau@linux.dev Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/local_storage.c | 29 ++++++++++++++++------- 1 file changed, 20 insertions(+), 9 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c index 01c74bc870ae..c8ba7207f5a5 100644 --- a/tools/testing/selftests/bpf/progs/local_storage.c +++ b/tools/testing/selftests/bpf/progs/local_storage.c @@ -108,18 +108,17 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address, { __u32 pid = bpf_get_current_pid_tgid() >> 32; struct local_storage *storage; - int err; if (pid != monitored_pid) return 0; - storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0, - BPF_LOCAL_STORAGE_GET_F_CREATE); + storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0, 0); if (!storage) return 0; + sk_storage_result = -1; if (storage->value != DUMMY_STORAGE_VALUE) - sk_storage_result = -1; + return 0; /* This tests that we can associate multiple elements * with the local storage. @@ -129,14 +128,26 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address, if (!storage) return 0; - err = bpf_sk_storage_delete(&sk_storage_map, sock->sk); - if (err) + if (bpf_sk_storage_delete(&sk_storage_map2, sock->sk)) return 0; - err = bpf_sk_storage_delete(&sk_storage_map2, sock->sk); - if (!err) - sk_storage_result = err; + storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!storage) + return 0; + + if (bpf_sk_storage_delete(&sk_storage_map, sock->sk)) + return 0; + + /* Ensure that the sk_storage_map is disconnected from the storage. + * The storage memory should not be freed back to the + * bpf_mem_alloc of the sk_bpf_storage_map because + * sk_bpf_storage_map may have been gone. + */ + if (!sock->sk->sk_bpf_storage || sock->sk->sk_bpf_storage->smap) + return 0; + sk_storage_result = 0; return 0; } -- cgit v1.2.3-70-g09d2 From 4659fba121dac21a3516986a3c2cf8459c7ac3bc Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Tue, 7 Mar 2023 22:59:36 -0800 Subject: selftests/bpf: Add local-storage-create benchmark MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch tests how many kmallocs is needed to create and free a batch of UDP sockets and each socket has a 64bytes bpf storage. It also measures how fast the UDP sockets can be created. The result is from my qemu setup. Before bpf_mem_cache_alloc/free: ./bench -p 1 local-storage-create Setting up benchmark 'local-storage-create'... Benchmark 'local-storage-create' started. Iter 0 ( 73.193us): creates 213.552k/s (213.552k/prod), 3.09 kmallocs/create Iter 1 (-20.724us): creates 211.908k/s (211.908k/prod), 3.09 kmallocs/create Iter 2 ( 9.280us): creates 212.574k/s (212.574k/prod), 3.12 kmallocs/create Iter 3 ( 11.039us): creates 213.209k/s (213.209k/prod), 3.12 kmallocs/create Iter 4 (-11.411us): creates 213.351k/s (213.351k/prod), 3.12 kmallocs/create Iter 5 ( -7.915us): creates 214.754k/s (214.754k/prod), 3.12 kmallocs/create Iter 6 ( 11.317us): creates 210.942k/s (210.942k/prod), 3.12 kmallocs/create Summary: creates 212.789 ± 1.310k/s (212.789k/prod), 3.12 kmallocs/create After bpf_mem_cache_alloc/free: ./bench -p 1 local-storage-create Setting up benchmark 'local-storage-create'... Benchmark 'local-storage-create' started. 
Iter 0 ( 68.265us): creates 243.984k/s (243.984k/prod), 1.04 kmallocs/create Iter 1 ( 30.357us): creates 238.424k/s (238.424k/prod), 1.04 kmallocs/create Iter 2 (-18.712us): creates 232.963k/s (232.963k/prod), 1.04 kmallocs/create Iter 3 (-15.885us): creates 238.879k/s (238.879k/prod), 1.04 kmallocs/create Iter 4 ( 5.590us): creates 237.490k/s (237.490k/prod), 1.04 kmallocs/create Iter 5 ( 8.577us): creates 237.521k/s (237.521k/prod), 1.04 kmallocs/create Iter 6 ( -6.263us): creates 238.508k/s (238.508k/prod), 1.04 kmallocs/create Summary: creates 237.298 ± 2.198k/s (237.298k/prod), 1.04 kmallocs/create Signed-off-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20230308065936.1550103-18-martin.lau@linux.dev Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/Makefile | 2 + tools/testing/selftests/bpf/bench.c | 2 + .../bpf/benchs/bench_local_storage_create.c | 141 +++++++++++++++++++++ .../bpf/progs/bench_local_storage_create.c | 57 +++++++++ 4 files changed, 202 insertions(+) create mode 100644 tools/testing/selftests/bpf/benchs/bench_local_storage_create.c create mode 100644 tools/testing/selftests/bpf/progs/bench_local_storage_create.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 606e2d738dd8..55811c448eb7 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -639,6 +639,7 @@ $(OUTPUT)/bench_strncmp.o: $(OUTPUT)/strncmp_bench.skel.h $(OUTPUT)/bench_bpf_hashmap_full_update.o: $(OUTPUT)/bpf_hashmap_full_update_bench.skel.h $(OUTPUT)/bench_local_storage.o: $(OUTPUT)/local_storage_bench.skel.h $(OUTPUT)/bench_local_storage_rcu_tasks_trace.o: $(OUTPUT)/local_storage_rcu_tasks_trace_bench.skel.h +$(OUTPUT)/bench_local_storage_create.o: $(OUTPUT)/bench_local_storage_create.skel.h $(OUTPUT)/bench_bpf_hashmap_lookup.o: $(OUTPUT)/bpf_hashmap_lookup.skel.h $(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ) $(OUTPUT)/bench: LDLIBS += -lm @@ -656,6 +657,7 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \ $(OUTPUT)/bench_local_storage.o \ $(OUTPUT)/bench_local_storage_rcu_tasks_trace.o \ $(OUTPUT)/bench_bpf_hashmap_lookup.o \ + $(OUTPUT)/bench_local_storage_create.o \ # $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@ diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index 0b2a53bb8460..dc3827c1f139 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -515,6 +515,7 @@ extern const struct bench bench_local_storage_cache_interleaved_get; extern const struct bench bench_local_storage_cache_hashmap_control; extern const struct bench bench_local_storage_tasks_trace; extern const struct bench bench_bpf_hashmap_lookup; +extern const struct bench bench_local_storage_create; static const struct bench *benchs[] = { &bench_count_global, @@ -555,6 +556,7 @@ static const struct bench *benchs[] = { &bench_local_storage_cache_hashmap_control, &bench_local_storage_tasks_trace, &bench_bpf_hashmap_lookup, + &bench_local_storage_create, }; static void find_benchmark(void) diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c new file mode 100644 index 000000000000..f8b2a640ccbe --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include + +#include "bench.h" +#include "bench_local_storage_create.skel.h" + +#define BATCH_SZ 32 + +struct thread { + int fds[BATCH_SZ]; +}; + +static struct bench_local_storage_create *skel; +static struct thread *threads; +static long socket_errs; + +static void validate(void) +{ + if (env.consumer_cnt > 1) { + fprintf(stderr, + "local-storage-create benchmark does not need consumer\n"); + exit(1); + } +} + +static void setup(void) +{ + skel = bench_local_storage_create__open_and_load(); + if (!skel) { + fprintf(stderr, "error loading skel\n"); + exit(1); + } + + skel->bss->bench_pid = getpid(); + + if (!bpf_program__attach(skel->progs.socket_post_create)) { + fprintf(stderr, "Error attaching bpf program\n"); + exit(1); + } + + if (!bpf_program__attach(skel->progs.kmalloc)) { + fprintf(stderr, "Error attaching bpf program\n"); + exit(1); + } + + threads = calloc(env.producer_cnt, sizeof(*threads)); + + if (!threads) { + fprintf(stderr, "cannot alloc thread_res\n"); + exit(1); + } +} + +static void measure(struct bench_res *res) +{ + res->hits = atomic_swap(&skel->bss->create_cnts, 0); + res->drops = atomic_swap(&skel->bss->kmalloc_cnts, 0); +} + +static void *consumer(void *input) +{ + return NULL; +} + +static void *producer(void *input) +{ + struct thread *t = &threads[(long)(input)]; + int *fds = t->fds; + int i; + + while (true) { + for (i = 0; i < BATCH_SZ; i++) { + fds[i] = socket(AF_INET6, SOCK_DGRAM, 0); + if (fds[i] == -1) + atomic_inc(&socket_errs); + } + + for (i = 0; i < BATCH_SZ; i++) { + if (fds[i] != -1) + close(fds[i]); + } + } + + return NULL; +} + +static void report_progress(int iter, struct bench_res *res, long delta_ns) +{ + double creates_per_sec, kmallocs_per_create; + + creates_per_sec = res->hits / 1000.0 / (delta_ns / 1000000000.0); + kmallocs_per_create = (double)res->drops / res->hits; + + printf("Iter %3d (%7.3lfus): ", + iter, (delta_ns - 1000000000) / 1000.0); + printf("creates %8.3lfk/s (%7.3lfk/prod), ", + creates_per_sec, creates_per_sec / env.producer_cnt); + printf("%3.2lf kmallocs/create\n", kmallocs_per_create); +} + +static void report_final(struct bench_res res[], int res_cnt) +{ + double creates_mean = 0.0, creates_stddev = 0.0; + long total_creates = 0, total_kmallocs = 0; + int i; + + for (i = 0; i < res_cnt; i++) { + creates_mean += res[i].hits / 1000.0 / (0.0 + res_cnt); + total_creates += res[i].hits; + total_kmallocs += res[i].drops; + } + + if (res_cnt > 1) { + for (i = 0; i < res_cnt; i++) + creates_stddev += (creates_mean - res[i].hits / 1000.0) * + (creates_mean - res[i].hits / 1000.0) / + (res_cnt - 1.0); + creates_stddev = sqrt(creates_stddev); + } + printf("Summary: creates %8.3lf \u00B1 %5.3lfk/s (%7.3lfk/prod), ", + creates_mean, creates_stddev, creates_mean / env.producer_cnt); + printf("%4.2lf kmallocs/create\n", (double)total_kmallocs / total_creates); + if (socket_errs || skel->bss->create_errs) + printf("socket() errors %ld create_errs %ld\n", socket_errs, + skel->bss->create_errs); +} + +/* Benchmark performance of creating bpf local storage */ +const struct bench bench_local_storage_create = { + .name = "local-storage-create", + .validate = validate, + .setup = setup, + .producer_thread = producer, + .consumer_thread = consumer, + .measure = measure, + .report_progress = report_progress, + .report_final = report_final, +}; diff --git a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c new file mode 100644 index 
000000000000..2814bab54d28 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include "bpf_tracing_net.h" +#include +#include + +long create_errs = 0; +long create_cnts = 0; +long kmalloc_cnts = 0; +__u32 bench_pid = 0; + +struct storage { + __u8 data[64]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct storage); +} sk_storage_map SEC(".maps"); + +SEC("raw_tp/kmalloc") +int BPF_PROG(kmalloc, unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, + int node) +{ + __sync_fetch_and_add(&kmalloc_cnts, 1); + + return 0; +} + +SEC("lsm.s/socket_post_create") +int BPF_PROG(socket_post_create, struct socket *sock, int family, int type, + int protocol, int kern) +{ + struct storage *stg; + __u32 pid; + + pid = bpf_get_current_pid_tgid() >> 32; + if (pid != bench_pid) + return 0; + + stg = bpf_sk_storage_get(&sk_storage_map, sock->sk, NULL, + BPF_LOCAL_STORAGE_GET_F_CREATE); + + if (stg) + __sync_fetch_and_add(&create_cnts, 1); + else + __sync_fetch_and_add(&create_errs, 1); + + return 0; +} + +char __license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From 5d8d6634cccf1ebd0db4e220e52e7128b030c7b4 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Fri, 10 Mar 2023 15:07:43 -0800 Subject: selftests/bpf: Add local kptr stashing test Add a new selftest, local_kptr_stash, which uses bpf_kptr_xchg to stash a bpf_obj_new-allocated object in a map. Test the following scenarios: * Stash two rb_nodes in an arraymap, don't unstash them, rely on map free to destruct them * Stash two rb_nodes in an arraymap, unstash the second one in a separate program, rely on map free to destruct first Signed-off-by: Dave Marchevsky Link: https://lore.kernel.org/r/20230310230743.2320707-4-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/local_kptr_stash.c | 60 ++++++++++++ .../testing/selftests/bpf/progs/local_kptr_stash.c | 108 +++++++++++++++++++++ 2 files changed, 168 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c create mode 100644 tools/testing/selftests/bpf/progs/local_kptr_stash.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c new file mode 100644 index 000000000000..76f1da877f81 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include + +#include "local_kptr_stash.skel.h" +static void test_local_kptr_stash_simple(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct local_kptr_stash *skel; + int ret; + + skel = local_kptr_stash__open_and_load(); + if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_rb_nodes), &opts); + ASSERT_OK(ret, "local_kptr_stash_add_nodes run"); + ASSERT_OK(opts.retval, "local_kptr_stash_add_nodes retval"); + + local_kptr_stash__destroy(skel); +} + +static void test_local_kptr_stash_unstash(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct local_kptr_stash *skel; + int ret; + + skel = local_kptr_stash__open_and_load(); + if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_rb_nodes), &opts); + ASSERT_OK(ret, "local_kptr_stash_add_nodes run"); + ASSERT_OK(opts.retval, "local_kptr_stash_add_nodes retval"); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.unstash_rb_node), &opts); + ASSERT_OK(ret, "local_kptr_stash_add_nodes run"); + ASSERT_EQ(opts.retval, 42, "local_kptr_stash_add_nodes retval"); + + local_kptr_stash__destroy(skel); +} + +void test_local_kptr_stash_success(void) +{ + if (test__start_subtest("local_kptr_stash_simple")) + test_local_kptr_stash_simple(); + if (test__start_subtest("local_kptr_stash_unstash")) + test_local_kptr_stash_unstash(); +} diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c new file mode 100644 index 000000000000..0ef286da092b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include +#include +#include "bpf_experimental.h" + +struct node_data { + long key; + long data; + struct bpf_rb_node node; +}; + +struct map_value { + struct prog_test_ref_kfunc *not_kptr; + struct prog_test_ref_kfunc __kptr *val; + struct node_data __kptr *node; +}; + +/* This is necessary so that LLVM generates BTF for node_data struct + * If it's not included, a fwd reference for node_data will be generated but + * no struct. 
Example BTF of "node" field in map_value when not included: + * + * [10] PTR '(anon)' type_id=35 + * [34] FWD 'node_data' fwd_kind=struct + * [35] TYPE_TAG 'kptr_ref' type_id=34 + * + * (with no node_data struct defined) + * Had to do the same w/ bpf_kfunc_call_test_release below + */ +struct node_data *just_here_because_btf_bug; + +extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 2); +} some_nodes SEC(".maps"); + +static int create_and_stash(int idx, int val) +{ + struct map_value *mapval; + struct node_data *res; + + mapval = bpf_map_lookup_elem(&some_nodes, &idx); + if (!mapval) + return 1; + + res = bpf_obj_new(typeof(*res)); + if (!res) + return 1; + res->key = val; + + res = bpf_kptr_xchg(&mapval->node, res); + if (res) + bpf_obj_drop(res); + return 0; +} + +SEC("tc") +long stash_rb_nodes(void *ctx) +{ + return create_and_stash(0, 41) ?: create_and_stash(1, 42); +} + +SEC("tc") +long unstash_rb_node(void *ctx) +{ + struct map_value *mapval; + struct node_data *res; + long retval; + int key = 1; + + mapval = bpf_map_lookup_elem(&some_nodes, &key); + if (!mapval) + return 1; + + res = bpf_kptr_xchg(&mapval->node, NULL); + if (res) { + retval = res->key; + bpf_obj_drop(res); + return retval; + } + return 1; +} + +SEC("tc") +long stash_test_ref_kfunc(void *ctx) +{ + struct prog_test_ref_kfunc *res; + struct map_value *mapval; + int key = 0; + + mapval = bpf_map_lookup_elem(&some_nodes, &key); + if (!mapval) + return 1; + + res = bpf_kptr_xchg(&mapval->val, NULL); + if (res) + bpf_kfunc_call_test_release(res); + return 0; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From c66b2111c9c952f3bbf454a755768e80308cc6e2 Mon Sep 17 00:00:00 2001 From: Pedro Tammela Date: Thu, 9 Mar 2023 14:55:54 -0300 Subject: selftests: tc-testing: add tests for action binding Add tests that check if filters can bind actions, that is create an action independently and then bind to a filter. 
tdc-tests under category 'infra': 1..18 ok 1 abdc - Reference pedit action object in filter ok 2 7a70 - Reference mpls action object in filter ok 3 d241 - Reference bpf action object in filter ok 4 383a - Reference connmark action object in filter ok 5 c619 - Reference csum action object in filter ok 6 a93d - Reference ct action object in filter ok 7 8bb5 - Reference ctinfo action object in filter ok 8 2241 - Reference gact action object in filter ok 9 35e9 - Reference gate action object in filter ok 10 b22e - Reference ife action object in filter ok 11 ef74 - Reference mirred action object in filter ok 12 2c81 - Reference nat action object in filter ok 13 ac9d - Reference police action object in filter ok 14 68be - Reference sample action object in filter ok 15 cf01 - Reference skbedit action object in filter ok 16 c109 - Reference skbmod action object in filter ok 17 4abc - Reference tunnel_key action object in filter ok 18 dadd - Reference vlan action object in filter Reviewed-by: Jamal Hadi Salim Signed-off-by: Pedro Tammela Link: https://lore.kernel.org/r/20230309175554.304824-1-pctammela@mojatatu.com Signed-off-by: Jakub Kicinski --- .../tc-testing/tc-tests/infra/actions.json | 416 +++++++++++++++++++++ 1 file changed, 416 insertions(+) create mode 100644 tools/testing/selftests/tc-testing/tc-tests/infra/actions.json (limited to 'tools/testing') diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/actions.json b/tools/testing/selftests/tc-testing/tc-tests/infra/actions.json new file mode 100644 index 000000000000..16f3a83605e4 --- /dev/null +++ b/tools/testing/selftests/tc-testing/tc-tests/infra/actions.json @@ -0,0 +1,416 @@ +[ + { + "id": "abdc", + "name": "Reference pedit action object in filter", + "category": [ + "infra", + "pedit" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC action add action pedit munge offset 0 u8 clear index 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action pedit index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action pedit" + ] + }, + { + "id": "7a70", + "name": "Reference mpls action object in filter", + "category": [ + "infra", + "mpls" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC action add action mpls pop protocol ipv4 index 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action mpls index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action mpls" + ] + }, + { + "id": "d241", + "name": "Reference bpf action object in filter", + "category": [ + "infra", + "bpf" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC action add action bpf bytecode '4,40 0 0 12,21 0 1 2048,6 0 0 262144,6 0 0 0' index 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY 
parent ffff: handle 0x1 prio 1 protocol ip matchall action bpf index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action bpf" + ] + }, + { + "id": "383a", + "name": "Reference connmark action object in filter", + "category": [ + "infra", + "connmark" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions add action connmark" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action connmark index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action connmark" + ] + }, + { + "id": "c619", + "name": "Reference csum action object in filter", + "category": [ + "infra", + "csum" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions add action csum ip4h index 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action csum index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action csum" + ] + }, + { + "id": "a93d", + "name": "Reference ct action object in filter", + "category": [ + "infra", + "ct" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions add action ct index 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action ct index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action ct" + ] + }, + { + "id": "8bb5", + "name": "Reference ctinfo action object in filter", + "category": [ + "infra", + "ctinfo" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC action add action ctinfo index 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action ctinfo index 10", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action ctinfo" + ] + }, + { + "id": "2241", + "name": "Reference gact action object in filter", + "category": [ + "infra", + "gact" + ], + "setup": [ + "$IP link add dev 
$DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions add action pass index 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action gact index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action gact" + ] + }, + { + "id": "35e9", + "name": "Reference gate action object in filter", + "category": [ + "infra", + "gate" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC action add action gate priority 1 sched-entry close 100000000ns index 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action gate index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action gate" + ] + }, + { + "id": "b22e", + "name": "Reference ife action object in filter", + "category": [ + "infra", + "ife" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions add action ife encode allow mark pass index 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action ife index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action ife" + ] + }, + { + "id": "ef74", + "name": "Reference mirred action object in filter", + "category": [ + "infra", + "mirred" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions add action mirred egress mirror index 1 dev lo" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action mirred index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action mirred" + ] + }, + { + "id": "2c81", + "name": "Reference nat action object in filter", + "category": [ + "infra", + "nat" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions add action nat ingress 192.168.1.1 200.200.200.1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action nat index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc 
del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action nat" + ] + }, + { + "id": "ac9d", + "name": "Reference police action object in filter", + "category": [ + "infra", + "police" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions add action police rate 1kbit burst 10k index 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action police index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action police" + ] + }, + { + "id": "68be", + "name": "Reference sample action object in filter", + "category": [ + "infra", + "sample" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions add action sample rate 10 group 1 index 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action sample index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action sample" + ] + }, + { + "id": "cf01", + "name": "Reference skbedit action object in filter", + "category": [ + "infra", + "skbedit" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions add action skbedit mark 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action skbedit index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action skbedit" + ] + }, + { + "id": "c109", + "name": "Reference skbmod action object in filter", + "category": [ + "infra", + "skbmod" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions add action skbmod set dmac 11:22:33:44:55:66 index 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action skbmod index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action skbmod" + ] + }, + { + "id": "4abc", + "name": "Reference tunnel_key action object in filter", + "category": [ + "infra", + "tunnel_key" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 id 1 index 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: 
handle 0x1 prio 1 protocol ip matchall action tunnel_key index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action tunnel_key" + ] + }, + { + "id": "dadd", + "name": "Reference vlan action object in filter", + "category": [ + "infra", + "tunnel_key" + ], + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY ingress", + "$TC actions add action vlan pop pipe index 1" + ], + "cmdUnderTest": "$TC filter add dev $DUMMY parent ffff: handle 0x1 prio 1 protocol ip matchall action vlan index 1", + "expExitCode": "0", + "verifyCmd": "$TC filter get dev $DUMMY parent ffff: handle 1 prio 1 protocol ip matchall", + "matchPattern": "^filter parent ffff: protocol ip pref 1 matchall.*handle 0x1.*", + "matchCount": "1", + "teardown": [ + "$TC qdisc del dev $DUMMY ingress", + "$IP link del dev $DUMMY type dummy", + "$TC actions flush action vlan" + ] + } +] -- cgit v1.2.3-70-g09d2 From ab4c15feb2ebcf9f4abe31457d7cbc8f3de9c2ab Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Mon, 13 Mar 2023 14:56:28 -0600 Subject: selftests/bpf: use canonical ftrace path The canonical location for the tracefs filesystem is at /sys/kernel/tracing. But, from Documentation/trace/ftrace.rst: Before 4.1, all ftrace tracing control files were within the debugfs file system, which is typically located at /sys/kernel/debug/tracing. For backward compatibility, when mounting the debugfs file system, the tracefs file system will be automatically mounted at: /sys/kernel/debug/tracing Many tests in the bpf selftest code still refer to this older debugfs path, so let's update them to avoid confusion. Signed-off-by: Ross Zwisler Acked-by: Michael S. 
Tsirkin Reviewed-by: Steven Rostedt (Google) Link: https://lore.kernel.org/r/20230313205628.1058720-3-zwisler@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/get_cgroup_id_user.c | 9 +++++++-- tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c | 7 ++++++- tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c | 9 +++++++-- tools/testing/selftests/bpf/prog_tests/tp_attach_query.c | 9 +++++++-- tools/testing/selftests/bpf/prog_tests/trace_printk.c | 10 +++++++--- tools/testing/selftests/bpf/prog_tests/trace_vprintk.c | 10 +++++++--- tools/testing/selftests/bpf/progs/test_stacktrace_map.c | 2 +- tools/testing/selftests/bpf/progs/test_tracepoint.c | 2 +- tools/testing/selftests/bpf/test_ftrace.sh | 7 ++++++- tools/testing/selftests/bpf/test_tunnel.sh | 13 +++++++++---- tools/testing/selftests/bpf/trace_helpers.c | 8 ++++++-- 11 files changed, 64 insertions(+), 22 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/get_cgroup_id_user.c b/tools/testing/selftests/bpf/get_cgroup_id_user.c index 156743cf5870..aefd83ebdcd7 100644 --- a/tools/testing/selftests/bpf/get_cgroup_id_user.c +++ b/tools/testing/selftests/bpf/get_cgroup_id_user.c @@ -86,8 +86,13 @@ int main(int argc, char **argv) pid = getpid(); bpf_map_update_elem(pidmap_fd, &key, &pid, 0); - snprintf(buf, sizeof(buf), - "/sys/kernel/debug/tracing/events/%s/id", probe_name); + if (access("/sys/kernel/tracing/trace", F_OK) == 0) { + snprintf(buf, sizeof(buf), + "/sys/kernel/tracing/events/%s/id", probe_name); + } else { + snprintf(buf, sizeof(buf), + "/sys/kernel/debug/tracing/events/%s/id", probe_name); + } efd = open(buf, O_RDONLY, 0); if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) goto close_prog; diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c index 113dba349a57..22be0a9a5a0a 100644 --- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -338,7 +338,12 @@ static int get_syms(char ***symsp, size_t *cntp, bool kernel) * Filtering out duplicates by using hashmap__add, which won't * add existing entry. 
*/ - f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r"); + + if (access("/sys/kernel/tracing/trace", F_OK) == 0) + f = fopen("/sys/kernel/tracing/available_filter_functions", "r"); + else + f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r"); + if (!f) return -EINVAL; diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c index c717741bf8b6..c91eda624657 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c @@ -17,8 +17,13 @@ static void test_task_fd_query_tp_core(const char *probe_name, if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno)) goto close_prog; - snprintf(buf, sizeof(buf), - "/sys/kernel/debug/tracing/events/%s/id", probe_name); + if (access("/sys/kernel/tracing/trace", F_OK) == 0) { + snprintf(buf, sizeof(buf), + "/sys/kernel/tracing/events/%s/id", probe_name); + } else { + snprintf(buf, sizeof(buf), + "/sys/kernel/debug/tracing/events/%s/id", probe_name); + } efd = open(buf, O_RDONLY, 0); if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) goto close_prog; diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c index 770fcc3bb1ba..655d69f0ff0b 100644 --- a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c +++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c @@ -16,8 +16,13 @@ void serial_test_tp_attach_query(void) for (i = 0; i < num_progs; i++) obj[i] = NULL; - snprintf(buf, sizeof(buf), - "/sys/kernel/debug/tracing/events/sched/sched_switch/id"); + if (access("/sys/kernel/tracing/trace", F_OK) == 0) { + snprintf(buf, sizeof(buf), + "/sys/kernel/tracing/events/sched/sched_switch/id"); + } else { + snprintf(buf, sizeof(buf), + "/sys/kernel/debug/tracing/events/sched/sched_switch/id"); + } efd = open(buf, O_RDONLY, 0); if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c index cade7f12315f..7b9124d506a5 100644 --- a/tools/testing/selftests/bpf/prog_tests/trace_printk.c +++ b/tools/testing/selftests/bpf/prog_tests/trace_printk.c @@ -5,7 +5,8 @@ #include "trace_printk.lskel.h" -#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe" +#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe" +#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe" #define SEARCHMSG "testing,testing" void serial_test_trace_printk(void) @@ -34,8 +35,11 @@ void serial_test_trace_printk(void) if (!ASSERT_OK(err, "trace_printk__attach")) goto cleanup; - fp = fopen(TRACEBUF, "r"); - if (!ASSERT_OK_PTR(fp, "fopen(TRACEBUF)")) + if (access(TRACEFS_PIPE, F_OK) == 0) + fp = fopen(TRACEFS_PIPE, "r"); + else + fp = fopen(DEBUGFS_PIPE, "r"); + if (!ASSERT_OK_PTR(fp, "fopen(TRACE_PIPE)")) goto cleanup; /* We do not want to wait forever if this test fails... 
*/ diff --git a/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c index 7a4e313e8558..44ea2fd88f4c 100644 --- a/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c +++ b/tools/testing/selftests/bpf/prog_tests/trace_vprintk.c @@ -5,7 +5,8 @@ #include "trace_vprintk.lskel.h" -#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe" +#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe" +#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe" #define SEARCHMSG "1,2,3,4,5,6,7,8,9,10" void serial_test_trace_vprintk(void) @@ -27,8 +28,11 @@ void serial_test_trace_vprintk(void) if (!ASSERT_OK(err, "trace_vprintk__attach")) goto cleanup; - fp = fopen(TRACEBUF, "r"); - if (!ASSERT_OK_PTR(fp, "fopen(TRACEBUF)")) + if (access(TRACEFS_PIPE, F_OK) == 0) + fp = fopen(TRACEFS_PIPE, "r"); + else + fp = fopen(DEBUGFS_PIPE, "r"); + if (!ASSERT_OK_PTR(fp, "fopen(TRACE_PIPE)")) goto cleanup; /* We do not want to wait forever if this test fails... */ diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c index 728dbd39eff0..47568007b668 100644 --- a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c +++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c @@ -38,7 +38,7 @@ struct { __type(value, stack_trace_t); } stack_amap SEC(".maps"); -/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */ +/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */ struct sched_switch_args { unsigned long long pad; char prev_comm[TASK_COMM_LEN]; diff --git a/tools/testing/selftests/bpf/progs/test_tracepoint.c b/tools/testing/selftests/bpf/progs/test_tracepoint.c index 43bd7a20cc50..4cb8bbb6a320 100644 --- a/tools/testing/selftests/bpf/progs/test_tracepoint.c +++ b/tools/testing/selftests/bpf/progs/test_tracepoint.c @@ -4,7 +4,7 @@ #include #include -/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */ +/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */ struct sched_switch_args { unsigned long long pad; char prev_comm[TASK_COMM_LEN]; diff --git a/tools/testing/selftests/bpf/test_ftrace.sh b/tools/testing/selftests/bpf/test_ftrace.sh index 20de7bb873bc..f5109eb0e951 100755 --- a/tools/testing/selftests/bpf/test_ftrace.sh +++ b/tools/testing/selftests/bpf/test_ftrace.sh @@ -1,6 +1,11 @@ #!/bin/bash -TR=/sys/kernel/debug/tracing/ +if [[ -e /sys/kernel/tracing/trace ]]; then + TR=/sys/kernel/tracing/ +else + TR=/sys/kernel/debug/tracing/ +fi + clear_trace() { # reset trace output echo > $TR/trace } diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh index 06857b689c11..2dec7dbf29a2 100755 --- a/tools/testing/selftests/bpf/test_tunnel.sh +++ b/tools/testing/selftests/bpf/test_tunnel.sh @@ -571,8 +571,13 @@ setup_xfrm_tunnel() test_xfrm_tunnel() { + if [[ -e /sys/kernel/tracing/trace ]]; then + TRACE=/sys/kernel/tracing/trace + else + TRACE=/sys/kernel/debug/tracing/trace + fi config_device - > /sys/kernel/debug/tracing/trace + > ${TRACE} setup_xfrm_tunnel mkdir -p ${BPF_PIN_TUNNEL_DIR} bpftool prog loadall ${BPF_FILE} ${BPF_PIN_TUNNEL_DIR} @@ -581,11 +586,11 @@ test_xfrm_tunnel() ${BPF_PIN_TUNNEL_DIR}/xfrm_get_state ip netns exec at_ns0 ping $PING_ARG 10.1.1.200 sleep 1 - grep "reqid 1" /sys/kernel/debug/tracing/trace + grep "reqid 1" ${TRACE} check_err $? - grep "spi 0x1" /sys/kernel/debug/tracing/trace + grep "spi 0x1" ${TRACE} check_err $? 
- grep "remote ip 0xac100164" /sys/kernel/debug/tracing/trace + grep "remote ip 0xac100164" ${TRACE} check_err $? cleanup diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c index 09a16a77bae4..934bf28fc888 100644 --- a/tools/testing/selftests/bpf/trace_helpers.c +++ b/tools/testing/selftests/bpf/trace_helpers.c @@ -12,7 +12,8 @@ #include #include "trace_helpers.h" -#define DEBUGFS "/sys/kernel/debug/tracing/" +#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe" +#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe" #define MAX_SYMS 300000 static struct ksym syms[MAX_SYMS]; @@ -136,7 +137,10 @@ void read_trace_pipe(void) { int trace_fd; - trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0); + if (access(TRACEFS_PIPE, F_OK) == 0) + trace_fd = open(TRACEFS_PIPE, O_RDONLY, 0); + else + trace_fd = open(DEBUGFS_PIPE, O_RDONLY, 0); if (trace_fd < 0) return; -- cgit v1.2.3-70-g09d2 From f25fd6088216bd257902e5c212177cddcb291218 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 13 Mar 2023 16:58:45 -0700 Subject: selftests/bpf: Add various tests to check helper access into ptr_to_btf_id. Add various tests to check helper access into ptr_to_btf_id. Signed-off-by: Alexei Starovoitov Acked-by: David Vernet Link: https://lore.kernel.org/r/20230313235845.61029-4-alexei.starovoitov@gmail.com Signed-off-by: Martin KaFai Lau --- .../selftests/bpf/progs/task_kfunc_failure.c | 36 ++++++++++++++++++++++ .../selftests/bpf/progs/task_kfunc_success.c | 4 +++ 2 files changed, 40 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c index 002c7f69e47f..27994d6b2914 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c @@ -301,3 +301,39 @@ int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task) bpf_task_release(acquired); return 0; } + +SEC("tp_btf/task_newtask") +__failure __msg("access beyond the end of member comm") +int BPF_PROG(task_access_comm1, struct task_struct *task, u64 clone_flags) +{ + bpf_strncmp(task->comm, 17, "foo"); + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("access beyond the end of member comm") +int BPF_PROG(task_access_comm2, struct task_struct *task, u64 clone_flags) +{ + bpf_strncmp(task->comm + 1, 16, "foo"); + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("write into memory") +int BPF_PROG(task_access_comm3, struct task_struct *task, u64 clone_flags) +{ + bpf_probe_read_kernel(task->comm, 16, task->comm); + return 0; +} + +SEC("fentry/__set_task_comm") +__failure __msg("R1 type=ptr_ expected") +int BPF_PROG(task_access_comm4, struct task_struct *task, const char *buf, bool exec) +{ + /* + * task->comm is a legacy ptr_to_btf_id. The verifier cannot guarantee + * its safety. Hence it cannot be accessed with normal load insns. 
+ */ + bpf_strncmp(task->comm, 16, "foo"); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c index aebc4bb14e7d..4f61596b0242 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_success.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c @@ -207,6 +207,10 @@ int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_fla if (!is_test_kfunc_task()) return 0; + bpf_strncmp(task->comm, 12, "foo"); + bpf_strncmp(task->comm, 16, "foo"); + bpf_strncmp(&task->comm[8], 4, "foo"); + if (is_pid_lookup_valid(-1)) { err = 1; return 0; -- cgit v1.2.3-70-g09d2 From 487deb3e3393cccff0f148c4703efb185d46e314 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 13 Mar 2023 22:55:50 +0100 Subject: selftests/bpf: robustify test_xdp_do_redirect with more payload magics Currently, the test relies on that only dropped ("xmitted") frames will be recycled and if a frame became an skb, it will be freed later by the stack and never come back to its page_pool. So, it easily gets broken by trying to recycle skbs[0]: test_xdp_do_redirect:PASS:pkt_count_xdp 0 nsec test_xdp_do_redirect:FAIL:pkt_count_zero unexpected pkt_count_zero: actual 9936 != expected 2 test_xdp_do_redirect:PASS:pkt_count_tc 0 nsec That huge mismatch happened because after the TC ingress hook zeroes the magic, the page gets recycled when skb is freed, not returned to the MM layer. "Live frames" mode initializes only new pages and keeps the recycled ones as is by design, so they appear with zeroed magic on the Rx path again. Expand the possible magic values from two: 0 (was "xmitted"/dropped or did hit the TC hook) and 0x42 (hit the input XDP prog) to three: the new one will mark frames hit the TC hook, so that they will elide both @pkt_count_zero and @pkt_count_xdp. They can then be recycled to their page_pool or returned to the page allocator, this won't affect the counters anyhow. Just make sure to mark them as "input" (0x42) when they appear on the Rx path again. Also make an enum from those magics, so that they will be always visible and can be changed in just one place anytime. This also eases adding any new marks later on. Link: https://github.com/kernel-patches/bpf/actions/runs/4386538411/jobs/7681081789 Signed-off-by: Alexander Lobakin Link: https://lore.kernel.org/r/20230313215553.1045175-2-aleksander.lobakin@intel.com Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/progs/test_xdp_do_redirect.c | 36 ++++++++++++++++------ 1 file changed, 26 insertions(+), 10 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c index 77a123071940..cd2d4e3258b8 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c @@ -4,6 +4,19 @@ #define ETH_ALEN 6 #define HDR_SZ (sizeof(struct ethhdr) + sizeof(struct ipv6hdr) + sizeof(struct udphdr)) + +/** + * enum frame_mark - magics to distinguish page/packet paths + * @MARK_XMIT: page was recycled due to the frame being "xmitted" by the NIC. + * @MARK_IN: frame is being processed by the input XDP prog. + * @MARK_SKB: frame did hit the TC ingress hook as an skb. 
+ */ +enum frame_mark { + MARK_XMIT = 0U, + MARK_IN = 0x42, + MARK_SKB = 0x45, +}; + const volatile int ifindex_out; const volatile int ifindex_in; const volatile __u8 expect_dst[ETH_ALEN]; @@ -34,10 +47,10 @@ int xdp_redirect(struct xdp_md *xdp) if (*metadata != 0x42) return XDP_ABORTED; - if (*payload == 0) { - *payload = 0x42; + if (*payload == MARK_XMIT) pkts_seen_zero++; - } + + *payload = MARK_IN; if (bpf_xdp_adjust_meta(xdp, 4)) return XDP_ABORTED; @@ -51,7 +64,7 @@ int xdp_redirect(struct xdp_md *xdp) return ret; } -static bool check_pkt(void *data, void *data_end) +static bool check_pkt(void *data, void *data_end, const __u32 mark) { struct ipv6hdr *iph = data + sizeof(struct ethhdr); __u8 *payload = data + HDR_SZ; @@ -59,13 +72,13 @@ static bool check_pkt(void *data, void *data_end) if (payload + 1 > data_end) return false; - if (iph->nexthdr != IPPROTO_UDP || *payload != 0x42) + if (iph->nexthdr != IPPROTO_UDP || *payload != MARK_IN) return false; /* reset the payload so the same packet doesn't get counted twice when * it cycles back through the kernel path and out the dst veth */ - *payload = 0; + *payload = mark; return true; } @@ -75,11 +88,11 @@ int xdp_count_pkts(struct xdp_md *xdp) void *data = (void *)(long)xdp->data; void *data_end = (void *)(long)xdp->data_end; - if (check_pkt(data, data_end)) + if (check_pkt(data, data_end, MARK_XMIT)) pkts_seen_xdp++; - /* Return XDP_DROP to make sure the data page is recycled, like when it - * exits a physical NIC. Recycled pages will be counted in the + /* Return %XDP_DROP to recycle the data page with %MARK_XMIT, like + * it exited a physical NIC. Those pages will be counted in the * pkts_seen_zero counter above. */ return XDP_DROP; @@ -91,9 +104,12 @@ int tc_count_pkts(struct __sk_buff *skb) void *data = (void *)(long)skb->data; void *data_end = (void *)(long)skb->data_end; - if (check_pkt(data, data_end)) + if (check_pkt(data, data_end, MARK_SKB)) pkts_seen_tc++; + /* Will be either recycled or freed, %MARK_SKB makes sure it won't + * hit any of the counters above. + */ return 0; } -- cgit v1.2.3-70-g09d2 From 3c2611bac08a834697be918ac357eaff2e47d5b3 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 14 Mar 2023 15:28:11 -0700 Subject: selftests/bpf: Fix trace_virtqueue_add_sgs test issue with LLVM 17. LLVM commit https://reviews.llvm.org/D143726 introduced hoistMinMax optimization that transformed (i < VIRTIO_MAX_SGS) && (i < out_sgs) into i < MIN(VIRTIO_MAX_SGS, out_sgs) and caused the verifier to stop recognizing such loop as bounded. Which resulted in the following test failure: libbpf: prog 'trace_virtqueue_add_sgs': BPF program load failed: Bad address libbpf: prog 'trace_virtqueue_add_sgs': -- BEGIN PROG LOAD LOG -- The sequence of 8193 jumps is too complex. verification time 789206 usec stack depth 56 processed 156446 insns (limit 1000000) max_states_per_insn 7 total_states 1746 peak_states 1701 mark_read 12 -- END PROG LOAD LOG -- libbpf: prog 'trace_virtqueue_add_sgs': failed to load: -14 libbpf: failed to load object 'loop6.bpf.o' Workaround the verifier limitation for now with inline asm that prevents this particular optimization. 
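For reference, a minimal sketch of the workaround pattern. The __sink() definition below is an assumption about what bpf_misc.h provides, and count_iters() is a made-up stand-in for the scatterlist walk in loop6.c:

#define VIRTIO_MAX_SGS	4

/* Assumed shape of the helper pulled in from bpf_misc.h: an empty asm
 * statement that marks the variable as read and written, so the compiler
 * cannot treat it as loop-invariant.
 */
#define __sink(expr) asm volatile("" : "+g"(expr))

static int count_iters(int out_sgs)
{
	int i, n = 0;

	for (i = 0; (i < VIRTIO_MAX_SGS) && (i < out_sgs); i++) {
		/* Without this barrier, LLVM 17 may rewrite the condition
		 * as i < min(VIRTIO_MAX_SGS, out_sgs) and the verifier no
		 * longer sees a bounded loop.
		 */
		__sink(out_sgs);
		n++;
	}
	return n;
}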
Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/loop6.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/loop6.c b/tools/testing/selftests/bpf/progs/loop6.c index 38de0331e6b4..e4ff97fbcce1 100644 --- a/tools/testing/selftests/bpf/progs/loop6.c +++ b/tools/testing/selftests/bpf/progs/loop6.c @@ -5,6 +5,7 @@ #include #include #include +#include "bpf_misc.h" char _license[] SEC("license") = "GPL"; @@ -76,6 +77,7 @@ int BPF_KPROBE(trace_virtqueue_add_sgs, void *unused, struct scatterlist **sgs, return 0; for (i = 0; (i < VIRTIO_MAX_SGS) && (i < out_sgs); i++) { + __sink(out_sgs); for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX); sgp = __sg_next(sgp)) { bpf_probe_read_kernel(&len, sizeof(len), &sgp->length); @@ -85,6 +87,7 @@ int BPF_KPROBE(trace_virtqueue_add_sgs, void *unused, struct scatterlist **sgs, } for (i = 0; (i < VIRTIO_MAX_SGS) && (i < in_sgs); i++) { + __sink(in_sgs); for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX); sgp = __sg_next(sgp)) { bpf_probe_read_kernel(&len, sizeof(len), &sgp->length); -- cgit v1.2.3-70-g09d2 From aa3d65de4b9004d799f97700751a86d3ebd7d5f9 Mon Sep 17 00:00:00 2001 From: Viktor Malik Date: Fri, 10 Mar 2023 08:41:00 +0100 Subject: bpf/selftests: Test fentry attachment to shadowed functions Adds a new test that tries to attach a program to fentry of two functions of the same name, one located in vmlinux and the other in bpf_testmod. To avoid conflicts with existing tests, a new function "bpf_fentry_shadow_test" was created both in vmlinux and in bpf_testmod. The previous commit fixed a bug which caused this test to fail. The verifier would always use the vmlinux function's address as the target trampoline address, hence trying to create two trampolines for a single address, which is forbidden. The test (similarly to other fentry/fexit tests) is not working on arm64 at the moment. 
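As a hedged illustration of what the test relies on (placeholder names, error handling trimmed): the attach target is selected by the pair of BTF object fd and BTF id, not by the symbol name, so the same trivial fentry program can be pointed at either copy of bpf_fentry_shadow_test().

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* btf_fd identifies the BTF object (module BTF fd, or 0 for vmlinux),
 * func_btf_id is the id of that object's bpf_fentry_shadow_test(),
 * insns/insn_cnt hold a two-instruction "return 0" tracing program.
 */
static int attach_fentry(int btf_fd, __s32 func_btf_id,
			 const struct bpf_insn *insns, size_t insn_cnt)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		    .expected_attach_type = BPF_TRACE_FENTRY,
		    .attach_btf_obj_fd = btf_fd,
		    .attach_btf_id = func_btf_id);
	int prog_fd;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
				insns, insn_cnt, &opts);
	if (prog_fd < 0)
		return prog_fd;

	/* Before the fix, the second of the two attachments failed because
	 * both resolved to the vmlinux address and two trampolines for one
	 * address are forbidden.
	 */
	return bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, NULL);
}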
Signed-off-by: Viktor Malik Acked-by: Jiri Olsa Link: https://lore.kernel.org/r/5fe2f364190b6f79b085066ed7c5989c5bc475fa.1678432753.git.vmalik@redhat.com Signed-off-by: Alexei Starovoitov --- net/bpf/test_run.c | 5 + tools/testing/selftests/bpf/DENYLIST.aarch64 | 1 + .../selftests/bpf/bpf_testmod/bpf_testmod.c | 6 + .../bpf/prog_tests/module_fentry_shadow.c | 128 +++++++++++++++++++++ 4 files changed, 140 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c (limited to 'tools/testing') diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 6a8b33a103a4..71226f68270d 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -560,6 +560,11 @@ long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d) return (long)a + (long)b + (long)c + d; } +int noinline bpf_fentry_shadow_test(int a) +{ + return a + 1; +} + struct prog_test_member1 { int a; }; diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64 index 99cc33c51eaa..0a6837f97c32 100644 --- a/tools/testing/selftests/bpf/DENYLIST.aarch64 +++ b/tools/testing/selftests/bpf/DENYLIST.aarch64 @@ -44,6 +44,7 @@ lookup_key # test_lookup_key__attach unexp lru_bug # lru_bug__attach unexpected error: -524 (errno 524) modify_return # modify_return__attach failed unexpected error: -524 (errno 524) module_attach # skel_attach skeleton attach failed: -524 +module_fentry_shadow # bpf_link_create unexpected bpf_link_create: actual -524 < expected 0 mptcp/base # run_test mptcp unexpected error: -524 (errno 524) netcnt # packets unexpected packets: actual 10001 != expected 10000 rcu_read_lock # failed to attach: ERROR: strerror_r(-524)=22 diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 5e6e85c8d77d..7999476b9446 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -268,6 +268,12 @@ static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = { .set = &bpf_testmod_check_kfunc_ids, }; +noinline int bpf_fentry_shadow_test(int a) +{ + return a + 2; +} +EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test); + extern int bpf_fentry_test1(int a); static int bpf_testmod_init(void) diff --git a/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c new file mode 100644 index 000000000000..c7636e18b1eb --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Red Hat */ +#include +#include +#include "bpf/libbpf_internal.h" +#include "cgroup_helpers.h" + +static const char *module_name = "bpf_testmod"; +static const char *symbol_name = "bpf_fentry_shadow_test"; + +static int get_bpf_testmod_btf_fd(void) +{ + struct bpf_btf_info info; + char name[64]; + __u32 id = 0, len; + int err, fd; + + while (true) { + err = bpf_btf_get_next_id(id, &id); + if (err) { + log_err("failed to iterate BTF objects"); + return err; + } + + fd = bpf_btf_get_fd_by_id(id); + if (fd < 0) { + if (errno == ENOENT) + continue; /* expected race: BTF was unloaded */ + err = -errno; + log_err("failed to get FD for BTF object #%d", id); + return err; + } + + len = sizeof(info); + memset(&info, 0, sizeof(info)); + info.name = ptr_to_u64(name); + info.name_len = sizeof(name); + + err = bpf_obj_get_info_by_fd(fd, &info, &len); + if (err) { + err = -errno; + log_err("failed to get 
info for BTF object #%d", id); + close(fd); + return err; + } + + if (strcmp(name, module_name) == 0) + return fd; + + close(fd); + } + return -ENOENT; +} + +void test_module_fentry_shadow(void) +{ + struct btf *vmlinux_btf = NULL, *mod_btf = NULL; + int err, i; + int btf_fd[2] = {}; + int prog_fd[2] = {}; + int link_fd[2] = {}; + __s32 btf_id[2] = {}; + + LIBBPF_OPTS(bpf_prog_load_opts, load_opts, + .expected_attach_type = BPF_TRACE_FENTRY, + ); + + const struct bpf_insn trace_program[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + vmlinux_btf = btf__load_vmlinux_btf(); + if (!ASSERT_OK_PTR(vmlinux_btf, "load_vmlinux_btf")) + return; + + btf_fd[1] = get_bpf_testmod_btf_fd(); + if (!ASSERT_GE(btf_fd[1], 0, "get_bpf_testmod_btf_fd")) + goto out; + + mod_btf = btf_get_from_fd(btf_fd[1], vmlinux_btf); + if (!ASSERT_OK_PTR(mod_btf, "btf_get_from_fd")) + goto out; + + btf_id[0] = btf__find_by_name_kind(vmlinux_btf, symbol_name, BTF_KIND_FUNC); + if (!ASSERT_GT(btf_id[0], 0, "btf_find_by_name")) + goto out; + + btf_id[1] = btf__find_by_name_kind(mod_btf, symbol_name, BTF_KIND_FUNC); + if (!ASSERT_GT(btf_id[1], 0, "btf_find_by_name")) + goto out; + + for (i = 0; i < 2; i++) { + load_opts.attach_btf_id = btf_id[i]; + load_opts.attach_btf_obj_fd = btf_fd[i]; + prog_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL", + trace_program, + sizeof(trace_program) / sizeof(struct bpf_insn), + &load_opts); + if (!ASSERT_GE(prog_fd[i], 0, "bpf_prog_load")) + goto out; + + /* If the verifier incorrectly resolves addresses of the + * shadowed functions and uses the same address for both the + * vmlinux and the bpf_testmod functions, this will fail on + * attempting to create two trampolines for the same address, + * which is forbidden. + */ + link_fd[i] = bpf_link_create(prog_fd[i], 0, BPF_TRACE_FENTRY, NULL); + if (!ASSERT_GE(link_fd[i], 0, "bpf_link_create")) + goto out; + } + + err = bpf_prog_test_run_opts(prog_fd[0], NULL); + ASSERT_OK(err, "running test"); + +out: + btf__free(vmlinux_btf); + btf__free(mod_btf); + for (i = 0; i < 2; i++) { + if (btf_fd[i]) + close(btf_fd[i]); + if (prog_fd[i] > 0) + close(prog_fd[i]); + if (link_fd[i] > 0) + close(link_fd[i]); + } +} -- cgit v1.2.3-70-g09d2 From ed01385c0d78a025bdc72128b7aa7c3309cd5852 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 15 Mar 2023 17:07:25 -0700 Subject: selftests/bpf: Use ASSERT_EQ instead ASSERT_OK for testing memcmp result In tcp_hdr_options test, it ensures the received tcp hdr option and the sk local storage have the expected values. It uses memcmp to check that. Testing the memcmp result with ASSERT_OK is confusing because ASSERT_OK will print out the errno which is not set. This patch uses ASSERT_EQ to check for 0 instead. 
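Hedged sketch of the difference (the failure messages are paraphrased from test_progs.h and may not match verbatim):

	/* old: on mismatch prints something like
	 *   "unexpected error: <nonzero> (errno 0)", but errno was never set
	 */
	ASSERT_OK(memcmp(exp, act, sizeof(*exp)), hdr_desc);

	/* new: on mismatch reports the compared values instead, e.g.
	 *   "unexpected <hdr_desc>: actual <nonzero> != expected 0"
	 */
	ASSERT_EQ(memcmp(exp, act, sizeof(*exp)), 0, hdr_desc);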
Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20230316000726.1016773-1-martin.lau@linux.dev --- tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c index 5cf85d0f9827..13bcaeb028b8 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c @@ -151,7 +151,7 @@ static int check_hdr_opt(const struct bpf_test_option *exp, const struct bpf_test_option *act, const char *hdr_desc) { - if (!ASSERT_OK(memcmp(exp, act, sizeof(*exp)), hdr_desc)) { + if (!ASSERT_EQ(memcmp(exp, act, sizeof(*exp)), 0, hdr_desc)) { print_option(exp, "expected: "); print_option(act, " actual: "); return -1; @@ -169,7 +169,7 @@ static int check_hdr_stg(const struct hdr_stg *exp, int fd, "map_lookup(hdr_stg_map_fd)")) return -1; - if (!ASSERT_OK(memcmp(exp, &act, sizeof(*exp)), stg_desc)) { + if (!ASSERT_EQ(memcmp(exp, &act, sizeof(*exp)), 0, stg_desc)) { print_hdr_stg(exp, "expected: "); print_hdr_stg(&act, " actual: "); return -1; -- cgit v1.2.3-70-g09d2 From 226efec2b0efad60d4a6c4b2c3a8710dafc4dc21 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 15 Mar 2023 17:07:26 -0700 Subject: selftests/bpf: Fix a fd leak in an error path in network_helpers.c In __start_server, it leaks a fd when setsockopt(SO_REUSEPORT) fails. This patch fixes it. Fixes: eed92afdd14c ("bpf: selftest: Test batching and bpf_(get|set)sockopt in bpf tcp iter") Reported-by: Andrii Nakryiko Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20230316000726.1016773-2-martin.lau@linux.dev --- tools/testing/selftests/bpf/network_helpers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index 01de33191226..596caa176582 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -95,7 +95,7 @@ static int __start_server(int type, int protocol, const struct sockaddr *addr, if (reuseport && setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on))) { log_err("Failed to set SO_REUSEPORT"); - return -1; + goto error_close; } if (bind(fd, addr, addrlen) < 0) { -- cgit v1.2.3-70-g09d2 From a5a197df58c44ce32a86b57e970da4bd7b71b399 Mon Sep 17 00:00:00 2001 From: David Vernet Date: Thu, 16 Mar 2023 00:40:26 -0500 Subject: bpf/selftests: Test using global cpumask kptr with RCU Now that struct bpf_cpumask * is considered an RCU-safe type according to the verifier, we should add tests that validate its common usages. This patch adds those tests to the cpumask test suite. A subsequent changes will remove bpf_cpumask_kptr_get(), and will adjust the selftest and BPF documentation accordingly. 
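A minimal sketch of the pattern these tests validate; the private() macro is copied from the cpumask_common.h change below, and the kfunc prototypes are assumed to match it:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
private(MASK) static struct bpf_cpumask __kptr *global_mask;

bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(global_mask_rcu_sketch, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask;

	bpf_rcu_read_lock();
	mask = global_mask;	/* plain load of the RCU-safe kptr */
	if (!mask) {
		bpf_rcu_read_unlock();
		return 0;
	}
	/* KF_RCU kfuncs may operate on it while the RCU read lock is held */
	bpf_cpumask_test_cpu(0, (const struct cpumask *)mask);
	bpf_rcu_read_unlock();	/* mask must not be used past this point */
	return 0;
}

char _license[] SEC("license") = "GPL";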
Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230316054028.88924-4-void@manifault.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/cpumask.c | 1 + tools/testing/selftests/bpf/progs/cpumask_common.h | 6 +++ .../testing/selftests/bpf/progs/cpumask_failure.c | 62 ++++++++++++++++++++++ .../testing/selftests/bpf/progs/cpumask_success.c | 33 ++++++++++++ 4 files changed, 102 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c index 5fbe457c4ebe..6c0fe23498c7 100644 --- a/tools/testing/selftests/bpf/prog_tests/cpumask.c +++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c @@ -17,6 +17,7 @@ static const char * const cpumask_success_testcases[] = { "test_insert_leave", "test_insert_remove_release", "test_insert_kptr_get_release", + "test_global_mask_rcu", }; static void verify_success(const char *prog_name) diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h index 65e5496ca1b2..7623782fbd62 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_common.h +++ b/tools/testing/selftests/bpf/progs/cpumask_common.h @@ -9,6 +9,9 @@ int err; +#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8))) +private(MASK) static struct bpf_cpumask __kptr * global_mask; + struct __cpumask_map_value { struct bpf_cpumask __kptr * cpumask; }; @@ -51,6 +54,9 @@ void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym u32 bpf_cpumask_any(const struct cpumask *src) __ksym; u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2) __ksym; +void bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; + static inline const struct cpumask *cast(struct bpf_cpumask *cpumask) { return (const struct cpumask *)cpumask; diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c index cfe83f0ef9e2..9f726d55f747 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_failure.c +++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c @@ -127,3 +127,65 @@ int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags) return 0; } + +SEC("tp_btf/task_newtask") +__failure __msg("R2 must be a rcu pointer") +int BPF_PROG(test_global_mask_out_of_rcu, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *local, *prev; + + local = create_cpumask(); + if (!local) + return 0; + + prev = bpf_kptr_xchg(&global_mask, local); + if (prev) { + bpf_cpumask_release(prev); + err = 3; + return 0; + } + + bpf_rcu_read_lock(); + local = global_mask; + if (!local) { + err = 4; + bpf_rcu_read_unlock(); + return 0; + } + + bpf_rcu_read_unlock(); + + /* RCU region is exited before calling KF_RCU kfunc. */ + + bpf_cpumask_test_cpu(0, (const struct cpumask *)local); + + return 0; +} + +SEC("tp_btf/task_newtask") +__failure __msg("NULL pointer passed to trusted arg1") +int BPF_PROG(test_global_mask_no_null_check, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *local, *prev; + + local = create_cpumask(); + if (!local) + return 0; + + prev = bpf_kptr_xchg(&global_mask, local); + if (prev) { + bpf_cpumask_release(prev); + err = 3; + return 0; + } + + bpf_rcu_read_lock(); + local = global_mask; + + /* No NULL check is performed on global cpumask kptr. 
*/ + bpf_cpumask_test_cpu(0, (const struct cpumask *)local); + + bpf_rcu_read_unlock(); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c index 97ed08c4ff03..fe928ff72a06 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_success.c +++ b/tools/testing/selftests/bpf/progs/cpumask_success.c @@ -423,3 +423,36 @@ int BPF_PROG(test_insert_kptr_get_release, struct task_struct *task, u64 clone_f return 0; } + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *local, *prev; + + if (!is_test_task()) + return 0; + + local = create_cpumask(); + if (!local) + return 0; + + prev = bpf_kptr_xchg(&global_mask, local); + if (prev) { + bpf_cpumask_release(prev); + err = 3; + return 0; + } + + bpf_rcu_read_lock(); + local = global_mask; + if (!local) { + err = 4; + bpf_rcu_read_unlock(); + return 0; + } + + bpf_cpumask_test_cpu(0, (const struct cpumask *)local); + bpf_rcu_read_unlock(); + + return 0; +} -- cgit v1.2.3-70-g09d2 From 1b403ce77dfbf234723a91bc411dfb03a0499d6e Mon Sep 17 00:00:00 2001 From: David Vernet Date: Thu, 16 Mar 2023 00:40:27 -0500 Subject: bpf: Remove bpf_cpumask_kptr_get() kfunc Now that struct bpf_cpumask is RCU safe, there's no need for this kfunc. Rather than doing the following: private(MASK) static struct bpf_cpumask __kptr *global; int BPF_PROG(prog, s32 cpu, ...) { struct bpf_cpumask *cpumask; bpf_rcu_read_lock(); cpumask = bpf_cpumask_kptr_get(&global); if (!cpumask) { bpf_rcu_read_unlock(); return -1; } bpf_cpumask_setall(cpumask); ... bpf_cpumask_release(cpumask); bpf_rcu_read_unlock(); } Programs can instead simply do (assume same global cpumask): int BPF_PROG(prog, ...) { struct bpf_cpumask *cpumask; bpf_rcu_read_lock(); cpumask = global; if (!cpumask) { bpf_rcu_read_unlock(); return -1; } bpf_cpumask_setall(cpumask); ... bpf_rcu_read_unlock(); } In other words, no extra atomic acquire / release, and less boilerplate code. This patch removes both the kfunc, as well as its selftests and documentation. Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230316054028.88924-5-void@manifault.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/cpumask.c | 29 --------------------- tools/testing/selftests/bpf/prog_tests/cpumask.c | 1 - tools/testing/selftests/bpf/progs/cpumask_common.h | 1 - .../testing/selftests/bpf/progs/cpumask_failure.c | 24 ----------------- .../testing/selftests/bpf/progs/cpumask_success.c | 30 ---------------------- 5 files changed, 85 deletions(-) (limited to 'tools/testing') diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c index 98eea62b6b7b..db9da2194c1a 100644 --- a/kernel/bpf/cpumask.c +++ b/kernel/bpf/cpumask.c @@ -82,34 +82,6 @@ __bpf_kfunc struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) return cpumask; } -/** - * bpf_cpumask_kptr_get() - Attempt to acquire a reference to a BPF cpumask - * stored in a map. - * @cpumaskp: A pointer to a BPF cpumask map value. - * - * Attempts to acquire a reference to a BPF cpumask stored in a map value. The - * cpumask returned by this function must either be embedded in a map as a - * kptr, or freed with bpf_cpumask_release(). This function may return NULL if - * no BPF cpumask was found in the specified map value. 
- */ -__bpf_kfunc struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp) -{ - struct bpf_cpumask *cpumask; - - /* The BPF memory allocator frees memory backing its caches in an RCU - * callback. Thus, we can safely use RCU to ensure that the cpumask is - * safe to read. - */ - rcu_read_lock(); - - cpumask = READ_ONCE(*cpumaskp); - if (cpumask && !refcount_inc_not_zero(&cpumask->usage)) - cpumask = NULL; - - rcu_read_unlock(); - return cpumask; -} - static void cpumask_free_cb(struct rcu_head *head) { struct bpf_cpumask *cpumask; @@ -435,7 +407,6 @@ BTF_SET8_START(cpumask_kfunc_btf_ids) BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE | KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS) -BTF_ID_FLAGS(func, bpf_cpumask_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_cpumask_first, KF_RCU) BTF_ID_FLAGS(func, bpf_cpumask_first_zero, KF_RCU) BTF_ID_FLAGS(func, bpf_cpumask_set_cpu, KF_RCU) diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c index 6c0fe23498c7..cdf4acc18e4c 100644 --- a/tools/testing/selftests/bpf/prog_tests/cpumask.c +++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c @@ -16,7 +16,6 @@ static const char * const cpumask_success_testcases[] = { "test_copy_any_anyand", "test_insert_leave", "test_insert_remove_release", - "test_insert_kptr_get_release", "test_global_mask_rcu", }; diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h index 7623782fbd62..0c5b785a93e4 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_common.h +++ b/tools/testing/selftests/bpf/progs/cpumask_common.h @@ -26,7 +26,6 @@ struct array_map { struct bpf_cpumask *bpf_cpumask_create(void) __ksym; void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym; struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym; -struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumask) __ksym; u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym; u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym; void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c index 9f726d55f747..db4f94e72b61 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_failure.c +++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c @@ -94,30 +94,6 @@ int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_ return 0; } -SEC("tp_btf/task_newtask") -__failure __msg("Unreleased reference") -int BPF_PROG(test_kptr_get_no_release, struct task_struct *task, u64 clone_flags) -{ - struct bpf_cpumask *cpumask; - struct __cpumask_map_value *v; - - cpumask = create_cpumask(); - if (!cpumask) - return 0; - - if (cpumask_map_insert(cpumask)) - return 0; - - v = cpumask_map_value_lookup(); - if (!v) - return 0; - - cpumask = bpf_cpumask_kptr_get(&v->cpumask); - - /* cpumask is never released. 
*/ - return 0; -} - SEC("tp_btf/task_newtask") __failure __msg("NULL pointer passed to trusted arg0") int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags) diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c index fe928ff72a06..2fcdd7f68ac7 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_success.c +++ b/tools/testing/selftests/bpf/progs/cpumask_success.c @@ -394,36 +394,6 @@ int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_fla return 0; } -SEC("tp_btf/task_newtask") -int BPF_PROG(test_insert_kptr_get_release, struct task_struct *task, u64 clone_flags) -{ - struct bpf_cpumask *cpumask; - struct __cpumask_map_value *v; - - cpumask = create_cpumask(); - if (!cpumask) - return 0; - - if (cpumask_map_insert(cpumask)) { - err = 3; - return 0; - } - - v = cpumask_map_value_lookup(); - if (!v) { - err = 4; - return 0; - } - - cpumask = bpf_cpumask_kptr_get(&v->cpumask); - if (cpumask) - bpf_cpumask_release(cpumask); - else - err = 5; - - return 0; -} - SEC("tp_btf/task_newtask") int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags) { -- cgit v1.2.3-70-g09d2 From 5640b6d894342d153b719644681b0345fd28ee96 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 16 Mar 2023 18:50:51 +0100 Subject: selftests/bpf: fix "metadata marker" getting overwritten by the netstack MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Alexei noticed xdp_do_redirect test on BPF CI started failing on BE systems after skb PP recycling was enabled: test_xdp_do_redirect:PASS:prog_run 0 nsec test_xdp_do_redirect:PASS:pkt_count_xdp 0 nsec test_xdp_do_redirect:PASS:pkt_count_zero 0 nsec test_xdp_do_redirect:FAIL:pkt_count_tc unexpected pkt_count_tc: actual 220 != expected 9998 test_max_pkt_size:PASS:prog_run_max_size 0 nsec test_max_pkt_size:PASS:prog_run_too_big 0 nsec close_netns:PASS:setns 0 nsec #289 xdp_do_redirect:FAIL Summary: 270/1674 PASSED, 30 SKIPPED, 1 FAILED and it doesn't happen on LE systems. Ilya then hunted it down to: #0 0x0000000000aaeee6 in neigh_hh_output (hh=0x83258df0, skb=0x88142200) at linux/include/net/neighbour.h:503 #1 0x0000000000ab2cda in neigh_output (skip_cache=false, skb=0x88142200, n=) at linux/include/net/neighbour.h:544 #2 ip6_finish_output2 (net=net@entry=0x88edba00, sk=sk@entry=0x0, skb=skb@entry=0x88142200) at linux/net/ipv6/ip6_output.c:134 #3 0x0000000000ab4cbc in __ip6_finish_output (skb=0x88142200, sk=0x0, net=0x88edba00) at linux/net/ipv6/ip6_output.c:195 #4 ip6_finish_output (net=0x88edba00, sk=0x0, skb=0x88142200) at linux/net/ipv6/ip6_output.c:206 xdp_do_redirect test places a u32 marker (0x42) right before the Ethernet header to check it then in the XDP program and return %XDP_ABORTED if it's not there. Neigh xmit code likes to round up hard header length to speed up copying the header, so it overwrites two bytes in front of the Eth header. On LE systems, 0x42 is one byte at `data - 4`, while on BE it's `data - 1`, what explains why it happens only there. It didn't happen previously due to that %XDP_PASS meant the page will be discarded and replaced by a new one, but now it can be recycled as well, while bpf_test_run code doesn't reinitialize the content of recycled pages. This mark is limited to this particular test and its setup though, so there's no need to predict 1000 different possible cases. Just move it 4 bytes to the left, still keeping it 32 bit to match on more bytes. 
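For orientation, a hedged sketch of the resulting buffer layout in the test (fragment only; pkt_udp and the xdp_md context come from xdp_do_redirect.c as shown in the diff below):

/*   data[0..3]  0x42 metadata marker, checked by the XDP program
 *   data[4..7]  slack: neigh_hh_output() rounds the hard header length
 *               up and may write two bytes just in front of the Ethernet
 *               header, which now lands here instead of on the marker
 *   data[8.. ]  the UDP test frame (pkt_udp)
 */
char data[sizeof(pkt_udp) + sizeof(__u64)];
struct xdp_md ctx_in = {
	.data     = sizeof(__u64),	/* frame starts after the 8-byte slot */
	.data_end = sizeof(data),
};

memcpy(&data[sizeof(__u64)], &pkt_udp, sizeof(pkt_udp));
*(__u32 *)data = 0x42;			/* metadata test value */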
Fixes: 9c94bbf9a87b ("xdp: recycle Page Pool backed skbs built from XDP frames") Reported-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/CAADnVQ+B_JOU+EpP=DKhbY9yXdN6GiRPnpTTXfEZ9sNkUeb-yQ@mail.gmail.com Reported-by: Ilya Leoshkevich # + debugging Link: https://lore.kernel.org/bpf/8341c1d9f935f410438e79d3bd8a9cc50aefe105.camel@linux.ibm.com Signed-off-by: Alexander Lobakin Acked-by: Toke Høiland-Jørgensen Tested-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20230316175051.922550-3-aleksander.lobakin@intel.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c | 7 ++++--- tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c index 856cbc29e6a1..4eaa3dcaebc8 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c @@ -86,12 +86,12 @@ static void test_max_pkt_size(int fd) void test_xdp_do_redirect(void) { int err, xdp_prog_fd, tc_prog_fd, ifindex_src, ifindex_dst; - char data[sizeof(pkt_udp) + sizeof(__u32)]; + char data[sizeof(pkt_udp) + sizeof(__u64)]; struct test_xdp_do_redirect *skel = NULL; struct nstoken *nstoken = NULL; struct bpf_link *link; LIBBPF_OPTS(bpf_xdp_query_opts, query_opts); - struct xdp_md ctx_in = { .data = sizeof(__u32), + struct xdp_md ctx_in = { .data = sizeof(__u64), .data_end = sizeof(data) }; DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, .data_in = &data, @@ -105,8 +105,9 @@ void test_xdp_do_redirect(void) DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS); - memcpy(&data[sizeof(__u32)], &pkt_udp, sizeof(pkt_udp)); + memcpy(&data[sizeof(__u64)], &pkt_udp, sizeof(pkt_udp)); *((__u32 *)data) = 0x42; /* metadata test value */ + *((__u32 *)data + 4) = 0; skel = test_xdp_do_redirect__open(); if (!ASSERT_OK_PTR(skel, "skel")) diff --git a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c index cd2d4e3258b8..5baaafed0d2d 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c @@ -52,7 +52,7 @@ int xdp_redirect(struct xdp_md *xdp) *payload = MARK_IN; - if (bpf_xdp_adjust_meta(xdp, 4)) + if (bpf_xdp_adjust_meta(xdp, sizeof(__u64))) return XDP_ABORTED; if (retcode > XDP_PASS) -- cgit v1.2.3-70-g09d2 From 62199e3f16583e766f46d1767deca109fd8ea408 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 15 Mar 2023 15:11:55 +0200 Subject: selftests: net: Add VXLAN MDB test Add test cases for VXLAN MDB, testing the control and data paths. Two different sets of namespaces (i.e., ns{1,2}_v4 and ns{1,2}_v6) are used in order to test VXLAN MDB with both IPv4 and IPv6 underlays, respectively. Example truncated output: # ./test_vxlan_mdb.sh [...] Tests passed: 620 Tests failed: 0 Signed-off-by: Ido Schimmel Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/Makefile | 1 + tools/testing/selftests/net/config | 1 + tools/testing/selftests/net/test_vxlan_mdb.sh | 2318 +++++++++++++++++++++++++ 3 files changed, 2320 insertions(+) create mode 100755 tools/testing/selftests/net/test_vxlan_mdb.sh (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 099741290184..a179fbd6f972 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -81,6 +81,7 @@ TEST_GEN_FILES += sctp_hello TEST_GEN_FILES += csum TEST_GEN_FILES += nat6to4.o TEST_GEN_FILES += ip_local_port_range +TEST_PROGS += test_vxlan_mdb.sh TEST_FILES := settings diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config index cc9fd55ab869..4c7ce07afa2f 100644 --- a/tools/testing/selftests/net/config +++ b/tools/testing/selftests/net/config @@ -48,3 +48,4 @@ CONFIG_BAREUDP=m CONFIG_IPV6_IOAM6_LWTUNNEL=y CONFIG_CRYPTO_SM4_GENERIC=y CONFIG_AMT=m +CONFIG_VXLAN=m diff --git a/tools/testing/selftests/net/test_vxlan_mdb.sh b/tools/testing/selftests/net/test_vxlan_mdb.sh new file mode 100755 index 000000000000..31e5f0f8859d --- /dev/null +++ b/tools/testing/selftests/net/test_vxlan_mdb.sh @@ -0,0 +1,2318 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# This test is for checking VXLAN MDB functionality. The topology consists of +# two sets of namespaces: One for the testing of IPv4 underlay and another for +# IPv6. In both cases, both IPv4 and IPv6 overlay traffic are tested. +# +# Data path functionality is tested by sending traffic from one of the upper +# namespaces and checking using ingress tc filters that the expected traffic +# was received by one of the lower namespaces. +# +# +------------------------------------+ +------------------------------------+ +# | ns1_v4 | | ns1_v6 | +# | | | | +# | br0.10 br0.4000 br0.20 | | br0.10 br0.4000 br0.20 | +# | + + + | | + + + | +# | | | | | | | | | | +# | | | | | | | | | | +# | +---------+---------+ | | +---------+---------+ | +# | | | | | | +# | | | | | | +# | + | | + | +# | br0 | | br0 | +# | + | | + | +# | | | | | | +# | | | | | | +# | + | | + | +# | vx0 | | vx0 | +# | | | | +# | | | | +# | veth0 | | veth0 | +# | + | | + | +# +-----------------|------------------+ +-----------------|------------------+ +# | | +# +-----------------|------------------+ +-----------------|------------------+ +# | + | | + | +# | veth0 | | veth0 | +# | | | | +# | | | | +# | vx0 | | vx0 | +# | + | | + | +# | | | | | | +# | | | | | | +# | + | | + | +# | br0 | | br0 | +# | + | | + | +# | | | | | | +# | | | | | | +# | +---------+---------+ | | +---------+---------+ | +# | | | | | | | | | | +# | | | | | | | | | | +# | + + + | | + + + | +# | br0.10 br0.4000 br0.10 | | br0.10 br0.4000 br0.20 | +# | | | | +# | ns2_v4 | | ns2_v6 | +# +------------------------------------+ +------------------------------------+ + +ret=0 +# Kselftest framework requirement - SKIP code is 4. 
+ksft_skip=4 + +CONTROL_PATH_TESTS=" + basic_star_g_ipv4_ipv4 + basic_star_g_ipv6_ipv4 + basic_star_g_ipv4_ipv6 + basic_star_g_ipv6_ipv6 + basic_sg_ipv4_ipv4 + basic_sg_ipv6_ipv4 + basic_sg_ipv4_ipv6 + basic_sg_ipv6_ipv6 + star_g_ipv4_ipv4 + star_g_ipv6_ipv4 + star_g_ipv4_ipv6 + star_g_ipv6_ipv6 + sg_ipv4_ipv4 + sg_ipv6_ipv4 + sg_ipv4_ipv6 + sg_ipv6_ipv6 + dump_ipv4_ipv4 + dump_ipv6_ipv4 + dump_ipv4_ipv6 + dump_ipv6_ipv6 +" + +DATA_PATH_TESTS=" + encap_params_ipv4_ipv4 + encap_params_ipv6_ipv4 + encap_params_ipv4_ipv6 + encap_params_ipv6_ipv6 + starg_exclude_ir_ipv4_ipv4 + starg_exclude_ir_ipv6_ipv4 + starg_exclude_ir_ipv4_ipv6 + starg_exclude_ir_ipv6_ipv6 + starg_include_ir_ipv4_ipv4 + starg_include_ir_ipv6_ipv4 + starg_include_ir_ipv4_ipv6 + starg_include_ir_ipv6_ipv6 + starg_exclude_p2mp_ipv4_ipv4 + starg_exclude_p2mp_ipv6_ipv4 + starg_exclude_p2mp_ipv4_ipv6 + starg_exclude_p2mp_ipv6_ipv6 + starg_include_p2mp_ipv4_ipv4 + starg_include_p2mp_ipv6_ipv4 + starg_include_p2mp_ipv4_ipv6 + starg_include_p2mp_ipv6_ipv6 + egress_vni_translation_ipv4_ipv4 + egress_vni_translation_ipv6_ipv4 + egress_vni_translation_ipv4_ipv6 + egress_vni_translation_ipv6_ipv6 + all_zeros_mdb_ipv4 + all_zeros_mdb_ipv6 + mdb_fdb_ipv4_ipv4 + mdb_fdb_ipv6_ipv4 + mdb_fdb_ipv4_ipv6 + mdb_fdb_ipv6_ipv6 + mdb_torture_ipv4_ipv4 + mdb_torture_ipv6_ipv4 + mdb_torture_ipv4_ipv6 + mdb_torture_ipv6_ipv6 +" + +# All tests in this script. Can be overridden with -t option. +TESTS=" + $CONTROL_PATH_TESTS + $DATA_PATH_TESTS +" +VERBOSE=0 +PAUSE_ON_FAIL=no +PAUSE=no + +################################################################################ +# Utilities + +log_test() +{ + local rc=$1 + local expected=$2 + local msg="$3" + + if [ ${rc} -eq ${expected} ]; then + printf "TEST: %-60s [ OK ]\n" "${msg}" + nsuccess=$((nsuccess+1)) + else + ret=1 + nfail=$((nfail+1)) + printf "TEST: %-60s [FAIL]\n" "${msg}" + if [ "$VERBOSE" = "1" ]; then + echo " rc=$rc, expected $expected" + fi + + if [ "${PAUSE_ON_FAIL}" = "yes" ]; then + echo + echo "hit enter to continue, 'q' to quit" + read a + [ "$a" = "q" ] && exit 1 + fi + fi + + if [ "${PAUSE}" = "yes" ]; then + echo + echo "hit enter to continue, 'q' to quit" + read a + [ "$a" = "q" ] && exit 1 + fi + + [ "$VERBOSE" = "1" ] && echo +} + +run_cmd() +{ + local cmd="$1" + local out + local stderr="2>/dev/null" + + if [ "$VERBOSE" = "1" ]; then + printf "COMMAND: $cmd\n" + stderr= + fi + + out=$(eval $cmd $stderr) + rc=$? 
+ if [ "$VERBOSE" = "1" -a -n "$out" ]; then + echo " $out" + fi + + return $rc +} + +tc_check_packets() +{ + local ns=$1; shift + local id=$1; shift + local handle=$1; shift + local count=$1; shift + local pkts + + sleep 0.1 + pkts=$(tc -n $ns -j -s filter show $id \ + | jq ".[] | select(.options.handle == $handle) | \ + .options.actions[0].stats.packets") + [[ $pkts == $count ]] +} + +################################################################################ +# Setup + +setup_common_ns() +{ + local ns=$1; shift + local local_addr=$1; shift + + ip netns exec $ns sysctl -qw net.ipv4.ip_forward=1 + ip netns exec $ns sysctl -qw net.ipv4.fib_multipath_use_neigh=1 + ip netns exec $ns sysctl -qw net.ipv4.conf.default.ignore_routes_with_linkdown=1 + ip netns exec $ns sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1 + ip netns exec $ns sysctl -qw net.ipv6.conf.all.forwarding=1 + ip netns exec $ns sysctl -qw net.ipv6.conf.default.forwarding=1 + ip netns exec $ns sysctl -qw net.ipv6.conf.default.ignore_routes_with_linkdown=1 + ip netns exec $ns sysctl -qw net.ipv6.conf.all.accept_dad=0 + ip netns exec $ns sysctl -qw net.ipv6.conf.default.accept_dad=0 + + ip -n $ns link set dev lo up + ip -n $ns address add $local_addr dev lo + + ip -n $ns link set dev veth0 up + + ip -n $ns link add name br0 up type bridge vlan_filtering 1 \ + vlan_default_pvid 0 mcast_snooping 0 + + ip -n $ns link add link br0 name br0.10 up type vlan id 10 + bridge -n $ns vlan add vid 10 dev br0 self + + ip -n $ns link add link br0 name br0.20 up type vlan id 20 + bridge -n $ns vlan add vid 20 dev br0 self + + ip -n $ns link add link br0 name br0.4000 up type vlan id 4000 + bridge -n $ns vlan add vid 4000 dev br0 self + + ip -n $ns link add name vx0 up master br0 type vxlan \ + local $local_addr dstport 4789 external vnifilter + bridge -n $ns link set dev vx0 vlan_tunnel on + + bridge -n $ns vlan add vid 10 dev vx0 + bridge -n $ns vlan add vid 10 dev vx0 tunnel_info id 10010 + bridge -n $ns vni add vni 10010 dev vx0 + + bridge -n $ns vlan add vid 20 dev vx0 + bridge -n $ns vlan add vid 20 dev vx0 tunnel_info id 10020 + bridge -n $ns vni add vni 10020 dev vx0 + + bridge -n $ns vlan add vid 4000 dev vx0 pvid + bridge -n $ns vlan add vid 4000 dev vx0 tunnel_info id 14000 + bridge -n $ns vni add vni 14000 dev vx0 +} + +setup_common() +{ + local ns1=$1; shift + local ns2=$1; shift + local local_addr1=$1; shift + local local_addr2=$1; shift + + ip netns add $ns1 + ip netns add $ns2 + + ip link add name veth0 type veth peer name veth1 + ip link set dev veth0 netns $ns1 name veth0 + ip link set dev veth1 netns $ns2 name veth0 + + setup_common_ns $ns1 $local_addr1 + setup_common_ns $ns2 $local_addr2 +} + +setup_v4() +{ + setup_common ns1_v4 ns2_v4 192.0.2.1 192.0.2.2 + + ip -n ns1_v4 address add 192.0.2.17/28 dev veth0 + ip -n ns2_v4 address add 192.0.2.18/28 dev veth0 + + ip -n ns1_v4 route add default via 192.0.2.18 + ip -n ns2_v4 route add default via 192.0.2.17 +} + +cleanup_v4() +{ + ip netns del ns2_v4 + ip netns del ns1_v4 +} + +setup_v6() +{ + setup_common ns1_v6 ns2_v6 2001:db8:1::1 2001:db8:1::2 + + ip -n ns1_v6 address add 2001:db8:2::1/64 dev veth0 nodad + ip -n ns2_v6 address add 2001:db8:2::2/64 dev veth0 nodad + + ip -n ns1_v6 route add default via 2001:db8:2::2 + ip -n ns2_v6 route add default via 2001:db8:2::1 +} + +cleanup_v6() +{ + ip netns del ns2_v6 + ip netns del ns1_v6 +} + +setup() +{ + set -e + + setup_v4 + setup_v6 + + sleep 5 + + set +e +} + +cleanup() +{ + cleanup_v6 &> /dev/null + cleanup_v4 &> /dev/null 
+} + +################################################################################ +# Tests - Control path + +basic_common() +{ + local ns1=$1; shift + local grp_key=$1; shift + local vtep_ip=$1; shift + + # Test basic control path operations common to all MDB entry types. + + # Basic add, replace and delete behavior. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 $grp_key permanent dst $vtep_ip src_vni 10010" + log_test $? 0 "MDB entry addition" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep \"$grp_key\"" + log_test $? 0 "MDB entry presence after addition" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 $grp_key permanent dst $vtep_ip src_vni 10010" + log_test $? 0 "MDB entry replacement" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep \"$grp_key\"" + log_test $? 0 "MDB entry presence after replacement" + + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 $grp_key dst $vtep_ip src_vni 10010" + log_test $? 0 "MDB entry deletion" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep \"$grp_key\"" + log_test $? 1 "MDB entry presence after deletion" + + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 $grp_key dst $vtep_ip src_vni 10010" + log_test $? 255 "Non-existent MDB entry deletion" + + # Default protocol and replacement. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 $grp_key permanent dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep \"$grp_key\" | grep \"proto static\"" + log_test $? 0 "MDB entry default protocol" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 $grp_key permanent proto 123 dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep \"$grp_key\" | grep \"proto 123\"" + log_test $? 0 "MDB entry protocol replacement" + + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 $grp_key dst $vtep_ip src_vni 10010" + + # Default destination port and replacement. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 $grp_key permanent dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep \"$grp_key\" | grep \" dst_port \"" + log_test $? 1 "MDB entry default destination port" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 $grp_key permanent dst $vtep_ip dst_port 1234 src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep \"$grp_key\" | grep \"dst_port 1234\"" + log_test $? 0 "MDB entry destination port replacement" + + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 $grp_key dst $vtep_ip src_vni 10010" + + # Default destination VNI and replacement. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 $grp_key permanent dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep \"$grp_key\" | grep \" vni \"" + log_test $? 1 "MDB entry default destination VNI" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 $grp_key permanent dst $vtep_ip vni 1234 src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep \"$grp_key\" | grep \"vni 1234\"" + log_test $? 0 "MDB entry destination VNI replacement" + + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 $grp_key dst $vtep_ip src_vni 10010" + + # Default outgoing interface and replacement. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 $grp_key permanent dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep \"$grp_key\" | grep \" via \"" + log_test $? 
1 "MDB entry default outgoing interface" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 $grp_key permanent dst $vtep_ip src_vni 10010 via veth0" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep \"$grp_key\" | grep \"via veth0\"" + log_test $? 0 "MDB entry outgoing interface replacement" + + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 $grp_key dst $vtep_ip src_vni 10010" + + # Common error cases. + run_cmd "bridge -n $ns1 mdb add dev vx0 port veth0 $grp_key permanent dst $vtep_ip src_vni 10010" + log_test $? 255 "MDB entry with mismatch between device and port" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 $grp_key temp dst $vtep_ip src_vni 10010" + log_test $? 255 "MDB entry with temp state" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 $grp_key permanent vid 10 dst $vtep_ip src_vni 10010" + log_test $? 255 "MDB entry with VLAN" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp 01:02:03:04:05:06 permanent dst $vtep_ip src_vni 10010" + log_test $? 255 "MDB entry MAC address" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 $grp_key permanent" + log_test $? 255 "MDB entry without extended parameters" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 $grp_key permanent proto 3 dst $vtep_ip src_vni 10010" + log_test $? 255 "MDB entry with an invalid protocol" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 $grp_key permanent dst $vtep_ip vni $((2 ** 24)) src_vni 10010" + log_test $? 255 "MDB entry with an invalid destination VNI" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 $grp_key permanent dst $vtep_ip src_vni $((2 ** 24))" + log_test $? 255 "MDB entry with an invalid source VNI" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 $grp_key permanent src_vni 10010" + log_test $? 255 "MDB entry without a remote destination IP" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 $grp_key permanent dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 $grp_key permanent dst $vtep_ip src_vni 10010" + log_test $? 
255 "Duplicate MDB entries" + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 $grp_key dst $vtep_ip src_vni 10010" +} + +basic_star_g_ipv4_ipv4() +{ + local ns1=ns1_v4 + local grp_key="grp 239.1.1.1" + local vtep_ip=198.51.100.100 + + echo + echo "Control path: Basic (*, G) operations - IPv4 overlay / IPv4 underlay" + echo "--------------------------------------------------------------------" + + basic_common $ns1 "$grp_key" $vtep_ip +} + +basic_star_g_ipv6_ipv4() +{ + local ns1=ns1_v4 + local grp_key="grp ff0e::1" + local vtep_ip=198.51.100.100 + + echo + echo "Control path: Basic (*, G) operations - IPv6 overlay / IPv4 underlay" + echo "--------------------------------------------------------------------" + + basic_common $ns1 "$grp_key" $vtep_ip +} + +basic_star_g_ipv4_ipv6() +{ + local ns1=ns1_v6 + local grp_key="grp 239.1.1.1" + local vtep_ip=2001:db8:1000::1 + + echo + echo "Control path: Basic (*, G) operations - IPv4 overlay / IPv6 underlay" + echo "--------------------------------------------------------------------" + + basic_common $ns1 "$grp_key" $vtep_ip +} + +basic_star_g_ipv6_ipv6() +{ + local ns1=ns1_v6 + local grp_key="grp ff0e::1" + local vtep_ip=2001:db8:1000::1 + + echo + echo "Control path: Basic (*, G) operations - IPv6 overlay / IPv6 underlay" + echo "--------------------------------------------------------------------" + + basic_common $ns1 "$grp_key" $vtep_ip +} + +basic_sg_ipv4_ipv4() +{ + local ns1=ns1_v4 + local grp_key="grp 239.1.1.1 src 192.0.2.129" + local vtep_ip=198.51.100.100 + + echo + echo "Control path: Basic (S, G) operations - IPv4 overlay / IPv4 underlay" + echo "--------------------------------------------------------------------" + + basic_common $ns1 "$grp_key" $vtep_ip +} + +basic_sg_ipv6_ipv4() +{ + local ns1=ns1_v4 + local grp_key="grp ff0e::1 src 2001:db8:100::1" + local vtep_ip=198.51.100.100 + + echo + echo "Control path: Basic (S, G) operations - IPv6 overlay / IPv4 underlay" + echo "---------------------------------------------------------------------" + + basic_common $ns1 "$grp_key" $vtep_ip +} + +basic_sg_ipv4_ipv6() +{ + local ns1=ns1_v6 + local grp_key="grp 239.1.1.1 src 192.0.2.129" + local vtep_ip=2001:db8:1000::1 + + echo + echo "Control path: Basic (S, G) operations - IPv4 overlay / IPv6 underlay" + echo "--------------------------------------------------------------------" + + basic_common $ns1 "$grp_key" $vtep_ip +} + +basic_sg_ipv6_ipv6() +{ + local ns1=ns1_v6 + local grp_key="grp ff0e::1 src 2001:db8:100::1" + local vtep_ip=2001:db8:1000::1 + + echo + echo "Control path: Basic (S, G) operations - IPv6 overlay / IPv6 underlay" + echo "--------------------------------------------------------------------" + + basic_common $ns1 "$grp_key" $vtep_ip +} + +star_g_common() +{ + local ns1=$1; shift + local grp=$1; shift + local src1=$1; shift + local src2=$1; shift + local src3=$1; shift + local vtep_ip=$1; shift + local all_zeros_grp=$1; shift + + # Test control path operations specific to (*, G) entries. + + # Basic add, replace and delete behavior. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $src1 dst $vtep_ip src_vni 10010" + log_test $? 0 "(*, G) MDB entry addition with source list" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep -v \" src \"" + log_test $? 0 "(*, G) MDB entry presence after addition" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \"src $src1\"" + log_test $? 
0 "(S, G) MDB entry presence after addition" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $src1 dst $vtep_ip src_vni 10010" + log_test $? 0 "(*, G) MDB entry replacement with source list" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep -v \" src \"" + log_test $? 0 "(*, G) MDB entry presence after replacement" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \"src $src1\"" + log_test $? 0 "(S, G) MDB entry presence after replacement" + + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep_ip src_vni 10010" + log_test $? 0 "(*, G) MDB entry deletion" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep -v \" src \"" + log_test $? 1 "(*, G) MDB entry presence after deletion" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \"src $src1\"" + log_test $? 1 "(S, G) MDB entry presence after deletion" + + # Default filter mode and replacement. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp permanent dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep exclude" + log_test $? 0 "(*, G) MDB entry default filter mode" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode include source_list $src1 dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep include" + log_test $? 0 "(*, G) MDB entry after replacing filter mode to \"include\"" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \"src $src1\"" + log_test $? 0 "(S, G) MDB entry after replacing filter mode to \"include\"" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \"src $src1\" | grep blocked" + log_test $? 1 "\"blocked\" flag after replacing filter mode to \"include\"" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $src1 dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep exclude" + log_test $? 0 "(*, G) MDB entry after replacing filter mode to \"exclude\"" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \"src $src1\"" + log_test $? 0 "(S, G) MDB entry after replacing filter mode to \"exclude\"" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \"src $src1\" | grep blocked" + log_test $? 0 "\"blocked\" flag after replacing filter mode to \"exclude\"" + + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep_ip src_vni 10010" + + # Default source list and replacement. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp permanent dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep source_list" + log_test $? 1 "(*, G) MDB entry default source list" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $src1,$src2,$src3 dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \"src $src1\"" + log_test $? 0 "(S, G) MDB entry of 1st source after replacing source list" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \"src $src2\"" + log_test $? 0 "(S, G) MDB entry of 2nd source after replacing source list" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \"src $src3\"" + log_test $? 
0 "(S, G) MDB entry of 3rd source after replacing source list" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $src1,$src3 dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \"src $src1\"" + log_test $? 0 "(S, G) MDB entry of 1st source after removing source" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \"src $src2\"" + log_test $? 1 "(S, G) MDB entry of 2nd source after removing source" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \"src $src3\"" + log_test $? 0 "(S, G) MDB entry of 3rd source after removing source" + + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep_ip src_vni 10010" + + # Default protocol and replacement. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $src1 dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep -v \" src \" | grep \"proto static\"" + log_test $? 0 "(*, G) MDB entry default protocol" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \" src \" | grep \"proto static\"" + log_test $? 0 "(S, G) MDB entry default protocol" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $src1 proto bgp dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep -v \" src \" | grep \"proto bgp\"" + log_test $? 0 "(*, G) MDB entry protocol after replacement" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \" src \" | grep \"proto bgp\"" + log_test $? 0 "(S, G) MDB entry protocol after replacement" + + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep_ip src_vni 10010" + + # Default destination port and replacement. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $src1 dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep -v \" src \" | grep \" dst_port \"" + log_test $? 1 "(*, G) MDB entry default destination port" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \" src \" | grep \" dst_port \"" + log_test $? 1 "(S, G) MDB entry default destination port" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $src1 dst $vtep_ip dst_port 1234 src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep -v \" src \" | grep \" dst_port 1234 \"" + log_test $? 0 "(*, G) MDB entry destination port after replacement" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \" src \" | grep \" dst_port 1234 \"" + log_test $? 0 "(S, G) MDB entry destination port after replacement" + + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep_ip src_vni 10010" + + # Default destination VNI and replacement. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $src1 dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep -v \" src \" | grep \" vni \"" + log_test $? 1 "(*, G) MDB entry default destination VNI" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \" src \" | grep \" vni \"" + log_test $? 
1 "(S, G) MDB entry default destination VNI" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $src1 dst $vtep_ip vni 1234 src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep -v \" src \" | grep \" vni 1234 \"" + log_test $? 0 "(*, G) MDB entry destination VNI after replacement" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \" src \" | grep \" vni 1234 \"" + log_test $? 0 "(S, G) MDB entry destination VNI after replacement" + + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep_ip src_vni 10010" + + # Default outgoing interface and replacement. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $src1 dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep -v \" src \" | grep \" via \"" + log_test $? 1 "(*, G) MDB entry default outgoing interface" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \" src \" | grep \" via \"" + log_test $? 1 "(S, G) MDB entry default outgoing interface" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $src1 dst $vtep_ip src_vni 10010 via veth0" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep -v \" src \" | grep \" via veth0 \"" + log_test $? 0 "(*, G) MDB entry outgoing interface after replacement" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep \" src \" | grep \" via veth0 \"" + log_test $? 0 "(S, G) MDB entry outgoing interface after replacement" + + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep_ip src_vni 10010" + + # Error cases. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $all_zeros_grp permanent filter_mode exclude dst $vtep_ip src_vni 10010" + log_test $? 255 "All-zeros group with filter mode" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $all_zeros_grp permanent source_list $src1 dst $vtep_ip src_vni 10010" + log_test $? 255 "All-zeros group with source list" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp permanent filter_mode include dst $vtep_ip src_vni 10010" + log_test $? 255 "(*, G) INCLUDE with an empty source list" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $grp dst $vtep_ip src_vni 10010" + log_test $? 255 "Invalid source in source list" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp permanent source_list $src1 dst $vtep_ip src_vni 10010" + log_test $? 
255 "Source list without filter mode" +} + +star_g_ipv4_ipv4() +{ + local ns1=ns1_v4 + local grp=239.1.1.1 + local src1=192.0.2.129 + local src2=192.0.2.130 + local src3=192.0.2.131 + local vtep_ip=198.51.100.100 + local all_zeros_grp=0.0.0.0 + + echo + echo "Control path: (*, G) operations - IPv4 overlay / IPv4 underlay" + echo "--------------------------------------------------------------" + + star_g_common $ns1 $grp $src1 $src2 $src3 $vtep_ip $all_zeros_grp +} + +star_g_ipv6_ipv4() +{ + local ns1=ns1_v4 + local grp=ff0e::1 + local src1=2001:db8:100::1 + local src2=2001:db8:100::2 + local src3=2001:db8:100::3 + local vtep_ip=198.51.100.100 + local all_zeros_grp=:: + + echo + echo "Control path: (*, G) operations - IPv6 overlay / IPv4 underlay" + echo "--------------------------------------------------------------" + + star_g_common $ns1 $grp $src1 $src2 $src3 $vtep_ip $all_zeros_grp +} + +star_g_ipv4_ipv6() +{ + local ns1=ns1_v6 + local grp=239.1.1.1 + local src1=192.0.2.129 + local src2=192.0.2.130 + local src3=192.0.2.131 + local vtep_ip=2001:db8:1000::1 + local all_zeros_grp=0.0.0.0 + + echo + echo "Control path: (*, G) operations - IPv4 overlay / IPv6 underlay" + echo "--------------------------------------------------------------" + + star_g_common $ns1 $grp $src1 $src2 $src3 $vtep_ip $all_zeros_grp +} + +star_g_ipv6_ipv6() +{ + local ns1=ns1_v6 + local grp=ff0e::1 + local src1=2001:db8:100::1 + local src2=2001:db8:100::2 + local src3=2001:db8:100::3 + local vtep_ip=2001:db8:1000::1 + local all_zeros_grp=:: + + echo + echo "Control path: (*, G) operations - IPv6 overlay / IPv6 underlay" + echo "--------------------------------------------------------------" + + star_g_common $ns1 $grp $src1 $src2 $src3 $vtep_ip $all_zeros_grp +} + +sg_common() +{ + local ns1=$1; shift + local grp=$1; shift + local src=$1; shift + local vtep_ip=$1; shift + local all_zeros_grp=$1; shift + + # Test control path operations specific to (S, G) entries. + + # Default filter mode. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp src $src permanent dst $vtep_ip src_vni 10010" + run_cmd "bridge -n $ns1 -d -s mdb show dev vx0 | grep $grp | grep include" + log_test $? 0 "(S, G) MDB entry default filter mode" + + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp src $src permanent dst $vtep_ip src_vni 10010" + + # Error cases. + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp src $src permanent filter_mode include dst $vtep_ip src_vni 10010" + log_test $? 255 "(S, G) with filter mode" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp src $src permanent source_list $src dst $vtep_ip src_vni 10010" + log_test $? 255 "(S, G) with source list" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp src $grp permanent dst $vtep_ip src_vni 10010" + log_test $? 255 "(S, G) with an invalid source list" + + run_cmd "bridge -n $ns1 mdb add dev vx0 port vx0 grp $all_zeros_grp src $src permanent dst $vtep_ip src_vni 10010" + log_test $? 
255 "All-zeros group with source" +} + +sg_ipv4_ipv4() +{ + local ns1=ns1_v4 + local grp=239.1.1.1 + local src=192.0.2.129 + local vtep_ip=198.51.100.100 + local all_zeros_grp=0.0.0.0 + + echo + echo "Control path: (S, G) operations - IPv4 overlay / IPv4 underlay" + echo "--------------------------------------------------------------" + + sg_common $ns1 $grp $src $vtep_ip $all_zeros_grp +} + +sg_ipv6_ipv4() +{ + local ns1=ns1_v4 + local grp=ff0e::1 + local src=2001:db8:100::1 + local vtep_ip=198.51.100.100 + local all_zeros_grp=:: + + echo + echo "Control path: (S, G) operations - IPv6 overlay / IPv4 underlay" + echo "--------------------------------------------------------------" + + sg_common $ns1 $grp $src $vtep_ip $all_zeros_grp +} + +sg_ipv4_ipv6() +{ + local ns1=ns1_v6 + local grp=239.1.1.1 + local src=192.0.2.129 + local vtep_ip=2001:db8:1000::1 + local all_zeros_grp=0.0.0.0 + + echo + echo "Control path: (S, G) operations - IPv4 overlay / IPv6 underlay" + echo "--------------------------------------------------------------" + + sg_common $ns1 $grp $src $vtep_ip $all_zeros_grp +} + +sg_ipv6_ipv6() +{ + local ns1=ns1_v6 + local grp=ff0e::1 + local src=2001:db8:100::1 + local vtep_ip=2001:db8:1000::1 + local all_zeros_grp=:: + + echo + echo "Control path: (S, G) operations - IPv6 overlay / IPv6 underlay" + echo "--------------------------------------------------------------" + + sg_common $ns1 $grp $src $vtep_ip $all_zeros_grp +} + +ipv4_grps_get() +{ + local max_grps=$1; shift + local i + + for i in $(seq 0 $((max_grps - 1))); do + echo "239.1.1.$i" + done +} + +ipv6_grps_get() +{ + local max_grps=$1; shift + local i + + for i in $(seq 0 $((max_grps - 1))); do + echo "ff0e::$(printf %x $i)" + done +} + +dump_common() +{ + local ns1=$1; shift + local local_addr=$1; shift + local remote_prefix=$1; shift + local fn=$1; shift + local max_vxlan_devs=2 + local max_remotes=64 + local max_grps=256 + local num_entries + local batch_file + local grp + local i j + + # The kernel maintains various markers for the MDB dump. Add a test for + # large scale MDB dump to make sure that all the configured entries are + # dumped and that the markers are used correctly. + + # Create net devices. + for i in $(seq 1 $max_vxlan_devs); do + ip -n $ns1 link add name vx-test${i} up type vxlan \ + local $local_addr dstport 4789 external vnifilter + done + + # Create batch file with MDB entries. + batch_file=$(mktemp) + for i in $(seq 1 $max_vxlan_devs); do + for j in $(seq 1 $max_remotes); do + for grp in $($fn $max_grps); do + echo "mdb add dev vx-test${i} port vx-test${i} grp $grp permanent dst ${remote_prefix}${j}" >> $batch_file + done + done + done + + # Program the batch file and check for expected number of entries. + bridge -n $ns1 -b $batch_file + for i in $(seq 1 $max_vxlan_devs); do + num_entries=$(bridge -n $ns1 mdb show dev vx-test${i} | grep "permanent" | wc -l) + [[ $num_entries -eq $((max_grps * max_remotes)) ]] + log_test $? 0 "Large scale dump - VXLAN device #$i" + done + + rm -rf $batch_file +} + +dump_ipv4_ipv4() +{ + local ns1=ns1_v4 + local local_addr=192.0.2.1 + local remote_prefix=198.51.100. + local fn=ipv4_grps_get + + echo + echo "Control path: Large scale MDB dump - IPv4 overlay / IPv4 underlay" + echo "-----------------------------------------------------------------" + + dump_common $ns1 $local_addr $remote_prefix $fn +} + +dump_ipv6_ipv4() +{ + local ns1=ns1_v4 + local local_addr=192.0.2.1 + local remote_prefix=198.51.100. 
+ local fn=ipv6_grps_get + + echo + echo "Control path: Large scale MDB dump - IPv6 overlay / IPv4 underlay" + echo "-----------------------------------------------------------------" + + dump_common $ns1 $local_addr $remote_prefix $fn +} + +dump_ipv4_ipv6() +{ + local ns1=ns1_v6 + local local_addr=2001:db8:1::1 + local remote_prefix=2001:db8:1000:: + local fn=ipv4_grps_get + + echo + echo "Control path: Large scale MDB dump - IPv4 overlay / IPv6 underlay" + echo "-----------------------------------------------------------------" + + dump_common $ns1 $local_addr $remote_prefix $fn +} + +dump_ipv6_ipv6() +{ + local ns1=ns1_v6 + local local_addr=2001:db8:1::1 + local remote_prefix=2001:db8:1000:: + local fn=ipv6_grps_get + + echo + echo "Control path: Large scale MDB dump - IPv6 overlay / IPv6 underlay" + echo "-----------------------------------------------------------------" + + dump_common $ns1 $local_addr $remote_prefix $fn +} + +################################################################################ +# Tests - Data path + +encap_params_common() +{ + local ns1=$1; shift + local ns2=$1; shift + local vtep1_ip=$1; shift + local vtep2_ip=$1; shift + local plen=$1; shift + local enc_ethtype=$1; shift + local grp=$1; shift + local src=$1; shift + local mz=$1; shift + + # Test that packets forwarded by the VXLAN MDB are encapsulated with + # the correct parameters. Transmit packets from the first namespace and + # check that they hit the corresponding filters on the ingress of the + # second namespace. + + run_cmd "tc -n $ns2 qdisc replace dev veth0 clsact" + run_cmd "tc -n $ns2 qdisc replace dev vx0 clsact" + run_cmd "ip -n $ns2 address replace $vtep1_ip/$plen dev lo" + run_cmd "ip -n $ns2 address replace $vtep2_ip/$plen dev lo" + + # Check destination IP. + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep1_ip src_vni 10010" + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep2_ip src_vni 10020" + + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_dst_ip $vtep1_ip action pass" + run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "Destination IP - match" + + run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "Destination IP - no match" + + run_cmd "tc -n $ns2 filter del dev vx0 ingress pref 1 handle 101 flower" + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep2_ip src_vni 10020" + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep1_ip src_vni 10010" + + # Check destination port. + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep1_ip src_vni 10010" + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep1_ip dst_port 1111 src_vni 10020" + + run_cmd "tc -n $ns2 filter replace dev veth0 ingress pref 1 handle 101 proto $enc_ethtype flower ip_proto udp dst_port 4789 action pass" + run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev veth0 ingress" 101 1 + log_test $? 0 "Default destination port - match" + + run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev veth0 ingress" 101 1 + log_test $? 
0 "Default destination port - no match" + + run_cmd "tc -n $ns2 filter replace dev veth0 ingress pref 1 handle 101 proto $enc_ethtype flower ip_proto udp dst_port 1111 action pass" + run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev veth0 ingress" 101 1 + log_test $? 0 "Non-default destination port - match" + + run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev veth0 ingress" 101 1 + log_test $? 0 "Non-default destination port - no match" + + run_cmd "tc -n $ns2 filter del dev veth0 ingress pref 1 handle 101 flower" + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep1_ip src_vni 10020" + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep1_ip src_vni 10010" + + # Check default VNI. + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep1_ip src_vni 10010" + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep1_ip src_vni 10020" + + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_key_id 10010 action pass" + run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "Default destination VNI - match" + + run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "Default destination VNI - no match" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep1_ip vni 10020 src_vni 10010" + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep1_ip vni 10010 src_vni 10020" + + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_key_id 10020 action pass" + run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "Non-default destination VNI - match" + + run_cmd "ip netns exec $ns1 $mz br0.20 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 
0 "Non-default destination VNI - no match" + + run_cmd "tc -n $ns2 filter del dev vx0 ingress pref 1 handle 101 flower" + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep1_ip src_vni 10020" + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep1_ip src_vni 10010" +} + +encap_params_ipv4_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local vtep1_ip=198.51.100.100 + local vtep2_ip=198.51.100.200 + local plen=32 + local enc_ethtype="ip" + local grp=239.1.1.1 + local src=192.0.2.129 + + echo + echo "Data path: Encapsulation parameters - IPv4 overlay / IPv4 underlay" + echo "------------------------------------------------------------------" + + encap_params_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $enc_ethtype \ + $grp $src "mausezahn" +} + +encap_params_ipv6_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local vtep1_ip=198.51.100.100 + local vtep2_ip=198.51.100.200 + local plen=32 + local enc_ethtype="ip" + local grp=ff0e::1 + local src=2001:db8:100::1 + + echo + echo "Data path: Encapsulation parameters - IPv6 overlay / IPv4 underlay" + echo "------------------------------------------------------------------" + + encap_params_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $enc_ethtype \ + $grp $src "mausezahn -6" +} + +encap_params_ipv4_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local vtep1_ip=2001:db8:1000::1 + local vtep2_ip=2001:db8:2000::1 + local plen=128 + local enc_ethtype="ipv6" + local grp=239.1.1.1 + local src=192.0.2.129 + + echo + echo "Data path: Encapsulation parameters - IPv4 overlay / IPv6 underlay" + echo "------------------------------------------------------------------" + + encap_params_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $enc_ethtype \ + $grp $src "mausezahn" +} + +encap_params_ipv6_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local vtep1_ip=2001:db8:1000::1 + local vtep2_ip=2001:db8:2000::1 + local plen=128 + local enc_ethtype="ipv6" + local grp=ff0e::1 + local src=2001:db8:100::1 + + echo + echo "Data path: Encapsulation parameters - IPv6 overlay / IPv6 underlay" + echo "------------------------------------------------------------------" + + encap_params_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $enc_ethtype \ + $grp $src "mausezahn -6" +} + +starg_exclude_ir_common() +{ + local ns1=$1; shift + local ns2=$1; shift + local vtep1_ip=$1; shift + local vtep2_ip=$1; shift + local plen=$1; shift + local grp=$1; shift + local valid_src=$1; shift + local invalid_src=$1; shift + local mz=$1; shift + + # Install a (*, G) EXCLUDE MDB entry with one source and two remote + # VTEPs. Make sure that the source in the source list is not forwarded + # and that a source not in the list is forwarded. Remove one of the + # VTEPs from the entry and make sure that packets are only forwarded to + # the remaining VTEP. 
+ + run_cmd "tc -n $ns2 qdisc replace dev vx0 clsact" + run_cmd "ip -n $ns2 address replace $vtep1_ip/$plen dev lo" + run_cmd "ip -n $ns2 address replace $vtep2_ip/$plen dev lo" + + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_dst_ip $vtep1_ip action pass" + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 102 proto all flower enc_dst_ip $vtep2_ip action pass" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $invalid_src dst $vtep1_ip src_vni 10010" + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $invalid_src dst $vtep2_ip src_vni 10010" + + # Check that invalid source is not forwarded to any VTEP. + run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 0 + log_test $? 0 "Block excluded source - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 0 + log_test $? 0 "Block excluded source - second VTEP" + + # Check that valid source is forwarded to both VTEPs. + run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "Forward valid source - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 1 + log_test $? 0 "Forward valid source - second VTEP" + + # Remove second VTEP. + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep2_ip src_vni 10010" + + # Check that invalid source is not forwarded to any VTEP. + run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "Block excluded source after removal - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 1 + log_test $? 0 "Block excluded source after removal - second VTEP" + + # Check that valid source is forwarded to the remaining VTEP. + run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 2 + log_test $? 0 "Forward valid source after removal - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 1 + log_test $? 
0 "Forward valid source after removal - second VTEP" +} + +starg_exclude_ir_ipv4_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local vtep1_ip=198.51.100.100 + local vtep2_ip=198.51.100.200 + local plen=32 + local grp=239.1.1.1 + local valid_src=192.0.2.129 + local invalid_src=192.0.2.145 + + echo + echo "Data path: (*, G) EXCLUDE - IR - IPv4 overlay / IPv4 underlay" + echo "-------------------------------------------------------------" + + starg_exclude_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \ + $valid_src $invalid_src "mausezahn" +} + +starg_exclude_ir_ipv6_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local vtep1_ip=198.51.100.100 + local vtep2_ip=198.51.100.200 + local plen=32 + local grp=ff0e::1 + local valid_src=2001:db8:100::1 + local invalid_src=2001:db8:200::1 + + echo + echo "Data path: (*, G) EXCLUDE - IR - IPv6 overlay / IPv4 underlay" + echo "-------------------------------------------------------------" + + starg_exclude_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \ + $valid_src $invalid_src "mausezahn -6" +} + +starg_exclude_ir_ipv4_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local vtep1_ip=2001:db8:1000::1 + local vtep2_ip=2001:db8:2000::1 + local plen=128 + local grp=239.1.1.1 + local valid_src=192.0.2.129 + local invalid_src=192.0.2.145 + + echo + echo "Data path: (*, G) EXCLUDE - IR - IPv4 overlay / IPv6 underlay" + echo "-------------------------------------------------------------" + + starg_exclude_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \ + $valid_src $invalid_src "mausezahn" +} + +starg_exclude_ir_ipv6_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local vtep1_ip=2001:db8:1000::1 + local vtep2_ip=2001:db8:2000::1 + local plen=128 + local grp=ff0e::1 + local valid_src=2001:db8:100::1 + local invalid_src=2001:db8:200::1 + + echo + echo "Data path: (*, G) EXCLUDE - IR - IPv6 overlay / IPv6 underlay" + echo "-------------------------------------------------------------" + + starg_exclude_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \ + $valid_src $invalid_src "mausezahn -6" +} + +starg_include_ir_common() +{ + local ns1=$1; shift + local ns2=$1; shift + local vtep1_ip=$1; shift + local vtep2_ip=$1; shift + local plen=$1; shift + local grp=$1; shift + local valid_src=$1; shift + local invalid_src=$1; shift + local mz=$1; shift + + # Install a (*, G) INCLUDE MDB entry with one source and two remote + # VTEPs. Make sure that the source in the source list is forwarded and + # that a source not in the list is not forwarded. Remove one of the + # VTEPs from the entry and make sure that packets are only forwarded to + # the remaining VTEP. + + run_cmd "tc -n $ns2 qdisc replace dev vx0 clsact" + run_cmd "ip -n $ns2 address replace $vtep1_ip/$plen dev lo" + run_cmd "ip -n $ns2 address replace $vtep2_ip/$plen dev lo" + + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_dst_ip $vtep1_ip action pass" + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 102 proto all flower enc_dst_ip $vtep2_ip action pass" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode include source_list $valid_src dst $vtep1_ip src_vni 10010" + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode include source_list $valid_src dst $vtep2_ip src_vni 10010" + + # Check that invalid source is not forwarded to any VTEP. 
+ run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 0 + log_test $? 0 "Block excluded source - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 0 + log_test $? 0 "Block excluded source - second VTEP" + + # Check that valid source is forwarded to both VTEPs. + run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "Forward valid source - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 1 + log_test $? 0 "Forward valid source - second VTEP" + + # Remove second VTEP. + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep2_ip src_vni 10010" + + # Check that invalid source is not forwarded to any VTEP. + run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "Block excluded source after removal - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 1 + log_test $? 0 "Block excluded source after removal - second VTEP" + + # Check that valid source is forwarded to the remaining VTEP. + run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 2 + log_test $? 0 "Forward valid source after removal - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 1 + log_test $? 0 "Forward valid source after removal - second VTEP" +} + +starg_include_ir_ipv4_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local vtep1_ip=198.51.100.100 + local vtep2_ip=198.51.100.200 + local plen=32 + local grp=239.1.1.1 + local valid_src=192.0.2.129 + local invalid_src=192.0.2.145 + + echo + echo "Data path: (*, G) INCLUDE - IR - IPv4 overlay / IPv4 underlay" + echo "-------------------------------------------------------------" + + starg_include_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \ + $valid_src $invalid_src "mausezahn" +} + +starg_include_ir_ipv6_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local vtep1_ip=198.51.100.100 + local vtep2_ip=198.51.100.200 + local plen=32 + local grp=ff0e::1 + local valid_src=2001:db8:100::1 + local invalid_src=2001:db8:200::1 + + echo + echo "Data path: (*, G) INCLUDE - IR - IPv6 overlay / IPv4 underlay" + echo "-------------------------------------------------------------" + + starg_include_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \ + $valid_src $invalid_src "mausezahn -6" +} + +starg_include_ir_ipv4_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local vtep1_ip=2001:db8:1000::1 + local vtep2_ip=2001:db8:2000::1 + local plen=128 + local grp=239.1.1.1 + local valid_src=192.0.2.129 + local invalid_src=192.0.2.145 + + echo + echo "Data path: (*, G) INCLUDE - IR - IPv4 overlay / IPv6 underlay" + echo "-------------------------------------------------------------" + + starg_include_ir_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $grp \ + $valid_src $invalid_src "mausezahn" +} + +starg_include_ir_ipv6_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local vtep1_ip=2001:db8:1000::1 + local vtep2_ip=2001:db8:2000::1 + local plen=128 + local grp=ff0e::1 + local valid_src=2001:db8:100::1 + local invalid_src=2001:db8:200::1 + + echo + echo "Data path: (*, G) INCLUDE - IR - IPv6 overlay / IPv6 underlay" + echo "-------------------------------------------------------------" + + starg_include_ir_common $ns1 $ns2 
$vtep1_ip $vtep2_ip $plen $grp \ + $valid_src $invalid_src "mausezahn -6" +} + +starg_exclude_p2mp_common() +{ + local ns1=$1; shift + local ns2=$1; shift + local mcast_grp=$1; shift + local plen=$1; shift + local grp=$1; shift + local valid_src=$1; shift + local invalid_src=$1; shift + local mz=$1; shift + + # Install a (*, G) EXCLUDE MDB entry with one source and one multicast + # group to which packets are sent. Make sure that the source in the + # source list is not forwarded and that a source not in the list is + # forwarded. + + run_cmd "tc -n $ns2 qdisc replace dev vx0 clsact" + run_cmd "ip -n $ns2 address replace $mcast_grp/$plen dev veth0 autojoin" + + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_dst_ip $mcast_grp action pass" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode exclude source_list $invalid_src dst $mcast_grp src_vni 10010 via veth0" + + # Check that invalid source is not forwarded. + run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 0 + log_test $? 0 "Block excluded source" + + # Check that valid source is forwarded. + run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "Forward valid source" + + # Remove the VTEP from the multicast group. + run_cmd "ip -n $ns2 address del $mcast_grp/$plen dev veth0" + + # Check that valid source is not received anymore. + run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "Receive of valid source after removal from group" +} + +starg_exclude_p2mp_ipv4_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local mcast_grp=238.1.1.1 + local plen=32 + local grp=239.1.1.1 + local valid_src=192.0.2.129 + local invalid_src=192.0.2.145 + + echo + echo "Data path: (*, G) EXCLUDE - P2MP - IPv4 overlay / IPv4 underlay" + echo "---------------------------------------------------------------" + + starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \ + $valid_src $invalid_src "mausezahn" +} + +starg_exclude_p2mp_ipv6_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local mcast_grp=238.1.1.1 + local plen=32 + local grp=ff0e::1 + local valid_src=2001:db8:100::1 + local invalid_src=2001:db8:200::1 + + echo + echo "Data path: (*, G) EXCLUDE - P2MP - IPv6 overlay / IPv4 underlay" + echo "---------------------------------------------------------------" + + starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \ + $valid_src $invalid_src "mausezahn -6" +} + +starg_exclude_p2mp_ipv4_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local mcast_grp=ff0e::2 + local plen=128 + local grp=239.1.1.1 + local valid_src=192.0.2.129 + local invalid_src=192.0.2.145 + + echo + echo "Data path: (*, G) EXCLUDE - P2MP - IPv4 overlay / IPv6 underlay" + echo "---------------------------------------------------------------" + + starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \ + $valid_src $invalid_src "mausezahn" +} + +starg_exclude_p2mp_ipv6_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local mcast_grp=ff0e::2 + local plen=128 + local grp=ff0e::1 + local valid_src=2001:db8:100::1 + local invalid_src=2001:db8:200::1 + + echo + echo "Data path: (*, G) EXCLUDE - P2MP - IPv6 overlay / IPv6 underlay" + echo 
"---------------------------------------------------------------" + + starg_exclude_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \ + $valid_src $invalid_src "mausezahn -6" +} + +starg_include_p2mp_common() +{ + local ns1=$1; shift + local ns2=$1; shift + local mcast_grp=$1; shift + local plen=$1; shift + local grp=$1; shift + local valid_src=$1; shift + local invalid_src=$1; shift + local mz=$1; shift + + # Install a (*, G) INCLUDE MDB entry with one source and one multicast + # group to which packets are sent. Make sure that the source in the + # source list is forwarded and that a source not in the list is not + # forwarded. + + run_cmd "tc -n $ns2 qdisc replace dev vx0 clsact" + run_cmd "ip -n $ns2 address replace $mcast_grp/$plen dev veth0 autojoin" + + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_dst_ip $mcast_grp action pass" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent filter_mode include source_list $valid_src dst $mcast_grp src_vni 10010 via veth0" + + # Check that invalid source is not forwarded. + run_cmd "ip netns exec $ns1 $mz br0.10 -A $invalid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 0 + log_test $? 0 "Block excluded source" + + # Check that valid source is forwarded. + run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "Forward valid source" + + # Remove the VTEP from the multicast group. + run_cmd "ip -n $ns2 address del $mcast_grp/$plen dev veth0" + + # Check that valid source is not received anymore. + run_cmd "ip netns exec $ns1 $mz br0.10 -A $valid_src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 
0 "Receive of valid source after removal from group" +} + +starg_include_p2mp_ipv4_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local mcast_grp=238.1.1.1 + local plen=32 + local grp=239.1.1.1 + local valid_src=192.0.2.129 + local invalid_src=192.0.2.145 + + echo + echo "Data path: (*, G) INCLUDE - P2MP - IPv4 overlay / IPv4 underlay" + echo "---------------------------------------------------------------" + + starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \ + $valid_src $invalid_src "mausezahn" +} + +starg_include_p2mp_ipv6_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local mcast_grp=238.1.1.1 + local plen=32 + local grp=ff0e::1 + local valid_src=2001:db8:100::1 + local invalid_src=2001:db8:200::1 + + echo + echo "Data path: (*, G) INCLUDE - P2MP - IPv6 overlay / IPv4 underlay" + echo "---------------------------------------------------------------" + + starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \ + $valid_src $invalid_src "mausezahn -6" +} + +starg_include_p2mp_ipv4_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local mcast_grp=ff0e::2 + local plen=128 + local grp=239.1.1.1 + local valid_src=192.0.2.129 + local invalid_src=192.0.2.145 + + echo + echo "Data path: (*, G) INCLUDE - P2MP - IPv4 overlay / IPv6 underlay" + echo "---------------------------------------------------------------" + + starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \ + $valid_src $invalid_src "mausezahn" +} + +starg_include_p2mp_ipv6_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local mcast_grp=ff0e::2 + local plen=128 + local grp=ff0e::1 + local valid_src=2001:db8:100::1 + local invalid_src=2001:db8:200::1 + + echo + echo "Data path: (*, G) INCLUDE - P2MP - IPv6 overlay / IPv6 underlay" + echo "---------------------------------------------------------------" + + starg_include_p2mp_common $ns1 $ns2 $mcast_grp $plen $grp \ + $valid_src $invalid_src "mausezahn -6" +} + +egress_vni_translation_common() +{ + local ns1=$1; shift + local ns2=$1; shift + local mcast_grp=$1; shift + local plen=$1; shift + local proto=$1; shift + local grp=$1; shift + local src=$1; shift + local mz=$1; shift + + # When P2MP tunnels are used with optimized inter-subnet multicast + # (OISM) [1], the ingress VTEP does not perform VNI translation and + # uses the VNI of the source broadcast domain (BD). If the egress VTEP + # is a member in the source BD, then no VNI translation is needed. + # Otherwise, the egress VTEP needs to translate the VNI to the + # supplementary broadcast domain (SBD) VNI, which is usually the L3VNI. + # + # In this test, remove the VTEP in the second namespace from VLAN 10 + # (VNI 10010) and make sure that a packet sent from this VLAN on the + # first VTEP is received by the SVI corresponding to the L3VNI (14000 / + # VLAN 4000) on the second VTEP. + # + # The second VTEP will be able to decapsulate the packet with VNI 10010 + # because this VNI is configured on its shared VXLAN device. Later, + # when ingressing the bridge, the VNI to VLAN lookup will fail because + # the VTEP is not a member in VLAN 10, which will cause the packet to + # be tagged with VLAN 4000 since it is configured as PVID. 
+ # + # [1] https://datatracker.ietf.org/doc/html/draft-ietf-bess-evpn-irb-mcast + + run_cmd "tc -n $ns2 qdisc replace dev br0.4000 clsact" + run_cmd "ip -n $ns2 address replace $mcast_grp/$plen dev veth0 autojoin" + run_cmd "tc -n $ns2 filter replace dev br0.4000 ingress pref 1 handle 101 proto $proto flower src_ip $src dst_ip $grp action pass" + + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp src $src permanent dst $mcast_grp src_vni 10010 via veth0" + + # Remove the second VTEP from VLAN 10. + run_cmd "bridge -n $ns2 vlan del vid 10 dev vx0" + + # Make sure that packets sent from the first VTEP over VLAN 10 are + # received by the SVI corresponding to the L3VNI (14000 / VLAN 4000) on + # the second VTEP, since it is configured as PVID. + run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev br0.4000 ingress" 101 1 + log_test $? 0 "Egress VNI translation - PVID configured" + + # Remove PVID flag from VLAN 4000 on the second VTEP and make sure + # packets are no longer received by the SVI interface. + run_cmd "bridge -n $ns2 vlan add vid 4000 dev vx0" + run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev br0.4000 ingress" 101 1 + log_test $? 0 "Egress VNI translation - no PVID configured" + + # Reconfigure the PVID and make sure packets are received again. + run_cmd "bridge -n $ns2 vlan add vid 4000 dev vx0 pvid" + run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev br0.4000 ingress" 101 2 + log_test $? 0 "Egress VNI translation - PVID reconfigured" +} + +egress_vni_translation_ipv4_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local mcast_grp=238.1.1.1 + local plen=32 + local proto="ipv4" + local grp=239.1.1.1 + local src=192.0.2.129 + + echo + echo "Data path: Egress VNI translation - IPv4 overlay / IPv4 underlay" + echo "----------------------------------------------------------------" + + egress_vni_translation_common $ns1 $ns2 $mcast_grp $plen $proto $grp \ + $src "mausezahn" +} + +egress_vni_translation_ipv6_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local mcast_grp=238.1.1.1 + local plen=32 + local proto="ipv6" + local grp=ff0e::1 + local src=2001:db8:100::1 + + echo + echo "Data path: Egress VNI translation - IPv6 overlay / IPv4 underlay" + echo "----------------------------------------------------------------" + + egress_vni_translation_common $ns1 $ns2 $mcast_grp $plen $proto $grp \ + $src "mausezahn -6" +} + +egress_vni_translation_ipv4_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local mcast_grp=ff0e::2 + local plen=128 + local proto="ipv4" + local grp=239.1.1.1 + local src=192.0.2.129 + + echo + echo "Data path: Egress VNI translation - IPv4 overlay / IPv6 underlay" + echo "----------------------------------------------------------------" + + egress_vni_translation_common $ns1 $ns2 $mcast_grp $plen $proto $grp \ + $src "mausezahn" +} + +egress_vni_translation_ipv6_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local mcast_grp=ff0e::2 + local plen=128 + local proto="ipv6" + local grp=ff0e::1 + local src=2001:db8:100::1 + + echo + echo "Data path: Egress VNI translation - IPv6 overlay / IPv6 underlay" + echo "----------------------------------------------------------------" + + egress_vni_translation_common $ns1 $ns2 $mcast_grp $plen $proto $grp \ + $src "mausezahn -6" +} + +all_zeros_mdb_common() +{ + local ns1=$1; shift + 
local ns2=$1; shift + local vtep1_ip=$1; shift + local vtep2_ip=$1; shift + local vtep3_ip=$1; shift + local vtep4_ip=$1; shift + local plen=$1; shift + local ipv4_grp=239.1.1.1 + local ipv4_unreg_grp=239.2.2.2 + local ipv4_ll_grp=224.0.0.100 + local ipv4_src=192.0.2.129 + local ipv6_grp=ff0e::1 + local ipv6_unreg_grp=ff0e::2 + local ipv6_ll_grp=ff02::1 + local ipv6_src=2001:db8:100::1 + + # Install all-zeros (catchall) MDB entries for IPv4 and IPv6 traffic + # and make sure they only forward unregistered IP multicast traffic + # which is not link-local. Also make sure that each entry only forwards + # traffic from the matching address family. + + # Associate two different VTEPs with one all-zeros MDB entry: Two with + # the IPv4 entry (0.0.0.0) and another two with the IPv6 one (::). + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp 0.0.0.0 permanent dst $vtep1_ip src_vni 10010" + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp 0.0.0.0 permanent dst $vtep2_ip src_vni 10010" + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp :: permanent dst $vtep3_ip src_vni 10010" + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp :: permanent dst $vtep4_ip src_vni 10010" + + # Associate one VTEP from each set with a regular MDB entry: One with + # an IPv4 entry and another with an IPv6 one. + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $ipv4_grp permanent dst $vtep1_ip src_vni 10010" + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $ipv6_grp permanent dst $vtep3_ip src_vni 10010" + + # Add filters to match on decapsulated traffic in the second namespace. + run_cmd "tc -n $ns2 qdisc replace dev vx0 clsact" + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_dst_ip $vtep1_ip action pass" + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 102 proto all flower enc_dst_ip $vtep2_ip action pass" + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 103 proto all flower enc_dst_ip $vtep3_ip action pass" + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 104 proto all flower enc_dst_ip $vtep4_ip action pass" + + # Configure the VTEP addresses in the second namespace to enable + # decapsulation. + run_cmd "ip -n $ns2 address replace $vtep1_ip/$plen dev lo" + run_cmd "ip -n $ns2 address replace $vtep2_ip/$plen dev lo" + run_cmd "ip -n $ns2 address replace $vtep3_ip/$plen dev lo" + run_cmd "ip -n $ns2 address replace $vtep4_ip/$plen dev lo" + + # Send registered IPv4 multicast and make sure it only arrives to the + # first VTEP. + run_cmd "ip netns exec $ns1 mausezahn br0.10 -A $ipv4_src -B $ipv4_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "Registered IPv4 multicast - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 0 + log_test $? 0 "Registered IPv4 multicast - second VTEP" + + # Send unregistered IPv4 multicast that is not link-local and make sure + # it arrives to the first and second VTEPs. + run_cmd "ip netns exec $ns1 mausezahn br0.10 -A $ipv4_src -B $ipv4_unreg_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 2 + log_test $? 0 "Unregistered IPv4 multicast - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 1 + log_test $? 0 "Unregistered IPv4 multicast - second VTEP" + + # Send IPv4 link-local multicast traffic and make sure it does not + # arrive to any VTEP. 
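+	# Note that tc_check_packets() compares against the cumulative packet
+	# count, so the expected values below (2 and 1) are simply the counts
+	# left over from the previous transmissions, i.e. no new packets.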
+ run_cmd "ip netns exec $ns1 mausezahn br0.10 -A $ipv4_src -B $ipv4_ll_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 2 + log_test $? 0 "Link-local IPv4 multicast - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 1 + log_test $? 0 "Link-local IPv4 multicast - second VTEP" + + # Send registered IPv4 multicast using a unicast MAC address and make + # sure it does not arrive to any VTEP. + run_cmd "ip netns exec $ns1 mausezahn br0.10 -a own -b 00:11:22:33:44:55 -A $ipv4_src -B $ipv4_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 2 + log_test $? 0 "Registered IPv4 multicast with a unicast MAC - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 1 + log_test $? 0 "Registered IPv4 multicast with a unicast MAC - second VTEP" + + # Send registered IPv4 multicast using a broadcast MAC address and make + # sure it does not arrive to any VTEP. + run_cmd "ip netns exec $ns1 mausezahn br0.10 -a own -b bcast -A $ipv4_src -B $ipv4_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 2 + log_test $? 0 "Registered IPv4 multicast with a broadcast MAC - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 1 + log_test $? 0 "Registered IPv4 multicast with a broadcast MAC - second VTEP" + + # Make sure IPv4 traffic did not reach the VTEPs associated with + # IPv6 entries. + tc_check_packets "$ns2" "dev vx0 ingress" 103 0 + log_test $? 0 "IPv4 traffic - third VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 104 0 + log_test $? 0 "IPv4 traffic - fourth VTEP" + + # Reset IPv4 filters before testing IPv6 traffic. + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto all flower enc_dst_ip $vtep1_ip action pass" + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 102 proto all flower enc_dst_ip $vtep2_ip action pass" + + # Send registered IPv6 multicast and make sure it only arrives to the + # third VTEP. + run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -A $ipv6_src -B $ipv6_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 103 1 + log_test $? 0 "Registered IPv6 multicast - third VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 104 0 + log_test $? 0 "Registered IPv6 multicast - fourth VTEP" + + # Send unregistered IPv6 multicast that is not link-local and make sure + # it arrives to the third and fourth VTEPs. + run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -A $ipv6_src -B $ipv6_unreg_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 103 2 + log_test $? 0 "Unregistered IPv6 multicast - third VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 104 1 + log_test $? 0 "Unregistered IPv6 multicast - fourth VTEP" + + # Send IPv6 link-local multicast traffic and make sure it does not + # arrive to any VTEP. + run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -A $ipv6_src -B $ipv6_ll_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 103 2 + log_test $? 0 "Link-local IPv6 multicast - third VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 104 1 + log_test $? 0 "Link-local IPv6 multicast - fourth VTEP" + + # Send registered IPv6 multicast using a unicast MAC address and make + # sure it does not arrive to any VTEP. 
+ run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -a own -b 00:11:22:33:44:55 -A $ipv6_src -B $ipv6_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 103 2 + log_test $? 0 "Registered IPv6 multicast with a unicast MAC - third VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 104 1 + log_test $? 0 "Registered IPv6 multicast with a unicast MAC - fourth VTEP" + + # Send registered IPv6 multicast using a broadcast MAC address and make + # sure it does not arrive to any VTEP. + run_cmd "ip netns exec $ns1 mausezahn -6 br0.10 -a own -b bcast -A $ipv6_src -B $ipv6_grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 103 2 + log_test $? 0 "Registered IPv6 multicast with a broadcast MAC - third VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 104 1 + log_test $? 0 "Registered IPv6 multicast with a broadcast MAC - fourth VTEP" + + # Make sure IPv6 traffic did not reach the VTEPs associated with + # IPv4 entries. + tc_check_packets "$ns2" "dev vx0 ingress" 101 0 + log_test $? 0 "IPv6 traffic - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 0 + log_test $? 0 "IPv6 traffic - second VTEP" +} + +all_zeros_mdb_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local vtep1_ip=198.51.100.101 + local vtep2_ip=198.51.100.102 + local vtep3_ip=198.51.100.103 + local vtep4_ip=198.51.100.104 + local plen=32 + + echo + echo "Data path: All-zeros MDB entry - IPv4 underlay" + echo "----------------------------------------------" + + all_zeros_mdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $vtep3_ip \ + $vtep4_ip $plen +} + +all_zeros_mdb_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local vtep1_ip=2001:db8:1000::1 + local vtep2_ip=2001:db8:2000::1 + local vtep3_ip=2001:db8:3000::1 + local vtep4_ip=2001:db8:4000::1 + local plen=128 + + echo + echo "Data path: All-zeros MDB entry - IPv6 underlay" + echo "----------------------------------------------" + + all_zeros_mdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $vtep3_ip \ + $vtep4_ip $plen +} + +mdb_fdb_common() +{ + local ns1=$1; shift + local ns2=$1; shift + local vtep1_ip=$1; shift + local vtep2_ip=$1; shift + local plen=$1; shift + local proto=$1; shift + local grp=$1; shift + local src=$1; shift + local mz=$1; shift + + # Install an MDB entry and an FDB entry and make sure that the FDB + # entry only forwards traffic that was not forwarded by the MDB. + + # Associate the MDB entry with one VTEP and the FDB entry with another + # VTEP. + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp permanent dst $vtep1_ip src_vni 10010" + run_cmd "bridge -n $ns1 fdb add 00:00:00:00:00:00 dev vx0 self static dst $vtep2_ip src_vni 10010" + + # Add filters to match on decapsulated traffic in the second namespace. + run_cmd "tc -n $ns2 qdisc replace dev vx0 clsact" + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 101 proto $proto flower ip_proto udp dst_port 54321 enc_dst_ip $vtep1_ip action pass" + run_cmd "tc -n $ns2 filter replace dev vx0 ingress pref 1 handle 102 proto $proto flower ip_proto udp dst_port 54321 enc_dst_ip $vtep2_ip action pass" + + # Configure the VTEP addresses in the second namespace to enable + # decapsulation. + run_cmd "ip -n $ns2 address replace $vtep1_ip/$plen dev lo" + run_cmd "ip -n $ns2 address replace $vtep2_ip/$plen dev lo" + + # Send IP multicast traffic and make sure it is forwarded by the MDB + # and only arrives to the first VTEP. 
+ run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "IP multicast - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 0 + log_test $? 0 "IP multicast - second VTEP" + + # Send broadcast traffic and make sure it is forwarded by the FDB and + # only arrives to the second VTEP. + run_cmd "ip netns exec $ns1 $mz br0.10 -a own -b bcast -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "Broadcast - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 1 + log_test $? 0 "Broadcast - second VTEP" + + # Remove the MDB entry and make sure that IP multicast is now forwarded + # by the FDB to the second VTEP. + run_cmd "bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp dst $vtep1_ip src_vni 10010" + run_cmd "ip netns exec $ns1 $mz br0.10 -A $src -B $grp -t udp sp=12345,dp=54321 -p 100 -c 1 -q" + tc_check_packets "$ns2" "dev vx0 ingress" 101 1 + log_test $? 0 "IP multicast after removal - first VTEP" + tc_check_packets "$ns2" "dev vx0 ingress" 102 2 + log_test $? 0 "IP multicast after removal - second VTEP" +} + +mdb_fdb_ipv4_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local vtep1_ip=198.51.100.100 + local vtep2_ip=198.51.100.200 + local plen=32 + local proto="ipv4" + local grp=239.1.1.1 + local src=192.0.2.129 + + echo + echo "Data path: MDB with FDB - IPv4 overlay / IPv4 underlay" + echo "------------------------------------------------------" + + mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp $src \ + "mausezahn" +} + +mdb_fdb_ipv6_ipv4() +{ + local ns1=ns1_v4 + local ns2=ns2_v4 + local vtep1_ip=198.51.100.100 + local vtep2_ip=198.51.100.200 + local plen=32 + local proto="ipv6" + local grp=ff0e::1 + local src=2001:db8:100::1 + + echo + echo "Data path: MDB with FDB - IPv6 overlay / IPv4 underlay" + echo "------------------------------------------------------" + + mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp $src \ + "mausezahn -6" +} + +mdb_fdb_ipv4_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local vtep1_ip=2001:db8:1000::1 + local vtep2_ip=2001:db8:2000::1 + local plen=128 + local proto="ipv4" + local grp=239.1.1.1 + local src=192.0.2.129 + + echo + echo "Data path: MDB with FDB - IPv4 overlay / IPv6 underlay" + echo "------------------------------------------------------" + + mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp $src \ + "mausezahn" +} + +mdb_fdb_ipv6_ipv6() +{ + local ns1=ns1_v6 + local ns2=ns2_v6 + local vtep1_ip=2001:db8:1000::1 + local vtep2_ip=2001:db8:2000::1 + local plen=128 + local proto="ipv6" + local grp=ff0e::1 + local src=2001:db8:100::1 + + echo + echo "Data path: MDB with FDB - IPv6 overlay / IPv6 underlay" + echo "------------------------------------------------------" + + mdb_fdb_common $ns1 $ns2 $vtep1_ip $vtep2_ip $plen $proto $grp $src \ + "mausezahn -6" +} + +mdb_grp1_loop() +{ + local ns1=$1; shift + local vtep1_ip=$1; shift + local grp1=$1; shift + + while true; do + bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp1 dst $vtep1_ip src_vni 10010 + bridge -n $ns1 mdb add dev vx0 port vx0 grp $grp1 permanent dst $vtep1_ip src_vni 10010 + done >/dev/null 2>&1 +} + +mdb_grp2_loop() +{ + local ns1=$1; shift + local vtep1_ip=$1; shift + local vtep2_ip=$1; shift + local grp2=$1; shift + + while true; do + bridge -n $ns1 mdb del dev vx0 port vx0 grp $grp2 dst $vtep1_ip src_vni 10010 + bridge -n $ns1 mdb add dev vx0 
port vx0 grp $grp2 permanent dst $vtep1_ip src_vni 10010 + bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp2 permanent dst $vtep2_ip src_vni 10010 + done >/dev/null 2>&1 +} + +mdb_torture_common() +{ + local ns1=$1; shift + local vtep1_ip=$1; shift + local vtep2_ip=$1; shift + local grp1=$1; shift + local grp2=$1; shift + local src=$1; shift + local mz=$1; shift + local pid1 + local pid2 + local pid3 + local pid4 + + # Continuously send two streams that are forwarded by two different MDB + # entries. The first entry will be added and deleted in a loop. This + # allows us to test that the data path does not use freed MDB entry + # memory. The second entry will have two remotes, one that is added and + # deleted in a loop and another that is replaced in a loop. This allows + # us to test that the data path does not use freed remote entry memory. + # The test is considered successful if nothing crashed. + + # Create the MDB entries that will be continuously deleted / replaced. + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp1 permanent dst $vtep1_ip src_vni 10010" + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp2 permanent dst $vtep1_ip src_vni 10010" + run_cmd "bridge -n $ns1 mdb replace dev vx0 port vx0 grp $grp2 permanent dst $vtep2_ip src_vni 10010" + + mdb_grp1_loop $ns1 $vtep1_ip $grp1 & + pid1=$! + mdb_grp2_loop $ns1 $vtep1_ip $vtep2_ip $grp2 & + pid2=$! + ip netns exec $ns1 $mz br0.10 -A $src -B $grp1 -t udp sp=12345,dp=54321 -p 100 -c 0 -q & + pid3=$! + ip netns exec $ns1 $mz br0.10 -A $src -B $grp2 -t udp sp=12345,dp=54321 -p 100 -c 0 -q & + pid4=$! + + sleep 30 + kill -9 $pid1 $pid2 $pid3 $pid4 + wait $pid1 $pid2 $pid3 $pid4 2>/dev/null + + log_test 0 0 "Torture test" +} + +mdb_torture_ipv4_ipv4() +{ + local ns1=ns1_v4 + local vtep1_ip=198.51.100.100 + local vtep2_ip=198.51.100.200 + local grp1=239.1.1.1 + local grp2=239.2.2.2 + local src=192.0.2.129 + + echo + echo "Data path: MDB torture test - IPv4 overlay / IPv4 underlay" + echo "----------------------------------------------------------" + + mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp2 $src \ + "mausezahn" +} + +mdb_torture_ipv6_ipv4() +{ + local ns1=ns1_v4 + local vtep1_ip=198.51.100.100 + local vtep2_ip=198.51.100.200 + local grp1=ff0e::1 + local grp2=ff0e::2 + local src=2001:db8:100::1 + + echo + echo "Data path: MDB torture test - IPv6 overlay / IPv4 underlay" + echo "----------------------------------------------------------" + + mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp2 $src \ + "mausezahn -6" +} + +mdb_torture_ipv4_ipv6() +{ + local ns1=ns1_v6 + local vtep1_ip=2001:db8:1000::1 + local vtep2_ip=2001:db8:2000::1 + local grp1=239.1.1.1 + local grp2=239.2.2.2 + local src=192.0.2.129 + + echo + echo "Data path: MDB torture test - IPv4 overlay / IPv6 underlay" + echo "----------------------------------------------------------" + + mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp2 $src \ + "mausezahn" +} + +mdb_torture_ipv6_ipv6() +{ + local ns1=ns1_v6 + local vtep1_ip=2001:db8:1000::1 + local vtep2_ip=2001:db8:2000::1 + local grp1=ff0e::1 + local grp2=ff0e::2 + local src=2001:db8:100::1 + + echo + echo "Data path: MDB torture test - IPv6 overlay / IPv6 underlay" + echo "----------------------------------------------------------" + + mdb_torture_common $ns1 $vtep1_ip $vtep2_ip $grp1 $grp2 $src \ + "mausezahn -6" +} + +################################################################################ +# Usage + +usage() +{ + cat < Test(s) to run (default: all) + 
(options: $TESTS) + -c Control path tests only + -d Data path tests only + -p Pause on fail + -P Pause after each test before cleanup + -v Verbose mode (show commands and output) +EOF +} + +################################################################################ +# Main + +trap cleanup EXIT + +while getopts ":t:cdpPvh" opt; do + case $opt in + t) TESTS=$OPTARG;; + c) TESTS=${CONTROL_PATH_TESTS};; + d) TESTS=${DATA_PATH_TESTS};; + p) PAUSE_ON_FAIL=yes;; + P) PAUSE=yes;; + v) VERBOSE=$(($VERBOSE + 1));; + h) usage; exit 0;; + *) usage; exit 1;; + esac +done + +# Make sure we don't pause twice. +[ "${PAUSE}" = "yes" ] && PAUSE_ON_FAIL=no + +if [ "$(id -u)" -ne 0 ];then + echo "SKIP: Need root privileges" + exit $ksft_skip; +fi + +if [ ! -x "$(command -v ip)" ]; then + echo "SKIP: Could not run test without ip tool" + exit $ksft_skip +fi + +if [ ! -x "$(command -v bridge)" ]; then + echo "SKIP: Could not run test without bridge tool" + exit $ksft_skip +fi + +if [ ! -x "$(command -v mausezahn)" ]; then + echo "SKIP: Could not run test without mausezahn tool" + exit $ksft_skip +fi + +if [ ! -x "$(command -v jq)" ]; then + echo "SKIP: Could not run test without jq tool" + exit $ksft_skip +fi + +bridge mdb help 2>&1 | grep -q "src_vni" +if [ $? -ne 0 ]; then + echo "SKIP: iproute2 bridge too old, missing VXLAN MDB support" + exit $ksft_skip +fi + +# Start clean. +cleanup + +for t in $TESTS +do + setup; $t; cleanup; +done + +if [ "$TESTS" != "none" ]; then + printf "\nTests passed: %3d\n" ${nsuccess} + printf "Tests failed: %3d\n" ${nfail} +fi + +exit $ret -- cgit v1.2.3-70-g09d2 From 95fdf6e313a981b0729886f86916190cb418b04c Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Fri, 17 Mar 2023 13:19:20 -0700 Subject: selftests/bpf: Add test for bpf_ksym_exists(). MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add load and run time test for bpf_ksym_exists() and check that the verifier performs dead code elimination for non-existing kfunc. 
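For reference, a minimal sketch of the pattern this test exercises is included below. It is illustrative only and not part of the patch: the weak __ksym declaration and the bpf_ksym_exists() guard mirror the selftest, while the program name is made up. The kfunc is declared as a weak ksym so the object loads even on kernels that do not provide it, and the guarded call lets the verifier prune the branch as dead code when the symbol does not resolve.

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Weak declaration: resolves to zero when the kernel lacks the kfunc. */
void bpf_testmod_test_mod_kfunc(int i) __ksym __weak;

SEC("raw_tp/sys_enter")
int ksym_exists_sketch(const void *ctx)
{
	/* If the kfunc is absent, this branch is provably dead and the
	 * verifier removes the call instead of rejecting the program.
	 */
	if (bpf_ksym_exists(bpf_testmod_test_mod_kfunc))
		bpf_testmod_test_mod_kfunc(0);

	return 0;
}

char _license[] SEC("license") = "GPL";
```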
Signed-off-by: Alexei Starovoitov Signed-off-by: Andrii Nakryiko Reviewed-by: Martin KaFai Lau Reviewed-by: Toke Høiland-Jørgensen Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20230317201920.62030-5-alexei.starovoitov@gmail.com --- .../testing/selftests/bpf/progs/task_kfunc_success.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c index 4f61596b0242..cfa7f12b84e8 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_success.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c @@ -17,6 +17,10 @@ int err, pid; * TP_PROTO(struct task_struct *p, u64 clone_flags) */ +struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak; +void invalid_kfunc(void) __ksym __weak; +void bpf_testmod_test_mod_kfunc(int i) __ksym __weak; + static bool is_test_kfunc_task(void) { int cur_pid = bpf_get_current_pid_tgid() >> 32; @@ -26,7 +30,21 @@ static bool is_test_kfunc_task(void) static int test_acquire_release(struct task_struct *task) { - struct task_struct *acquired; + struct task_struct *acquired = NULL; + + if (!bpf_ksym_exists(bpf_task_acquire)) { + err = 3; + return 0; + } + if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) { + err = 4; + return 0; + } + if (bpf_ksym_exists(invalid_kfunc)) { + /* the verifier's dead code elimination should remove this */ + err = 5; + asm volatile ("goto -1"); /* for (;;); */ + } acquired = bpf_task_acquire(task); bpf_task_release(acquired); -- cgit v1.2.3-70-g09d2 From 2be7aa76cc69633930fb747e1d85d33a63a60c02 Mon Sep 17 00:00:00 2001 From: Manu Bretelle Date: Fri, 17 Mar 2023 09:32:56 -0700 Subject: selftests/bpf: Add --json-summary option to test_progs Currently, test_progs outputs all stdout/stderr as it runs, and when it is done, prints a summary. It is non-trivial for tooling to parse that output and extract meaningful information from it. This change adds a new option, `--json-summary`/`-J` that let the caller specify a file where `test_progs{,-no_alu32}` can write a summary of the run in a json format that can later be parsed by tooling. Currently, it creates a summary section with successes/skipped/failures followed by a list of failed tests and subtests. A test contains the following fields: - name: the name of the test - number: the number of the test - message: the log message that was printed by the test. - failed: A boolean indicating whether the test failed or not. Currently we only output failed tests, but in the future, successful tests could be added. - subtests: A list of subtests associated with this test. A subtest contains the following fields: - name: same as above - number: sanme as above - message: the log message that was printed by the subtest. 
- failed: same as above but for the subtest An example run and json content below: ``` $ sudo ./test_progs -a $(grep -v '^#' ./DENYLIST.aarch64 | awk '{print $1","}' | tr -d '\n') -j -J /tmp/test_progs.json $ jq < /tmp/test_progs.json | head -n 30 { "success": 29, "success_subtest": 23, "skipped": 3, "failed": 28, "results": [ { "name": "bpf_cookie", "number": 10, "message": "test_bpf_cookie:PASS:skel_open 0 nsec\n", "failed": true, "subtests": [ { "name": "multi_kprobe_link_api", "number": 2, "message": "kprobe_multi_link_api_subtest:PASS:load_kallsyms 0 nsec\nlibbpf: extern 'bpf_testmod_fentry_test1' (strong): not resolved\nlibbpf: failed to load object 'kprobe_multi'\nlibbpf: failed to load BPF skeleton 'kprobe_multi': -3\nkprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3\n", "failed": true }, { "name": "multi_kprobe_attach_api", "number": 3, "message": "libbpf: extern 'bpf_testmod_fentry_test1' (strong): not resolved\nlibbpf: failed to load object 'kprobe_multi'\nlibbpf: failed to load BPF skeleton 'kprobe_multi': -3\nkprobe_multi_attach_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3\n", "failed": true }, { "name": "lsm", "number": 8, "message": "lsm_subtest:PASS:lsm.link_create 0 nsec\nlsm_subtest:FAIL:stack_mprotect unexpected stack_mprotect: actual 0 != expected -1\n", "failed": true } ``` The file can then be used to print a summary of the test run and list of failing tests/subtests: ``` $ jq -r < /tmp/test_progs.json '"Success: \(.success)/\(.success_subtest), Skipped: \(.skipped), Failed: \(.failed)"' Success: 29/23, Skipped: 3, Failed: 28 $ jq -r < /tmp/test_progs.json '.results | map([ if .failed then "#\(.number) \(.name)" else empty end, ( . as {name: $tname, number: $tnum} | .subtests | map( if .failed then "#\($tnum)/\(.number) \($tname)/\(.name)" else empty end ) ) ]) | flatten | .[]' | head -n 20 #10 bpf_cookie #10/2 bpf_cookie/multi_kprobe_link_api #10/3 bpf_cookie/multi_kprobe_attach_api #10/8 bpf_cookie/lsm #15 bpf_mod_race #15/1 bpf_mod_race/ksym (used_btfs UAF) #15/2 bpf_mod_race/kfunc (kfunc_btf_tab UAF) #36 cgroup_hierarchical_stats #61 deny_namespace #61/1 deny_namespace/unpriv_userns_create_no_bpf #73 fexit_stress #83 get_func_ip_test #99 kfunc_dynptr_param #99/1 kfunc_dynptr_param/dynptr_data_null #99/4 kfunc_dynptr_param/dynptr_data_null #100 kprobe_multi_bench_attach #100/1 kprobe_multi_bench_attach/kernel #100/2 kprobe_multi_bench_attach/modules #101 kprobe_multi_test #101/1 kprobe_multi_test/skel_api ``` Signed-off-by: Manu Bretelle Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230317163256.3809328-1-chantr4@gmail.com --- tools/testing/selftests/bpf/Makefile | 4 +- tools/testing/selftests/bpf/json_writer.c | 1 + tools/testing/selftests/bpf/json_writer.h | 1 + tools/testing/selftests/bpf/test_progs.c | 83 +++++++++++++++++++++++++++++-- tools/testing/selftests/bpf/test_progs.h | 1 + 5 files changed, 84 insertions(+), 6 deletions(-) create mode 120000 tools/testing/selftests/bpf/json_writer.c create mode 120000 tools/testing/selftests/bpf/json_writer.h (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 55811c448eb7..fc092582d16d 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -234,6 +234,7 @@ $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ) CGROUP_HELPERS := $(OUTPUT)/cgroup_helpers.o TESTING_HELPERS := $(OUTPUT)/testing_helpers.o TRACE_HELPERS := $(OUTPUT)/trace_helpers.o 
+JSON_WRITER := $(OUTPUT)/json_writer.o CAP_HELPERS := $(OUTPUT)/cap_helpers.o $(OUTPUT)/test_dev_cgroup: $(CGROUP_HELPERS) $(TESTING_HELPERS) @@ -559,7 +560,8 @@ TRUNNER_BPF_PROGS_DIR := progs TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \ network_helpers.c testing_helpers.c \ btf_helpers.c flow_dissector_load.h \ - cap_helpers.c test_loader.c xsk.c disasm.c + cap_helpers.c test_loader.c xsk.c disasm.c \ + json_writer.c TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ $(OUTPUT)/liburandom_read.so \ $(OUTPUT)/xdp_synproxy \ diff --git a/tools/testing/selftests/bpf/json_writer.c b/tools/testing/selftests/bpf/json_writer.c new file mode 120000 index 000000000000..5effa31e2f39 --- /dev/null +++ b/tools/testing/selftests/bpf/json_writer.c @@ -0,0 +1 @@ +../../../bpf/bpftool/json_writer.c \ No newline at end of file diff --git a/tools/testing/selftests/bpf/json_writer.h b/tools/testing/selftests/bpf/json_writer.h new file mode 120000 index 000000000000..e0a264c26752 --- /dev/null +++ b/tools/testing/selftests/bpf/json_writer.h @@ -0,0 +1 @@ +../../../bpf/bpftool/json_writer.h \ No newline at end of file diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 6d5e3022c75f..d903e6a72a96 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -18,6 +18,7 @@ #include #include #include +#include "json_writer.h" static bool verbose(void) { @@ -269,10 +270,23 @@ static void print_subtest_name(int test_num, int subtest_num, fprintf(env.stdout, "\n"); } +static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt) +{ + /* open_memstream (from stdio_hijack_init) ensures that log_bug is terminated by a + * null byte. Yet in parallel mode, log_buf will be NULL if there is no message. 
+ */ + if (log_cnt) { + jsonw_string_field(w, "message", log_buf); + } else { + jsonw_string_field(w, "message", ""); + } +} + static void dump_test_log(const struct prog_test_def *test, const struct test_state *test_state, bool skip_ok_subtests, - bool par_exec_result) + bool par_exec_result, + json_writer_t *w) { bool test_failed = test_state->error_cnt > 0; bool force_log = test_state->force_log; @@ -296,6 +310,16 @@ static void dump_test_log(const struct prog_test_def *test, if (test_state->log_cnt && print_test) print_test_log(test_state->log_buf, test_state->log_cnt); + if (w && print_test) { + jsonw_start_object(w); + jsonw_string_field(w, "name", test->test_name); + jsonw_uint_field(w, "number", test->test_num); + jsonw_write_log_message(w, test_state->log_buf, test_state->log_cnt); + jsonw_bool_field(w, "failed", test_failed); + jsonw_name(w, "subtests"); + jsonw_start_array(w); + } + for (i = 0; i < test_state->subtest_num; i++) { subtest_state = &test_state->subtest_states[i]; subtest_failed = subtest_state->error_cnt; @@ -314,6 +338,20 @@ static void dump_test_log(const struct prog_test_def *test, test->test_name, subtest_state->name, test_result(subtest_state->error_cnt, subtest_state->skipped)); + + if (w && print_subtest) { + jsonw_start_object(w); + jsonw_string_field(w, "name", subtest_state->name); + jsonw_uint_field(w, "number", i+1); + jsonw_write_log_message(w, subtest_state->log_buf, subtest_state->log_cnt); + jsonw_bool_field(w, "failed", subtest_failed); + jsonw_end_object(w); + } + } + + if (w && print_test) { + jsonw_end_array(w); + jsonw_end_object(w); } print_test_result(test, test_state); @@ -715,6 +753,7 @@ enum ARG_KEYS { ARG_TEST_NAME_GLOB_DENYLIST = 'd', ARG_NUM_WORKERS = 'j', ARG_DEBUG = -1, + ARG_JSON_SUMMARY = 'J' }; static const struct argp_option opts[] = { @@ -740,6 +779,7 @@ static const struct argp_option opts[] = { "Number of workers to run in parallel, default to number of cpus." }, { "debug", ARG_DEBUG, NULL, 0, "print extra debug information for test_progs." 
}, + { "json-summary", ARG_JSON_SUMMARY, "FILE", 0, "Write report in json format to this file."}, {}, }; @@ -870,6 +910,13 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) case ARG_DEBUG: env->debug = true; break; + case ARG_JSON_SUMMARY: + env->json = fopen(arg, "w"); + if (env->json == NULL) { + perror("Failed to open json summary file"); + return -errno; + } + break; case ARGP_KEY_ARG: argp_usage(state); break; @@ -1017,7 +1064,7 @@ void crash_handler(int signum) stdio_restore(); if (env.test) { env.test_state->error_cnt++; - dump_test_log(env.test, env.test_state, true, false); + dump_test_log(env.test, env.test_state, true, false, NULL); } if (env.worker_id != -1) fprintf(stderr, "[%d]: ", env.worker_id); @@ -1124,7 +1171,7 @@ static void run_one_test(int test_num) stdio_restore(); - dump_test_log(test, state, false, false); + dump_test_log(test, state, false, false, NULL); } struct dispatch_data { @@ -1283,7 +1330,7 @@ static void *dispatch_thread(void *ctx) } while (false); pthread_mutex_lock(&stdout_output_lock); - dump_test_log(test, state, false, true); + dump_test_log(test, state, false, true, NULL); pthread_mutex_unlock(&stdout_output_lock); } /* while (true) */ error: @@ -1308,6 +1355,7 @@ static void calculate_summary_and_print_errors(struct test_env *env) { int i; int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0; + json_writer_t *w = NULL; for (i = 0; i < prog_test_cnt; i++) { struct test_state *state = &test_states[i]; @@ -1324,6 +1372,22 @@ static void calculate_summary_and_print_errors(struct test_env *env) succ_cnt++; } + if (env->json) { + w = jsonw_new(env->json); + if (!w) + fprintf(env->stderr, "Failed to create new JSON stream."); + } + + if (w) { + jsonw_start_object(w); + jsonw_uint_field(w, "success", succ_cnt); + jsonw_uint_field(w, "success_subtest", sub_succ_cnt); + jsonw_uint_field(w, "skipped", skip_cnt); + jsonw_uint_field(w, "failed", fail_cnt); + jsonw_name(w, "results"); + jsonw_start_array(w); + } + /* * We only print error logs summary when there are failed tests and * verbose mode is not enabled. Otherwise, results may be incosistent. @@ -1340,10 +1404,19 @@ static void calculate_summary_and_print_errors(struct test_env *env) if (!state->tested || !state->error_cnt) continue; - dump_test_log(test, state, true, true); + dump_test_log(test, state, true, true, w); } } + if (w) { + jsonw_end_array(w); + jsonw_end_object(w); + jsonw_destroy(&w); + } + + if (env->json) + fclose(env->json); + printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n", succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt); diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 3cbf005747ed..4b06b8347cd4 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -114,6 +114,7 @@ struct test_env { FILE *stdout; FILE *stderr; int nr_cpus; + FILE *json; int succ_cnt; /* successful tests */ int sub_succ_cnt; /* successful sub-tests */ -- cgit v1.2.3-70-g09d2 From bb4a6a9237293346cf1b3b7bc4ff4dfc1977a103 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Sun, 19 Mar 2023 13:30:14 -0700 Subject: selftest/bpf: Add a test case for ld_imm64 copy logic. Add a test case to exercise {btf_id, btf_obj_fd} copy logic between ld_imm64 insns. 
Signed-off-by: Alexei Starovoitov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230319203014.55866-2-alexei.starovoitov@gmail.com --- tools/testing/selftests/bpf/progs/test_ksyms_weak.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c index 5f8379aadb29..7003eef0c192 100644 --- a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c +++ b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c @@ -37,7 +37,7 @@ int pass_handler(const void *ctx) /* tests existing symbols. */ rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0); - if (rq) + if (rq && bpf_ksym_exists(&runqueues)) out__existing_typed = rq->cpu; out__existing_typeless = (__u64)&bpf_prog_active; -- cgit v1.2.3-70-g09d2 From 04aae213e719ec2bb310158c4025316ace50589b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 20 Mar 2023 18:41:13 -0700 Subject: net: skbuff: rename __pkt_vlan_present_offset to __mono_tc_offset vlan_present is gone since commit 354259fa73e2 ("net: remove skb->vlan_present") rename the offset field to what BPF is currently looking for in this byte - mono_delivery_time and tc_at_ingress. Signed-off-by: Jakub Kicinski Link: https://lore.kernel.org/r/20230321014115.997841-2-kuba@kernel.org Signed-off-by: Martin KaFai Lau --- include/linux/skbuff.h | 4 ++-- net/core/filter.c | 8 ++++---- tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'tools/testing') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3f3a2a82a86b..5a63878a4550 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -955,7 +955,7 @@ struct sk_buff { __u8 csum_valid:1; /* private: */ - __u8 __pkt_vlan_present_offset[0]; + __u8 __mono_tc_offset[0]; /* public: */ __u8 remcsum_offload:1; __u8 csum_complete_sw:1; @@ -1078,7 +1078,7 @@ struct sk_buff { #define TC_AT_INGRESS_MASK (1 << 7) #define SKB_MONO_DELIVERY_TIME_MASK (1 << 5) #endif -#define PKT_VLAN_PRESENT_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset) +#define SKB_BF_MONO_TC_OFFSET offsetof(struct sk_buff, __mono_tc_offset) #ifdef __KERNEL__ /* diff --git a/net/core/filter.c b/net/core/filter.c index 50f649f1b4a9..3370efad1dda 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9185,7 +9185,7 @@ static struct bpf_insn *bpf_convert_tstamp_type_read(const struct bpf_insn *si, __u8 tmp_reg = BPF_REG_AX; *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, - PKT_VLAN_PRESENT_OFFSET); + SKB_BF_MONO_TC_OFFSET); *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, SKB_MONO_DELIVERY_TIME_MASK, 2); *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_UNSPEC); @@ -9232,7 +9232,7 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog, /* AX is needed because src_reg and dst_reg could be the same */ __u8 tmp_reg = BPF_REG_AX; - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, SKB_BF_MONO_TC_OFFSET); *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK); *insn++ = BPF_JMP32_IMM(BPF_JNE, tmp_reg, @@ -9267,14 +9267,14 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, if (!prog->tstamp_type_access) { __u8 tmp_reg = BPF_REG_AX; - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET); + *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, SKB_BF_MONO_TC_OFFSET); /* 
Writing __sk_buff->tstamp as ingress, goto */ *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1); /* goto */ *insn++ = BPF_JMP_A(2); /* : mono_delivery_time */ *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_MONO_DELIVERY_TIME_MASK); - *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, PKT_VLAN_PRESENT_OFFSET); + *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, SKB_BF_MONO_TC_OFFSET); } #endif diff --git a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c index d5fe3d4b936c..ae7b6e50e405 100644 --- a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c +++ b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c @@ -68,17 +68,17 @@ static struct test_case test_cases[] = { #if defined(__x86_64__) || defined(__aarch64__) { N(SCHED_CLS, struct __sk_buff, tstamp), - .read = "r11 = *(u8 *)($ctx + sk_buff::__pkt_vlan_present_offset);" + .read = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);" "w11 &= 160;" "if w11 != 0xa0 goto pc+2;" "$dst = 0;" "goto pc+1;" "$dst = *(u64 *)($ctx + sk_buff::tstamp);", - .write = "r11 = *(u8 *)($ctx + sk_buff::__pkt_vlan_present_offset);" + .write = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);" "if w11 & 0x80 goto pc+1;" "goto pc+2;" "w11 &= -33;" - "*(u8 *)($ctx + sk_buff::__pkt_vlan_present_offset) = r11;" + "*(u8 *)($ctx + sk_buff::__mono_tc_offset) = r11;" "*(u64 *)($ctx + sk_buff::tstamp) = $src;", }, #endif -- cgit v1.2.3-70-g09d2 From c0ba861117c3e8deb03855d7dc5a7717958bbb18 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 20 Mar 2023 18:41:15 -0700 Subject: net: skbuff: move the fields BPF cares about directly next to the offset marker To avoid more possible BPF dependencies with moving bitfields around keep the fields BPF cares about right next to the offset marker. Signed-off-by: Jakub Kicinski Link: https://lore.kernel.org/r/20230321014115.997841-4-kuba@kernel.org Signed-off-by: Martin KaFai Lau --- include/linux/skbuff.h | 18 +++++++++--------- tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c | 8 ++++---- 2 files changed, 13 insertions(+), 13 deletions(-) (limited to 'tools/testing') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 36d31e74db37..6aeb0e7b9511 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -947,15 +947,15 @@ struct sk_buff { /* private: */ __u8 __mono_tc_offset[0]; /* public: */ - __u8 remcsum_offload:1; - __u8 csum_complete_sw:1; - __u8 csum_level:2; - __u8 dst_pending_confirm:1; __u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */ #ifdef CONFIG_NET_CLS_ACT - __u8 tc_skip_classify:1; __u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */ + __u8 tc_skip_classify:1; #endif + __u8 remcsum_offload:1; + __u8 csum_complete_sw:1; + __u8 csum_level:2; + __u8 dst_pending_confirm:1; __u8 l4_hash:1; __u8 sw_hash:1; @@ -1072,11 +1072,11 @@ struct sk_buff { * around, you also must adapt these constants. 
*/ #ifdef __BIG_ENDIAN_BITFIELD -#define TC_AT_INGRESS_MASK (1 << 0) -#define SKB_MONO_DELIVERY_TIME_MASK (1 << 2) +#define SKB_MONO_DELIVERY_TIME_MASK (1 << 7) +#define TC_AT_INGRESS_MASK (1 << 6) #else -#define TC_AT_INGRESS_MASK (1 << 7) -#define SKB_MONO_DELIVERY_TIME_MASK (1 << 5) +#define SKB_MONO_DELIVERY_TIME_MASK (1 << 0) +#define TC_AT_INGRESS_MASK (1 << 1) #endif #define SKB_BF_MONO_TC_OFFSET offsetof(struct sk_buff, __mono_tc_offset) diff --git a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c index ae7b6e50e405..4951aa978f33 100644 --- a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c +++ b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c @@ -69,15 +69,15 @@ static struct test_case test_cases[] = { { N(SCHED_CLS, struct __sk_buff, tstamp), .read = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);" - "w11 &= 160;" - "if w11 != 0xa0 goto pc+2;" + "w11 &= 3;" + "if w11 != 0x3 goto pc+2;" "$dst = 0;" "goto pc+1;" "$dst = *(u64 *)($ctx + sk_buff::tstamp);", .write = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);" - "if w11 & 0x80 goto pc+1;" + "if w11 & 0x2 goto pc+1;" "goto pc+2;" - "w11 &= -33;" + "w11 &= -2;" "*(u8 *)($ctx + sk_buff::__mono_tc_offset) = r11;" "*(u64 *)($ctx + sk_buff::tstamp) = $src;", }, -- cgit v1.2.3-70-g09d2 From 5c5945dc695c54f2b55a934a10b6c4e220f9c140 Mon Sep 17 00:00:00 2001 From: Xiaoyan Li Date: Tue, 21 Mar 2023 16:12:02 +0800 Subject: selftests/net: Add SHA256 computation over data sent in tcp_mmap Add option to compute and send SHA256 over data sent (-i). This is to ensure the correctness of data received. Data is randomly populated from /dev/urandom. Tested: ./tcp_mmap -s -z -i ./tcp_mmap -z -H $ADDR -i SHA256 is correct ./tcp_mmap -s -i ./tcp_mmap -H $ADDR -i SHA256 is correct Signed-off-by: Coco Li Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/20230321081202.2370275-2-lixiaoyan@google.com Signed-off-by: Paolo Abeni --- tools/testing/selftests/net/Makefile | 2 +- tools/testing/selftests/net/tcp_mmap.c | 102 +++++++++++++++++++++++++++++---- 2 files changed, 92 insertions(+), 12 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index e57750e44f71..1de34ec99290 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -89,7 +89,7 @@ TEST_FILES := settings include ../lib.mk $(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma -$(OUTPUT)/tcp_mmap: LDLIBS += -lpthread +$(OUTPUT)/tcp_mmap: LDLIBS += -lpthread -lcrypto $(OUTPUT)/tcp_inq: LDLIBS += -lpthread $(OUTPUT)/bind_bhash: LDLIBS += -lpthread diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c index 46a02bbd31d0..607cc9ad8d1b 100644 --- a/tools/testing/selftests/net/tcp_mmap.c +++ b/tools/testing/selftests/net/tcp_mmap.c @@ -66,11 +66,16 @@ #include #include #include +#include #ifndef MSG_ZEROCOPY #define MSG_ZEROCOPY 0x4000000 #endif +#ifndef min +#define min(a, b) ((a) < (b) ? (a) : (b)) +#endif + #define FILE_SZ (1ULL << 35) static int cfg_family = AF_INET6; static socklen_t cfg_alen = sizeof(struct sockaddr_in6); @@ -81,12 +86,14 @@ static int sndbuf; /* Default: autotuning. Can be set with -w option static int zflg; /* zero copy option. 
(MSG_ZEROCOPY for sender, mmap() for receiver */ static int xflg; /* hash received data (simple xor) (-h option) */ static int keepflag; /* -k option: receiver shall keep all received file in memory (no munmap() calls) */ +static int integrity; /* -i option: sender and receiver compute sha256 over the data.*/ static size_t chunk_size = 512*1024; static size_t map_align; unsigned long htotal; +unsigned int digest_len; static inline void prefetch(const void *x) { @@ -148,12 +155,14 @@ static void *mmap_large_buffer(size_t need, size_t *allocated) void *child_thread(void *arg) { + unsigned char digest[SHA256_DIGEST_LENGTH]; unsigned long total_mmap = 0, total = 0; struct tcp_zerocopy_receive zc; + unsigned char *buffer = NULL; unsigned long delta_usec; + EVP_MD_CTX *ctx = NULL; int flags = MAP_SHARED; struct timeval t0, t1; - char *buffer = NULL; void *raddr = NULL; void *addr = NULL; double throughput; @@ -180,6 +189,14 @@ void *child_thread(void *arg) addr = ALIGN_PTR_UP(raddr, map_align); } } + if (integrity) { + ctx = EVP_MD_CTX_new(); + if (!ctx) { + perror("cannot enable SHA computing"); + goto error; + } + EVP_DigestInit_ex(ctx, EVP_sha256(), NULL); + } while (1) { struct pollfd pfd = { .fd = fd, .events = POLLIN, }; int sub; @@ -191,7 +208,7 @@ void *child_thread(void *arg) memset(&zc, 0, sizeof(zc)); zc.address = (__u64)((unsigned long)addr); - zc.length = chunk_size; + zc.length = min(chunk_size, FILE_SZ - lu); res = getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len); @@ -200,6 +217,8 @@ void *child_thread(void *arg) if (zc.length) { assert(zc.length <= chunk_size); + if (integrity) + EVP_DigestUpdate(ctx, addr, zc.length); total_mmap += zc.length; if (xflg) hash_zone(addr, zc.length); @@ -211,22 +230,30 @@ void *child_thread(void *arg) } if (zc.recv_skip_hint) { assert(zc.recv_skip_hint <= chunk_size); - lu = read(fd, buffer, zc.recv_skip_hint); + lu = read(fd, buffer, min(zc.recv_skip_hint, + FILE_SZ - total)); if (lu > 0) { + if (integrity) + EVP_DigestUpdate(ctx, buffer, lu); if (xflg) hash_zone(buffer, lu); total += lu; } + if (lu == 0) + goto end; } continue; } sub = 0; while (sub < chunk_size) { - lu = read(fd, buffer + sub, chunk_size - sub); + lu = read(fd, buffer + sub, min(chunk_size - sub, + FILE_SZ - total)); if (lu == 0) goto end; if (lu < 0) break; + if (integrity) + EVP_DigestUpdate(ctx, buffer + sub, lu); if (xflg) hash_zone(buffer + sub, lu); total += lu; @@ -237,6 +264,20 @@ end: gettimeofday(&t1, NULL); delta_usec = (t1.tv_sec - t0.tv_sec) * 1000000 + t1.tv_usec - t0.tv_usec; + if (integrity) { + fcntl(fd, F_SETFL, 0); + EVP_DigestFinal_ex(ctx, digest, &digest_len); + lu = read(fd, buffer, SHA256_DIGEST_LENGTH); + if (lu != SHA256_DIGEST_LENGTH) + perror("Error: Cannot read SHA256\n"); + + if (memcmp(digest, buffer, + SHA256_DIGEST_LENGTH)) + fprintf(stderr, "Error: SHA256 of the data is not right\n"); + else + printf("\nSHA256 is correct\n"); + } + throughput = 0; if (delta_usec) throughput = total * 8.0 / (double)delta_usec / 1000.0; @@ -368,19 +409,38 @@ static unsigned long default_huge_page_size(void) return hps; } +static void randomize(void *target, size_t count) +{ + static int urandom = -1; + ssize_t got; + + urandom = open("/dev/urandom", O_RDONLY); + if (urandom < 0) { + perror("open /dev/urandom"); + exit(1); + } + got = read(urandom, target, count); + if (got != count) { + perror("read /dev/urandom"); + exit(1); + } +} + int main(int argc, char *argv[]) { + unsigned char digest[SHA256_DIGEST_LENGTH]; struct sockaddr_storage listenaddr, 
addr; unsigned int max_pacing_rate = 0; + EVP_MD_CTX *ctx = NULL; + unsigned char *buffer; uint64_t total = 0; char *host = NULL; int fd, c, on = 1; size_t buffer_sz; - char *buffer; int sflg = 0; int mss = 0; - while ((c = getopt(argc, argv, "46p:svr:w:H:zxkP:M:C:a:")) != -1) { + while ((c = getopt(argc, argv, "46p:svr:w:H:zxkP:M:C:a:i")) != -1) { switch (c) { case '4': cfg_family = PF_INET; @@ -426,6 +486,9 @@ int main(int argc, char *argv[]) case 'a': map_align = atol(optarg); break; + case 'i': + integrity = 1; + break; default: exit(1); } @@ -468,7 +531,7 @@ int main(int argc, char *argv[]) } buffer = mmap_large_buffer(chunk_size, &buffer_sz); - if (buffer == (char *)-1) { + if (buffer == (unsigned char *)-1) { perror("mmap"); exit(1); } @@ -501,17 +564,34 @@ int main(int argc, char *argv[]) perror("setsockopt SO_ZEROCOPY, (-z option disabled)"); zflg = 0; } + if (integrity) { + randomize(buffer, buffer_sz); + ctx = EVP_MD_CTX_new(); + if (!ctx) { + perror("cannot enable SHA computing"); + exit(1); + } + EVP_DigestInit_ex(ctx, EVP_sha256(), NULL); + } while (total < FILE_SZ) { + size_t offset = total % chunk_size; int64_t wr = FILE_SZ - total; - if (wr > chunk_size) - wr = chunk_size; - /* Note : we just want to fill the pipe with 0 bytes */ - wr = send(fd, buffer, (size_t)wr, zflg ? MSG_ZEROCOPY : 0); + if (wr > chunk_size - offset) + wr = chunk_size - offset; + /* Note : we just want to fill the pipe with random bytes */ + wr = send(fd, buffer + offset, + (size_t)wr, zflg ? MSG_ZEROCOPY : 0); if (wr <= 0) break; + if (integrity) + EVP_DigestUpdate(ctx, buffer + offset, wr); total += wr; } + if (integrity && total == FILE_SZ) { + EVP_DigestFinal_ex(ctx, digest, &digest_len); + send(fd, digest, (size_t)SHA256_DIGEST_LENGTH, 0); + } close(fd); munmap(buffer, buffer_sz); return 0; -- cgit v1.2.3-70-g09d2 From 9a321fd3308e262f2a76761bea86dd0f311e3f86 Mon Sep 17 00:00:00 2001 From: Tushar Vyavahare Date: Mon, 20 Mar 2023 15:57:05 +0530 Subject: selftests/xsk: add xdp populate metadata test Add a new test in copy-mode for testing the copying of metadata from the buffer in kernel-space to user-space. This is accomplished by adding a new XDP program and using the bss map to store a counter that is written to the metadata field. This counter is incremented for every packet so that the number becomes unique and should be the same as the payload. It is store in the bss so the value can be reset between runs. The XDP program populates the metadata and the userspace program checks the value stored in the metadata field against the payload using the new is_metadata_correct() function. To turn this verification on or off, add a new parameter (use_metadata) to the ifobject structure. 
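As a rough sketch of the BPF-side pattern described above (illustrative only; the patch's actual program is xsk_xdp_populate_metadata in xsk_xdp_progs.c, shown in the diff below, which redirects into the XSK map, and the struct and program names here are placeholders): the metadata area is grown in front of the packet with bpf_xdp_adjust_meta(), bounds-checked, and filled with the per-packet counter.

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct meta_info {
	__u64 count;
};

/* Kept in .bss so user space can reset it between runs. */
__u64 pkt_count;

SEC("xdp")
int populate_meta_sketch(struct xdp_md *xdp)
{
	void *data, *data_meta;
	struct meta_info *meta;

	/* Reserve room for the custom metadata in front of the packet data. */
	if (bpf_xdp_adjust_meta(xdp, -(int)sizeof(*meta)))
		return XDP_DROP;

	data = (void *)(long)xdp->data;
	data_meta = (void *)(long)xdp->data_meta;

	/* The verifier requires an explicit bounds check before the access. */
	if (data_meta + sizeof(*meta) > data)
		return XDP_DROP;

	meta = data_meta;
	meta->count = pkt_count++;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```

User space then finds the metadata directly in front of the received frame (data minus the metadata size) and compares the counter with the payload, which is what the new is_metadata_correct() helper does.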
Signed-off-by: Tushar Vyavahare Reviewed-by: Maciej Fijalkowski Link: https://lore.kernel.org/r/20230320102705.306187-1-tushar.vyavahare@intel.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/xsk_xdp_progs.c | 25 ++++++++++++ tools/testing/selftests/bpf/xsk_xdp_metadata.h | 5 +++ tools/testing/selftests/bpf/xskxceiver.c | 46 ++++++++++++++++++++++- tools/testing/selftests/bpf/xskxceiver.h | 2 + 4 files changed, 77 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/bpf/xsk_xdp_metadata.h (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c index 744a01d0e57d..a630c95c7471 100644 --- a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c +++ b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c @@ -3,6 +3,7 @@ #include #include +#include "xsk_xdp_metadata.h" struct { __uint(type, BPF_MAP_TYPE_XSKMAP); @@ -12,6 +13,7 @@ struct { } xsk SEC(".maps"); static unsigned int idx; +int count = 0; SEC("xdp") int xsk_def_prog(struct xdp_md *xdp) { @@ -27,4 +29,27 @@ SEC("xdp") int xsk_xdp_drop(struct xdp_md *xdp) return bpf_redirect_map(&xsk, 0, XDP_DROP); } +SEC("xdp") int xsk_xdp_populate_metadata(struct xdp_md *xdp) +{ + void *data, *data_meta; + struct xdp_info *meta; + int err; + + /* Reserve enough for all custom metadata. */ + err = bpf_xdp_adjust_meta(xdp, -(int)sizeof(struct xdp_info)); + if (err) + return XDP_DROP; + + data = (void *)(long)xdp->data; + data_meta = (void *)(long)xdp->data_meta; + + if (data_meta + sizeof(struct xdp_info) > data) + return XDP_DROP; + + meta = data_meta; + meta->count = count++; + + return bpf_redirect_map(&xsk, 0, XDP_DROP); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/xsk_xdp_metadata.h b/tools/testing/selftests/bpf/xsk_xdp_metadata.h new file mode 100644 index 000000000000..943133da378a --- /dev/null +++ b/tools/testing/selftests/bpf/xsk_xdp_metadata.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +struct xdp_info { + __u64 count; +} __attribute__((aligned(32))); diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index a17655107a94..b65e0645b0cd 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -103,6 +103,7 @@ #include #include #include "../kselftest.h" +#include "xsk_xdp_metadata.h" static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62"; static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61"; @@ -464,6 +465,7 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, ifobj->use_fill_ring = true; ifobj->release_rx = true; ifobj->validation_func = NULL; + ifobj->use_metadata = false; if (i == 0) { ifobj->rx_on = false; @@ -798,6 +800,20 @@ static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt return false; } +static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr) +{ + void *data = xsk_umem__get_data(buffer, addr); + struct xdp_info *meta = data - sizeof(struct xdp_info); + + if (meta->count != pkt->payload) { + ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%d]\n", + __func__, pkt->payload, meta->count); + return false; + } + + return true; +} + static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) { void *data = xsk_umem__get_data(buffer, addr); @@ -959,7 +975,8 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds) addr = xsk_umem__add_offset_to_addr(addr); if 
(!is_pkt_valid(pkt, umem->buffer, addr, desc->len) || - !is_offset_correct(umem, pkt_stream, addr, pkt->addr)) + !is_offset_correct(umem, pkt_stream, addr, pkt->addr) || + (ifobj->use_metadata && !is_metadata_correct(pkt, umem->buffer, addr))) return TEST_FAILURE; if (ifobj->use_fill_ring) @@ -1686,6 +1703,30 @@ static void testapp_xdp_drop(struct test_spec *test) testapp_validate_traffic(test); } +static void testapp_xdp_metadata_count(struct test_spec *test) +{ + struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs; + struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs; + struct bpf_map *data_map; + int count = 0; + int key = 0; + + test_spec_set_name(test, "XDP_METADATA_COUNT"); + test_spec_set_xdp_prog(test, skel_rx->progs.xsk_xdp_populate_metadata, + skel_tx->progs.xsk_xdp_populate_metadata, + skel_rx->maps.xsk, skel_tx->maps.xsk); + test->ifobj_rx->use_metadata = true; + + data_map = bpf_object__find_map_by_name(skel_rx->obj, "xsk_xdp_.bss"); + if (!data_map || !bpf_map__is_internal(data_map)) + exit_with_error(ENOMEM); + + if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY)) + exit_with_error(errno); + + testapp_validate_traffic(test); +} + static void testapp_poll_txq_tmout(struct test_spec *test) { test_spec_set_name(test, "POLL_TXQ_FULL"); @@ -1835,6 +1876,9 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_ case TEST_TYPE_XDP_DROP_HALF: testapp_xdp_drop(test); break; + case TEST_TYPE_XDP_METADATA_COUNT: + testapp_xdp_metadata_count(test); + break; default: break; } diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index 3e8ec7d8ec32..bdb4efedf3a9 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -88,6 +88,7 @@ enum test_type { TEST_TYPE_STATS_FILL_EMPTY, TEST_TYPE_BPF_RES, TEST_TYPE_XDP_DROP_HALF, + TEST_TYPE_XDP_METADATA_COUNT, TEST_TYPE_MAX }; @@ -158,6 +159,7 @@ struct ifobject { bool use_fill_ring; bool release_rx; bool shared_umem; + bool use_metadata; u8 dst_mac[ETH_ALEN]; u8 src_mac[ETH_ALEN]; }; -- cgit v1.2.3-70-g09d2 From 3b2ec2140fa27febb21034943d656898b659dc02 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 21 Mar 2023 13:38:54 -0700 Subject: selftests/bpf: Add light skeleton test for kfunc detection. Add light skeleton test for kfunc detection and denylist it for s390. Signed-off-by: Alexei Starovoitov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230321203854.3035-5-alexei.starovoitov@gmail.com --- tools/testing/selftests/bpf/DENYLIST.s390x | 1 + tools/testing/selftests/bpf/progs/test_ksyms_weak.c | 15 +++++++++++++++ 2 files changed, 16 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x index 34cb8b2de8ca..c7463f3ec3c0 100644 --- a/tools/testing/selftests/bpf/DENYLIST.s390x +++ b/tools/testing/selftests/bpf/DENYLIST.s390x @@ -11,6 +11,7 @@ get_stack_raw_tp # user_stack corrupted user stack iters/testmod_seq* # s390x doesn't support kfuncs in modules yet kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95 kprobe_multi_test # relies on fentry +ksyms_btf/weak_ksyms* # test_ksyms_weak__open_and_load unexpected error: -22 (kfunc) ksyms_module # test_ksyms_module__open_and_load unexpected error: -9 (?) 
ksyms_module_libbpf # JIT does not support calling kernel function (kfunc) ksyms_module_lskel # test_ksyms_module_lskel__open_and_load unexpected error: -9 (?) diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c index 7003eef0c192..d00268c91e19 100644 --- a/tools/testing/selftests/bpf/progs/test_ksyms_weak.c +++ b/tools/testing/selftests/bpf/progs/test_ksyms_weak.c @@ -20,6 +20,8 @@ __u64 out__non_existent_typed = -1; /* test existing weak symbols can be resolved. */ extern const struct rq runqueues __ksym __weak; /* typed */ extern const void bpf_prog_active __ksym __weak; /* typeless */ +struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak; +void bpf_testmod_test_mod_kfunc(int i) __ksym __weak; /* non-existent weak symbols. */ @@ -29,6 +31,7 @@ extern const void bpf_link_fops1 __ksym __weak; /* typed symbols, default to zero. */ extern const int bpf_link_fops2 __ksym __weak; +void invalid_kfunc(void) __ksym __weak; SEC("raw_tp/sys_enter") int pass_handler(const void *ctx) @@ -50,6 +53,18 @@ int pass_handler(const void *ctx) if (&bpf_link_fops2) /* can't happen */ out__non_existent_typed = (__u64)bpf_per_cpu_ptr(&bpf_link_fops2, 0); + if (!bpf_ksym_exists(bpf_task_acquire)) + /* dead code won't be seen by the verifier */ + bpf_task_acquire(0); + + if (!bpf_ksym_exists(bpf_testmod_test_mod_kfunc)) + /* dead code won't be seen by the verifier */ + bpf_testmod_test_mod_kfunc(0); + + if (bpf_ksym_exists(invalid_kfunc)) + /* dead code won't be seen by the verifier */ + invalid_kfunc(); + return 0; } -- cgit v1.2.3-70-g09d2 From 830154cdc57971b06f81d4ffc39b868e3d7693de Mon Sep 17 00:00:00 2001 From: JP Kobryn Date: Wed, 22 Mar 2023 12:47:53 -0700 Subject: bpf/selftests: coverage for bpf_map_ops errors These tests expose the issue of being unable to properly check for errors returned from inlined bpf map helpers that make calls to the bpf_map_ops functions. At best, a check for zero or non-zero can be done but these tests show it is not possible to check for a negative value or for a specific error value. Signed-off-by: JP Kobryn Tested-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230322194754.185781-2-inwardvessel@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/map_ops.c | 162 +++++++++++++++++++++++ tools/testing/selftests/bpf/progs/test_map_ops.c | 138 +++++++++++++++++++ 2 files changed, 300 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/map_ops.c create mode 100644 tools/testing/selftests/bpf/progs/test_map_ops.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/map_ops.c b/tools/testing/selftests/bpf/prog_tests/map_ops.c new file mode 100644 index 000000000000..be5e42a413b4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/map_ops.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include +#include + +#include "test_map_ops.skel.h" +#include "test_progs.h" + +static void map_update(void) +{ + (void)syscall(__NR_getpid); +} + +static void map_delete(void) +{ + (void)syscall(__NR_getppid); +} + +static void map_push(void) +{ + (void)syscall(__NR_getuid); +} + +static void map_pop(void) +{ + (void)syscall(__NR_geteuid); +} + +static void map_peek(void) +{ + (void)syscall(__NR_getgid); +} + +static void map_for_each_pass(void) +{ + (void)syscall(__NR_gettid); +} + +static void map_for_each_fail(void) +{ + (void)syscall(__NR_getpgid); +} + +static int setup(struct test_map_ops **skel) +{ + int err = 0; + + if (!skel) + return -1; + + *skel = test_map_ops__open(); + if (!ASSERT_OK_PTR(*skel, "test_map_ops__open")) + return -1; + + (*skel)->rodata->pid = getpid(); + + err = test_map_ops__load(*skel); + if (!ASSERT_OK(err, "test_map_ops__load")) + return err; + + err = test_map_ops__attach(*skel); + if (!ASSERT_OK(err, "test_map_ops__attach")) + return err; + + return err; +} + +static void teardown(struct test_map_ops **skel) +{ + if (skel && *skel) + test_map_ops__destroy(*skel); +} + +static void map_ops_update_delete_subtest(void) +{ + struct test_map_ops *skel; + + if (setup(&skel)) + goto teardown; + + map_update(); + ASSERT_OK(skel->bss->err, "map_update_initial"); + + map_update(); + ASSERT_LT(skel->bss->err, 0, "map_update_existing"); + ASSERT_EQ(skel->bss->err, -EEXIST, "map_update_existing"); + + map_delete(); + ASSERT_OK(skel->bss->err, "map_delete_existing"); + + map_delete(); + ASSERT_LT(skel->bss->err, 0, "map_delete_non_existing"); + ASSERT_EQ(skel->bss->err, -ENOENT, "map_delete_non_existing"); + +teardown: + teardown(&skel); +} + +static void map_ops_push_peek_pop_subtest(void) +{ + struct test_map_ops *skel; + + if (setup(&skel)) + goto teardown; + + map_push(); + ASSERT_OK(skel->bss->err, "map_push_initial"); + + map_push(); + ASSERT_LT(skel->bss->err, 0, "map_push_when_full"); + ASSERT_EQ(skel->bss->err, -E2BIG, "map_push_when_full"); + + map_peek(); + ASSERT_OK(skel->bss->err, "map_peek"); + + map_pop(); + ASSERT_OK(skel->bss->err, "map_pop"); + + map_peek(); + ASSERT_LT(skel->bss->err, 0, "map_peek_when_empty"); + ASSERT_EQ(skel->bss->err, -ENOENT, "map_peek_when_empty"); + + map_pop(); + ASSERT_LT(skel->bss->err, 0, "map_pop_when_empty"); + ASSERT_EQ(skel->bss->err, -ENOENT, "map_pop_when_empty"); + +teardown: + teardown(&skel); +} + +static void map_ops_for_each_subtest(void) +{ + struct test_map_ops *skel; + + if (setup(&skel)) + goto teardown; + + map_for_each_pass(); + /* expect to iterate over 1 element */ + ASSERT_EQ(skel->bss->err, 1, "map_for_each_no_flags"); + + map_for_each_fail(); + ASSERT_LT(skel->bss->err, 0, "map_for_each_with_flags"); + ASSERT_EQ(skel->bss->err, -EINVAL, "map_for_each_with_flags"); + +teardown: + teardown(&skel); +} + +void test_map_ops(void) +{ + if (test__start_subtest("map_ops_update_delete")) + map_ops_update_delete_subtest(); + + if (test__start_subtest("map_ops_push_peek_pop")) + map_ops_push_peek_pop_subtest(); + + if (test__start_subtest("map_ops_for_each")) + map_ops_for_each_subtest(); +} diff --git a/tools/testing/selftests/bpf/progs/test_map_ops.c b/tools/testing/selftests/bpf/progs/test_map_ops.c new file mode 100644 index 000000000000..b53b46a090c8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_map_ops.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include "vmlinux.h" +#include + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} hash_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_STACK); + __uint(max_entries, 1); + __type(value, int); +} stack_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} array_map SEC(".maps"); + +const volatile pid_t pid; +long err = 0; + +static u64 callback(u64 map, u64 key, u64 val, u64 ctx, u64 flags) +{ + return 0; +} + +SEC("tp/syscalls/sys_enter_getpid") +int map_update(void *ctx) +{ + const int key = 0; + const int val = 1; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + err = bpf_map_update_elem(&hash_map, &key, &val, BPF_NOEXIST); + + return 0; +} + +SEC("tp/syscalls/sys_enter_getppid") +int map_delete(void *ctx) +{ + const int key = 0; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + err = bpf_map_delete_elem(&hash_map, &key); + + return 0; +} + +SEC("tp/syscalls/sys_enter_getuid") +int map_push(void *ctx) +{ + const int val = 1; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + err = bpf_map_push_elem(&stack_map, &val, 0); + + return 0; +} + +SEC("tp/syscalls/sys_enter_geteuid") +int map_pop(void *ctx) +{ + int val; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + err = bpf_map_pop_elem(&stack_map, &val); + + return 0; +} + +SEC("tp/syscalls/sys_enter_getgid") +int map_peek(void *ctx) +{ + int val; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + err = bpf_map_peek_elem(&stack_map, &val); + + return 0; +} + +SEC("tp/syscalls/sys_enter_gettid") +int map_for_each_pass(void *ctx) +{ + const int key = 0; + const int val = 1; + const u64 flags = 0; + int callback_ctx; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + bpf_map_update_elem(&array_map, &key, &val, flags); + + err = bpf_for_each_map_elem(&array_map, callback, &callback_ctx, flags); + + return 0; +} + +SEC("tp/syscalls/sys_enter_getpgid") +int map_for_each_fail(void *ctx) +{ + const int key = 0; + const int val = 1; + const u64 flags = BPF_NOEXIST; + int callback_ctx; + + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + bpf_map_update_elem(&array_map, &key, &val, flags); + + /* calling for_each with non-zero flags will return error */ + err = bpf_for_each_map_elem(&array_map, callback, &callback_ctx, flags); + + return 0; +} -- cgit v1.2.3-70-g09d2 From 7be14c1c9030f73cc18b4ff23b78a0a081f16188 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 22 Mar 2023 22:30:55 +0100 Subject: bpf: Fix __reg_bound_offset 64->32 var_off subreg propagation Xu reports that after commit 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking"), the following BPF program is rejected by the verifier: 0: (61) r2 = *(u32 *)(r1 +0) ; R2_w=pkt(off=0,r=0,imm=0) 1: (61) r3 = *(u32 *)(r1 +4) ; R3_w=pkt_end(off=0,imm=0) 2: (bf) r1 = r2 3: (07) r1 += 1 4: (2d) if r1 > r3 goto pc+8 5: (71) r1 = *(u8 *)(r2 +0) ; R1_w=scalar(umax=255,var_off=(0x0; 0xff)) 6: (18) r0 = 0x7fffffffffffff10 8: (0f) r1 += r0 ; R1_w=scalar(umin=0x7fffffffffffff10,umax=0x800000000000000f) 9: (18) r0 = 0x8000000000000000 11: (07) r0 += 1 12: (ad) if r0 < r1 goto pc-2 13: (b7) r0 = 0 14: (95) exit And the verifier log says: func#0 @0 0: R1=ctx(off=0,imm=0) R10=fp0 0: (61) r2 = *(u32 *)(r1 +0) ; R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=0,imm=0) 1: (61) r3 = *(u32 
*)(r1 +4) ; R1=ctx(off=0,imm=0) R3_w=pkt_end(off=0,imm=0) 2: (bf) r1 = r2 ; R1_w=pkt(off=0,r=0,imm=0) R2_w=pkt(off=0,r=0,imm=0) 3: (07) r1 += 1 ; R1_w=pkt(off=1,r=0,imm=0) 4: (2d) if r1 > r3 goto pc+8 ; R1_w=pkt(off=1,r=1,imm=0) R3_w=pkt_end(off=0,imm=0) 5: (71) r1 = *(u8 *)(r2 +0) ; R1_w=scalar(umax=255,var_off=(0x0; 0xff)) R2_w=pkt(off=0,r=1,imm=0) 6: (18) r0 = 0x7fffffffffffff10 ; R0_w=9223372036854775568 8: (0f) r1 += r0 ; R0_w=9223372036854775568 R1_w=scalar(umin=9223372036854775568,umax=9223372036854775823,s32_min=-240,s32_max=15) 9: (18) r0 = 0x8000000000000000 ; R0_w=-9223372036854775808 11: (07) r0 += 1 ; R0_w=-9223372036854775807 12: (ad) if r0 < r1 goto pc-2 ; R0_w=-9223372036854775807 R1_w=scalar(umin=9223372036854775568,umax=9223372036854775809) 13: (b7) r0 = 0 ; R0_w=0 14: (95) exit from 12 to 11: R0_w=-9223372036854775807 R1_w=scalar(umin=9223372036854775810,umax=9223372036854775823,var_off=(0x8000000000000000; 0xffffffff)) R2_w=pkt(off=0,r=1,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0 11: (07) r0 += 1 ; R0_w=-9223372036854775806 12: (ad) if r0 < r1 goto pc-2 ; R0_w=-9223372036854775806 R1_w=scalar(umin=9223372036854775810,umax=9223372036854775810,var_off=(0x8000000000000000; 0xffffffff)) 13: safe [...] from 12 to 11: R0_w=-9223372036854775795 R1=scalar(umin=9223372036854775822,umax=9223372036854775823,var_off=(0x8000000000000000; 0xffffffff)) R2=pkt(off=0,r=1,imm=0) R3=pkt_end(off=0,imm=0) R10=fp0 11: (07) r0 += 1 ; R0_w=-9223372036854775794 12: (ad) if r0 < r1 goto pc-2 ; R0_w=-9223372036854775794 R1=scalar(umin=9223372036854775822,umax=9223372036854775822,var_off=(0x8000000000000000; 0xffffffff)) 13: safe from 12 to 11: R0_w=-9223372036854775794 R1=scalar(umin=9223372036854775823,umax=9223372036854775823,var_off=(0x8000000000000000; 0xffffffff)) R2=pkt(off=0,r=1,imm=0) R3=pkt_end(off=0,imm=0) R10=fp0 11: (07) r0 += 1 ; R0_w=-9223372036854775793 12: (ad) if r0 < r1 goto pc-2 ; R0_w=-9223372036854775793 R1=scalar(umin=9223372036854775823,umax=9223372036854775823,var_off=(0x8000000000000000; 0xffffffff)) 13: safe from 12 to 11: R0_w=-9223372036854775793 R1=scalar(umin=9223372036854775824,umax=9223372036854775823,var_off=(0x8000000000000000; 0xffffffff)) R2=pkt(off=0,r=1,imm=0) R3=pkt_end(off=0,imm=0) R10=fp0 11: (07) r0 += 1 ; R0_w=-9223372036854775792 12: (ad) if r0 < r1 goto pc-2 ; R0_w=-9223372036854775792 R1=scalar(umin=9223372036854775824,umax=9223372036854775823,var_off=(0x8000000000000000; 0xffffffff)) 13: safe [...] The 64bit umin=9223372036854775810 bound continuously bumps by +1 while umax=9223372036854775823 stays as-is until the verifier complexity limit is reached and the program gets finally rejected. During this simulation, the umin also eventually surpasses umax. 
Looking at the first 'from 12 to 11' output line from the loop, R1 has the following state:

  R1_w=scalar(umin=0x8000000000000002 (9223372036854775810),
              umax=0x800000000000000f (9223372036854775823),
              var_off=(0x8000000000000000; 0xffffffff))

The var_off is technically not in an inconsistent state, but it is very imprecise and allows values far beyond the 64-bit umax bound, whereas the expected output with refined known bits in var_off should have been:

  R1_w=scalar(umin=0x8000000000000002 (9223372036854775810),
              umax=0x800000000000000f (9223372036854775823),
              var_off=(0x8000000000000000; 0xf))

In the above log, var_off stays at var_off=(0x8000000000000000; 0xffffffff) and does not converge into a narrower mask where more bits become known. With convergence, R1 would eventually turn into a constant in the umin=9223372036854775823, umax=9223372036854775823 case, where the verifier would have terminated and let the program pass. __reg_combine_64_into_32() marks the subregister unknown and propagates the 64-bit {s,u}min/{s,u}max bounds to their 32-bit equivalents iff they are within the 32-bit universe. The question came up whether __reg_combine_64_into_32() should special-case the situation where the 64-bit {s,u}min bounds have the same value as the 64-bit {s,u}max bounds, and then also assign the latter to the 32-bit reg->{s,u}32_{min,max}_value. As can be seen from the above example, however, that is just /one/ special case and not a /generic/ solution: the above example would still not be addressed this way and would remain at an imprecise var_off=(0x8000000000000000; 0xffffffff). The improvement is needed in __reg_bound_offset() instead: refine var32_off with the updated var64_off rather than with the prior reg->var_off. The reg_bounds_sync() code first refines the register's min/max bounds via __update_reg_bounds() from the current var_off, then in __reg_deduce_bounds() from the sign bit, and with the potentially learned bits from the bounds it updates the var_off tnum in __reg_bound_offset(). For example, intersecting with the old var_off might have improved bounds slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), then the new var_off will result in (0; 0x7f...fc). The intersected var64_off then holds the universe which is a superset of var32_off. The point for the latter is not to broaden, but to further refine known bits based on the intersection of var_off with the 32-bit bounds, so that we later construct the final var_off from the upper and lower 32 bits. The final __update_reg_bounds() can then potentially still slightly refine bounds if more bits became known from the new var_off.
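To make the refinement concrete, here is a small standalone sketch. The tnum helpers are simplified re-implementations written for illustration only (modeled on kernel/bpf/tnum.c, not the kernel's exact helpers); the register state is the one from the log above:

  #include <stdint.h>
  #include <stdio.h>

  struct tnum { uint64_t value; uint64_t mask; };  /* mask bits are "unknown" */

  static struct tnum tnum_range(uint64_t min, uint64_t max)
  {
          uint64_t chi = min ^ max, delta;
          int bits = chi ? 64 - __builtin_clzll(chi) : 0;

          if (bits > 63)
                  return (struct tnum){ 0, ~0ULL };  /* everything unknown */
          delta = (1ULL << bits) - 1;
          return (struct tnum){ min & ~delta, delta };
  }

  static struct tnum tnum_intersect(struct tnum a, struct tnum b)
  {
          uint64_t v = a.value | b.value, mu = a.mask & b.mask;

          return (struct tnum){ v & ~mu, mu };
  }

  static struct tnum tnum_subreg(struct tnum a)  /* keep the low 32 bits only */
  {
          return (struct tnum){ (uint32_t)a.value, (uint32_t)a.mask };
  }

  int main(void)
  {
          /* R1 from the log: var_off=(0x8000000000000000; 0xffffffff) */
          struct tnum var_off = { 0x8000000000000000ULL, 0xffffffffULL };
          uint64_t umin = 0x8000000000000002ULL, umax = 0x800000000000000fULL;
          struct tnum var64_off = tnum_intersect(var_off, tnum_range(umin, umax));

          /* prints value=0x8000000000000000 mask=0xf: the refined 64-bit tnum */
          printf("var64_off: value=%#llx mask=%#llx\n",
                 (unsigned long long)var64_off.value,
                 (unsigned long long)var64_off.mask);

          /* old code seeded var32_off from the stale var_off: mask=0xffffffff */
          printf("old subreg mask=%#llx\n",
                 (unsigned long long)tnum_subreg(var_off).mask);

          /* new code seeds it from var64_off: mask=0xf, 28 more bits known */
          printf("new subreg mask=%#llx\n",
                 (unsigned long long)tnum_subreg(var64_off).mask);
          return 0;
  }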
After the improvement, we can see R1 converging successively: func#0 @0 0: R1=ctx(off=0,imm=0) R10=fp0 0: (61) r2 = *(u32 *)(r1 +0) ; R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=0,imm=0) 1: (61) r3 = *(u32 *)(r1 +4) ; R1=ctx(off=0,imm=0) R3_w=pkt_end(off=0,imm=0) 2: (bf) r1 = r2 ; R1_w=pkt(off=0,r=0,imm=0) R2_w=pkt(off=0,r=0,imm=0) 3: (07) r1 += 1 ; R1_w=pkt(off=1,r=0,imm=0) 4: (2d) if r1 > r3 goto pc+8 ; R1_w=pkt(off=1,r=1,imm=0) R3_w=pkt_end(off=0,imm=0) 5: (71) r1 = *(u8 *)(r2 +0) ; R1_w=scalar(umax=255,var_off=(0x0; 0xff)) R2_w=pkt(off=0,r=1,imm=0) 6: (18) r0 = 0x7fffffffffffff10 ; R0_w=9223372036854775568 8: (0f) r1 += r0 ; R0_w=9223372036854775568 R1_w=scalar(umin=9223372036854775568,umax=9223372036854775823,s32_min=-240,s32_max=15) 9: (18) r0 = 0x8000000000000000 ; R0_w=-9223372036854775808 11: (07) r0 += 1 ; R0_w=-9223372036854775807 12: (ad) if r0 < r1 goto pc-2 ; R0_w=-9223372036854775807 R1_w=scalar(umin=9223372036854775568,umax=9223372036854775809) 13: (b7) r0 = 0 ; R0_w=0 14: (95) exit from 12 to 11: R0_w=-9223372036854775807 R1_w=scalar(umin=9223372036854775810,umax=9223372036854775823,var_off=(0x8000000000000000; 0xf),s32_min=0,s32_max=15,u32_max=15) R2_w=pkt(off=0,r=1,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0 11: (07) r0 += 1 ; R0_w=-9223372036854775806 12: (ad) if r0 < r1 goto pc-2 ; R0_w=-9223372036854775806 R1_w=-9223372036854775806 13: safe from 12 to 11: R0_w=-9223372036854775806 R1_w=scalar(umin=9223372036854775811,umax=9223372036854775823,var_off=(0x8000000000000000; 0xf),s32_min=0,s32_max=15,u32_max=15) R2_w=pkt(off=0,r=1,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0 11: (07) r0 += 1 ; R0_w=-9223372036854775805 12: (ad) if r0 < r1 goto pc-2 ; R0_w=-9223372036854775805 R1_w=-9223372036854775805 13: safe [...] from 12 to 11: R0_w=-9223372036854775798 R1=scalar(umin=9223372036854775819,umax=9223372036854775823,var_off=(0x8000000000000008; 0x7),s32_min=8,s32_max=15,u32_min=8,u32_max=15) R2=pkt(off=0,r=1,imm=0) R3=pkt_end(off=0,imm=0) R10=fp0 11: (07) r0 += 1 ; R0_w=-9223372036854775797 12: (ad) if r0 < r1 goto pc-2 ; R0_w=-9223372036854775797 R1=-9223372036854775797 13: safe from 12 to 11: R0_w=-9223372036854775797 R1=scalar(umin=9223372036854775820,umax=9223372036854775823,var_off=(0x800000000000000c; 0x3),s32_min=12,s32_max=15,u32_min=12,u32_max=15) R2=pkt(off=0,r=1,imm=0) R3=pkt_end(off=0,imm=0) R10=fp0 11: (07) r0 += 1 ; R0_w=-9223372036854775796 12: (ad) if r0 < r1 goto pc-2 ; R0_w=-9223372036854775796 R1=-9223372036854775796 13: safe from 12 to 11: R0_w=-9223372036854775796 R1=scalar(umin=9223372036854775821,umax=9223372036854775823,var_off=(0x800000000000000c; 0x3),s32_min=12,s32_max=15,u32_min=12,u32_max=15) R2=pkt(off=0,r=1,imm=0) R3=pkt_end(off=0,imm=0) R10=fp0 11: (07) r0 += 1 ; R0_w=-9223372036854775795 12: (ad) if r0 < r1 goto pc-2 ; R0_w=-9223372036854775795 R1=-9223372036854775795 13: safe from 12 to 11: R0_w=-9223372036854775795 R1=scalar(umin=9223372036854775822,umax=9223372036854775823,var_off=(0x800000000000000e; 0x1),s32_min=14,s32_max=15,u32_min=14,u32_max=15) R2=pkt(off=0,r=1,imm=0) R3=pkt_end(off=0,imm=0) R10=fp0 11: (07) r0 += 1 ; R0_w=-9223372036854775794 12: (ad) if r0 < r1 goto pc-2 ; R0_w=-9223372036854775794 R1=-9223372036854775794 13: safe from 12 to 11: R0_w=-9223372036854775794 R1=-9223372036854775793 R2=pkt(off=0,r=1,imm=0) R3=pkt_end(off=0,imm=0) R10=fp0 11: (07) r0 += 1 ; R0_w=-9223372036854775793 12: (ad) if r0 < r1 goto pc-2 last_idx 12 first_idx 12 parent didn't have regs=1 stack=0 marks: R0_rw=P-9223372036854775801 
R1_r=scalar(umin=9223372036854775815,umax=9223372036854775823,var_off=(0x8000000000000000; 0xf),s32_min=0,s32_max=15,u32_max=15) R2=pkt(off=0,r=1,imm=0) R3=pkt_end(off=0,imm=0) R10=fp0 last_idx 11 first_idx 11 regs=1 stack=0 before 11: (07) r0 += 1 parent didn't have regs=1 stack=0 marks: R0_rw=P-9223372036854775805 R1_rw=scalar(umin=9223372036854775812,umax=9223372036854775823,var_off=(0x8000000000000000; 0xf),s32_min=0,s32_max=15,u32_max=15) R2_w=pkt(off=0,r=1,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0 last_idx 12 first_idx 0 regs=1 stack=0 before 12: (ad) if r0 < r1 goto pc-2 regs=1 stack=0 before 11: (07) r0 += 1 regs=1 stack=0 before 12: (ad) if r0 < r1 goto pc-2 regs=1 stack=0 before 11: (07) r0 += 1 regs=1 stack=0 before 12: (ad) if r0 < r1 goto pc-2 regs=1 stack=0 before 11: (07) r0 += 1 regs=1 stack=0 before 9: (18) r0 = 0x8000000000000000 last_idx 12 first_idx 12 parent didn't have regs=2 stack=0 marks: R0_rw=P-9223372036854775801 R1_r=Pscalar(umin=9223372036854775815,umax=9223372036854775823,var_off=(0x8000000000000000; 0xf),s32_min=0,s32_max=15,u32_max=15) R2=pkt(off=0,r=1,imm=0) R3=pkt_end(off=0,imm=0) R10=fp0 last_idx 11 first_idx 11 regs=2 stack=0 before 11: (07) r0 += 1 parent didn't have regs=2 stack=0 marks: R0_rw=P-9223372036854775805 R1_rw=Pscalar(umin=9223372036854775812,umax=9223372036854775823,var_off=(0x8000000000000000; 0xf),s32_min=0,s32_max=15,u32_max=15) R2_w=pkt(off=0,r=1,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0 last_idx 12 first_idx 0 regs=2 stack=0 before 12: (ad) if r0 < r1 goto pc-2 regs=2 stack=0 before 11: (07) r0 += 1 regs=2 stack=0 before 12: (ad) if r0 < r1 goto pc-2 regs=2 stack=0 before 11: (07) r0 += 1 regs=2 stack=0 before 12: (ad) if r0 < r1 goto pc-2 regs=2 stack=0 before 11: (07) r0 += 1 regs=2 stack=0 before 9: (18) r0 = 0x8000000000000000 regs=2 stack=0 before 8: (0f) r1 += r0 regs=3 stack=0 before 6: (18) r0 = 0x7fffffffffffff10 regs=2 stack=0 before 5: (71) r1 = *(u8 *)(r2 +0) 13: safe from 4 to 13: safe verification time 322 usec stack depth 0 processed 56 insns (limit 1000000) max_states_per_insn 1 total_states 3 peak_states 3 mark_read 1 This also fixes up a test case along with this improvement where we match on the verifier log. The updated log now has a refined var_off, too. 
Fixes: 3f50f132d840 ("bpf: Verifier, do explicit ALU32 bounds tracking") Reported-by: Xu Kuohai Signed-off-by: Daniel Borkmann Signed-off-by: Andrii Nakryiko Reviewed-by: John Fastabend Link: https://lore.kernel.org/bpf/20230314203424.4015351-2-xukuohai@huaweicloud.com Link: https://lore.kernel.org/bpf/20230322213056.2470-1-daniel@iogearbox.net --- kernel/bpf/verifier.c | 6 +++--- tools/testing/selftests/bpf/prog_tests/align.c | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'tools/testing') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 50c995697f0e..fd2f216de920 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2149,9 +2149,9 @@ static void __reg_bound_offset(struct bpf_reg_state *reg) struct tnum var64_off = tnum_intersect(reg->var_off, tnum_range(reg->umin_value, reg->umax_value)); - struct tnum var32_off = tnum_intersect(tnum_subreg(reg->var_off), - tnum_range(reg->u32_min_value, - reg->u32_max_value)); + struct tnum var32_off = tnum_intersect(tnum_subreg(var64_off), + tnum_range(reg->u32_min_value, + reg->u32_max_value)); reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); } diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c index c94fa8d6c4f6..b92770592563 100644 --- a/tools/testing/selftests/bpf/prog_tests/align.c +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -575,14 +575,14 @@ static struct bpf_align_test tests[] = { /* New unknown value in R7 is (4n), >= 76 */ {14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"}, /* Adding it to packet pointer gives nice bounds again */ - {16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"}, + {16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0x7fc)"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (0) * which is 2. Then the variable offset is (4n+2), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"}, + {20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0x7fc)"}, }, }, }; -- cgit v1.2.3-70-g09d2 From 1a3148fc171f5cde11b4c24e808a953ff725a3e2 Mon Sep 17 00:00:00 2001 From: Xu Kuohai Date: Wed, 22 Mar 2023 22:30:56 +0100 Subject: selftests/bpf: Check when bounds are not in the 32-bit range Add cases to check if bound is updated correctly when 64-bit value is not in the 32-bit range. 
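As a rough C analogue of the first JLT case added below (a sketch only; the actual selftest encodes this as raw BPF instructions, and pkt_byte here just stands in for the byte loaded from the packet):

  /* r1 = one packet byte (0..255) + 0x7fffffffffffff10, so its unsigned
   * range is [0x7fffffffffffff10, 0x800000000000000f], which crosses the
   * 64-bit signed boundary.
   */
  unsigned long long pkt_byte = 0xff;               /* any value in 0..255 */
  unsigned long long r1 = pkt_byte + 0x7fffffffffffff10ULL;
  unsigned long long r0 = 0x8000000000000000ULL;

  do {
          r0 += 1;
  } while (r0 < r1);  /* BPF_JLT: unsigned compare, at most 15 iterations
                       * across r1's whole range, so the loop is bounded
                       */

With the unsigned comparison the verifier can bound the loop and accepts the program. The JSLT variant performs the same comparison on signed values; since r1's signed range is [S64_MIN, S64_MAX], the loop cannot be bounded and the program is rejected with "BPF program is too large".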
Signed-off-by: Xu Kuohai Signed-off-by: Daniel Borkmann Signed-off-by: Andrii Nakryiko Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20230322213056.2470-2-daniel@iogearbox.net --- tools/testing/selftests/bpf/verifier/bounds.c | 121 ++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c index 33125d5f6772..74b1917d4208 100644 --- a/tools/testing/selftests/bpf/verifier/bounds.c +++ b/tools/testing/selftests/bpf/verifier/bounds.c @@ -753,3 +753,124 @@ .result_unpriv = REJECT, .result = ACCEPT, }, +{ + "bound check with JMP_JLT for crossing 64-bit signed boundary", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 8), + + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), + BPF_LD_IMM64(BPF_REG_0, 0x7fffffffffffff10), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + + BPF_LD_IMM64(BPF_REG_0, 0x8000000000000000), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + /* r1 unsigned range is [0x7fffffffffffff10, 0x800000000000000f] */ + BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_1, -2), + + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "bound check with JMP_JSLT for crossing 64-bit signed boundary", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 8), + + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), + BPF_LD_IMM64(BPF_REG_0, 0x7fffffffffffff10), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + + BPF_LD_IMM64(BPF_REG_0, 0x8000000000000000), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + /* r1 signed range is [S64_MIN, S64_MAX] */ + BPF_JMP_REG(BPF_JSLT, BPF_REG_0, BPF_REG_1, -2), + + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "BPF program is too large", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "bound check for loop upper bound greater than U32_MAX", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 8), + + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), + BPF_LD_IMM64(BPF_REG_0, 0x100000000), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + + BPF_LD_IMM64(BPF_REG_0, 0x100000000), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_1, -2), + + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "bound check with JMP32_JLT for crossing 32-bit signed boundary", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 6), + + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), + BPF_MOV32_IMM(BPF_REG_0, 0x7fffff10), + 
BPF_ALU32_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + + BPF_MOV32_IMM(BPF_REG_0, 0x80000000), + BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 1), + /* r1 unsigned range is [0, 0x8000000f] */ + BPF_JMP32_REG(BPF_JLT, BPF_REG_0, BPF_REG_1, -2), + + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, +}, +{ + "bound check with JMP32_JSLT for crossing 32-bit signed boundary", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 6), + + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), + BPF_MOV32_IMM(BPF_REG_0, 0x7fffff10), + BPF_ALU32_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + + BPF_MOV32_IMM(BPF_REG_0, 0x80000000), + BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 1), + /* r1 signed range is [S32_MIN, S32_MAX] */ + BPF_JMP32_REG(BPF_JSLT, BPF_REG_0, BPF_REG_1, -2), + + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "BPF program is too large", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, +}, -- cgit v1.2.3-70-g09d2 From 06da9f3bd6418e06719f15340202996f7a4c258d Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Wed, 22 Mar 2023 20:24:05 -0700 Subject: selftests/bpf: Test switching TCP Congestion Control algorithms. Create a pair of sockets that utilize the congestion control algorithm under a particular name. Then switch up this congestion control algorithm to another implementation and check whether newly created connections using the same cc name now run the new implementation. Also, try to update a link with a struct_ops that is without BPF_F_LINK or with a wrong or different name. These cases should fail due to the violation of assumptions. To update a bpf_link of a struct_ops, it must be replaced with another struct_ops that is identical in type and name and has the BPF_F_LINK flag. The other test case is to create links from the same struct_ops more than once. It makes sure a struct_ops can be used repeatly. 
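At the libbpf level, the core flow the new test exercises looks roughly like this (sketch only; the skeleton and map names are the ones used in the test added below):

  struct bpf_link *link;
  int err;

  /* register struct_ops map ca_update_1 as the "tcp_ca_update" algorithm */
  link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);

  /* atomically switch the link to another struct_ops map; this succeeds
   * only if the replacement has the same type and name and was defined
   * in a SEC(".struct_ops.link") map, i.e. created with BPF_F_LINK
   */
  err = bpf_link__update_map(link, skel->maps.ca_update_2);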
Signed-off-by: Kui-Feng Lee Link: https://lore.kernel.org/r/20230323032405.3735486-9-kuifeng@meta.com Signed-off-by: Martin KaFai Lau --- .../testing/selftests/bpf/prog_tests/bpf_tcp_ca.c | 160 +++++++++++++++++++++ tools/testing/selftests/bpf/progs/tcp_ca_update.c | 80 +++++++++++ 2 files changed, 240 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/tcp_ca_update.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index e980188d4124..a53c254c6058 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -8,6 +8,7 @@ #include "bpf_dctcp.skel.h" #include "bpf_cubic.skel.h" #include "bpf_tcp_nogpl.skel.h" +#include "tcp_ca_update.skel.h" #include "bpf_dctcp_release.skel.h" #include "tcp_ca_write_sk_pacing.skel.h" #include "tcp_ca_incompl_cong_ops.skel.h" @@ -381,6 +382,155 @@ static void test_unsupp_cong_op(void) libbpf_set_print(old_print_fn); } +static void test_update_ca(void) +{ + struct tcp_ca_update *skel; + struct bpf_link *link; + int saved_ca1_cnt; + int err; + + skel = tcp_ca_update__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open")) + return; + + link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); + ASSERT_OK_PTR(link, "attach_struct_ops"); + + do_test("tcp_ca_update", NULL); + saved_ca1_cnt = skel->bss->ca1_cnt; + ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt"); + + err = bpf_link__update_map(link, skel->maps.ca_update_2); + ASSERT_OK(err, "update_map"); + + do_test("tcp_ca_update", NULL); + ASSERT_EQ(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt"); + ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt"); + + bpf_link__destroy(link); + tcp_ca_update__destroy(skel); +} + +static void test_update_wrong(void) +{ + struct tcp_ca_update *skel; + struct bpf_link *link; + int saved_ca1_cnt; + int err; + + skel = tcp_ca_update__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open")) + return; + + link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); + ASSERT_OK_PTR(link, "attach_struct_ops"); + + do_test("tcp_ca_update", NULL); + saved_ca1_cnt = skel->bss->ca1_cnt; + ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt"); + + err = bpf_link__update_map(link, skel->maps.ca_wrong); + ASSERT_ERR(err, "update_map"); + + do_test("tcp_ca_update", NULL); + ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt"); + + bpf_link__destroy(link); + tcp_ca_update__destroy(skel); +} + +static void test_mixed_links(void) +{ + struct tcp_ca_update *skel; + struct bpf_link *link, *link_nl; + int err; + + skel = tcp_ca_update__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open")) + return; + + link_nl = bpf_map__attach_struct_ops(skel->maps.ca_no_link); + ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl"); + + link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); + ASSERT_OK_PTR(link, "attach_struct_ops"); + + do_test("tcp_ca_update", NULL); + ASSERT_GT(skel->bss->ca1_cnt, 0, "ca1_ca1_cnt"); + + err = bpf_link__update_map(link, skel->maps.ca_no_link); + ASSERT_ERR(err, "update_map"); + + bpf_link__destroy(link); + bpf_link__destroy(link_nl); + tcp_ca_update__destroy(skel); +} + +static void test_multi_links(void) +{ + struct tcp_ca_update *skel; + struct bpf_link *link; + + skel = tcp_ca_update__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open")) + return; + + link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); + ASSERT_OK_PTR(link, "attach_struct_ops_1st"); + bpf_link__destroy(link); + + /* A map should be able to be used 
to create links multiple + * times. + */ + link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); + ASSERT_OK_PTR(link, "attach_struct_ops_2nd"); + bpf_link__destroy(link); + + tcp_ca_update__destroy(skel); +} + +static void test_link_replace(void) +{ + DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts); + struct tcp_ca_update *skel; + struct bpf_link *link; + int err; + + skel = tcp_ca_update__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open")) + return; + + link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); + ASSERT_OK_PTR(link, "attach_struct_ops_1st"); + bpf_link__destroy(link); + + link = bpf_map__attach_struct_ops(skel->maps.ca_update_2); + ASSERT_OK_PTR(link, "attach_struct_ops_2nd"); + + /* BPF_F_REPLACE with a wrong old map Fd. It should fail! + * + * With BPF_F_REPLACE, the link should be updated only if the + * old map fd given here matches the map backing the link. + */ + opts.old_map_fd = bpf_map__fd(skel->maps.ca_update_1); + opts.flags = BPF_F_REPLACE; + err = bpf_link_update(bpf_link__fd(link), + bpf_map__fd(skel->maps.ca_update_1), + &opts); + ASSERT_ERR(err, "bpf_link_update_fail"); + + /* BPF_F_REPLACE with a correct old map Fd. It should success! */ + opts.old_map_fd = bpf_map__fd(skel->maps.ca_update_2); + err = bpf_link_update(bpf_link__fd(link), + bpf_map__fd(skel->maps.ca_update_1), + &opts); + ASSERT_OK(err, "bpf_link_update_success"); + + bpf_link__destroy(link); + + tcp_ca_update__destroy(skel); +} + void test_bpf_tcp_ca(void) { if (test__start_subtest("dctcp")) @@ -399,4 +549,14 @@ void test_bpf_tcp_ca(void) test_incompl_cong_ops(); if (test__start_subtest("unsupp_cong_op")) test_unsupp_cong_op(); + if (test__start_subtest("update_ca")) + test_update_ca(); + if (test__start_subtest("update_wrong")) + test_update_wrong(); + if (test__start_subtest("mixed_links")) + test_mixed_links(); + if (test__start_subtest("multi_links")) + test_multi_links(); + if (test__start_subtest("link_replace")) + test_link_replace(); } diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_update.c b/tools/testing/selftests/bpf/progs/tcp_ca_update.c new file mode 100644 index 000000000000..b93a0ed33057 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tcp_ca_update.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" + +#include +#include + +char _license[] SEC("license") = "GPL"; + +int ca1_cnt = 0; +int ca2_cnt = 0; + +static inline struct tcp_sock *tcp_sk(const struct sock *sk) +{ + return (struct tcp_sock *)sk; +} + +SEC("struct_ops/ca_update_1_init") +void BPF_PROG(ca_update_1_init, struct sock *sk) +{ + ca1_cnt++; +} + +SEC("struct_ops/ca_update_2_init") +void BPF_PROG(ca_update_2_init, struct sock *sk) +{ + ca2_cnt++; +} + +SEC("struct_ops/ca_update_cong_control") +void BPF_PROG(ca_update_cong_control, struct sock *sk, + const struct rate_sample *rs) +{ +} + +SEC("struct_ops/ca_update_ssthresh") +__u32 BPF_PROG(ca_update_ssthresh, struct sock *sk) +{ + return tcp_sk(sk)->snd_ssthresh; +} + +SEC("struct_ops/ca_update_undo_cwnd") +__u32 BPF_PROG(ca_update_undo_cwnd, struct sock *sk) +{ + return tcp_sk(sk)->snd_cwnd; +} + +SEC(".struct_ops.link") +struct tcp_congestion_ops ca_update_1 = { + .init = (void *)ca_update_1_init, + .cong_control = (void *)ca_update_cong_control, + .ssthresh = (void *)ca_update_ssthresh, + .undo_cwnd = (void *)ca_update_undo_cwnd, + .name = "tcp_ca_update", +}; + +SEC(".struct_ops.link") +struct tcp_congestion_ops ca_update_2 = { + .init = (void *)ca_update_2_init, + .cong_control = (void *)ca_update_cong_control, 
+ .ssthresh = (void *)ca_update_ssthresh, + .undo_cwnd = (void *)ca_update_undo_cwnd, + .name = "tcp_ca_update", +}; + +SEC(".struct_ops.link") +struct tcp_congestion_ops ca_wrong = { + .cong_control = (void *)ca_update_cong_control, + .ssthresh = (void *)ca_update_ssthresh, + .undo_cwnd = (void *)ca_update_undo_cwnd, + .name = "tcp_ca_wrong", +}; + +SEC(".struct_ops") +struct tcp_congestion_ops ca_no_link = { + .cong_control = (void *)ca_update_cong_control, + .ssthresh = (void *)ca_update_ssthresh, + .undo_cwnd = (void *)ca_update_undo_cwnd, + .name = "tcp_ca_no_link", +}; -- cgit v1.2.3-70-g09d2 From ecb3c1e675c719885ac05bb5473fa5c495d1ad24 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 21 Mar 2023 12:52:00 +0100 Subject: selftests: rtnetlink: Make the set of tests to run configurable Extract the list of all tests into a variable, ALL_TESTS. Then assume the environment variable TESTS holds the list of tests to actually run, falling back to ALL_TESTS if TESTS is empty. This is the same interface that forwarding selftests use to make the set of tests to run configurable. In addition to this, allow setting the value explicitly through a command line option "-t" along the lines of what fib_nexthops.sh does. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- tools/testing/selftests/net/rtnetlink.sh | 90 +++++++++++++++++--------------- 1 file changed, 48 insertions(+), 42 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 275491be3da2..12caf9602353 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -4,6 +4,30 @@ # # set -e +ALL_TESTS=" + kci_test_polrouting + kci_test_route_get + kci_test_addrlft + kci_test_promote_secondaries + kci_test_tc + kci_test_gre + kci_test_gretap + kci_test_ip6gretap + kci_test_erspan + kci_test_ip6erspan + kci_test_bridge + kci_test_addrlabel + kci_test_ifalias + kci_test_vrf + kci_test_encap + kci_test_macsec + kci_test_ipsec + kci_test_ipsec_offload + kci_test_fdb_get + kci_test_neigh_get + kci_test_bridge_parent_id +" + devdummy="test-dummy0" # Kselftest framework requirement - SKIP code is 4. @@ -1227,60 +1251,34 @@ kci_test_bridge_parent_id() kci_test_rtnl() { + local current_test local ret=0 + kci_add_dummy if [ $ret -ne 0 ];then echo "FAIL: cannot add dummy interface" return 1 fi - kci_test_polrouting - check_err $? - kci_test_route_get - check_err $? - kci_test_addrlft - check_err $? - kci_test_promote_secondaries - check_err $? - kci_test_tc - check_err $? - kci_test_gre - check_err $? - kci_test_gretap - check_err $? - kci_test_ip6gretap - check_err $? - kci_test_erspan - check_err $? - kci_test_ip6erspan - check_err $? - kci_test_bridge - check_err $? - kci_test_addrlabel - check_err $? - kci_test_ifalias - check_err $? - kci_test_vrf - check_err $? - kci_test_encap - check_err $? - kci_test_macsec - check_err $? - kci_test_ipsec - check_err $? - kci_test_ipsec_offload - check_err $? - kci_test_fdb_get - check_err $? - kci_test_neigh_get - check_err $? - kci_test_bridge_parent_id - check_err $? + for current_test in ${TESTS:-$ALL_TESTS}; do + $current_test + check_err $? 
+ done kci_del_dummy return $ret } +usage() +{ + cat < Test(s) to run (default: all) + (options: $(echo $ALL_TESTS)) +EOF +} + #check for needed privileges if [ "$(id -u)" -ne 0 ];then echo "SKIP: Need root privileges" @@ -1295,6 +1293,14 @@ for x in ip tc;do fi done +while getopts t:h o; do + case $o in + t) TESTS=$OPTARG;; + h) usage; exit 0;; + *) usage; exit 1;; + esac +done + kci_test_rtnl exit $? -- cgit v1.2.3-70-g09d2 From 6a414fd77f613e374f2f1accb36beca90bab084d Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 21 Mar 2023 12:52:01 +0100 Subject: selftests: rtnetlink: Add an address proto test Add coverage of "ip address {add,replace} ... proto" support. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Reviewed-by: David Ahern Signed-off-by: David S. Miller --- tools/testing/selftests/net/rtnetlink.sh | 91 ++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 12caf9602353..3b15c686c03f 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -26,6 +26,7 @@ ALL_TESTS=" kci_test_fdb_get kci_test_neigh_get kci_test_bridge_parent_id + kci_test_address_proto " devdummy="test-dummy0" @@ -1249,6 +1250,96 @@ kci_test_bridge_parent_id() echo "PASS: bridge_parent_id" } +address_get_proto() +{ + local addr=$1; shift + + ip -N -j address show dev "$devdummy" | + jq -e -r --arg addr "${addr%/*}" \ + '.[].addr_info[] | select(.local == $addr) | .protocol' +} + +address_count() +{ + ip -N -j address show dev "$devdummy" "$@" | + jq -e -r '[.[].addr_info[] | .local | select(. != null)] | length' +} + +do_test_address_proto() +{ + local what=$1; shift + local addr=$1; shift + local addr2=${addr%/*}2/${addr#*/} + local addr3=${addr%/*}3/${addr#*/} + local proto + local count + local ret=0 + local err + + ip address add dev "$devdummy" "$addr3" + check_err $? + proto=$(address_get_proto "$addr3") + [[ "$proto" == null ]] + check_err $? + + ip address add dev "$devdummy" "$addr2" proto 0x99 + check_err $? + proto=$(address_get_proto "$addr2") + [[ "$proto" == 0x99 ]] + check_err $? + + ip address add dev "$devdummy" "$addr" proto 0xab + check_err $? + proto=$(address_get_proto "$addr") + [[ "$proto" == 0xab ]] + check_err $? + + ip address replace dev "$devdummy" "$addr" proto 0x11 + proto=$(address_get_proto "$addr") + check_err $? + [[ "$proto" == 0x11 ]] + check_err $? + + count=$(address_count) + check_err $? + (( count == 3 )) # $addr, $addr2 and $addr3 + + count=$(address_count proto 0) + check_err $? + (( count == 1 )) # just $addr2 + + count=$(address_count proto 0x11) + check_err $? + (( count == 2 )) # $addr and $addr2 + + count=$(address_count proto 0xab) + check_err $? + (( count == 1 )) # just $addr2 + + ip address del dev "$devdummy" "$addr" + ip address del dev "$devdummy" "$addr2" + ip address del dev "$devdummy" "$addr3" + + if [ $ret -ne 0 ]; then + echo "FAIL: address proto $what" + return 1 + fi + echo "PASS: address proto $what" +} + +kci_test_address_proto() +{ + local ret=0 + + do_test_address_proto IPv4 192.0.2.1/28 + check_err $? + + do_test_address_proto IPv6 2001:db8:1::1/64 + check_err $? 
+ + return $ret +} + kci_test_rtnl() { local current_test -- cgit v1.2.3-70-g09d2 From 6c831c4684124a544f73f7c9b83bc7b2eb0b23d3 Mon Sep 17 00:00:00 2001 From: David Vernet Date: Sat, 25 Mar 2023 16:31:46 -0500 Subject: bpf: Treat KF_RELEASE kfuncs as KF_TRUSTED_ARGS KF_RELEASE kfuncs are not currently treated as having KF_TRUSTED_ARGS, even though they have a superset of the requirements of KF_TRUSTED_ARGS. Like KF_TRUSTED_ARGS, KF_RELEASE kfuncs require a 0-offset argument, and don't allow NULL-able arguments. Unlike KF_TRUSTED_ARGS which require _either_ an argument with ref_obj_id > 0, _or_ (ref->type & BPF_REG_TRUSTED_MODIFIERS) (and no unsafe modifiers allowed), KF_RELEASE only allows for ref_obj_id > 0. Because KF_RELEASE today doesn't automatically imply KF_TRUSTED_ARGS, some of these requirements are enforced in different ways that can make the behavior of the verifier feel unpredictable. For example, a KF_RELEASE kfunc with a NULL-able argument will currently fail in the verifier with a message like, "arg#0 is ptr_or_null_ expected ptr_ or socket" rather than "Possibly NULL pointer passed to trusted arg0". Our intention is the same, but the semantics are different due to implemenetation details that kfunc authors and BPF program writers should not need to care about. Let's make the behavior of the verifier more consistent and intuitive by having KF_RELEASE kfuncs imply the presence of KF_TRUSTED_ARGS. Our eventual goal is to have all kfuncs assume KF_TRUSTED_ARGS by default anyways, so this takes us a step in that direction. Note that it does not make sense to assume KF_TRUSTED_ARGS for all KF_ACQUIRE kfuncs. KF_ACQUIRE kfuncs can have looser semantics than KF_RELEASE, with e.g. KF_RCU | KF_RET_NULL. We may want to have KF_ACQUIRE imply KF_TRUSTED_ARGS _unless_ KF_RCU is specified, but that can be left to another patch set, and there are no such subtleties to address for KF_RELEASE. Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230325213144.486885-4-void@manifault.com Signed-off-by: Alexei Starovoitov --- Documentation/bpf/kfuncs.rst | 7 ++++--- kernel/bpf/cpumask.c | 2 +- kernel/bpf/verifier.c | 2 +- net/bpf/test_run.c | 6 ++++++ tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c | 4 ++-- tools/testing/selftests/bpf/progs/task_kfunc_failure.c | 6 +++--- tools/testing/selftests/bpf/verifier/calls.c | 10 +++++++--- tools/testing/selftests/bpf/verifier/ref_tracking.c | 6 +++--- 8 files changed, 27 insertions(+), 16 deletions(-) (limited to 'tools/testing') diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst index 69eccf6f98ef..bf1b85941452 100644 --- a/Documentation/bpf/kfuncs.rst +++ b/Documentation/bpf/kfuncs.rst @@ -179,9 +179,10 @@ both are orthogonal to each other. --------------------- The KF_RELEASE flag is used to indicate that the kfunc releases the pointer -passed in to it. There can be only one referenced pointer that can be passed in. -All copies of the pointer being released are invalidated as a result of invoking -kfunc with this flag. +passed in to it. There can be only one referenced pointer that can be passed +in. All copies of the pointer being released are invalidated as a result of +invoking kfunc with this flag. KF_RELEASE kfuncs automatically receive the +protection afforded by the KF_TRUSTED_ARGS flag described below. 
2.4.4 KF_KPTR_GET flag ---------------------- diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c index e991af7dc13c..7efdf5d770ca 100644 --- a/kernel/bpf/cpumask.c +++ b/kernel/bpf/cpumask.c @@ -402,7 +402,7 @@ __diag_pop(); BTF_SET8_START(cpumask_kfunc_btf_ids) BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL) -BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE | KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_cpumask_first, KF_RCU) BTF_ID_FLAGS(func, bpf_cpumask_first_zero, KF_RCU) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 64f06f6e16bf..20eb2015842f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -9307,7 +9307,7 @@ static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta) static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta) { - return meta->kfunc_flags & KF_TRUSTED_ARGS; + return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta); } static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 27587f1c5f36..f1652f5fbd2e 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -606,6 +606,11 @@ bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) return &prog_test_struct; } +__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p) +{ + WARN_ON_ONCE(1); +} + __bpf_kfunc struct prog_test_member * bpf_kfunc_call_memb_acquire(void) { @@ -800,6 +805,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2) BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU) BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE) BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset) BTF_SET8_END(test_sk_check_kfunc_ids) static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size, diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c index 807fb0ac41e9..48b2034cadb3 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c @@ -206,7 +206,7 @@ int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path) } SEC("tp_btf/cgroup_mkdir") -__failure __msg("expects refcounted") +__failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path) { struct __cgrps_kfunc_map_value *v; @@ -234,7 +234,7 @@ int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path) } SEC("tp_btf/cgroup_mkdir") -__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket") +__failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path) { struct __cgrps_kfunc_map_value local, *v; diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c index 27994d6b2914..2c374a7ffece 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c @@ -206,7 +206,7 @@ int BPF_PROG(task_kfunc_get_unreleased, struct task_struct *task, u64 clone_flag } SEC("tp_btf/task_newtask") -__failure __msg("arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket") +__failure __msg("Possibly NULL pointer passed to trusted 
arg0") int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags) { struct __tasks_kfunc_map_value *v; @@ -234,7 +234,7 @@ int BPF_PROG(task_kfunc_release_fp, struct task_struct *task, u64 clone_flags) } SEC("tp_btf/task_newtask") -__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket") +__failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags) { struct __tasks_kfunc_map_value local, *v; @@ -277,7 +277,7 @@ int BPF_PROG(task_kfunc_release_unacquired, struct task_struct *task, u64 clone_ } SEC("tp_btf/task_newtask") -__failure __msg("arg#0 is ptr_or_null_ expected ptr_ or socket") +__failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(task_kfunc_from_pid_no_null_check, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired; diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index 5702fc9761ef..1bdf2b43e49e 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -109,7 +109,7 @@ }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = REJECT, - .errstr = "arg#0 is ptr_or_null_ expected ptr_ or socket", + .errstr = "Possibly NULL pointer passed to trusted arg0", .fixup_kfunc_btf_id = { { "bpf_kfunc_call_test_acquire", 3 }, { "bpf_kfunc_call_test_release", 5 }, @@ -165,19 +165,23 @@ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), BPF_EXIT_INSN(), BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 16), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -4), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .fixup_kfunc_btf_id = { { "bpf_kfunc_call_test_acquire", 3 }, - { "bpf_kfunc_call_test_release", 9 }, + { "bpf_kfunc_call_test_offset", 9 }, + { "bpf_kfunc_call_test_release", 12 }, }, .result_unpriv = REJECT, .result = REJECT, diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c index 9540164712b7..5a2e154dd1e0 100644 --- a/tools/testing/selftests/bpf/verifier/ref_tracking.c +++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c @@ -142,7 +142,7 @@ .kfunc = "bpf", .expected_attach_type = BPF_LSM_MAC, .flags = BPF_F_SLEEPABLE, - .errstr = "arg#0 is ptr_or_null_ expected ptr_ or socket", + .errstr = "Possibly NULL pointer passed to trusted arg0", .fixup_kfunc_btf_id = { { "bpf_lookup_user_key", 2 }, { "bpf_key_put", 4 }, @@ -163,7 +163,7 @@ .kfunc = "bpf", .expected_attach_type = BPF_LSM_MAC, .flags = BPF_F_SLEEPABLE, - .errstr = "arg#0 is ptr_or_null_ expected ptr_ or socket", + .errstr = "Possibly NULL pointer passed to trusted arg0", .fixup_kfunc_btf_id = { { "bpf_lookup_system_key", 1 }, { "bpf_key_put", 3 }, @@ -182,7 +182,7 @@ .kfunc = "bpf", .expected_attach_type = BPF_LSM_MAC, .flags = BPF_F_SLEEPABLE, - .errstr = "arg#0 pointer type STRUCT bpf_key must point to scalar, or struct with scalar", + .errstr = "Possibly NULL pointer passed to trusted arg0", .fixup_kfunc_btf_id = { { "bpf_key_put", 1 }, }, -- cgit v1.2.3-70-g09d2 From 
3e5329e193f463e6aaf98c33f7cb1308160880ab Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:42 +0200 Subject: selftests/bpf: Report program name on parse_test_spec error Change test_loader.c:run_subtest() behavior to show BPF program name when test spec for that program can't be parsed. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-2-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/test_loader.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index bf41390157bf..8ca5121b5329 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -232,8 +232,11 @@ void run_subtest(struct test_loader *tester, /* if we can't derive test specification, go to the next test */ err = parse_test_spec(tester, obj, prog, &spec); - if (!ASSERT_OK(err, "parse_test_spec")) + if (err) { + PRINT_FAIL("Can't parse test spec for program '%s'\n", + bpf_program__name(prog)); continue; + } tobj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, &open_opts); if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */ -- cgit v1.2.3-70-g09d2 From 207b1ba3019100d862931e97b49f76ff1e0a89f2 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:43 +0200 Subject: selftests/bpf: __imm_insn & __imm_const macro for bpf_misc.h Add two convenience macro for BPF test cases, allowing the following usage: #include ... asm volatile ( ... ".8byte %[raw_insn];" ... "r1 += %[st_foo_offset];" ... : : __imm_insn(raw_insn, BPF_RAW_INSN(...)), __imm_const(st_foo_offset, offsetof(struct st, foo)) : __clobber_all); Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-3-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/bpf_misc.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 3c03ec8056ce..8b4681a96f89 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -35,8 +35,10 @@ #define __clobber_all "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "memory" #define __clobber_common "r0", "r1", "r2", "r3", "r4", "r5", "memory" #define __imm(name) [name]"i"(name) +#define __imm_const(name, expr) [name]"i"(expr) #define __imm_addr(name) [name]"i"(&name) #define __imm_ptr(name) [name]"p"(&name) +#define __imm_insn(name, expr) [name]"i"(*(long *)&(expr)) #if defined(__TARGET_ARCH_x86) #define SYSCALL_WRAPPER 1 -- cgit v1.2.3-70-g09d2 From 1d56ade032a49b2042f43b3f6bdf116928064267 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:44 +0200 Subject: selftests/bpf: Unprivileged tests for test_loader.c Extends test_loader.c:test_loader__run_subtests() by allowing to execute tests in unprivileged mode, similar to test_verifier.c. Adds the following new attributes controlling test_loader behavior: __msg_unpriv __success_unpriv __failure_unpriv * If any of these attributes is present the test would be loaded in unprivileged mode. * If only "privileged" attributes are present the test would be loaded only in privileged mode. * If both "privileged" and "unprivileged" attributes are present the test would be loaded in both modes. 
* If test has to be executed in both modes, __msg(text) is specified and __msg_unpriv is not specified the behavior is the same as if __msg_unpriv(text) is specified. * For test filtering purposes the name of the program loaded in unprivileged mode is derived from the usual program name by adding `@unpriv' suffix. Also adds attribute '__description'. This attribute specifies text to be used instead of a program name for display and filtering purposes. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-4-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/Makefile | 10 +- tools/testing/selftests/bpf/autoconf_helper.h | 9 + tools/testing/selftests/bpf/progs/bpf_misc.h | 25 ++ tools/testing/selftests/bpf/test_loader.c | 394 +++++++++++++++++++++----- tools/testing/selftests/bpf/test_verifier.c | 25 +- tools/testing/selftests/bpf/unpriv_helpers.c | 26 ++ tools/testing/selftests/bpf/unpriv_helpers.h | 7 + 7 files changed, 395 insertions(+), 101 deletions(-) create mode 100644 tools/testing/selftests/bpf/autoconf_helper.h create mode 100644 tools/testing/selftests/bpf/unpriv_helpers.c create mode 100644 tools/testing/selftests/bpf/unpriv_helpers.h (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index fc092582d16d..4a8ef118fd9d 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -231,8 +231,9 @@ TEST_GEN_PROGS_EXTENDED += $(TRUNNER_BPFTOOL) $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ) -CGROUP_HELPERS := $(OUTPUT)/cgroup_helpers.o TESTING_HELPERS := $(OUTPUT)/testing_helpers.o +CGROUP_HELPERS := $(OUTPUT)/cgroup_helpers.o +UNPRIV_HELPERS := $(OUTPUT)/unpriv_helpers.o TRACE_HELPERS := $(OUTPUT)/trace_helpers.o JSON_WRITER := $(OUTPUT)/json_writer.o CAP_HELPERS := $(OUTPUT)/cap_helpers.o @@ -252,7 +253,7 @@ $(OUTPUT)/test_lirc_mode2_user: $(TESTING_HELPERS) $(OUTPUT)/xdping: $(TESTING_HELPERS) $(OUTPUT)/flow_dissector_load: $(TESTING_HELPERS) $(OUTPUT)/test_maps: $(TESTING_HELPERS) -$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS) +$(OUTPUT)/test_verifier: $(TESTING_HELPERS) $(CAP_HELPERS) $(UNPRIV_HELPERS) $(OUTPUT)/xsk.o: $(BPFOBJ) BPFTOOL ?= $(DEFAULT_BPFTOOL) @@ -560,8 +561,9 @@ TRUNNER_BPF_PROGS_DIR := progs TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \ network_helpers.c testing_helpers.c \ btf_helpers.c flow_dissector_load.h \ - cap_helpers.c test_loader.c xsk.c disasm.c \ - json_writer.c + cap_helpers.c test_loader.c xsk.c disasm.c \ + json_writer.c unpriv_helpers.c + TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ $(OUTPUT)/liburandom_read.so \ $(OUTPUT)/xdp_synproxy \ diff --git a/tools/testing/selftests/bpf/autoconf_helper.h b/tools/testing/selftests/bpf/autoconf_helper.h new file mode 100644 index 000000000000..5b243b9cdf8c --- /dev/null +++ b/tools/testing/selftests/bpf/autoconf_helper.h @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#ifdef HAVE_GENHDR +# include "autoconf.h" +#else +# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__) +# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1 +# endif +#endif diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 8b4681a96f89..9defc217a5bd 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -5,12 +5,33 @@ /* This set of attributes 
controls behavior of the * test_loader.c:test_loader__run_subtests(). * + * The test_loader sequentially loads each program in a skeleton. + * Programs could be loaded in privileged and unprivileged modes. + * - __success, __failure, __msg imply privileged mode; + * - __success_unpriv, __failure_unpriv, __msg_unpriv imply + * unprivileged mode. + * If combination of privileged and unprivileged attributes is present + * both modes are used. If none are present privileged mode is implied. + * + * See test_loader.c:drop_capabilities() for exact set of capabilities + * that differ between privileged and unprivileged modes. + * + * For test filtering purposes the name of the program loaded in + * unprivileged mode is derived from the usual program name by adding + * `@unpriv' suffix. + * * __msg Message expected to be found in the verifier log. * Multiple __msg attributes could be specified. + * __msg_unpriv Same as __msg but for unprivileged mode. * * __success Expect program load success in privileged mode. + * __success_unpriv Expect program load success in unprivileged mode. * * __failure Expect program load failure in privileged mode. + * __failure_unpriv Expect program load failure in unprivileged mode. + * + * __description Text to be used instead of a program name for display + * and filtering purposes. * * __log_level Log level to use for the program, numeric value expected. * @@ -27,6 +48,10 @@ #define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg))) #define __failure __attribute__((btf_decl_tag("comment:test_expect_failure"))) #define __success __attribute__((btf_decl_tag("comment:test_expect_success"))) +#define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc))) +#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" msg))) +#define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv"))) +#define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv"))) #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl))) #define __flag(flag) __attribute__((btf_decl_tag("comment:test_prog_flags="#flag))) diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index 8ca5121b5329..41cddb303885 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -1,9 +1,14 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ +#include #include #include #include +#include "autoconf_helper.h" +#include "unpriv_helpers.h" +#include "cap_helpers.h" + #define str_has_pfx(str, pfx) \ (strncmp(str, pfx, __builtin_constant_p(pfx) ? 
sizeof(pfx) - 1 : strlen(pfx)) == 0) @@ -12,16 +17,40 @@ #define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure" #define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success" #define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg=" +#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv" +#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv" +#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv=" #define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level=" #define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags=" +#define TEST_TAG_DESCRIPTION_PFX "comment:test_description=" -struct test_spec { - const char *name; +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +#define EFFICIENT_UNALIGNED_ACCESS 1 +#else +#define EFFICIENT_UNALIGNED_ACCESS 0 +#endif + +static int sysctl_unpriv_disabled = -1; + +enum mode { + PRIV = 1, + UNPRIV = 2 +}; + +struct test_subspec { + char *name; bool expect_failure; const char **expect_msgs; size_t expect_msg_cnt; +}; + +struct test_spec { + const char *prog_name; + struct test_subspec priv; + struct test_subspec unpriv; int log_level; int prog_flags; + int mode_mask; }; static int tester_init(struct test_loader *tester) @@ -44,17 +73,46 @@ void test_loader_fini(struct test_loader *tester) free(tester->log_buf); } +static void free_test_spec(struct test_spec *spec) +{ + free(spec->priv.name); + free(spec->unpriv.name); + free(spec->priv.expect_msgs); + free(spec->unpriv.expect_msgs); +} + +static int push_msg(const char *msg, struct test_subspec *subspec) +{ + void *tmp; + + tmp = realloc(subspec->expect_msgs, (1 + subspec->expect_msg_cnt) * sizeof(void *)); + if (!tmp) { + ASSERT_FAIL("failed to realloc memory for messages\n"); + return -ENOMEM; + } + subspec->expect_msgs = tmp; + subspec->expect_msgs[subspec->expect_msg_cnt++] = msg; + + return 0; +} + +/* Uses btf_decl_tag attributes to describe the expected test + * behavior, see bpf_misc.h for detailed description of each attribute + * and attribute combinations. 
+ */ static int parse_test_spec(struct test_loader *tester, struct bpf_object *obj, struct bpf_program *prog, struct test_spec *spec) { + const char *description = NULL; + bool has_unpriv_result = false; + int func_id, i, err = 0; struct btf *btf; - int func_id, i; memset(spec, 0, sizeof(*spec)); - spec->name = bpf_program__name(prog); + spec->prog_name = bpf_program__name(prog); btf = bpf_object__btf(obj); if (!btf) { @@ -62,15 +120,15 @@ static int parse_test_spec(struct test_loader *tester, return -EINVAL; } - func_id = btf__find_by_name_kind(btf, spec->name, BTF_KIND_FUNC); + func_id = btf__find_by_name_kind(btf, spec->prog_name, BTF_KIND_FUNC); if (func_id < 0) { - ASSERT_FAIL("failed to find FUNC BTF type for '%s'", spec->name); + ASSERT_FAIL("failed to find FUNC BTF type for '%s'", spec->prog_name); return -EINVAL; } for (i = 1; i < btf__type_cnt(btf); i++) { + const char *s, *val, *msg; const struct btf_type *t; - const char *s, *val; char *e; t = btf__type_by_id(btf, i); @@ -81,30 +139,42 @@ static int parse_test_spec(struct test_loader *tester, continue; s = btf__str_by_offset(btf, t->name_off); - if (strcmp(s, TEST_TAG_EXPECT_FAILURE) == 0) { - spec->expect_failure = true; + if (str_has_pfx(s, TEST_TAG_DESCRIPTION_PFX)) { + description = s + sizeof(TEST_TAG_DESCRIPTION_PFX) - 1; + } else if (strcmp(s, TEST_TAG_EXPECT_FAILURE) == 0) { + spec->priv.expect_failure = true; + spec->mode_mask |= PRIV; } else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS) == 0) { - spec->expect_failure = false; + spec->priv.expect_failure = false; + spec->mode_mask |= PRIV; + } else if (strcmp(s, TEST_TAG_EXPECT_FAILURE_UNPRIV) == 0) { + spec->unpriv.expect_failure = true; + spec->mode_mask |= UNPRIV; + has_unpriv_result = true; + } else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS_UNPRIV) == 0) { + spec->unpriv.expect_failure = false; + spec->mode_mask |= UNPRIV; + has_unpriv_result = true; } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) { - void *tmp; - const char **msg; - - tmp = realloc(spec->expect_msgs, - (1 + spec->expect_msg_cnt) * sizeof(void *)); - if (!tmp) { - ASSERT_FAIL("failed to realloc memory for messages\n"); - return -ENOMEM; - } - spec->expect_msgs = tmp; - msg = &spec->expect_msgs[spec->expect_msg_cnt++]; - *msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1; + msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1; + err = push_msg(msg, &spec->priv); + if (err) + goto cleanup; + spec->mode_mask |= PRIV; + } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) { + msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1; + err = push_msg(msg, &spec->unpriv); + if (err) + goto cleanup; + spec->mode_mask |= UNPRIV; } else if (str_has_pfx(s, TEST_TAG_LOG_LEVEL_PFX)) { val = s + sizeof(TEST_TAG_LOG_LEVEL_PFX) - 1; errno = 0; spec->log_level = strtol(val, &e, 0); if (errno || e[0] != '\0') { - ASSERT_FAIL("failed to parse test log level from '%s'", s); - return -EINVAL; + PRINT_FAIL("failed to parse test log level from '%s'\n", s); + err = -EINVAL; + goto cleanup; } } else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) { val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1; @@ -124,14 +194,70 @@ static int parse_test_spec(struct test_loader *tester, errno = 0; spec->prog_flags |= strtol(val, &e, 0); if (errno || e[0] != '\0') { - ASSERT_FAIL("failed to parse test prog flags from '%s'", s); - return -EINVAL; + PRINT_FAIL("failed to parse test prog flags from '%s'\n", + val); + err = -EINVAL; + goto cleanup; } } } } + if (spec->mode_mask == 0) + spec->mode_mask = PRIV; + + if (!description) + description = 
spec->prog_name; + + if (spec->mode_mask & PRIV) { + spec->priv.name = strdup(description); + if (!spec->priv.name) { + PRINT_FAIL("failed to allocate memory for priv.name\n"); + err = -ENOMEM; + goto cleanup; + } + } + + if (spec->mode_mask & UNPRIV) { + int descr_len = strlen(description); + const char *suffix = " @unpriv"; + char *name; + + name = malloc(descr_len + strlen(suffix) + 1); + if (!name) { + PRINT_FAIL("failed to allocate memory for unpriv.name\n"); + err = -ENOMEM; + goto cleanup; + } + + strcpy(name, description); + strcpy(&name[descr_len], suffix); + spec->unpriv.name = name; + } + + if (spec->mode_mask & (PRIV | UNPRIV)) { + if (!has_unpriv_result) + spec->unpriv.expect_failure = spec->priv.expect_failure; + + if (!spec->unpriv.expect_msgs) { + size_t sz = spec->priv.expect_msg_cnt * sizeof(void *); + + spec->unpriv.expect_msgs = malloc(sz); + if (!spec->unpriv.expect_msgs) { + PRINT_FAIL("failed to allocate memory for unpriv.expect_msgs\n"); + err = -ENOMEM; + goto cleanup; + } + memcpy(spec->unpriv.expect_msgs, spec->priv.expect_msgs, sz); + spec->unpriv.expect_msg_cnt = spec->priv.expect_msg_cnt; + } + } + return 0; + +cleanup: + free_test_spec(spec); + return err; } static void prepare_case(struct test_loader *tester, @@ -148,7 +274,7 @@ static void prepare_case(struct test_loader *tester, bpf_program__set_log_buf(prog, tester->log_buf, tester->log_buf_sz); - /* Make sure we set at least minimal log level, unless test requirest + /* Make sure we set at least minimal log level, unless test requires * even higher level already. Make sure to preserve independent log * level 4 (verifier stats), though. */ @@ -172,18 +298,18 @@ static void emit_verifier_log(const char *log_buf, bool force) } static void validate_case(struct test_loader *tester, - struct test_spec *spec, + struct test_subspec *subspec, struct bpf_object *obj, struct bpf_program *prog, int load_err) { int i, j; - for (i = 0; i < spec->expect_msg_cnt; i++) { + for (i = 0; i < subspec->expect_msg_cnt; i++) { char *match; const char *expect_msg; - expect_msg = spec->expect_msgs[i]; + expect_msg = subspec->expect_msgs[i]; match = strstr(tester->log_buf + tester->next_match_pos, expect_msg); if (!ASSERT_OK_PTR(match, "expect_msg")) { @@ -191,7 +317,8 @@ static void validate_case(struct test_loader *tester, if (env.verbosity == VERBOSE_NONE) emit_verifier_log(tester->log_buf, true /*force*/); for (j = 0; j < i; j++) - fprintf(stderr, "MATCHED MSG: '%s'\n", spec->expect_msgs[j]); + fprintf(stderr, + "MATCHED MSG: '%s'\n", subspec->expect_msgs[j]); fprintf(stderr, "EXPECTED MSG: '%s'\n", expect_msg); return; } @@ -200,17 +327,169 @@ static void validate_case(struct test_loader *tester, } } +struct cap_state { + __u64 old_caps; + bool initialized; +}; + +static int drop_capabilities(struct cap_state *caps) +{ + const __u64 caps_to_drop = (1ULL << CAP_SYS_ADMIN | 1ULL << CAP_NET_ADMIN | + 1ULL << CAP_PERFMON | 1ULL << CAP_BPF); + int err; + + err = cap_disable_effective(caps_to_drop, &caps->old_caps); + if (err) { + PRINT_FAIL("failed to drop capabilities: %i, %s\n", err, strerror(err)); + return err; + } + + caps->initialized = true; + return 0; +} + +static int restore_capabilities(struct cap_state *caps) +{ + int err; + + if (!caps->initialized) + return 0; + + err = cap_enable_effective(caps->old_caps, NULL); + if (err) + PRINT_FAIL("failed to restore capabilities: %i, %s\n", err, strerror(err)); + caps->initialized = false; + return err; +} + +static bool can_execute_unpriv(struct test_loader *tester, struct 
test_spec *spec) +{ + if (sysctl_unpriv_disabled < 0) + sysctl_unpriv_disabled = get_unpriv_disabled() ? 1 : 0; + if (sysctl_unpriv_disabled) + return false; + if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS) + return false; + return true; +} + +static bool is_unpriv_capable_map(struct bpf_map *map) +{ + enum bpf_map_type type; + __u32 flags; + + type = bpf_map__type(map); + + switch (type) { + case BPF_MAP_TYPE_HASH: + case BPF_MAP_TYPE_PERCPU_HASH: + case BPF_MAP_TYPE_HASH_OF_MAPS: + flags = bpf_map__map_flags(map); + return !(flags & BPF_F_ZERO_SEED); + case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: + case BPF_MAP_TYPE_ARRAY: + case BPF_MAP_TYPE_RINGBUF: + case BPF_MAP_TYPE_PROG_ARRAY: + case BPF_MAP_TYPE_CGROUP_ARRAY: + case BPF_MAP_TYPE_PERCPU_ARRAY: + case BPF_MAP_TYPE_USER_RINGBUF: + case BPF_MAP_TYPE_ARRAY_OF_MAPS: + case BPF_MAP_TYPE_CGROUP_STORAGE: + case BPF_MAP_TYPE_PERF_EVENT_ARRAY: + return true; + default: + return false; + } +} + /* this function is forced noinline and has short generic name to look better * in test_progs output (in case of a failure) */ static noinline void run_subtest(struct test_loader *tester, - const char *skel_name, - skel_elf_bytes_fn elf_bytes_factory) + struct bpf_object_open_opts *open_opts, + const void *obj_bytes, + size_t obj_byte_cnt, + struct test_spec *spec, + bool unpriv) +{ + struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv; + struct cap_state caps = {}; + struct bpf_program *tprog; + struct bpf_object *tobj; + struct bpf_map *map; + int err; + + if (!test__start_subtest(subspec->name)) + return; + + if (unpriv) { + if (!can_execute_unpriv(tester, spec)) { + test__skip(); + test__end_subtest(); + return; + } + if (drop_capabilities(&caps)) { + test__end_subtest(); + return; + } + } + + tobj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, open_opts); + if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */ + goto subtest_cleanup; + + bpf_object__for_each_program(tprog, tobj) + bpf_program__set_autoload(tprog, false); + + bpf_object__for_each_program(tprog, tobj) { + /* only load specified program */ + if (strcmp(bpf_program__name(tprog), spec->prog_name) == 0) { + bpf_program__set_autoload(tprog, true); + break; + } + } + + prepare_case(tester, spec, tobj, tprog); + + /* By default bpf_object__load() automatically creates all + * maps declared in the skeleton. Some map types are only + * allowed in priv mode. Disable autoload for such maps in + * unpriv mode. 
+ */ + bpf_object__for_each_map(map, tobj) + bpf_map__set_autocreate(map, !unpriv || is_unpriv_capable_map(map)); + + err = bpf_object__load(tobj); + if (subspec->expect_failure) { + if (!ASSERT_ERR(err, "unexpected_load_success")) { + emit_verifier_log(tester->log_buf, false /*force*/); + goto tobj_cleanup; + } + } else { + if (!ASSERT_OK(err, "unexpected_load_failure")) { + emit_verifier_log(tester->log_buf, true /*force*/); + goto tobj_cleanup; + } + } + + emit_verifier_log(tester->log_buf, false /*force*/); + validate_case(tester, subspec, tobj, tprog, err); + +tobj_cleanup: + bpf_object__close(tobj); +subtest_cleanup: + test__end_subtest(); + restore_capabilities(&caps); +} + +static void process_subtest(struct test_loader *tester, + const char *skel_name, + skel_elf_bytes_fn elf_bytes_factory) { LIBBPF_OPTS(bpf_object_open_opts, open_opts, .object_name = skel_name); - struct bpf_object *obj = NULL, *tobj; - struct bpf_program *prog, *tprog; + struct bpf_object *obj = NULL; + struct bpf_program *prog; const void *obj_bytes; size_t obj_byte_cnt; int err; @@ -224,12 +503,8 @@ void run_subtest(struct test_loader *tester, return; bpf_object__for_each_program(prog, obj) { - const char *prog_name = bpf_program__name(prog); struct test_spec spec; - if (!test__start_subtest(prog_name)) - continue; - /* if we can't derive test specification, go to the next test */ err = parse_test_spec(tester, obj, prog, &spec); if (err) { @@ -238,41 +513,12 @@ void run_subtest(struct test_loader *tester, continue; } - tobj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, &open_opts); - if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */ - continue; + if (spec.mode_mask & PRIV) + run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, &spec, false); + if (spec.mode_mask & UNPRIV) + run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, &spec, true); - bpf_object__for_each_program(tprog, tobj) - bpf_program__set_autoload(tprog, false); - - bpf_object__for_each_program(tprog, tobj) { - /* only load specified program */ - if (strcmp(bpf_program__name(tprog), prog_name) == 0) { - bpf_program__set_autoload(tprog, true); - break; - } - } - - prepare_case(tester, &spec, tobj, tprog); - - err = bpf_object__load(tobj); - if (spec.expect_failure) { - if (!ASSERT_ERR(err, "unexpected_load_success")) { - emit_verifier_log(tester->log_buf, false /*force*/); - goto tobj_cleanup; - } - } else { - if (!ASSERT_OK(err, "unexpected_load_failure")) { - emit_verifier_log(tester->log_buf, true /*force*/); - goto tobj_cleanup; - } - } - - emit_verifier_log(tester->log_buf, false /*force*/); - validate_case(tester, &spec, tobj, tprog, err); - -tobj_cleanup: - bpf_object__close(tobj); + free_test_spec(&spec); } bpf_object__close(obj); @@ -283,5 +529,5 @@ void test_loader__run_subtests(struct test_loader *tester, skel_elf_bytes_fn elf_bytes_factory) { /* see comment in run_subtest() for why we do this function nesting */ - run_subtest(tester, skel_name, elf_bytes_factory); + process_subtest(tester, skel_name, elf_bytes_factory); } diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 49a70d9beb0b..5b90eef09ade 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -33,13 +33,8 @@ #include #include -#ifdef HAVE_GENHDR -# include "autoconf.h" -#else -# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__) -# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1 -# endif -#endif +#include 
"autoconf_helper.h" +#include "unpriv_helpers.h" #include "cap_helpers.h" #include "bpf_rand.h" #include "bpf_util.h" @@ -1665,22 +1660,6 @@ static bool is_admin(void) return (caps & ADMIN_CAPS) == ADMIN_CAPS; } -static void get_unpriv_disabled() -{ - char buf[2]; - FILE *fd; - - fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r"); - if (!fd) { - perror("fopen /proc/sys/"UNPRIV_SYSCTL); - unpriv_disabled = true; - return; - } - if (fgets(buf, 2, fd) == buf && atoi(buf)) - unpriv_disabled = true; - fclose(fd); -} - static bool test_as_unpriv(struct bpf_test *test) { #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS diff --git a/tools/testing/selftests/bpf/unpriv_helpers.c b/tools/testing/selftests/bpf/unpriv_helpers.c new file mode 100644 index 000000000000..2a6efbd0401e --- /dev/null +++ b/tools/testing/selftests/bpf/unpriv_helpers.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include +#include + +#include "unpriv_helpers.h" + +bool get_unpriv_disabled(void) +{ + bool disabled; + char buf[2]; + FILE *fd; + + fd = fopen("/proc/sys/" UNPRIV_SYSCTL, "r"); + if (fd) { + disabled = (fgets(buf, 2, fd) == buf && atoi(buf)); + fclose(fd); + } else { + perror("fopen /proc/sys/" UNPRIV_SYSCTL); + disabled = true; + } + + return disabled; +} diff --git a/tools/testing/selftests/bpf/unpriv_helpers.h b/tools/testing/selftests/bpf/unpriv_helpers.h new file mode 100644 index 000000000000..151f67329665 --- /dev/null +++ b/tools/testing/selftests/bpf/unpriv_helpers.h @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include + +#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled" + +bool get_unpriv_disabled(void); -- cgit v1.2.3-70-g09d2 From 19a8e06f5f9155caf1a5577a0f7969eee13d0cbb Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:45 +0200 Subject: selftests/bpf: Tests execution support for test_loader.c Extends test_loader.c:test_loader__run_subtests() by allowing to execute BPF_PROG_TEST_RUN bpf command for selected programs. This is similar to functionality provided by test_verifier. Adds the following new attributes controlling test_loader behavior: __retval(...) __retval_unpriv(...) * If any of these attributes is present, the annotated program would be executed using libbpf's bpf_prog_test_run_opts() function. * If __retval is present, the test run would be done for program loaded in privileged mode. * If __retval_unpriv is present, the test run would be done for program loaded in unprivileged mode. * To mimic test_verifier behavior, the actual run is initiated in privileged mode. * The value returned by a test run is compared against retval parameter. 
The retval attribute takes one of the following parameters: - a decimal number - a hexadecimal number (must start from '0x') - any of a three special literals (provided for compatibility with test_verifier): - INT_MIN - POINTER_VALUE - TEST_DATA_LEN An example of the attribute usage: SEC("socket") __description("return 42") __success __success_unpriv __retval(42) __naked void the_42_test(void) { asm volatile (" \ r0 = 42; \ exit; \ " ::: __clobber_all); } Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-5-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/bpf_misc.h | 15 +++ tools/testing/selftests/bpf/test_loader.c | 149 ++++++++++++++++++++++++--- 2 files changed, 150 insertions(+), 14 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 9defc217a5bd..6e3b4903c541 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -30,6 +30,15 @@ * __failure Expect program load failure in privileged mode. * __failure_unpriv Expect program load failure in unprivileged mode. * + * __retval Execute the program using BPF_PROG_TEST_RUN command, + * expect return value to match passed parameter: + * - a decimal number + * - a hexadecimal number, when starts from 0x + * - literal INT_MIN + * - literal POINTER_VALUE (see definition below) + * - literal TEST_DATA_LEN (see definition below) + * __retval_unpriv Same, but load program in unprivileged mode. + * * __description Text to be used instead of a program name for display * and filtering purposes. * @@ -54,6 +63,8 @@ #define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv"))) #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl))) #define __flag(flag) __attribute__((btf_decl_tag("comment:test_prog_flags="#flag))) +#define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="#val))) +#define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="#val))) /* Convenience macro for use with 'asm volatile' blocks */ #define __naked __attribute__((naked)) @@ -65,6 +76,10 @@ #define __imm_ptr(name) [name]"p"(&name) #define __imm_insn(name, expr) [name]"i"(*(long *)&(expr)) +/* Magic constants used with __retval() */ +#define POINTER_VALUE 0xcafe4all +#define TEST_DATA_LEN 64 + #if defined(__TARGET_ARCH_x86) #define SYSCALL_WRAPPER 1 #define SYS_PREFIX "__x64_" diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index 41cddb303885..47e9e076bc8f 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -23,6 +23,12 @@ #define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level=" #define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags=" #define TEST_TAG_DESCRIPTION_PFX "comment:test_description=" +#define TEST_TAG_RETVAL_PFX "comment:test_retval=" +#define TEST_TAG_RETVAL_PFX_UNPRIV "comment:test_retval_unpriv=" + +/* Warning: duplicated in bpf_misc.h */ +#define POINTER_VALUE 0xcafe4all +#define TEST_DATA_LEN 64 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS #define EFFICIENT_UNALIGNED_ACCESS 1 @@ -42,6 +48,8 @@ struct test_subspec { bool expect_failure; const char **expect_msgs; size_t expect_msg_cnt; + int retval; + bool execute; }; struct test_spec { @@ -96,6 +104,46 @@ static int push_msg(const char *msg, struct test_subspec *subspec) return 0; } 
+static int parse_int(const char *str, int *val, const char *name) +{ + char *end; + long tmp; + + errno = 0; + if (str_has_pfx(str, "0x")) + tmp = strtol(str + 2, &end, 16); + else + tmp = strtol(str, &end, 10); + if (errno || end[0] != '\0') { + PRINT_FAIL("failed to parse %s from '%s'\n", name, str); + return -EINVAL; + } + *val = tmp; + return 0; +} + +static int parse_retval(const char *str, int *val, const char *name) +{ + struct { + char *name; + int val; + } named_values[] = { + { "INT_MIN" , INT_MIN }, + { "POINTER_VALUE", POINTER_VALUE }, + { "TEST_DATA_LEN", TEST_DATA_LEN }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(named_values); ++i) { + if (strcmp(str, named_values[i].name) != 0) + continue; + *val = named_values[i].val; + return 0; + } + + return parse_int(str, val, name); +} + /* Uses btf_decl_tag attributes to describe the expected test * behavior, see bpf_misc.h for detailed description of each attribute * and attribute combinations. @@ -107,6 +155,7 @@ static int parse_test_spec(struct test_loader *tester, { const char *description = NULL; bool has_unpriv_result = false; + bool has_unpriv_retval = false; int func_id, i, err = 0; struct btf *btf; @@ -129,7 +178,7 @@ static int parse_test_spec(struct test_loader *tester, for (i = 1; i < btf__type_cnt(btf); i++) { const char *s, *val, *msg; const struct btf_type *t; - char *e; + int tmp; t = btf__type_by_id(btf, i); if (!btf_is_decl_tag(t)) @@ -167,15 +216,26 @@ static int parse_test_spec(struct test_loader *tester, if (err) goto cleanup; spec->mode_mask |= UNPRIV; + } else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX)) { + val = s + sizeof(TEST_TAG_RETVAL_PFX) - 1; + err = parse_retval(val, &spec->priv.retval, "__retval"); + if (err) + goto cleanup; + spec->priv.execute = true; + spec->mode_mask |= PRIV; + } else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX_UNPRIV)) { + val = s + sizeof(TEST_TAG_RETVAL_PFX_UNPRIV) - 1; + err = parse_retval(val, &spec->unpriv.retval, "__retval_unpriv"); + if (err) + goto cleanup; + spec->mode_mask |= UNPRIV; + spec->unpriv.execute = true; + has_unpriv_retval = true; } else if (str_has_pfx(s, TEST_TAG_LOG_LEVEL_PFX)) { val = s + sizeof(TEST_TAG_LOG_LEVEL_PFX) - 1; - errno = 0; - spec->log_level = strtol(val, &e, 0); - if (errno || e[0] != '\0') { - PRINT_FAIL("failed to parse test log level from '%s'\n", s); - err = -EINVAL; + err = parse_int(val, &spec->log_level, "test log level"); + if (err) goto cleanup; - } } else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) { val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1; if (strcmp(val, "BPF_F_STRICT_ALIGNMENT") == 0) { @@ -191,14 +251,10 @@ static int parse_test_spec(struct test_loader *tester, } else if (strcmp(val, "BPF_F_XDP_HAS_FRAGS") == 0) { spec->prog_flags |= BPF_F_XDP_HAS_FRAGS; } else /* assume numeric value */ { - errno = 0; - spec->prog_flags |= strtol(val, &e, 0); - if (errno || e[0] != '\0') { - PRINT_FAIL("failed to parse test prog flags from '%s'\n", - val); - err = -EINVAL; + err = parse_int(val, &tmp, "test prog flags"); + if (err) goto cleanup; - } + spec->prog_flags |= tmp; } } } @@ -239,6 +295,11 @@ static int parse_test_spec(struct test_loader *tester, if (!has_unpriv_result) spec->unpriv.expect_failure = spec->priv.expect_failure; + if (!has_unpriv_retval) { + spec->unpriv.retval = spec->priv.retval; + spec->unpriv.execute = spec->priv.execute; + } + if (!spec->unpriv.expect_msgs) { size_t sz = spec->priv.expect_msg_cnt * sizeof(void *); @@ -402,6 +463,51 @@ static bool is_unpriv_capable_map(struct bpf_map *map) } } +static int 
do_prog_test_run(int fd_prog, int *retval) +{ + __u8 tmp_out[TEST_DATA_LEN << 2] = {}; + __u8 tmp_in[TEST_DATA_LEN] = {}; + int err, saved_errno; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = tmp_in, + .data_size_in = sizeof(tmp_in), + .data_out = tmp_out, + .data_size_out = sizeof(tmp_out), + .repeat = 1, + ); + + err = bpf_prog_test_run_opts(fd_prog, &topts); + saved_errno = errno; + + if (err) { + PRINT_FAIL("FAIL: Unexpected bpf_prog_test_run error: %d (%s) ", + saved_errno, strerror(saved_errno)); + return err; + } + + ASSERT_OK(0, "bpf_prog_test_run"); + *retval = topts.retval; + + return 0; +} + +static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subspec) +{ + if (!subspec->execute) + return false; + + if (subspec->expect_failure) + return false; + + if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS) { + if (env.verbosity != VERBOSE_NONE) + printf("alignment prevents execution\n"); + return false; + } + + return true; +} + /* this function is forced noinline and has short generic name to look better * in test_progs output (in case of a failure) */ @@ -418,6 +524,7 @@ void run_subtest(struct test_loader *tester, struct bpf_program *tprog; struct bpf_object *tobj; struct bpf_map *map; + int retval; int err; if (!test__start_subtest(subspec->name)) @@ -476,6 +583,20 @@ void run_subtest(struct test_loader *tester, emit_verifier_log(tester->log_buf, false /*force*/); validate_case(tester, subspec, tobj, tprog, err); + if (should_do_test_run(spec, subspec)) { + /* For some reason test_verifier executes programs + * with all capabilities restored. Do the same here. + */ + if (!restore_capabilities(&caps)) + goto tobj_cleanup; + + do_prog_test_run(bpf_program__fd(tprog), &retval); + if (retval != subspec->retval && subspec->retval != POINTER_VALUE) { + PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval); + goto tobj_cleanup; + } + } + tobj_cleanup: bpf_object__close(tobj); subtest_cleanup: -- cgit v1.2.3-70-g09d2 From 55108621a35e42f773de5d4b20cf7a14d6d53503 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:46 +0200 Subject: selftests/bpf: prog_tests entry point for migrated test_verifier tests prog_tests/verifier.c would be used as a host for verifier/*.c tests migrated to use inline assembly and run from test_progs. The run_test_aux() function mimics the test_verifier behavior dropping CAP_SYS_ADMIN upon entry. 
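For illustration, once a verifier/*.c file has been converted to inline
assembly and built into a skeleton, wiring it up is expected to be a
one-line wrapper per skeleton (the skeleton name below is hypothetical;
RUN() and run_tests_aux() are the helpers introduced by the diff that
follows):

    #include "verifier_example.skel.h"

    /* entry point picked up by test_progs */
    void test_verifier_example(void) { RUN(verifier_example); }

RUN(verifier_example) expands to
run_tests_aux("verifier_example", verifier_example__elf_bytes), which
drops CAP_SYS_ADMIN, runs every subtest of the skeleton through
test_loader__run_subtests(), and restores the dropped capability before
returning.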
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-6-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 29 +++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/verifier.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c new file mode 100644 index 000000000000..aa63f5d84d97 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include + +#include "cap_helpers.h" + +__maybe_unused +static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) +{ + struct test_loader tester = {}; + __u64 old_caps; + int err; + + /* test_verifier tests are executed w/o CAP_SYS_ADMIN, do the same here */ + err = cap_disable_effective(1ULL << CAP_SYS_ADMIN, &old_caps); + if (err) { + PRINT_FAIL("failed to drop CAP_SYS_ADMIN: %i, %s\n", err, strerror(err)); + return; + } + + test_loader__run_subtests(&tester, skel_name, elf_bytes_factory); + test_loader_fini(&tester); + + err = cap_enable_effective(old_caps, NULL); + if (err) + PRINT_FAIL("failed to restore CAP_SYS_ADMIN: %i, %s\n", err, strerror(err)); +} + +#define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes) -- cgit v1.2.3-70-g09d2 From 9d0f1568ad5ba29feddc0897e2ccc7d6de6713c8 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:47 +0200 Subject: selftests/bpf: verifier/and.c converted to inline assembly Test verifier/and.c automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-7-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 3 + tools/testing/selftests/bpf/progs/verifier_and.c | 107 ++++++++++++++++++++++ tools/testing/selftests/bpf/verifier/and.c | 68 -------------- 3 files changed, 110 insertions(+), 68 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_and.c delete mode 100644 tools/testing/selftests/bpf/verifier/and.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index aa63f5d84d97..34526f6d5ab1 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -3,6 +3,7 @@ #include #include "cap_helpers.h" +#include "verifier_and.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -27,3 +28,5 @@ static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_fac } #define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes) + +void test_verifier_and(void) { RUN(verifier_and); } diff --git a/tools/testing/selftests/bpf/progs/verifier_and.c b/tools/testing/selftests/bpf/progs/verifier_and.c new file mode 100644 index 000000000000..e97e518516b6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_and.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/and.c */ + +#include +#include +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} 
map_hash_48b SEC(".maps"); + +SEC("socket") +__description("invalid and of negative number") +__failure __msg("R0 max value is outside of the allowed memory range") +__failure_unpriv +__flag(BPF_F_ANY_ALIGNMENT) +__naked void invalid_and_of_negative_number(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= -4; \ + r1 <<= 2; \ + r0 += r1; \ +l0_%=: r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("invalid range check") +__failure __msg("R0 max value is outside of the allowed memory range") +__failure_unpriv +__flag(BPF_F_ANY_ALIGNMENT) +__naked void invalid_range_check(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + r9 = 1; \ + w1 %%= 2; \ + w1 += 1; \ + w9 &= w1; \ + w9 += 1; \ + w9 >>= 1; \ + w3 = 1; \ + w3 -= w9; \ + w3 *= 0x10000000; \ + r0 += r3; \ + *(u32*)(r0 + 0) = r3; \ +l0_%=: r0 = r0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("check known subreg with unknown reg") +__success __failure_unpriv __msg_unpriv("R1 !read_ok") +__retval(0) +__naked void known_subreg_with_unknown_reg(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r0 <<= 32; \ + r0 += 1; \ + r0 &= 0xFFFF1234; \ + /* Upper bits are unknown but AND above masks out 1 zero'ing lower bits */\ + if w0 < 1 goto l0_%=; \ + r1 = *(u32*)(r1 + 512); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/and.c b/tools/testing/selftests/bpf/verifier/and.c deleted file mode 100644 index 7d7ebee5cc7a..000000000000 --- a/tools/testing/selftests/bpf/verifier/and.c +++ /dev/null @@ -1,68 +0,0 @@ -{ - "invalid and of negative number", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 max value is outside of the allowed memory range", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "invalid range check", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_9, 1), - BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2), - BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1), - BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1), - 
BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1), - BPF_MOV32_IMM(BPF_REG_3, 1), - BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9), - BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), - BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), - BPF_MOV64_REG(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 max value is outside of the allowed memory range", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "check known subreg with unknown reg", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 32), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xFFFF1234), - /* Upper bits are unknown but AND above masks out 1 zero'ing lower bits */ - BPF_JMP32_IMM(BPF_JLT, BPF_REG_0, 1, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 512), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 !read_ok", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = 0 -}, -- cgit v1.2.3-70-g09d2 From a3c830ae02093315a4526fa74fb7d1f66989d895 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:48 +0200 Subject: selftests/bpf: verifier/array_access.c converted to inline assembly Test verifier/array_access.c automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-8-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_array_access.c | 529 +++++++++++++++++++++ .../testing/selftests/bpf/verifier/array_access.c | 379 --------------- 3 files changed, 531 insertions(+), 379 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_array_access.c delete mode 100644 tools/testing/selftests/bpf/verifier/array_access.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 34526f6d5ab1..60eb0f38ed92 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -4,6 +4,7 @@ #include "cap_helpers.h" #include "verifier_and.skel.h" +#include "verifier_array_access.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -30,3 +31,4 @@ static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_fac #define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes) void test_verifier_and(void) { RUN(verifier_and); } +void test_verifier_array_access(void) { RUN(verifier_array_access); } diff --git a/tools/testing/selftests/bpf/progs/verifier_array_access.c b/tools/testing/selftests/bpf/progs/verifier_array_access.c new file mode 100644 index 000000000000..95d7ecc12963 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_array_access.c @@ -0,0 +1,529 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/array_access.c */ + +#include +#include +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); + __uint(map_flags, BPF_F_RDONLY_PROG); +} map_array_ro SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + 
__type(value, struct test_val); + __uint(map_flags, BPF_F_WRONLY_PROG); +} map_array_wo SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("socket") +__description("valid map access into an array with a constant") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void an_array_with_a_constant_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("valid map access into an array with a register") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void an_array_with_a_register_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 4; \ + r1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("valid map access into an array with a variable") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void an_array_with_a_variable_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + if r1 >= %[max_entries] goto l0_%=; \ + r1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(max_entries, MAX_ENTRIES), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("valid map access into an array with a signed variable") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void array_with_a_signed_variable(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + if w1 s> 0xffffffff goto l1_%=; \ + w1 = 0; \ +l1_%=: w2 = %[max_entries]; \ + if r2 s> r1 goto l2_%=; \ + w1 = 0; \ +l2_%=: w1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(max_entries, MAX_ENTRIES), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("invalid map access into an array with a constant") +__failure __msg("invalid access to map value, value_size=48 off=48 size=8") +__failure_unpriv +__naked void an_array_with_a_constant_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 
%[test_val_foo]; \ + *(u64*)(r0 + %[__imm_0]) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, (MAX_ENTRIES + 1) << 2), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("invalid map access into an array with a register") +__failure __msg("R0 min value is outside of the allowed memory range") +__failure_unpriv +__flag(BPF_F_ANY_ALIGNMENT) +__naked void an_array_with_a_register_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = %[__imm_0]; \ + r1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, MAX_ENTRIES + 1), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("invalid map access into an array with a variable") +__failure +__msg("R0 unbounded memory access, make sure to bounds check any such access") +__failure_unpriv +__flag(BPF_F_ANY_ALIGNMENT) +__naked void an_array_with_a_variable_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + r1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("invalid map access into an array with no floor check") +__failure __msg("R0 unbounded memory access") +__failure_unpriv __msg_unpriv("R0 leaks addr") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void array_with_no_floor_check(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r0 + 0); \ + w2 = %[max_entries]; \ + if r2 s> r1 goto l1_%=; \ + w1 = 0; \ +l1_%=: w1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(max_entries, MAX_ENTRIES), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("invalid map access into an array with a invalid max check") +__failure __msg("invalid access to map value, value_size=48 off=44 size=8") +__failure_unpriv __msg_unpriv("R0 leaks addr") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void with_a_invalid_max_check_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + w2 = %[__imm_0]; \ + if r2 > r1 goto l1_%=; \ + w1 = 0; \ +l1_%=: w1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, MAX_ENTRIES + 1), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("invalid map access into an array with a invalid max check") +__failure __msg("R0 pointer += pointer") +__failure_unpriv 
+__flag(BPF_F_ANY_ALIGNMENT) +__naked void with_a_invalid_max_check_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r8 = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 += r8; \ + r0 = *(u32*)(r0 + %[test_val_foo]); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("socket") +__description("valid read map access into a read-only array 1") +__success __success_unpriv __retval(28) +__naked void a_read_only_array_1_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_ro] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 = *(u32*)(r0 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_ro) + : __clobber_all); +} + +SEC("tc") +__description("valid read map access into a read-only array 2") +__success __retval(65507) +__naked void a_read_only_array_2_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_ro] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = 4; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 &= 0xffff; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm(bpf_map_lookup_elem), + __imm_addr(map_array_ro) + : __clobber_all); +} + +SEC("socket") +__description("invalid write map access into a read-only array 1") +__failure __msg("write into map forbidden") +__failure_unpriv +__naked void a_read_only_array_1_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_ro] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 42; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_ro) + : __clobber_all); +} + +SEC("tc") +__description("invalid write map access into a read-only array 2") +__failure __msg("write into map forbidden") +__naked void a_read_only_array_2_2(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_ro] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r6; \ + r2 = 0; \ + r3 = r0; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_skb_load_bytes), + __imm_addr(map_array_ro) + : __clobber_all); +} + +SEC("socket") +__description("valid write map access into a write-only array 1") +__success __success_unpriv __retval(1) +__naked void a_write_only_array_1_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_wo] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 42; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_wo) + : __clobber_all); +} + +SEC("tc") +__description("valid write map access into a write-only array 2") +__success __retval(0) +__naked void a_write_only_array_2_1(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_wo] ll; \ + call 
%[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r6; \ + r2 = 0; \ + r3 = r0; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_skb_load_bytes), + __imm_addr(map_array_wo) + : __clobber_all); +} + +SEC("socket") +__description("invalid read map access into a write-only array 1") +__failure __msg("read from map forbidden") +__failure_unpriv +__naked void a_write_only_array_1_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_wo] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 = *(u64*)(r0 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_wo) + : __clobber_all); +} + +SEC("tc") +__description("invalid read map access into a write-only array 2") +__failure __msg("read from map forbidden") +__naked void a_write_only_array_2_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_wo] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = 4; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: exit; \ +" : + : __imm(bpf_csum_diff), + __imm(bpf_map_lookup_elem), + __imm_addr(map_array_wo) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c deleted file mode 100644 index 1b138cd2b187..000000000000 --- a/tools/testing/selftests/bpf/verifier/array_access.c +++ /dev/null @@ -1,379 +0,0 @@ -{ - "valid map access into an array with a constant", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "valid map access into an array with a register", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "valid map access into an array with a variable", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 
leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "valid map access into an array with a signed variable", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP32_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), - BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "invalid map access into an array with a constant", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2, - offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=48 size=8", - .result = REJECT, -}, -{ - "invalid map access into an array with a register", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 min value is outside of the allowed memory range", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "invalid map access into an array with a variable", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 unbounded memory access, make sure to bounds check any such access", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "invalid map access into an array with no floor check", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), - 
BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .errstr = "R0 unbounded memory access", - .result_unpriv = REJECT, - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "invalid map access into an array with a invalid max check", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .errstr = "invalid access to map value, value_size=48 off=44 size=8", - .result_unpriv = REJECT, - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "invalid map access into an array with a invalid max check", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, - offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3, 11 }, - .errstr = "R0 pointer += pointer", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "valid read map access into a read-only array 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_ro = { 3 }, - .result = ACCEPT, - .retval = 28, -}, -{ - "valid read map access into a read-only array 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_diff), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_array_ro = { 3 }, - .result = ACCEPT, - .retval = 
65507, -}, -{ - "invalid write map access into a read-only array 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), - BPF_EXIT_INSN(), - }, - .fixup_map_array_ro = { 3 }, - .result = REJECT, - .errstr = "write into map forbidden", -}, -{ - "invalid write map access into a read-only array 2", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_array_ro = { 4 }, - .result = REJECT, - .errstr = "write into map forbidden", -}, -{ - "valid write map access into a write-only array 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_wo = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "valid write map access into a write-only array 2", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_load_bytes), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_array_wo = { 4 }, - .result = ACCEPT, - .retval = 0, -}, -{ - "invalid read map access into a write-only array 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_wo = { 3 }, - .result = REJECT, - .errstr = "read from map forbidden", -}, -{ - "invalid read map access into a write-only array 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_diff), - 
BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_array_wo = { 3 }, - .result = REJECT, - .errstr = "read from map forbidden", -}, -- cgit v1.2.3-70-g09d2 From 0ccbe4956d6c20fa0a09a72d2033c49f0976ed6c Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:49 +0200 Subject: selftests/bpf: verifier/basic_stack.c converted to inline assembly Test verifier/basic_stack.c automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-9-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_basic_stack.c | 100 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/basic_stack.c | 64 ------------- 3 files changed, 102 insertions(+), 64 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_basic_stack.c delete mode 100644 tools/testing/selftests/bpf/verifier/basic_stack.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 60eb0f38ed92..95a3151db052 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -5,6 +5,7 @@ #include "cap_helpers.h" #include "verifier_and.skel.h" #include "verifier_array_access.skel.h" +#include "verifier_basic_stack.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -32,3 +33,4 @@ static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_fac void test_verifier_and(void) { RUN(verifier_and); } void test_verifier_array_access(void) { RUN(verifier_array_access); } +void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); } diff --git a/tools/testing/selftests/bpf/progs/verifier_basic_stack.c b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c new file mode 100644 index 000000000000..359df865a8f3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_basic_stack.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/basic_stack.c */ + +#include +#include +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("stack out of bounds") +__failure __msg("invalid write to stack") +__failure_unpriv +__naked void stack_out_of_bounds(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 + 8) = r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("uninitialized stack1") +__failure __msg("invalid indirect read from stack") +__failure_unpriv +__naked void uninitialized_stack1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("uninitialized stack2") +__failure __msg("invalid read from stack") +__failure_unpriv +__naked void uninitialized_stack2(void) +{ + asm volatile (" \ + r2 = r10; \ + r0 = *(u64*)(r2 - 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("invalid fp arithmetic") +__failure __msg("R1 subtraction from stack pointer") +__failure_unpriv +__naked void invalid_fp_arithmetic(void) +{ + /* If this gets ever changed, make sure JITs can deal 
with it. */ + asm volatile (" \ + r0 = 0; \ + r1 = r10; \ + r1 -= 8; \ + *(u64*)(r1 + 0) = r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("non-invalid fp arithmetic") +__success __success_unpriv __retval(0) +__naked void non_invalid_fp_arithmetic(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r10 - 8) = r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("misaligned read from stack") +__failure __msg("misaligned stack access") +__failure_unpriv +__naked void misaligned_read_from_stack(void) +{ + asm volatile (" \ + r2 = r10; \ + r0 = *(u64*)(r2 - 4); \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/basic_stack.c b/tools/testing/selftests/bpf/verifier/basic_stack.c deleted file mode 100644 index f995777dddb3..000000000000 --- a/tools/testing/selftests/bpf/verifier/basic_stack.c +++ /dev/null @@ -1,64 +0,0 @@ -{ - "stack out of bounds", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid write to stack", - .result = REJECT, -}, -{ - "uninitialized stack1", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 2 }, - .errstr = "invalid indirect read from stack", - .result = REJECT, -}, -{ - "uninitialized stack2", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8), - BPF_EXIT_INSN(), - }, - .errstr = "invalid read from stack", - .result = REJECT, -}, -{ - "invalid fp arithmetic", - /* If this gets ever changed, make sure JITs can deal with it. */ - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 subtraction from stack pointer", - .result = REJECT, -}, -{ - "non-invalid fp arithmetic", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, -}, -{ - "misaligned read from stack", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4), - BPF_EXIT_INSN(), - }, - .errstr = "misaligned stack access", - .result = REJECT, -}, -- cgit v1.2.3-70-g09d2 From 7605f94b3492328f37815c9b5749ffba5c76da84 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:50 +0200 Subject: selftests/bpf: verifier/bounds_deduction.c converted to inline assembly Test verifier/bounds_deduction.c automatically converted to use inline assembly. 
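The conversion is the same mechanical transformation in every patch of this series: each legacy struct entry (a quoted name, an .insns[] array of BPF_* instruction macros, and expectation fields such as .errstr and .result) becomes a standalone __naked C function whose body is BPF inline assembly, with the expectations expressed as annotations from bpf_misc.h. As a compact before/after illustration, here is the first test of this file shown side by side (nothing new, both halves appear in the diff below):

    /* old: tools/testing/selftests/bpf/verifier/bounds_deduction.c */
    {
        "check deducing bounds from const, 1",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 1),
        BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
        BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
        BPF_EXIT_INSN(),
        },
        .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "R0 tried to subtract pointer from scalar",
        .result = REJECT,
    },

    /* new: tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c */
    SEC("socket")
    __description("check deducing bounds from const, 1")
    __failure __msg("R0 tried to subtract pointer from scalar")
    __msg_unpriv("R1 has pointer with unsupported alu operation")
    __naked void deducing_bounds_from_const_1(void)
    {
        asm volatile ("                                 \
        r0 = 1;                                         \
        if r0 s>= 1 goto l0_%=;                         \
    l0_%=:  r0 -= r1;                                   \
        exit;                                           \
    "   ::: __clobber_all);
    }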
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-10-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../bpf/progs/verifier_bounds_deduction.c | 171 +++++++++++++++++++++ .../selftests/bpf/verifier/bounds_deduction.c | 136 ---------------- 3 files changed, 173 insertions(+), 136 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c delete mode 100644 tools/testing/selftests/bpf/verifier/bounds_deduction.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 95a3151db052..a8cfef92ed64 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -6,6 +6,7 @@ #include "verifier_and.skel.h" #include "verifier_array_access.skel.h" #include "verifier_basic_stack.skel.h" +#include "verifier_bounds_deduction.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -34,3 +35,4 @@ static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_fac void test_verifier_and(void) { RUN(verifier_and); } void test_verifier_array_access(void) { RUN(verifier_array_access); } void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); } +void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); } diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c new file mode 100644 index 000000000000..c506afbdd936 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/bounds_deduction.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("socket") +__description("check deducing bounds from const, 1") +__failure __msg("R0 tried to subtract pointer from scalar") +__msg_unpriv("R1 has pointer with unsupported alu operation") +__naked void deducing_bounds_from_const_1(void) +{ + asm volatile (" \ + r0 = 1; \ + if r0 s>= 1 goto l0_%=; \ +l0_%=: r0 -= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 2") +__success __failure_unpriv +__msg_unpriv("R1 has pointer with unsupported alu operation") +__retval(1) +__naked void deducing_bounds_from_const_2(void) +{ + asm volatile (" \ + r0 = 1; \ + if r0 s>= 1 goto l0_%=; \ + exit; \ +l0_%=: if r0 s<= 1 goto l1_%=; \ + exit; \ +l1_%=: r1 -= r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 3") +__failure __msg("R0 tried to subtract pointer from scalar") +__msg_unpriv("R1 has pointer with unsupported alu operation") +__naked void deducing_bounds_from_const_3(void) +{ + asm volatile (" \ + r0 = 0; \ + if r0 s<= 0 goto l0_%=; \ +l0_%=: r0 -= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 4") +__success __failure_unpriv +__msg_unpriv("R6 has pointer with unsupported alu operation") +__retval(0) +__naked void deducing_bounds_from_const_4(void) +{ + asm volatile (" \ + r6 = r1; \ + r0 = 0; \ + if r0 s<= 0 goto l0_%=; \ + exit; \ +l0_%=: if r0 s>= 0 goto l1_%=; \ + exit; \ +l1_%=: r6 -= r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 5") +__failure 
__msg("R0 tried to subtract pointer from scalar") +__msg_unpriv("R1 has pointer with unsupported alu operation") +__naked void deducing_bounds_from_const_5(void) +{ + asm volatile (" \ + r0 = 0; \ + if r0 s>= 1 goto l0_%=; \ + r0 -= r1; \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 6") +__failure __msg("R0 tried to subtract pointer from scalar") +__msg_unpriv("R1 has pointer with unsupported alu operation") +__naked void deducing_bounds_from_const_6(void) +{ + asm volatile (" \ + r0 = 0; \ + if r0 s>= 0 goto l0_%=; \ + exit; \ +l0_%=: r0 -= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 7") +__failure __msg("dereference of modified ctx ptr") +__msg_unpriv("R1 has pointer with unsupported alu operation") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void deducing_bounds_from_const_7(void) +{ + asm volatile (" \ + r0 = %[__imm_0]; \ + if r0 s>= 0 goto l0_%=; \ +l0_%=: r1 -= r0; \ + r0 = *(u32*)(r1 + %[__sk_buff_mark]); \ + exit; \ +" : + : __imm_const(__imm_0, ~0), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 8") +__failure __msg("negative offset ctx ptr R1 off=-1 disallowed") +__msg_unpriv("R1 has pointer with unsupported alu operation") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void deducing_bounds_from_const_8(void) +{ + asm volatile (" \ + r0 = %[__imm_0]; \ + if r0 s>= 0 goto l0_%=; \ + r1 += r0; \ +l0_%=: r0 = *(u32*)(r1 + %[__sk_buff_mark]); \ + exit; \ +" : + : __imm_const(__imm_0, ~0), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 9") +__failure __msg("R0 tried to subtract pointer from scalar") +__msg_unpriv("R1 has pointer with unsupported alu operation") +__naked void deducing_bounds_from_const_9(void) +{ + asm volatile (" \ + r0 = 0; \ + if r0 s>= 0 goto l0_%=; \ +l0_%=: r0 -= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from const, 10") +__failure +__msg("math between ctx pointer and register with unbounded min value is not allowed") +__failure_unpriv +__naked void deducing_bounds_from_const_10(void) +{ + asm volatile (" \ + r0 = 0; \ + if r0 s<= 0 goto l0_%=; \ +l0_%=: /* Marks reg as unknown. 
*/ \ + r0 = -r0; \ + r0 -= r1; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/bounds_deduction.c b/tools/testing/selftests/bpf/verifier/bounds_deduction.c deleted file mode 100644 index 3931c481e30c..000000000000 --- a/tools/testing/selftests/bpf/verifier/bounds_deduction.c +++ /dev/null @@ -1,136 +0,0 @@ -{ - "check deducing bounds from const, 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 has pointer with unsupported alu operation", - .errstr = "R0 tried to subtract pointer from scalar", - .result = REJECT, -}, -{ - "check deducing bounds from const, 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 has pointer with unsupported alu operation", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = 1, -}, -{ - "check deducing bounds from const, 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 has pointer with unsupported alu operation", - .errstr = "R0 tried to subtract pointer from scalar", - .result = REJECT, -}, -{ - "check deducing bounds from const, 4", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R6 has pointer with unsupported alu operation", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "check deducing bounds from const, 5", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 has pointer with unsupported alu operation", - .errstr = "R0 tried to subtract pointer from scalar", - .result = REJECT, -}, -{ - "check deducing bounds from const, 6", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 has pointer with unsupported alu operation", - .errstr = "R0 tried to subtract pointer from scalar", - .result = REJECT, -}, -{ - "check deducing bounds from const, 7", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, ~0), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 has pointer with unsupported alu operation", - .errstr = "dereference of modified ctx ptr", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "check deducing bounds from const, 8", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, ~0), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 has pointer with unsupported alu operation", - .errstr = "negative offset ctx ptr R1 off=-1 disallowed", - 
.result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "check deducing bounds from const, 9", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 has pointer with unsupported alu operation", - .errstr = "R0 tried to subtract pointer from scalar", - .result = REJECT, -}, -{ - "check deducing bounds from const, 10", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0), - /* Marks reg as unknown. */ - BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr = "math between ctx pointer and register with unbounded min value is not allowed", - .result = REJECT, -}, -- cgit v1.2.3-70-g09d2 From b14a702afd0d2da746294ed6070668b839a77793 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:51 +0200 Subject: selftests/bpf: verifier/bounds_mix_sign_unsign.c converted to inline assembly Test verifier/bounds_mix_sign_unsign.c automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-11-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../bpf/progs/verifier_bounds_mix_sign_unsign.c | 554 +++++++++++++++++++++ .../bpf/verifier/bounds_mix_sign_unsign.c | 411 --------------- 3 files changed, 556 insertions(+), 411 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c delete mode 100644 tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index a8cfef92ed64..bbc39412fcd1 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -7,6 +7,7 @@ #include "verifier_array_access.skel.h" #include "verifier_basic_stack.skel.h" #include "verifier_bounds_deduction.skel.h" +#include "verifier_bounds_mix_sign_unsign.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -36,3 +37,4 @@ void test_verifier_and(void) { RUN(verifier_and); } void test_verifier_array_access(void) { RUN(verifier_array_access); } void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); } void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); } +void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); } diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c b/tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c new file mode 100644 index 000000000000..91a66357896a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c @@ -0,0 +1,554 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c */ + +#include +#include +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("bounds checks mixing signed and unsigned, positive bounds") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_positive_bounds(void) +{ + asm volatile (" \ + call 
%[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = 2; \ + if r2 >= r1 goto l0_%=; \ + if r1 s> 4 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void checks_mixing_signed_and_unsigned(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + if r1 > r2 goto l0_%=; \ + if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 2") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_2(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + if r1 > r2 goto l0_%=; \ + r8 = 0; \ + r8 += r1; \ + if r8 s> 1 goto l0_%=; \ + r0 += r8; \ + r0 = 0; \ + *(u8*)(r8 + 0) = r0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 3") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_3(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + if r1 > r2 goto l0_%=; \ + r8 = r1; \ + if r8 s> 1 goto l0_%=; \ + r0 += r8; \ + r0 = 0; \ + *(u8*)(r8 + 0) = r0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 4") +__success __success_unpriv __retval(0) +__naked void signed_and_unsigned_variant_4(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = 1; \ + r1 &= r2; \ + if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 5") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void 
signed_and_unsigned_variant_5(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + if r1 > r2 goto l0_%=; \ + if r1 s> 1 goto l0_%=; \ + r0 += 4; \ + r0 -= r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 6") +__failure __msg("R4 min value is negative, either use unsigned") +__failure_unpriv +__naked void signed_and_unsigned_variant_6(void) +{ + asm volatile (" \ + r9 = r1; \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = r9; \ + r2 = 0; \ + r3 = r10; \ + r3 += -512; \ + r4 = *(u64*)(r10 - 16); \ + r6 = -1; \ + if r4 > r6 goto l0_%=; \ + if r4 s> 1 goto l0_%=; \ + r4 += 1; \ + r5 = 0; \ + r6 = 0; \ + *(u16*)(r10 - 512) = r6; \ + call %[bpf_skb_load_bytes]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 7") +__success __success_unpriv __retval(0) +__naked void signed_and_unsigned_variant_7(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = %[__imm_0]; \ + if r1 > r2 goto l0_%=; \ + if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__imm_0, 1024 * 1024 * 1024) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 8") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_8(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + if r2 > r1 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 9") +__success __success_unpriv __retval(0) +__naked void signed_and_unsigned_variant_9(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -9223372036854775808ULL ll; \ + if r2 > r1 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing 
signed and unsigned, variant 10") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_10(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = 0; \ + if r2 > r1 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 11") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_11(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + if r2 >= r1 goto l1_%=; \ + /* Dead branch. */ \ + r0 = 0; \ + exit; \ +l1_%=: if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 12") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_12(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -6; \ + if r2 >= r1 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: if r1 s> 1 goto l0_%=; \ + r0 += r1; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 13") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_13(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = 2; \ + if r2 >= r1 goto l0_%=; \ + r7 = 1; \ + if r7 s> 0 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +l1_%=: r7 += r1; \ + if r7 s> 4 goto l2_%=; \ + r0 += r7; \ + r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 14") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_14(void) +{ + asm volatile (" \ + r9 = *(u32*)(r1 + %[__sk_buff_mark]); \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -1; \ + r8 = 2; \ + if 
r9 == 42 goto l1_%=; \ + if r8 s> r1 goto l2_%=; \ +l3_%=: if r1 s> 1 goto l2_%=; \ + r0 += r1; \ +l0_%=: r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ +l2_%=: r0 = 0; \ + exit; \ +l1_%=: if r1 > r2 goto l2_%=; \ + goto l3_%=; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks mixing signed and unsigned, variant 15") +__failure __msg("unbounded min value") +__failure_unpriv +__naked void signed_and_unsigned_variant_15(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64*)(r10 - 16) = r0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r10 - 16); \ + r2 = -6; \ + if r2 >= r1 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +l1_%=: r0 += r1; \ + if r0 > 1 goto l2_%=; \ + r0 = 0; \ + exit; \ +l2_%=: r1 = 0; \ + *(u8*)(r0 + 0) = r1; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c b/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c deleted file mode 100644 index bf82b923c5fe..000000000000 --- a/tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c +++ /dev/null @@ -1,411 +0,0 @@ -{ - "bounds checks mixing signed and unsigned, positive bounds", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, 2), - BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "unbounded min value", - .result = REJECT, -}, -{ - "bounds checks mixing signed and unsigned", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "unbounded min value", - .result = REJECT, -}, -{ - "bounds checks mixing signed and unsigned, variant 2", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 
BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5), - BPF_MOV64_IMM(BPF_REG_8, 0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), - BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "unbounded min value", - .result = REJECT, -}, -{ - "bounds checks mixing signed and unsigned, variant 3", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), - BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "unbounded min value", - .result = REJECT, -}, -{ - "bounds checks mixing signed and unsigned, variant 4", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .result = ACCEPT, -}, -{ - "bounds checks mixing signed and unsigned, variant 5", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "unbounded min value", - .result = REJECT, -}, -{ - "bounds checks mixing signed and unsigned, variant 6", - .insns = { - BPF_MOV64_REG(BPF_REG_9, BPF_REG_1), - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_6, -1), - BPF_JMP_REG(BPF_JGT, 
BPF_REG_4, BPF_REG_6, 5), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R4 min value is negative, either use unsigned", - .result = REJECT, -}, -{ - "bounds checks mixing signed and unsigned, variant 7", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .result = ACCEPT, -}, -{ - "bounds checks mixing signed and unsigned, variant 8", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "unbounded min value", - .result = REJECT, -}, -{ - "bounds checks mixing signed and unsigned, variant 9", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .result = ACCEPT, -}, -{ - "bounds checks mixing signed and unsigned, variant 10", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - 
BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "unbounded min value", - .result = REJECT, -}, -{ - "bounds checks mixing signed and unsigned, variant 11", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), - /* Dead branch. */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "unbounded min value", - .result = REJECT, -}, -{ - "bounds checks mixing signed and unsigned, variant 12", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -6), - BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "unbounded min value", - .result = REJECT, -}, -{ - "bounds checks mixing signed and unsigned, variant 13", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, 2), - BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), - BPF_MOV64_IMM(BPF_REG_7, 1), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "unbounded min value", - .result = REJECT, -}, -{ - "bounds checks mixing signed and unsigned, variant 14", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - 
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_MOV64_IMM(BPF_REG_8, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6), - BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3), - BPF_JMP_IMM(BPF_JA, 0, 0, -7), - }, - .fixup_map_hash_8b = { 6 }, - .errstr = "unbounded min value", - .result = REJECT, -}, -{ - "bounds checks mixing signed and unsigned, variant 15", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, -6), - BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "unbounded min value", - .result = REJECT, -}, -- cgit v1.2.3-70-g09d2 From 2f2047c22cda4fbbe6bb889cc6c5450cd90688f8 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:52 +0200 Subject: selftests/bpf: verifier/cfg.c converted to inline assembly Test verifier/cfg.c automatically converted to use inline assembly. 
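Hooking a converted test into the runner is the same two-line recipe in every patch of this series, visible in the prog_tests/verifier.c hunk below: include the generated skeleton header and add a one-line entry point that hands the skeleton to the shared runner through RUN(). A sketch with a placeholder name ("verifier_example" is illustrative only, not a file added by this series):

    #include "verifier_example.skel.h"  /* skeleton generated from progs/verifier_example.c at build time */

    void test_verifier_example(void) { RUN(verifier_example); }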
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-12-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + tools/testing/selftests/bpf/progs/verifier_cfg.c | 100 ++++++++++++++++++++++ tools/testing/selftests/bpf/verifier/cfg.c | 73 ---------------- 3 files changed, 102 insertions(+), 73 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_cfg.c delete mode 100644 tools/testing/selftests/bpf/verifier/cfg.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index bbc39412fcd1..46182abecabb 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -8,6 +8,7 @@ #include "verifier_basic_stack.skel.h" #include "verifier_bounds_deduction.skel.h" #include "verifier_bounds_mix_sign_unsign.skel.h" +#include "verifier_cfg.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -38,3 +39,4 @@ void test_verifier_array_access(void) { RUN(verifier_array_access); } void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); } void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); } void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); } +void test_verifier_cfg(void) { RUN(verifier_cfg); } diff --git a/tools/testing/selftests/bpf/progs/verifier_cfg.c b/tools/testing/selftests/bpf/progs/verifier_cfg.c new file mode 100644 index 000000000000..df7697b94007 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_cfg.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/cfg.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("socket") +__description("unreachable") +__failure __msg("unreachable") +__failure_unpriv +__naked void unreachable(void) +{ + asm volatile (" \ + exit; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unreachable2") +__failure __msg("unreachable") +__failure_unpriv +__naked void unreachable2(void) +{ + asm volatile (" \ + goto l0_%=; \ + goto l0_%=; \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("out of range jump") +__failure __msg("jump out of range") +__failure_unpriv +__naked void out_of_range_jump(void) +{ + asm volatile (" \ + goto l0_%=; \ + exit; \ +l0_%=: \ +" ::: __clobber_all); +} + +SEC("socket") +__description("out of range jump2") +__failure __msg("jump out of range") +__failure_unpriv +__naked void out_of_range_jump2(void) +{ + asm volatile (" \ + goto -2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("loop (back-edge)") +__failure __msg("unreachable insn 1") +__msg_unpriv("back-edge") +__naked void loop_back_edge(void) +{ + asm volatile (" \ +l0_%=: goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("loop2 (back-edge)") +__failure __msg("unreachable insn 4") +__msg_unpriv("back-edge") +__naked void loop2_back_edge(void) +{ + asm volatile (" \ +l0_%=: r1 = r0; \ + r2 = r0; \ + r3 = r0; \ + goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("conditional loop") +__failure __msg("infinite loop detected") +__msg_unpriv("back-edge") +__naked void conditional_loop(void) +{ + asm volatile (" \ + r0 = r1; \ +l0_%=: r2 = r0; \ + r3 = r0; \ + if r1 == 0 goto l0_%=; \ + exit; \ +" ::: 
__clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/cfg.c b/tools/testing/selftests/bpf/verifier/cfg.c deleted file mode 100644 index 4eb76ed739ce..000000000000 --- a/tools/testing/selftests/bpf/verifier/cfg.c +++ /dev/null @@ -1,73 +0,0 @@ -{ - "unreachable", - .insns = { - BPF_EXIT_INSN(), - BPF_EXIT_INSN(), - }, - .errstr = "unreachable", - .result = REJECT, -}, -{ - "unreachable2", - .insns = { - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "unreachable", - .result = REJECT, -}, -{ - "out of range jump", - .insns = { - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_EXIT_INSN(), - }, - .errstr = "jump out of range", - .result = REJECT, -}, -{ - "out of range jump2", - .insns = { - BPF_JMP_IMM(BPF_JA, 0, 0, -2), - BPF_EXIT_INSN(), - }, - .errstr = "jump out of range", - .result = REJECT, -}, -{ - "loop (back-edge)", - .insns = { - BPF_JMP_IMM(BPF_JA, 0, 0, -1), - BPF_EXIT_INSN(), - }, - .errstr = "unreachable insn 1", - .errstr_unpriv = "back-edge", - .result = REJECT, -}, -{ - "loop2 (back-edge)", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_JMP_IMM(BPF_JA, 0, 0, -4), - BPF_EXIT_INSN(), - }, - .errstr = "unreachable insn 4", - .errstr_unpriv = "back-edge", - .result = REJECT, -}, -{ - "conditional loop", - .insns = { - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3), - BPF_EXIT_INSN(), - }, - .errstr = "infinite loop detected", - .errstr_unpriv = "back-edge", - .result = REJECT, -}, -- cgit v1.2.3-70-g09d2 From 047687a7f494d45198f112b51e72228aa054732c Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:53 +0200 Subject: selftests/bpf: verifier/cgroup_inv_retcode.c converted to inline assembly Test verifier/cgroup_inv_retcode.c automatically converted to use inline assembly. 
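For reference, the expectation annotations used in the converted programs correspond directly to the fields of the legacy entries they replace. Comparing the removed and added halves of the diffs in this series gives the following mapping (tests without a .prog_type override become SEC("socket")):

    .result = ACCEPT / REJECT                    ->  __success / __failure
    .result_unpriv = REJECT                      ->  __failure_unpriv (tests that must also pass unprivileged get __success_unpriv)
    .errstr = "..."                              ->  __msg("...")
    .errstr_unpriv = "..."                       ->  __msg_unpriv("...")
    .retval = N                                  ->  __retval(N)
    .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS  ->  __flag(BPF_F_ANY_ALIGNMENT)
    .prog_type = BPF_PROG_TYPE_CGROUP_SOCK       ->  SEC("cgroup/sock")
    .fixup_map_hash_8b = { N }                   ->  a map_hash_8b map declared in the program and loaded via "r1 = %[map_hash_8b] ll"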
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-13-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../bpf/progs/verifier_cgroup_inv_retcode.c | 89 ++++++++++++++++++++++ .../selftests/bpf/verifier/cgroup_inv_retcode.c | 72 ----------------- 3 files changed, 91 insertions(+), 72 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c delete mode 100644 tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 46182abecabb..b138c9894abb 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -9,6 +9,7 @@ #include "verifier_bounds_deduction.skel.h" #include "verifier_bounds_mix_sign_unsign.skel.h" #include "verifier_cfg.skel.h" +#include "verifier_cgroup_inv_retcode.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -40,3 +41,4 @@ void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); } void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); } void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); } void test_verifier_cfg(void) { RUN(verifier_cfg); } +void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); } diff --git a/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c b/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c new file mode 100644 index 000000000000..d6c4a7f3f790 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("cgroup/sock") +__description("bpf_exit with invalid return code. test1") +__failure __msg("R0 has value (0x0; 0xffffffff)") +__naked void with_invalid_return_code_test1(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("cgroup/sock") +__description("bpf_exit with invalid return code. test2") +__success +__naked void with_invalid_return_code_test2(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + 0); \ + r0 &= 1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("cgroup/sock") +__description("bpf_exit with invalid return code. test3") +__failure __msg("R0 has value (0x0; 0x3)") +__naked void with_invalid_return_code_test3(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + 0); \ + r0 &= 3; \ + exit; \ +" ::: __clobber_all); +} + +SEC("cgroup/sock") +__description("bpf_exit with invalid return code. test4") +__success +__naked void with_invalid_return_code_test4(void) +{ + asm volatile (" \ + r0 = 1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("cgroup/sock") +__description("bpf_exit with invalid return code. test5") +__failure __msg("R0 has value (0x2; 0x0)") +__naked void with_invalid_return_code_test5(void) +{ + asm volatile (" \ + r0 = 2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("cgroup/sock") +__description("bpf_exit with invalid return code. 
test6") +__failure __msg("R0 is not a known value (ctx)") +__naked void with_invalid_return_code_test6(void) +{ + asm volatile (" \ + r0 = r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("cgroup/sock") +__description("bpf_exit with invalid return code. test7") +__failure __msg("R0 has unknown scalar value") +__naked void with_invalid_return_code_test7(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + 0); \ + r2 = *(u32*)(r1 + 4); \ + r0 *= r2; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c b/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c deleted file mode 100644 index 6d65fe3e7321..000000000000 --- a/tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c +++ /dev/null @@ -1,72 +0,0 @@ -{ - "bpf_exit with invalid return code. test1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R0 has value (0x0; 0xffffffff)", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, -}, -{ - "bpf_exit with invalid return code. test2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, -}, -{ - "bpf_exit with invalid return code. test3", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3), - BPF_EXIT_INSN(), - }, - .errstr = "R0 has value (0x0; 0x3)", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, -}, -{ - "bpf_exit with invalid return code. test4", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, -}, -{ - "bpf_exit with invalid return code. test5", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .errstr = "R0 has value (0x2; 0x0)", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, -}, -{ - "bpf_exit with invalid return code. test6", - .insns = { - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr = "R0 is not a known value (ctx)", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, -}, -{ - "bpf_exit with invalid return code. test7", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4), - BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .errstr = "R0 has unknown scalar value", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, -}, -- cgit v1.2.3-70-g09d2 From b1b6372535c0cc0cce4870b07a0938309f3a5d37 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:54 +0200 Subject: selftests/bpf: verifier/cgroup_skb.c converted to inline assembly Test verifier/cgroup_skb.c automatically converted to use inline assembly. 
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-14-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_cgroup_skb.c | 227 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/cgroup_skb.c | 197 ------------------ 3 files changed, 229 insertions(+), 197 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c delete mode 100644 tools/testing/selftests/bpf/verifier/cgroup_skb.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index b138c9894abb..53e41af90821 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -10,6 +10,7 @@ #include "verifier_bounds_mix_sign_unsign.skel.h" #include "verifier_cfg.skel.h" #include "verifier_cgroup_inv_retcode.skel.h" +#include "verifier_cgroup_skb.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -42,3 +43,4 @@ void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); } void test_verifier_cfg(void) { RUN(verifier_cfg); } void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); } +void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); } diff --git a/tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c b/tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c new file mode 100644 index 000000000000..5ee3d349d6d0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/cgroup_skb.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("cgroup/skb") +__description("direct packet read test#1 for CGROUP_SKB") +__success __failure_unpriv +__msg_unpriv("invalid bpf_context access off=76 size=4") +__retval(0) +__naked void test_1_for_cgroup_skb(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r4 = *(u32*)(r1 + %[__sk_buff_len]); \ + r5 = *(u32*)(r1 + %[__sk_buff_pkt_type]); \ + r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ + *(u32*)(r1 + %[__sk_buff_mark]) = r6; \ + r7 = *(u32*)(r1 + %[__sk_buff_queue_mapping]); \ + r8 = *(u32*)(r1 + %[__sk_buff_protocol]); \ + r9 = *(u32*)(r1 + %[__sk_buff_vlan_present]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)), + __imm_const(__sk_buff_protocol, offsetof(struct __sk_buff, protocol)), + __imm_const(__sk_buff_queue_mapping, offsetof(struct __sk_buff, queue_mapping)), + __imm_const(__sk_buff_vlan_present, offsetof(struct __sk_buff, vlan_present)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("direct packet read test#2 for CGROUP_SKB") +__success __success_unpriv __retval(0) +__naked void test_2_for_cgroup_skb(void) +{ + asm volatile (" \ + r4 = *(u32*)(r1 + %[__sk_buff_vlan_tci]); \ + r5 = *(u32*)(r1 + 
%[__sk_buff_vlan_proto]); \ + r6 = *(u32*)(r1 + %[__sk_buff_priority]); \ + *(u32*)(r1 + %[__sk_buff_priority]) = r6; \ + r7 = *(u32*)(r1 + %[__sk_buff_ingress_ifindex]);\ + r8 = *(u32*)(r1 + %[__sk_buff_tc_index]); \ + r9 = *(u32*)(r1 + %[__sk_buff_hash]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_hash, offsetof(struct __sk_buff, hash)), + __imm_const(__sk_buff_ingress_ifindex, offsetof(struct __sk_buff, ingress_ifindex)), + __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)), + __imm_const(__sk_buff_tc_index, offsetof(struct __sk_buff, tc_index)), + __imm_const(__sk_buff_vlan_proto, offsetof(struct __sk_buff, vlan_proto)), + __imm_const(__sk_buff_vlan_tci, offsetof(struct __sk_buff, vlan_tci)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("direct packet read test#3 for CGROUP_SKB") +__success __success_unpriv __retval(0) +__naked void test_3_for_cgroup_skb(void) +{ + asm volatile (" \ + r4 = *(u32*)(r1 + %[__sk_buff_cb_0]); \ + r5 = *(u32*)(r1 + %[__sk_buff_cb_1]); \ + r6 = *(u32*)(r1 + %[__sk_buff_cb_2]); \ + r7 = *(u32*)(r1 + %[__sk_buff_cb_3]); \ + r8 = *(u32*)(r1 + %[__sk_buff_cb_4]); \ + r9 = *(u32*)(r1 + %[__sk_buff_napi_id]); \ + *(u32*)(r1 + %[__sk_buff_cb_0]) = r4; \ + *(u32*)(r1 + %[__sk_buff_cb_1]) = r5; \ + *(u32*)(r1 + %[__sk_buff_cb_2]) = r6; \ + *(u32*)(r1 + %[__sk_buff_cb_3]) = r7; \ + *(u32*)(r1 + %[__sk_buff_cb_4]) = r8; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])), + __imm_const(__sk_buff_cb_1, offsetof(struct __sk_buff, cb[1])), + __imm_const(__sk_buff_cb_2, offsetof(struct __sk_buff, cb[2])), + __imm_const(__sk_buff_cb_3, offsetof(struct __sk_buff, cb[3])), + __imm_const(__sk_buff_cb_4, offsetof(struct __sk_buff, cb[4])), + __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("direct packet read test#4 for CGROUP_SKB") +__success __success_unpriv __retval(0) +__naked void test_4_for_cgroup_skb(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_family]); \ + r3 = *(u32*)(r1 + %[__sk_buff_remote_ip4]); \ + r4 = *(u32*)(r1 + %[__sk_buff_local_ip4]); \ + r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_0]); \ + r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_1]); \ + r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_2]); \ + r5 = *(u32*)(r1 + %[__sk_buff_remote_ip6_3]); \ + r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_0]); \ + r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_1]); \ + r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_2]); \ + r6 = *(u32*)(r1 + %[__sk_buff_local_ip6_3]); \ + r7 = *(u32*)(r1 + %[__sk_buff_remote_port]); \ + r8 = *(u32*)(r1 + %[__sk_buff_local_port]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_family, offsetof(struct __sk_buff, family)), + __imm_const(__sk_buff_local_ip4, offsetof(struct __sk_buff, local_ip4)), + __imm_const(__sk_buff_local_ip6_0, offsetof(struct __sk_buff, local_ip6[0])), + __imm_const(__sk_buff_local_ip6_1, offsetof(struct __sk_buff, local_ip6[1])), + __imm_const(__sk_buff_local_ip6_2, offsetof(struct __sk_buff, local_ip6[2])), + __imm_const(__sk_buff_local_ip6_3, offsetof(struct __sk_buff, local_ip6[3])), + __imm_const(__sk_buff_local_port, offsetof(struct __sk_buff, local_port)), + __imm_const(__sk_buff_remote_ip4, offsetof(struct __sk_buff, remote_ip4)), + __imm_const(__sk_buff_remote_ip6_0, offsetof(struct __sk_buff, remote_ip6[0])), + __imm_const(__sk_buff_remote_ip6_1, offsetof(struct __sk_buff, remote_ip6[1])), + __imm_const(__sk_buff_remote_ip6_2, offsetof(struct __sk_buff, 
remote_ip6[2])), + __imm_const(__sk_buff_remote_ip6_3, offsetof(struct __sk_buff, remote_ip6[3])), + __imm_const(__sk_buff_remote_port, offsetof(struct __sk_buff, remote_port)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid access of tc_classid for CGROUP_SKB") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void tc_classid_for_cgroup_skb(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid access of data_meta for CGROUP_SKB") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void data_meta_for_cgroup_skb(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_data_meta]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data_meta, offsetof(struct __sk_buff, data_meta)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid access of flow_keys for CGROUP_SKB") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void flow_keys_for_cgroup_skb(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_flow_keys]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_flow_keys, offsetof(struct __sk_buff, flow_keys)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid write access to napi_id for CGROUP_SKB") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void napi_id_for_cgroup_skb(void) +{ + asm volatile (" \ + r9 = *(u32*)(r1 + %[__sk_buff_napi_id]); \ + *(u32*)(r1 + %[__sk_buff_napi_id]) = r9; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_napi_id, offsetof(struct __sk_buff, napi_id)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("write tstamp from CGROUP_SKB") +__success __failure_unpriv +__msg_unpriv("invalid bpf_context access off=152 size=8") +__retval(0) +__naked void write_tstamp_from_cgroup_skb(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r1 + %[__sk_buff_tstamp]) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("read tstamp from CGROUP_SKB") +__success __success_unpriv __retval(0) +__naked void read_tstamp_from_cgroup_skb(void) +{ + asm volatile (" \ + r0 = *(u64*)(r1 + %[__sk_buff_tstamp]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/cgroup_skb.c b/tools/testing/selftests/bpf/verifier/cgroup_skb.c deleted file mode 100644 index 52e4c03b076b..000000000000 --- a/tools/testing/selftests/bpf/verifier/cgroup_skb.c +++ /dev/null @@ -1,197 +0,0 @@ -{ - "direct packet read test#1 for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, - offsetof(struct __sk_buff, pkt_type)), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, - offsetof(struct __sk_buff, mark)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, queue_mapping)), - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, - 
offsetof(struct __sk_buff, protocol)), - BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, - offsetof(struct __sk_buff, vlan_present)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "invalid bpf_context access off=76 size=4", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "direct packet read test#2 for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct __sk_buff, vlan_tci)), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, - offsetof(struct __sk_buff, vlan_proto)), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, priority)), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, - offsetof(struct __sk_buff, priority)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, ingress_ifindex)), - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, - offsetof(struct __sk_buff, tc_index)), - BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, - offsetof(struct __sk_buff, hash)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "direct packet read test#3 for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, - offsetof(struct __sk_buff, cb[1])), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, cb[2])), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, cb[3])), - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, - offsetof(struct __sk_buff, cb[4])), - BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, - offsetof(struct __sk_buff, napi_id)), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4, - offsetof(struct __sk_buff, cb[0])), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5, - offsetof(struct __sk_buff, cb[1])), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, - offsetof(struct __sk_buff, cb[2])), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, - offsetof(struct __sk_buff, cb[3])), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8, - offsetof(struct __sk_buff, cb[4])), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "direct packet read test#4 for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, family)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip4)), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct __sk_buff, local_ip4)), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip6[0])), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip6[1])), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip6[2])), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1, - offsetof(struct __sk_buff, remote_ip6[3])), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, local_ip6[0])), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, local_ip6[1])), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, local_ip6[2])), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, local_ip6[3])), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, remote_port)), - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, - offsetof(struct __sk_buff, 
local_port)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "invalid access of tc_classid for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "invalid access of data_meta for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, data_meta)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "invalid access of flow_keys for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, flow_keys)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "invalid write access to napi_id for CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, - offsetof(struct __sk_buff, napi_id)), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9, - offsetof(struct __sk_buff, napi_id)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "write tstamp from CGROUP_SKB", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, tstamp)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "invalid bpf_context access off=152 size=8", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "read tstamp from CGROUP_SKB", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tstamp)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -- cgit v1.2.3-70-g09d2 From 8f16f3c07e460f81ff6f4d673c7edd413db19ffe Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:55 +0200 Subject: selftests/bpf: verifier/cgroup_storage.c converted to inline assembly Test verifier/cgroup_storage.c automatically converted to use inline assembly. 
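The interesting part of this conversion is how the old map fd fixups are expressed (fragment only, mirroring the generated file):

  /* .fixup_cgroup_storage = { N } becomes a map declared in the object
   * and loaded with a 64-bit immediate:
   */
  struct {
  	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
  	__uint(max_entries, 0);
  	__type(key, struct bpf_cgroup_storage_key);
  	__type(value, char[TEST_DATA_LEN]);
  } cgroup_storage SEC(".maps");

  	r1 = %[cgroup_storage] ll;   /* bound via __imm_addr(cgroup_storage) */

  /* tests that relied on a deliberately bogus fd keep the raw encoding: */
  	.8byte %[ld_map_fd];         /* __imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM,
  	                              *                       BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 1)) */
  	.8byte 0;
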
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-15-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_cgroup_storage.c | 308 +++++++++++++++++++++ .../selftests/bpf/verifier/cgroup_storage.c | 220 --------------- 3 files changed, 310 insertions(+), 220 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c delete mode 100644 tools/testing/selftests/bpf/verifier/cgroup_storage.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 53e41af90821..3b47620a1f42 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -11,6 +11,7 @@ #include "verifier_cfg.skel.h" #include "verifier_cgroup_inv_retcode.skel.h" #include "verifier_cgroup_skb.skel.h" +#include "verifier_cgroup_storage.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -44,3 +45,4 @@ void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_u void test_verifier_cfg(void) { RUN(verifier_cfg); } void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); } void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); } +void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); } diff --git a/tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c b/tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c new file mode 100644 index 000000000000..9a13f5c11ac7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/cgroup_storage.c */ + +#include +#include +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE); + __uint(max_entries, 0); + __type(key, struct bpf_cgroup_storage_key); + __type(value, char[TEST_DATA_LEN]); +} cgroup_storage SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); + __uint(max_entries, 0); + __type(key, struct bpf_cgroup_storage_key); + __type(value, char[64]); +} percpu_cgroup_storage SEC(".maps"); + +SEC("cgroup/skb") +__description("valid cgroup storage access") +__success __success_unpriv __retval(0) +__naked void valid_cgroup_storage_access(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid cgroup storage access 1") +__failure __msg("cannot pass map_type 1 into func bpf_get_local_storage") +__failure_unpriv +__naked void invalid_cgroup_storage_access_1(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid cgroup storage access 2") +__failure __msg("fd 1 is not pointing to valid bpf_map") 
+__failure_unpriv +__naked void invalid_cgroup_storage_access_2(void) +{ + asm volatile (" \ + r2 = 0; \ + .8byte %[ld_map_fd]; \ + .8byte 0; \ + call %[bpf_get_local_storage]; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 1)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid cgroup storage access 3") +__failure __msg("invalid access to map value, value_size=64 off=256 size=4") +__failure_unpriv +__naked void invalid_cgroup_storage_access_3(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 256); \ + r1 += 1; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid cgroup storage access 4") +__failure __msg("invalid access to map value, value_size=64 off=-2 size=4") +__failure_unpriv +__flag(BPF_F_ANY_ALIGNMENT) +__naked void invalid_cgroup_storage_access_4(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 - 2); \ + r0 = r1; \ + r1 += 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid cgroup storage access 5") +__failure __msg("get_local_storage() doesn't support non-zero flags") +__failure_unpriv +__naked void invalid_cgroup_storage_access_5(void) +{ + asm volatile (" \ + r2 = 7; \ + r1 = %[cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid cgroup storage access 6") +__failure __msg("get_local_storage() doesn't support non-zero flags") +__msg_unpriv("R2 leaks addr into helper function") +__naked void invalid_cgroup_storage_access_6(void) +{ + asm volatile (" \ + r2 = r1; \ + r1 = %[cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("valid per-cpu cgroup storage access") +__success __success_unpriv __retval(0) +__naked void per_cpu_cgroup_storage_access(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[percpu_cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(percpu_cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid per-cpu cgroup storage access 1") +__failure __msg("cannot pass map_type 1 into func bpf_get_local_storage") +__failure_unpriv +__naked void cpu_cgroup_storage_access_1(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid per-cpu cgroup storage access 2") +__failure __msg("fd 1 is not pointing to valid bpf_map") +__failure_unpriv +__naked void cpu_cgroup_storage_access_2(void) +{ + asm volatile (" \ + r2 = 0; \ + .8byte %[ld_map_fd]; \ + .8byte 0; \ + call %[bpf_get_local_storage]; \ + r0 &= 1; \ + exit; \ +" : + : 
__imm(bpf_get_local_storage), + __imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 1)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid per-cpu cgroup storage access 3") +__failure __msg("invalid access to map value, value_size=64 off=256 size=4") +__failure_unpriv +__naked void cpu_cgroup_storage_access_3(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[percpu_cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 256); \ + r1 += 1; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(percpu_cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid per-cpu cgroup storage access 4") +__failure __msg("invalid access to map value, value_size=64 off=-2 size=4") +__failure_unpriv +__flag(BPF_F_ANY_ALIGNMENT) +__naked void cpu_cgroup_storage_access_4(void) +{ + asm volatile (" \ + r2 = 0; \ + r1 = %[cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 - 2); \ + r0 = r1; \ + r1 += 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid per-cpu cgroup storage access 5") +__failure __msg("get_local_storage() doesn't support non-zero flags") +__failure_unpriv +__naked void cpu_cgroup_storage_access_5(void) +{ + asm volatile (" \ + r2 = 7; \ + r1 = %[percpu_cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(percpu_cgroup_storage) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("invalid per-cpu cgroup storage access 6") +__failure __msg("get_local_storage() doesn't support non-zero flags") +__msg_unpriv("R2 leaks addr into helper function") +__naked void cpu_cgroup_storage_access_6(void) +{ + asm volatile (" \ + r2 = r1; \ + r1 = %[percpu_cgroup_storage] ll; \ + call %[bpf_get_local_storage]; \ + r1 = *(u32*)(r0 + 0); \ + r0 = r1; \ + r0 &= 1; \ + exit; \ +" : + : __imm(bpf_get_local_storage), + __imm_addr(percpu_cgroup_storage) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/cgroup_storage.c b/tools/testing/selftests/bpf/verifier/cgroup_storage.c deleted file mode 100644 index 97057c0a1b8a..000000000000 --- a/tools/testing/selftests/bpf/verifier/cgroup_storage.c +++ /dev/null @@ -1,220 +0,0 @@ -{ - "valid cgroup storage access", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_cgroup_storage = { 1 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "invalid cgroup storage access 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1 }, - .result = REJECT, - .errstr = "cannot pass map_type 1 into func bpf_get_local_storage", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "invalid cgroup storage access 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 
0, 0, 0, BPF_FUNC_get_local_storage), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "fd 1 is not pointing to valid bpf_map", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "invalid cgroup storage access 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=64 off=256 size=4", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "invalid cgroup storage access 4", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_EXIT_INSN(), - }, - .fixup_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=64 off=-2 size=4", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "invalid cgroup storage access 5", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 7), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "get_local_storage() doesn't support non-zero flags", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "invalid cgroup storage access 6", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "get_local_storage() doesn't support non-zero flags", - .errstr_unpriv = "R2 leaks addr into helper function", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "valid per-cpu cgroup storage access", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_percpu_cgroup_storage = { 1 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "invalid per-cpu cgroup storage access 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1 }, - .result = REJECT, - .errstr = "cannot pass map_type 1 into func bpf_get_local_storage", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "invalid per-cpu cgroup storage access 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - 
BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "fd 1 is not pointing to valid bpf_map", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "invalid per-cpu cgroup storage access 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_percpu_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=64 off=256 size=4", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "invalid per-cpu cgroup storage access 4", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_EXIT_INSN(), - }, - .fixup_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=64 off=-2 size=4", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "invalid per-cpu cgroup storage access 5", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 7), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_percpu_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "get_local_storage() doesn't support non-zero flags", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "invalid per-cpu cgroup storage access 6", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_percpu_cgroup_storage = { 1 }, - .result = REJECT, - .errstr = "get_local_storage() doesn't support non-zero flags", - .errstr_unpriv = "R2 leaks addr into helper function", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -- cgit v1.2.3-70-g09d2 From a2777eaad5d9b7b06917d5bd3e786a1733e9dc3c Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:56 +0200 Subject: selftests/bpf: verifier/const_or.c converted to inline assembly Test verifier/const_or.c automatically converted to use inline assembly. 
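Helper calls are the only non-trivial bit in this file: BPF_EMIT_CALL() becomes a symbolic call bound through __imm (fragment only):

  	r3 = 0;
  	call %[bpf_probe_read_kernel];   /* was BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel) */
  	exit;
  	...
  	: __imm(bpf_probe_read_kernel)
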
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-16-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_const_or.c | 82 ++++++++++++++++++++++ tools/testing/selftests/bpf/verifier/const_or.c | 60 ---------------- 3 files changed, 84 insertions(+), 60 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_const_or.c delete mode 100644 tools/testing/selftests/bpf/verifier/const_or.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 3b47620a1f42..36fdede7dcab 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -12,6 +12,7 @@ #include "verifier_cgroup_inv_retcode.skel.h" #include "verifier_cgroup_skb.skel.h" #include "verifier_cgroup_storage.skel.h" +#include "verifier_const_or.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -46,3 +47,4 @@ void test_verifier_cfg(void) { RUN(verifier_cfg); } void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); } void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); } void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); } +void test_verifier_const_or(void) { RUN(verifier_const_or); } diff --git a/tools/testing/selftests/bpf/progs/verifier_const_or.c b/tools/testing/selftests/bpf/progs/verifier_const_or.c new file mode 100644 index 000000000000..ba8922b2eebd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_const_or.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/const_or.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("tracepoint") +__description("constant register |= constant should keep constant type") +__success +__naked void constant_should_keep_constant_type(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -48; \ + r2 = 34; \ + r2 |= 13; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("constant register |= constant should not bypass stack boundary checks") +__failure __msg("invalid indirect access to stack R1 off=-48 size=58") +__naked void not_bypass_stack_boundary_checks_1(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -48; \ + r2 = 34; \ + r2 |= 24; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("constant register |= constant register should keep constant type") +__success +__naked void register_should_keep_constant_type(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -48; \ + r2 = 34; \ + r4 = 13; \ + r2 |= r4; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("constant register |= constant register should not bypass stack boundary checks") +__failure __msg("invalid indirect access to stack R1 off=-48 size=58") +__naked void not_bypass_stack_boundary_checks_2(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -48; \ + r2 = 34; \ + r4 = 24; \ + r2 |= r4; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +char _license[] SEC("license") = 
"GPL"; diff --git a/tools/testing/selftests/bpf/verifier/const_or.c b/tools/testing/selftests/bpf/verifier/const_or.c deleted file mode 100644 index 0719b0ddec04..000000000000 --- a/tools/testing/selftests/bpf/verifier/const_or.c +++ /dev/null @@ -1,60 +0,0 @@ -{ - "constant register |= constant should keep constant type", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), - BPF_MOV64_IMM(BPF_REG_2, 34), - BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "constant register |= constant should not bypass stack boundary checks", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), - BPF_MOV64_IMM(BPF_REG_2, 34), - BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .errstr = "invalid indirect access to stack R1 off=-48 size=58", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "constant register |= constant register should keep constant type", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), - BPF_MOV64_IMM(BPF_REG_2, 34), - BPF_MOV64_IMM(BPF_REG_4, 13), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "constant register |= constant register should not bypass stack boundary checks", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48), - BPF_MOV64_IMM(BPF_REG_2, 34), - BPF_MOV64_IMM(BPF_REG_4, 24), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .errstr = "invalid indirect access to stack R1 off=-48 size=58", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -- cgit v1.2.3-70-g09d2 From a58475a98903c756a7f731cffdf20242ed17b9b0 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:57 +0200 Subject: selftests/bpf: verifier/ctx_sk_msg.c converted to inline assembly Test verifier/ctx_sk_msg.c automatically converted to use inline assembly. 
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-17-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_ctx_sk_msg.c | 228 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/ctx_sk_msg.c | 181 ---------------- 3 files changed, 230 insertions(+), 181 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c delete mode 100644 tools/testing/selftests/bpf/verifier/ctx_sk_msg.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 36fdede7dcab..29351c774ee2 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -13,6 +13,7 @@ #include "verifier_cgroup_skb.skel.h" #include "verifier_cgroup_storage.skel.h" #include "verifier_const_or.skel.h" +#include "verifier_ctx_sk_msg.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -48,3 +49,4 @@ void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode) void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); } void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); } void test_verifier_const_or(void) { RUN(verifier_const_or); } +void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); } diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c b/tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c new file mode 100644 index 000000000000..65edc89799f9 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/ctx_sk_msg.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("sk_msg") +__description("valid access family in SK_MSG") +__success +__naked void access_family_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_family]); \ + exit; \ +" : + : __imm_const(sk_msg_md_family, offsetof(struct sk_msg_md, family)) + : __clobber_all); +} + +SEC("sk_msg") +__description("valid access remote_ip4 in SK_MSG") +__success +__naked void remote_ip4_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip4]); \ + exit; \ +" : + : __imm_const(sk_msg_md_remote_ip4, offsetof(struct sk_msg_md, remote_ip4)) + : __clobber_all); +} + +SEC("sk_msg") +__description("valid access local_ip4 in SK_MSG") +__success +__naked void local_ip4_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_local_ip4]); \ + exit; \ +" : + : __imm_const(sk_msg_md_local_ip4, offsetof(struct sk_msg_md, local_ip4)) + : __clobber_all); +} + +SEC("sk_msg") +__description("valid access remote_port in SK_MSG") +__success +__naked void remote_port_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_remote_port]); \ + exit; \ +" : + : __imm_const(sk_msg_md_remote_port, offsetof(struct sk_msg_md, remote_port)) + : __clobber_all); +} + +SEC("sk_msg") +__description("valid access local_port in SK_MSG") +__success +__naked void local_port_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_local_port]); \ + exit; \ +" : + : __imm_const(sk_msg_md_local_port, offsetof(struct sk_msg_md, local_port)) + : __clobber_all); +} + +SEC("sk_skb") +__description("valid access remote_ip6 in SK_MSG") +__success +__naked void 
remote_ip6_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_0]); \ + r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_1]); \ + r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_2]); \ + r0 = *(u32*)(r1 + %[sk_msg_md_remote_ip6_3]); \ + exit; \ +" : + : __imm_const(sk_msg_md_remote_ip6_0, offsetof(struct sk_msg_md, remote_ip6[0])), + __imm_const(sk_msg_md_remote_ip6_1, offsetof(struct sk_msg_md, remote_ip6[1])), + __imm_const(sk_msg_md_remote_ip6_2, offsetof(struct sk_msg_md, remote_ip6[2])), + __imm_const(sk_msg_md_remote_ip6_3, offsetof(struct sk_msg_md, remote_ip6[3])) + : __clobber_all); +} + +SEC("sk_skb") +__description("valid access local_ip6 in SK_MSG") +__success +__naked void local_ip6_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_0]); \ + r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_1]); \ + r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_2]); \ + r0 = *(u32*)(r1 + %[sk_msg_md_local_ip6_3]); \ + exit; \ +" : + : __imm_const(sk_msg_md_local_ip6_0, offsetof(struct sk_msg_md, local_ip6[0])), + __imm_const(sk_msg_md_local_ip6_1, offsetof(struct sk_msg_md, local_ip6[1])), + __imm_const(sk_msg_md_local_ip6_2, offsetof(struct sk_msg_md, local_ip6[2])), + __imm_const(sk_msg_md_local_ip6_3, offsetof(struct sk_msg_md, local_ip6[3])) + : __clobber_all); +} + +SEC("sk_msg") +__description("valid access size in SK_MSG") +__success +__naked void access_size_in_sk_msg(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[sk_msg_md_size]); \ + exit; \ +" : + : __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size)) + : __clobber_all); +} + +SEC("sk_msg") +__description("invalid 64B read of size in SK_MSG") +__failure __msg("invalid bpf_context access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void of_size_in_sk_msg(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + %[sk_msg_md_size]); \ + exit; \ +" : + : __imm_const(sk_msg_md_size, offsetof(struct sk_msg_md, size)) + : __clobber_all); +} + +SEC("sk_msg") +__description("invalid read past end of SK_MSG") +__failure __msg("invalid bpf_context access") +__naked void past_end_of_sk_msg(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__imm_0]); \ + exit; \ +" : + : __imm_const(__imm_0, offsetof(struct sk_msg_md, size) + 4) + : __clobber_all); +} + +SEC("sk_msg") +__description("invalid read offset in SK_MSG") +__failure __msg("invalid bpf_context access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void read_offset_in_sk_msg(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__imm_0]); \ + exit; \ +" : + : __imm_const(__imm_0, offsetof(struct sk_msg_md, family) + 1) + : __clobber_all); +} + +SEC("sk_msg") +__description("direct packet read for SK_MSG") +__success +__naked void packet_read_for_sk_msg(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + %[sk_msg_md_data]); \ + r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)), + __imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end)) + : __clobber_all); +} + +SEC("sk_msg") +__description("direct packet write for SK_MSG") +__success +__naked void packet_write_for_sk_msg(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + %[sk_msg_md_data]); \ + r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)), + __imm_const(sk_msg_md_data_end, 
offsetof(struct sk_msg_md, data_end)) + : __clobber_all); +} + +SEC("sk_msg") +__description("overlapping checks for direct packet access SK_MSG") +__success +__naked void direct_packet_access_sk_msg(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + %[sk_msg_md_data]); \ + r3 = *(u64*)(r1 + %[sk_msg_md_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r1 = r2; \ + r1 += 6; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u16*)(r2 + 6); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(sk_msg_md_data, offsetof(struct sk_msg_md, data)), + __imm_const(sk_msg_md_data_end, offsetof(struct sk_msg_md, data_end)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c deleted file mode 100644 index c6c69220a569..000000000000 --- a/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c +++ /dev/null @@ -1,181 +0,0 @@ -{ - "valid access family in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, family)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, -}, -{ - "valid access remote_ip4 in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, remote_ip4)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, -}, -{ - "valid access local_ip4 in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, local_ip4)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, -}, -{ - "valid access remote_port in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, remote_port)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, -}, -{ - "valid access local_port in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, local_port)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, -}, -{ - "valid access remote_ip6 in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, remote_ip6[0])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, remote_ip6[1])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, remote_ip6[2])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, remote_ip6[3])), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, -}, -{ - "valid access local_ip6 in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, local_ip6[0])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, local_ip6[1])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, local_ip6[2])), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, local_ip6[3])), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_SKB, -}, -{ - "valid access size in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct sk_msg_md, size)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, -}, -{ - "invalid 64B read of size in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, - offsetof(struct sk_msg_md, size)), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SK_MSG, - .flags = 
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "invalid read past end of SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct sk_msg_md, size) + 4), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SK_MSG, -}, -{ - "invalid read offset in SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct sk_msg_md, family) + 1), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SK_MSG, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "direct packet read for SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, - offsetof(struct sk_msg_md, data)), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, - offsetof(struct sk_msg_md, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, -}, -{ - "direct packet write for SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, - offsetof(struct sk_msg_md, data)), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, - offsetof(struct sk_msg_md, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, -}, -{ - "overlapping checks for direct packet access SK_MSG", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, - offsetof(struct sk_msg_md, data)), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, - offsetof(struct sk_msg_md, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SK_MSG, -}, -- cgit v1.2.3-70-g09d2 From 84988478fb2c9068c6adf107eb630c48c00ff690 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:58 +0200 Subject: selftests/bpf: verifier/direct_stack_access_wraparound.c converted to inline assembly Test verifier/direct_stack_access_wraparound.c automatically converted to use inline assembly. 
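One small syntax note: 32-bit ALU operations use the "w" register names in the asm dialect (fragment only):

  	r1 = r10;
  	r1 += 0x7fffffff;     /* was BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff) */
  	w0 = 0;               /* was BPF_MOV32_IMM(BPF_REG_0, 0) */
  	*(u8*)(r1 + 0) = r0;
  	exit;
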
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-18-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../verifier_direct_stack_access_wraparound.c | 56 ++++++++++++++++++++++ .../bpf/verifier/direct_stack_access_wraparound.c | 40 ---------------- 3 files changed, 58 insertions(+), 40 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_direct_stack_access_wraparound.c delete mode 100644 tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 29351c774ee2..8c33b8792a0a 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -14,6 +14,7 @@ #include "verifier_cgroup_storage.skel.h" #include "verifier_const_or.skel.h" #include "verifier_ctx_sk_msg.skel.h" +#include "verifier_direct_stack_access_wraparound.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -50,3 +51,4 @@ void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); } void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); } void test_verifier_const_or(void) { RUN(verifier_const_or); } void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); } +void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); } diff --git a/tools/testing/selftests/bpf/progs/verifier_direct_stack_access_wraparound.c b/tools/testing/selftests/bpf/progs/verifier_direct_stack_access_wraparound.c new file mode 100644 index 000000000000..c538c6893552 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_direct_stack_access_wraparound.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("socket") +__description("direct stack access with 32-bit wraparound. test1") +__failure __msg("fp pointer and 2147483647") +__failure_unpriv +__naked void with_32_bit_wraparound_test1(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += 0x7fffffff; \ + r1 += 0x7fffffff; \ + w0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("direct stack access with 32-bit wraparound. test2") +__failure __msg("fp pointer and 1073741823") +__failure_unpriv +__naked void with_32_bit_wraparound_test2(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += 0x3fffffff; \ + r1 += 0x3fffffff; \ + w0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("direct stack access with 32-bit wraparound. 
test3") +__failure __msg("fp pointer offset 1073741822") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void with_32_bit_wraparound_test3(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += 0x1fffffff; \ + r1 += 0x1fffffff; \ + w0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c b/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c deleted file mode 100644 index 698e3779fdd2..000000000000 --- a/tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c +++ /dev/null @@ -1,40 +0,0 @@ -{ - "direct stack access with 32-bit wraparound. test1", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "fp pointer and 2147483647", - .result = REJECT -}, -{ - "direct stack access with 32-bit wraparound. test2", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "fp pointer and 1073741823", - .result = REJECT -}, -{ - "direct stack access with 32-bit wraparound. test3", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "fp pointer offset 1073741822", - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .result = REJECT -}, -- cgit v1.2.3-70-g09d2 From 01a0925531a4ec962c88ceccb464c1c1178e9d81 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:54:59 +0200 Subject: selftests/bpf: verifier/div0.c converted to inline assembly Test verifier/div0.c automatically converted to use inline assembly. 
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-19-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + tools/testing/selftests/bpf/progs/verifier_div0.c | 213 ++++++++++++++++++++++ tools/testing/selftests/bpf/verifier/div0.c | 184 ------------------- 3 files changed, 215 insertions(+), 184 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_div0.c delete mode 100644 tools/testing/selftests/bpf/verifier/div0.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 8c33b8792a0a..b172c41cdc61 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -15,6 +15,7 @@ #include "verifier_const_or.skel.h" #include "verifier_ctx_sk_msg.skel.h" #include "verifier_direct_stack_access_wraparound.skel.h" +#include "verifier_div0.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -52,3 +53,4 @@ void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); } void test_verifier_const_or(void) { RUN(verifier_const_or); } void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); } void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); } +void test_verifier_div0(void) { RUN(verifier_div0); } diff --git a/tools/testing/selftests/bpf/progs/verifier_div0.c b/tools/testing/selftests/bpf/progs/verifier_div0.c new file mode 100644 index 000000000000..cca5ea18fc28 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_div0.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/div0.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("socket") +__description("DIV32 by 0, zero check 1") +__success __success_unpriv __retval(42) +__naked void by_0_zero_check_1_1(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 0; \ + w2 = 1; \ + w2 /= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("DIV32 by 0, zero check 2") +__success __success_unpriv __retval(42) +__naked void by_0_zero_check_2_1(void) +{ + asm volatile (" \ + w0 = 42; \ + r1 = 0xffffffff00000000LL ll; \ + w2 = 1; \ + w2 /= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("DIV64 by 0, zero check") +__success __success_unpriv __retval(42) +__naked void div64_by_0_zero_check(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 0; \ + w2 = 1; \ + r2 /= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("MOD32 by 0, zero check 1") +__success __success_unpriv __retval(42) +__naked void by_0_zero_check_1_2(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 0; \ + w2 = 1; \ + w2 %%= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("MOD32 by 0, zero check 2") +__success __success_unpriv __retval(42) +__naked void by_0_zero_check_2_2(void) +{ + asm volatile (" \ + w0 = 42; \ + r1 = 0xffffffff00000000LL ll; \ + w2 = 1; \ + w2 %%= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("MOD64 by 0, zero check") +__success __success_unpriv __retval(42) +__naked void mod64_by_0_zero_check(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 0; \ + w2 = 1; \ + r2 %%= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("DIV32 by 0, zero check ok, cls") +__success __retval(8) +__naked void 
_0_zero_check_ok_cls_1(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 2; \ + w2 = 16; \ + w2 /= w1; \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("DIV32 by 0, zero check 1, cls") +__success __retval(0) +__naked void _0_zero_check_1_cls_1(void) +{ + asm volatile (" \ + w1 = 0; \ + w0 = 1; \ + w0 /= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("DIV32 by 0, zero check 2, cls") +__success __retval(0) +__naked void _0_zero_check_2_cls_1(void) +{ + asm volatile (" \ + r1 = 0xffffffff00000000LL ll; \ + w0 = 1; \ + w0 /= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("DIV64 by 0, zero check, cls") +__success __retval(0) +__naked void by_0_zero_check_cls(void) +{ + asm volatile (" \ + w1 = 0; \ + w0 = 1; \ + r0 /= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("MOD32 by 0, zero check ok, cls") +__success __retval(2) +__naked void _0_zero_check_ok_cls_2(void) +{ + asm volatile (" \ + w0 = 42; \ + w1 = 3; \ + w2 = 5; \ + w2 %%= w1; \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("MOD32 by 0, zero check 1, cls") +__success __retval(1) +__naked void _0_zero_check_1_cls_2(void) +{ + asm volatile (" \ + w1 = 0; \ + w0 = 1; \ + w0 %%= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("MOD32 by 0, zero check 2, cls") +__success __retval(1) +__naked void _0_zero_check_2_cls_2(void) +{ + asm volatile (" \ + r1 = 0xffffffff00000000LL ll; \ + w0 = 1; \ + w0 %%= w1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("MOD64 by 0, zero check 1, cls") +__success __retval(2) +__naked void _0_zero_check_1_cls_3(void) +{ + asm volatile (" \ + w1 = 0; \ + w0 = 2; \ + r0 %%= r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("MOD64 by 0, zero check 2, cls") +__success __retval(-1) +__naked void _0_zero_check_2_cls_3(void) +{ + asm volatile (" \ + w1 = 0; \ + w0 = -1; \ + r0 %%= r1; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/div0.c b/tools/testing/selftests/bpf/verifier/div0.c deleted file mode 100644 index 7685edfbcf71..000000000000 --- a/tools/testing/selftests/bpf/verifier/div0.c +++ /dev/null @@ -1,184 +0,0 @@ -{ - "DIV32 by 0, zero check 1", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_2, 1), - BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, -}, -{ - "DIV32 by 0, zero check 2", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL), - BPF_MOV32_IMM(BPF_REG_2, 1), - BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, -}, -{ - "DIV64 by 0, zero check", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_2, 1), - BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, -}, -{ - "MOD32 by 0, zero check 1", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_2, 1), - BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, -}, -{ - "MOD32 by 0, zero check 2", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL), - BPF_MOV32_IMM(BPF_REG_2, 1), - BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - }, - 
.result = ACCEPT, - .retval = 42, -}, -{ - "MOD64 by 0, zero check", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_2, 1), - BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, -}, -{ - "DIV32 by 0, zero check ok, cls", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_MOV32_IMM(BPF_REG_1, 2), - BPF_MOV32_IMM(BPF_REG_2, 16), - BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 8, -}, -{ - "DIV32 by 0, zero check 1, cls", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, -}, -{ - "DIV32 by 0, zero check 2, cls", - .insns = { - BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, -}, -{ - "DIV64 by 0, zero check, cls", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, -}, -{ - "MOD32 by 0, zero check ok, cls", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, 42), - BPF_MOV32_IMM(BPF_REG_1, 3), - BPF_MOV32_IMM(BPF_REG_2, 5), - BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 2, -}, -{ - "MOD32 by 0, zero check 1, cls", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, -}, -{ - "MOD32 by 0, zero check 2, cls", - .insns = { - BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, -}, -{ - "MOD64 by 0, zero check 1, cls", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_0, 2), - BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 2, -}, -{ - "MOD64 by 0, zero check 2, cls", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_0, -1), - BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = -1, -}, -- cgit v1.2.3-70-g09d2 From 9553de70a8412a07b16703449b4b4c4e5d37c388 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:00 +0200 Subject: selftests/bpf: verifier/div_overflow.c converted to inline assembly Test verifier/div_overflow.c automatically converted to use inline assembly. 
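As a small illustration of how compile-time constants are carried over (a sketch based on the diff below): where the old entries used INT_MIN/LLONG_MIN directly in BPF_MOV32_IMM()/BPF_LD_IMM64(), the inline-assembly version passes them in through __imm_const() and references them as named operands:

    Old ("DIV32 overflow, check 1"):
        BPF_MOV32_IMM(BPF_REG_1, -1),
        BPF_MOV32_IMM(BPF_REG_0, INT_MIN),
        BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
        BPF_EXIT_INSN(),

    New:
        asm volatile ("                 \
            w1 = -1;                    \
            w0 = %[int_min];            \
            w0 /= w1;                   \
            exit;                       \
        " :
          : __imm_const(int_min, INT_MIN)
          : __clobber_all);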
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-20-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_div_overflow.c | 144 +++++++++++++++++++++ .../testing/selftests/bpf/verifier/div_overflow.c | 110 ---------------- 3 files changed, 146 insertions(+), 110 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_div_overflow.c delete mode 100644 tools/testing/selftests/bpf/verifier/div_overflow.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index b172c41cdc61..d92211b4c3af 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -16,6 +16,7 @@ #include "verifier_ctx_sk_msg.skel.h" #include "verifier_direct_stack_access_wraparound.skel.h" #include "verifier_div0.skel.h" +#include "verifier_div_overflow.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -54,3 +55,4 @@ void test_verifier_const_or(void) { RUN(verifier_const_or); } void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); } void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); } void test_verifier_div0(void) { RUN(verifier_div0); } +void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); } diff --git a/tools/testing/selftests/bpf/progs/verifier_div_overflow.c b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c new file mode 100644 index 000000000000..458984da804c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_div_overflow.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/div_overflow.c */ + +#include +#include +#include +#include "bpf_misc.h" + +/* Just make sure that JITs used udiv/umod as otherwise we get + * an exception from INT_MIN/-1 overflow similarly as with div + * by zero. 
+ */ + +SEC("tc") +__description("DIV32 overflow, check 1") +__success __retval(0) +__naked void div32_overflow_check_1(void) +{ + asm volatile (" \ + w1 = -1; \ + w0 = %[int_min]; \ + w0 /= w1; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("tc") +__description("DIV32 overflow, check 2") +__success __retval(0) +__naked void div32_overflow_check_2(void) +{ + asm volatile (" \ + w0 = %[int_min]; \ + w0 /= -1; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("tc") +__description("DIV64 overflow, check 1") +__success __retval(0) +__naked void div64_overflow_check_1(void) +{ + asm volatile (" \ + r1 = -1; \ + r2 = %[llong_min] ll; \ + r2 /= r1; \ + w0 = 0; \ + if r0 == r2 goto l0_%=; \ + w0 = 1; \ +l0_%=: exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +SEC("tc") +__description("DIV64 overflow, check 2") +__success __retval(0) +__naked void div64_overflow_check_2(void) +{ + asm volatile (" \ + r1 = %[llong_min] ll; \ + r1 /= -1; \ + w0 = 0; \ + if r0 == r1 goto l0_%=; \ + w0 = 1; \ +l0_%=: exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +SEC("tc") +__description("MOD32 overflow, check 1") +__success __retval(INT_MIN) +__naked void mod32_overflow_check_1(void) +{ + asm volatile (" \ + w1 = -1; \ + w0 = %[int_min]; \ + w0 %%= w1; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("tc") +__description("MOD32 overflow, check 2") +__success __retval(INT_MIN) +__naked void mod32_overflow_check_2(void) +{ + asm volatile (" \ + w0 = %[int_min]; \ + w0 %%= -1; \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("tc") +__description("MOD64 overflow, check 1") +__success __retval(1) +__naked void mod64_overflow_check_1(void) +{ + asm volatile (" \ + r1 = -1; \ + r2 = %[llong_min] ll; \ + r3 = r2; \ + r2 %%= r1; \ + w0 = 0; \ + if r3 != r2 goto l0_%=; \ + w0 = 1; \ +l0_%=: exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +SEC("tc") +__description("MOD64 overflow, check 2") +__success __retval(1) +__naked void mod64_overflow_check_2(void) +{ + asm volatile (" \ + r2 = %[llong_min] ll; \ + r3 = r2; \ + r2 %%= -1; \ + w0 = 0; \ + if r3 != r2 goto l0_%=; \ + w0 = 1; \ +l0_%=: exit; \ +" : + : __imm_const(llong_min, LLONG_MIN) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/div_overflow.c b/tools/testing/selftests/bpf/verifier/div_overflow.c deleted file mode 100644 index acab4f00819f..000000000000 --- a/tools/testing/selftests/bpf/verifier/div_overflow.c +++ /dev/null @@ -1,110 +0,0 @@ -/* Just make sure that JITs used udiv/umod as otherwise we get - * an exception from INT_MIN/-1 overflow similarly as with div - * by zero. 
- */ -{ - "DIV32 overflow, check 1", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, -1), - BPF_MOV32_IMM(BPF_REG_0, INT_MIN), - BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, -}, -{ - "DIV32 overflow, check 2", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, INT_MIN), - BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, -1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, -}, -{ - "DIV64 overflow, check 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_LD_IMM64(BPF_REG_2, LLONG_MIN), - BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 1), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, -}, -{ - "DIV64 overflow, check 2", - .insns = { - BPF_LD_IMM64(BPF_REG_1, LLONG_MIN), - BPF_ALU64_IMM(BPF_DIV, BPF_REG_1, -1), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_1, 1), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 0, -}, -{ - "MOD32 overflow, check 1", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, -1), - BPF_MOV32_IMM(BPF_REG_0, INT_MIN), - BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = INT_MIN, -}, -{ - "MOD32 overflow, check 2", - .insns = { - BPF_MOV32_IMM(BPF_REG_0, INT_MIN), - BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, -1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = INT_MIN, -}, -{ - "MOD64 overflow, check 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_LD_IMM64(BPF_REG_2, LLONG_MIN), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, -}, -{ - "MOD64 overflow, check 2", - .insns = { - BPF_LD_IMM64(BPF_REG_2, LLONG_MIN), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_ALU64_IMM(BPF_MOD, BPF_REG_2, -1), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_JMP_REG(BPF_JNE, BPF_REG_3, BPF_REG_2, 1), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, -}, -- cgit v1.2.3-70-g09d2 From b37d776b431ec056075feeeaddcdee4512c522f6 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:01 +0200 Subject: selftests/bpf: verifier/helper_access_var_len.c converted to inline assembly Test verifier/helper_access_var_len.c automatically converted to use inline assembly. 
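A note on how map and helper references are expressed after the conversion (a sketch, see the diff below): the old .fixup_map_hash_48b/.fixup_map_ringbuf indices that patched BPF_LD_MAP_FD() instructions are replaced by ordinary map definitions in the program, loaded via '%[map_...] ll' and bound with __imm_addr(), while BPF_EMIT_CALL(BPF_FUNC_xxx) becomes 'call %[bpf_xxx]' bound with __imm():

    struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1);
        __type(key, long long);
        __type(value, struct test_val);
    } map_hash_48b SEC(".maps");

    asm volatile ("                         \
        ...                                 \
        r1 = %[map_hash_48b] ll;            \
        call %[bpf_map_lookup_elem];        \
        ...                                 \
    " :
      : __imm(bpf_map_lookup_elem),
        __imm_addr(map_hash_48b)
      : __clobber_all);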
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-21-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../bpf/progs/verifier_helper_access_var_len.c | 825 +++++++++++++++++++++ .../selftests/bpf/verifier/helper_access_var_len.c | 650 ---------------- 3 files changed, 827 insertions(+), 650 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c delete mode 100644 tools/testing/selftests/bpf/verifier/helper_access_var_len.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index d92211b4c3af..22d7e152c05e 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -17,6 +17,7 @@ #include "verifier_direct_stack_access_wraparound.skel.h" #include "verifier_div0.skel.h" #include "verifier_div_overflow.skel.h" +#include "verifier_helper_access_var_len.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -56,3 +57,4 @@ void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); } void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); } void test_verifier_div0(void) { RUN(verifier_div0); } void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); } +void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); } diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c b/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c new file mode 100644 index 000000000000..50c6b22606f6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c @@ -0,0 +1,825 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/helper_access_var_len.c */ + +#include +#include +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 4096); +} map_ringbuf SEC(".maps"); + +SEC("tracepoint") +__description("helper access to variable memory: stack, bitwise AND + JMP, correct bounds") +__success +__naked void bitwise_and_jmp_correct_bounds(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -64; \ + r0 = 0; \ + *(u64*)(r10 - 64) = r0; \ + *(u64*)(r10 - 56) = r0; \ + *(u64*)(r10 - 48) = r0; \ + *(u64*)(r10 - 40) = r0; \ + *(u64*)(r10 - 32) = r0; \ + *(u64*)(r10 - 24) = r0; \ + *(u64*)(r10 - 16) = r0; \ + *(u64*)(r10 - 8) = r0; \ + r2 = 16; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + r2 &= 64; \ + r4 = 0; \ + if r4 >= r2 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("socket") +__description("helper access to variable memory: stack, bitwise AND, zero included") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid indirect read from stack R2 off 
-64+0 size 64") +__retval(0) +__naked void stack_bitwise_and_zero_included(void) +{ + asm volatile (" \ + /* set max stack size */ \ + r6 = 0; \ + *(u64*)(r10 - 128) = r6; \ + /* set r3 to a random value */ \ + call %[bpf_get_prandom_u32]; \ + r3 = r0; \ + /* use bitwise AND to limit r3 range to [0, 64] */\ + r3 &= 64; \ + r1 = %[map_ringbuf] ll; \ + r2 = r10; \ + r2 += -64; \ + r4 = 0; \ + /* Call bpf_ringbuf_output(), it is one of a few helper functions with\ + * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\ + * For unpriv this should signal an error, because memory at &fp[-64] is\ + * not initialized. \ + */ \ + call %[bpf_ringbuf_output]; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_ringbuf_output), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: stack, bitwise AND + JMP, wrong max") +__failure __msg("invalid indirect access to stack R1 off=-64 size=65") +__naked void bitwise_and_jmp_wrong_max(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + 8); \ + r1 = r10; \ + r1 += -64; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + r2 &= 65; \ + r4 = 0; \ + if r4 >= r2 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: stack, JMP, correct bounds") +__success +__naked void memory_stack_jmp_correct_bounds(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -64; \ + r0 = 0; \ + *(u64*)(r10 - 64) = r0; \ + *(u64*)(r10 - 56) = r0; \ + *(u64*)(r10 - 48) = r0; \ + *(u64*)(r10 - 40) = r0; \ + *(u64*)(r10 - 32) = r0; \ + *(u64*)(r10 - 24) = r0; \ + *(u64*)(r10 - 16) = r0; \ + *(u64*)(r10 - 8) = r0; \ + r2 = 16; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + if r2 > 64 goto l0_%=; \ + r4 = 0; \ + if r4 >= r2 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: stack, JMP (signed), correct bounds") +__success +__naked void stack_jmp_signed_correct_bounds(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -64; \ + r0 = 0; \ + *(u64*)(r10 - 64) = r0; \ + *(u64*)(r10 - 56) = r0; \ + *(u64*)(r10 - 48) = r0; \ + *(u64*)(r10 - 40) = r0; \ + *(u64*)(r10 - 32) = r0; \ + *(u64*)(r10 - 24) = r0; \ + *(u64*)(r10 - 16) = r0; \ + *(u64*)(r10 - 8) = r0; \ + r2 = 16; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + if r2 s> 64 goto l0_%=; \ + r4 = 0; \ + if r4 s>= r2 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: stack, JMP, bounds + offset") +__failure __msg("invalid indirect access to stack R1 off=-64 size=65") +__naked void memory_stack_jmp_bounds_offset(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + 8); \ + r1 = r10; \ + r1 += -64; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + if r2 > 64 goto l0_%=; \ + r4 = 0; \ + if r4 >= r2 goto l0_%=; \ + r2 += 1; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: stack, JMP, wrong max") +__failure __msg("invalid indirect access to stack R1 off=-64 size=65") +__naked void 
memory_stack_jmp_wrong_max(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + 8); \ + r1 = r10; \ + r1 += -64; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + if r2 > 65 goto l0_%=; \ + r4 = 0; \ + if r4 >= r2 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: stack, JMP, no max check") +__failure +/* because max wasn't checked, signed min is negative */ +__msg("R2 min value is negative, either use unsigned or 'var &= const'") +__naked void stack_jmp_no_max_check(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + 8); \ + r1 = r10; \ + r1 += -64; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + r4 = 0; \ + if r4 >= r2 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("socket") +__description("helper access to variable memory: stack, JMP, no min check") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64") +__retval(0) +__naked void stack_jmp_no_min_check(void) +{ + asm volatile (" \ + /* set max stack size */ \ + r6 = 0; \ + *(u64*)(r10 - 128) = r6; \ + /* set r3 to a random value */ \ + call %[bpf_get_prandom_u32]; \ + r3 = r0; \ + /* use JMP to limit r3 range to [0, 64] */ \ + if r3 > 64 goto l0_%=; \ + r1 = %[map_ringbuf] ll; \ + r2 = r10; \ + r2 += -64; \ + r4 = 0; \ + /* Call bpf_ringbuf_output(), it is one of a few helper functions with\ + * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\ + * For unpriv this should signal an error, because memory at &fp[-64] is\ + * not initialized. 
\ + */ \ + call %[bpf_ringbuf_output]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_ringbuf_output), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: stack, JMP (signed), no min check") +__failure __msg("R2 min value is negative") +__naked void jmp_signed_no_min_check(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + 8); \ + r1 = r10; \ + r1 += -64; \ + *(u64*)(r1 - 128) = r2; \ + r2 = *(u64*)(r1 - 128); \ + if r2 s> 64 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: map, JMP, correct bounds") +__success +__naked void memory_map_jmp_correct_bounds(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = %[sizeof_test_val]; \ + *(u64*)(r10 - 128) = r2; \ + r2 = *(u64*)(r10 - 128); \ + if r2 s> %[sizeof_test_val] goto l1_%=; \ + r4 = 0; \ + if r4 s>= r2 goto l1_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(sizeof_test_val, sizeof(struct test_val)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: map, JMP, wrong max") +__failure __msg("invalid access to map value, value_size=48 off=0 size=49") +__naked void memory_map_jmp_wrong_max(void) +{ + asm volatile (" \ + r6 = *(u64*)(r1 + 8); \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = r6; \ + *(u64*)(r10 - 128) = r2; \ + r2 = *(u64*)(r10 - 128); \ + if r2 s> %[__imm_0] goto l1_%=; \ + r4 = 0; \ + if r4 s>= r2 goto l1_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) + 1) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: map adjusted, JMP, correct bounds") +__success +__naked void map_adjusted_jmp_correct_bounds(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += 20; \ + r2 = %[sizeof_test_val]; \ + *(u64*)(r10 - 128) = r2; \ + r2 = *(u64*)(r10 - 128); \ + if r2 s> %[__imm_0] goto l1_%=; \ + r4 = 0; \ + if r4 s>= r2 goto l1_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - 20), + __imm_const(sizeof_test_val, sizeof(struct test_val)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: map adjusted, JMP, wrong max") +__failure __msg("R1 min value is outside of the allowed memory range") +__naked void map_adjusted_jmp_wrong_max(void) +{ + asm volatile (" \ + r6 = *(u64*)(r1 + 8); \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 
goto l0_%=; \ + r1 = r0; \ + r1 += 20; \ + r2 = r6; \ + *(u64*)(r10 - 128) = r2; \ + r2 = *(u64*)(r10 - 128); \ + if r2 s> %[__imm_0] goto l1_%=; \ + r4 = 0; \ + if r4 s>= r2 goto l1_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - 19) + : __clobber_all); +} + +SEC("tc") +__description("helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)") +__success __retval(0) +__naked void ptr_to_mem_or_null_1(void) +{ + asm volatile (" \ + r1 = 0; \ + r2 = 0; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ + exit; \ +" : + : __imm(bpf_csum_diff) + : __clobber_all); +} + +SEC("tc") +__description("helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)") +__failure __msg("R1 type=scalar expected=fp") +__naked void ptr_to_mem_or_null_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + 0); \ + r1 = 0; \ + *(u64*)(r10 - 128) = r2; \ + r2 = *(u64*)(r10 - 128); \ + r2 &= 64; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ + exit; \ +" : + : __imm(bpf_csum_diff) + : __clobber_all); +} + +SEC("tc") +__description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)") +__success __retval(0) +__naked void ptr_to_mem_or_null_3(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -8; \ + r2 = 0; \ + *(u64*)(r1 + 0) = r2; \ + r2 &= 8; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ + exit; \ +" : + : __imm(bpf_csum_diff) + : __clobber_all); +} + +SEC("tc") +__description("helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)") +__success __retval(0) +__naked void ptr_to_mem_or_null_4(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = 0; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: exit; \ +" : + : __imm(bpf_csum_diff), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)") +__success __retval(0) +__naked void ptr_to_mem_or_null_5(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = *(u64*)(r0 + 0); \ + if r2 > 8 goto l0_%=; \ + r1 = r10; \ + r1 += -8; \ + *(u64*)(r1 + 0) = r2; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: exit; \ +" : + : __imm(bpf_csum_diff), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)") +__success __retval(0) +__naked void ptr_to_mem_or_null_6(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = *(u64*)(r0 + 0); \ + if r2 > 8 goto l0_%=; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: exit; \ +" : + : __imm(bpf_csum_diff), + __imm(bpf_map_lookup_elem), + 
__imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)") +__success __retval(0) +/* csum_diff of 64-byte packet */ +__flag(BPF_F_ANY_ALIGNMENT) +__naked void ptr_to_mem_or_null_7(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r6; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r1 = r6; \ + r2 = *(u64*)(r6 + 0); \ + if r2 > 8 goto l0_%=; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)") +__failure __msg("R1 type=scalar expected=fp") +__naked void ptr_to_mem_or_null_8(void) +{ + asm volatile (" \ + r1 = 0; \ + r2 = 0; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)") +__failure __msg("R1 type=scalar expected=fp") +__naked void ptr_to_mem_or_null_9(void) +{ + asm volatile (" \ + r1 = 0; \ + r2 = 1; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)") +__success +__naked void ptr_to_mem_or_null_10(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -8; \ + r2 = 0; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)") +__success +__naked void ptr_to_mem_or_null_11(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = 0; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)") +__success +__naked void ptr_to_mem_or_null_12(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = *(u64*)(r0 + 0); \ + if r2 > 8 goto l0_%=; \ + r1 = r10; \ + r1 += -8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)") +__success +__naked void ptr_to_mem_or_null_13(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 
= r0; \ + r2 = *(u64*)(r0 + 0); \ + if r2 > 8 goto l0_%=; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("helper access to variable memory: 8 bytes leak") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid indirect read from stack R2 off -64+32 size 64") +__retval(0) +__naked void variable_memory_8_bytes_leak(void) +{ + asm volatile (" \ + /* set max stack size */ \ + r6 = 0; \ + *(u64*)(r10 - 128) = r6; \ + /* set r3 to a random value */ \ + call %[bpf_get_prandom_u32]; \ + r3 = r0; \ + r1 = %[map_ringbuf] ll; \ + r2 = r10; \ + r2 += -64; \ + r0 = 0; \ + *(u64*)(r10 - 64) = r0; \ + *(u64*)(r10 - 56) = r0; \ + *(u64*)(r10 - 48) = r0; \ + *(u64*)(r10 - 40) = r0; \ + /* Note: fp[-32] left uninitialized */ \ + *(u64*)(r10 - 24) = r0; \ + *(u64*)(r10 - 16) = r0; \ + *(u64*)(r10 - 8) = r0; \ + /* Limit r3 range to [1, 64] */ \ + r3 &= 63; \ + r3 += 1; \ + r4 = 0; \ + /* Call bpf_ringbuf_output(), it is one of a few helper functions with\ + * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\ + * For unpriv this should signal an error, because memory region [1, 64]\ + * at &fp[-64] is not fully initialized. \ + */ \ + call %[bpf_ringbuf_output]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_ringbuf_output), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to variable memory: 8 bytes no leak (init memory)") +__success +__naked void bytes_no_leak_init_memory(void) +{ + asm volatile (" \ + r1 = r10; \ + r0 = 0; \ + r0 = 0; \ + *(u64*)(r10 - 64) = r0; \ + *(u64*)(r10 - 56) = r0; \ + *(u64*)(r10 - 48) = r0; \ + *(u64*)(r10 - 40) = r0; \ + *(u64*)(r10 - 32) = r0; \ + *(u64*)(r10 - 24) = r0; \ + *(u64*)(r10 - 16) = r0; \ + *(u64*)(r10 - 8) = r0; \ + r1 += -64; \ + r2 = 0; \ + r2 &= 32; \ + r2 += 32; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ + r1 = *(u64*)(r10 - 16); \ + exit; \ +" : + : __imm(bpf_probe_read_kernel) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c deleted file mode 100644 index 9c4885885aba..000000000000 --- a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c +++ /dev/null @@ -1,650 +0,0 @@ -{ - "helper access to variable memory: stack, bitwise AND + JMP, correct bounds", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_MOV64_IMM(BPF_REG_2, 16), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = 
ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: stack, bitwise AND, zero included", - .insns = { - /* set max stack size */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0), - /* set r3 to a random value */ - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - /* use bitwise AND to limit r3 range to [0, 64] */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 64), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), - BPF_MOV64_IMM(BPF_REG_4, 0), - /* Call bpf_ringbuf_output(), it is one of a few helper functions with - * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode. - * For unpriv this should signal an error, because memory at &fp[-64] is - * not initialized. - */ - BPF_EMIT_CALL(BPF_FUNC_ringbuf_output), - BPF_EXIT_INSN(), - }, - .fixup_map_ringbuf = { 4 }, - .errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64", - .result_unpriv = REJECT, - /* in privileged mode reads from uninitialized stack locations are permitted */ - .result = ACCEPT, -}, -{ - "helper access to variable memory: stack, bitwise AND + JMP, wrong max", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid indirect access to stack R1 off=-64 size=65", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: stack, JMP, correct bounds", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_MOV64_IMM(BPF_REG_2, 16), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: stack, JMP (signed), correct bounds", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_MOV64_IMM(BPF_REG_2, 16), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 
-128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: stack, JMP, bounds + offset", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid indirect access to stack R1 off=-64 size=65", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: stack, JMP, wrong max", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid indirect access to stack R1 off=-64 size=65", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: stack, JMP, no max check", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - /* because max wasn't checked, signed min is negative */ - .errstr = "R2 min value is negative, either use unsigned or 'var &= const'", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: stack, JMP, no min check", - .insns = { - /* set max stack size */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0), - /* set r3 to a random value */ - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - /* use JMP to limit r3 range to [0, 64] */ - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 64, 6), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), - BPF_MOV64_IMM(BPF_REG_4, 0), - /* Call bpf_ringbuf_output(), it is one of a few helper functions with - * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode. - * For unpriv this should signal an error, because memory at &fp[-64] is - * not initialized. 
- */ - BPF_EMIT_CALL(BPF_FUNC_ringbuf_output), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_ringbuf = { 4 }, - .errstr_unpriv = "invalid indirect read from stack R2 off -64+0 size 64", - .result_unpriv = REJECT, - /* in privileged mode reads from uninitialized stack locations are permitted */ - .result = ACCEPT, -}, -{ - "helper access to variable memory: stack, JMP (signed), no min check", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R2 min value is negative", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: map, JMP, correct bounds", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val), 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: map, JMP, wrong max", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) + 1, 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .errstr = "invalid access to map value, value_size=48 off=0 size=49", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: map adjusted, JMP, correct bounds", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 20, 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 
0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: map adjusted, JMP, wrong max", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 19, 4), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .errstr = "R1 min value is outside of the allowed memory range", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_csum_diff), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_csum_diff), - BPF_EXIT_INSN(), - }, - .errstr = "R1 type=scalar expected=fp", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_csum_diff), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_csum_diff), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer 
(ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_csum_diff), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_csum_diff), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_csum_diff), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 0 /* csum_diff of 64-byte packet */, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .errstr = "R1 type=scalar expected=fp", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .errstr = "R1 type=scalar expected=fp", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = 
BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to variable memory: 8 bytes leak", - .insns = { - /* set max stack size */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -128, 0), - /* set r3 to a random value */ - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), - /* Note: fp[-32] left uninitialized */ - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - /* Limit r3 range to [1, 64] */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 63), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 1), - BPF_MOV64_IMM(BPF_REG_4, 0), - /* Call bpf_ringbuf_output(), it is one of a few helper functions with - * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode. - * For unpriv this should signal an error, because memory region [1, 64] - * at &fp[-64] is not fully initialized. 
- */ - BPF_EMIT_CALL(BPF_FUNC_ringbuf_output), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_ringbuf = { 3 }, - .errstr_unpriv = "invalid indirect read from stack R2 off -64+32 size 64", - .result_unpriv = REJECT, - /* in privileged mode reads from uninitialized stack locations are permitted */ - .result = ACCEPT, -}, -{ - "helper access to variable memory: 8 bytes no leak (init memory)", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -- cgit v1.2.3-70-g09d2 From fb179fe69e6a7bf66232d72a77f53ec10c026ee7 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:02 +0200 Subject: selftests/bpf: verifier/helper_packet_access.c converted to inline assembly Test verifier/helper_packet_access.c automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-22-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../bpf/progs/verifier_helper_packet_access.c | 550 +++++++++++++++++++++ .../selftests/bpf/verifier/helper_packet_access.c | 460 ----------------- 3 files changed, 552 insertions(+), 460 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c delete mode 100644 tools/testing/selftests/bpf/verifier/helper_packet_access.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 22d7e152c05e..1cd162daf150 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -18,6 +18,7 @@ #include "verifier_div0.skel.h" #include "verifier_div_overflow.skel.h" #include "verifier_helper_access_var_len.skel.h" +#include "verifier_helper_packet_access.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -58,3 +59,4 @@ void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_st void test_verifier_div0(void) { RUN(verifier_div0); } void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); } void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); } +void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); } diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c new file mode 100644 index 000000000000..74f5f9cd153d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c @@ -0,0 +1,550 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from 
tools/testing/selftests/bpf/verifier/helper_packet_access.c */ + +#include +#include +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("xdp") +__description("helper access to packet: test1, valid packet_ptr range") +__success __retval(0) +__naked void test1_valid_packet_ptr_range(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ + r1 = %[map_hash_8b] ll; \ + r3 = r2; \ + r4 = 0; \ + call %[bpf_map_update_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_update_elem), + __imm_addr(map_hash_8b), + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("helper access to packet: test2, unchecked packet_ptr") +__failure __msg("invalid access to packet") +__naked void packet_test2_unchecked_packet_ptr(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)) + : __clobber_all); +} + +SEC("xdp") +__description("helper access to packet: test3, variable add") +__success __retval(0) +__naked void to_packet_test3_variable_add(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r4 = r2; \ + r4 += 8; \ + if r4 > r3 goto l0_%=; \ + r5 = *(u8*)(r2 + 0); \ + r4 = r2; \ + r4 += r5; \ + r5 = r4; \ + r5 += 8; \ + if r5 > r3 goto l0_%=; \ + r1 = %[map_hash_8b] ll; \ + r2 = r4; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("helper access to packet: test4, packet_ptr with bad range") +__failure __msg("invalid access to packet") +__naked void packet_ptr_with_bad_range_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r4 = r2; \ + r4 += 4; \ + if r4 > r3 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("helper access to packet: test5, packet_ptr with too short range") +__failure __msg("invalid access to packet") +__naked void ptr_with_too_short_range_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r2 += 1; \ + r4 = r2; \ + r4 += 7; \ + if r4 > r3 goto l0_%=; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test6, cls valid packet_ptr range") +__success __retval(0) +__naked 
void cls_valid_packet_ptr_range(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ + r1 = %[map_hash_8b] ll; \ + r3 = r2; \ + r4 = 0; \ + call %[bpf_map_update_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_update_elem), + __imm_addr(map_hash_8b), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test7, cls unchecked packet_ptr") +__failure __msg("invalid access to packet") +__naked void test7_cls_unchecked_packet_ptr(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test8, cls variable add") +__success __retval(0) +__naked void packet_test8_cls_variable_add(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r4 = r2; \ + r4 += 8; \ + if r4 > r3 goto l0_%=; \ + r5 = *(u8*)(r2 + 0); \ + r4 = r2; \ + r4 += r5; \ + r5 = r4; \ + r5 += 8; \ + if r5 > r3 goto l0_%=; \ + r1 = %[map_hash_8b] ll; \ + r2 = r4; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test9, cls packet_ptr with bad range") +__failure __msg("invalid access to packet") +__naked void packet_ptr_with_bad_range_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r4 = r2; \ + r4 += 4; \ + if r4 > r3 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test10, cls packet_ptr with too short range") +__failure __msg("invalid access to packet") +__naked void ptr_with_too_short_range_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r2 += 1; \ + r4 = r2; \ + r4 += 7; \ + if r4 > r3 goto l0_%=; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test11, cls unsuitable helper 1") +__failure __msg("helper access to the packet") +__naked void test11_cls_unsuitable_helper_1(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r3 = r6; \ + r3 += 7; \ + if r3 > r7 goto l0_%=; \ + r2 = 0; \ + r4 = 42; \ + r5 = 0; \ + call 
%[bpf_skb_store_bytes]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_skb_store_bytes), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test12, cls unsuitable helper 2") +__failure __msg("helper access to the packet") +__naked void test12_cls_unsuitable_helper_2(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r3 = r6; \ + r6 += 8; \ + if r6 > r7 goto l0_%=; \ + r2 = 0; \ + r4 = 4; \ + call %[bpf_skb_load_bytes]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_skb_load_bytes), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test13, cls helper ok") +__success __retval(0) +__naked void packet_test13_cls_helper_ok(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 = r6; \ + r2 = 4; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test14, cls helper ok sub") +__success __retval(0) +__naked void test14_cls_helper_ok_sub(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 -= 4; \ + r2 = 4; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test15, cls helper fail sub") +__failure __msg("invalid access to packet") +__naked void test15_cls_helper_fail_sub(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 -= 12; \ + r2 = 4; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test16, cls helper fail range 1") +__failure __msg("invalid access to packet") +__naked void cls_helper_fail_range_1(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 = r6; \ + r2 = 8; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test17, cls helper fail range 
2") +__failure __msg("R2 min value is negative") +__naked void cls_helper_fail_range_2(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 = r6; \ + r2 = -9; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test18, cls helper fail range 3") +__failure __msg("R2 min value is negative") +__naked void cls_helper_fail_range_3(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 = r6; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__imm_0, ~0), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test19, cls helper range zero") +__success __retval(0) +__naked void test19_cls_helper_range_zero(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 = r6; \ + r2 = 0; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test20, pkt end as input") +__failure __msg("R1 type=pkt_end expected=fp") +__naked void test20_pkt_end_as_input(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r1 = r7; \ + r2 = 4; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("helper access to packet: test21, wrong reg") +__failure __msg("invalid access to packet") +__naked void to_packet_test21_wrong_reg(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r6 += 1; \ + r1 = r6; \ + r1 += 7; \ + if r1 > r7 goto l0_%=; \ + r2 = 4; \ + r3 = 0; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_csum_diff]; \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_csum_diff), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/helper_packet_access.c b/tools/testing/selftests/bpf/verifier/helper_packet_access.c deleted file mode 100644 index ae54587e9829..000000000000 --- a/tools/testing/selftests/bpf/verifier/helper_packet_access.c +++ /dev/null @@ -1,460 +0,0 @@ -{ - 
"helper access to packet: test1, valid packet_ptr range", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .result_unpriv = ACCEPT, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "helper access to packet: test2, unchecked packet_ptr", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1 }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "helper access to packet: test3, variable add", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10), - BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 11 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "helper access to packet: test4, packet_ptr with bad range", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 7 }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "helper access to packet: test5, packet_ptr with too short range", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 6 }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "helper access to packet: test6, cls valid packet_ptr range", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - 
offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test7, cls unchecked packet_ptr", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1 }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test8, cls variable add", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 10), - BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_5), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 11 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test9, cls packet_ptr with bad range", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 7 }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test10, cls packet_ptr with too short range", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 6 }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test11, cls unsuitable helper 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, 
BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_7, 4), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_4, 42), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_store_bytes), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "helper access to the packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test12, cls unsuitable helper 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_4, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "helper access to the packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test13, cls helper ok", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test14, cls helper ok sub", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 4), - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test15, cls helper fail sub", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12), - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test16, cls helper 
fail range 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test17, cls helper fail range 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, -9), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R2 min value is negative", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test18, cls helper fail range 3", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, ~0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R2 min value is negative", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test19, cls helper range zero", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test20, pkt end as input", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - 
BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R1 type=pkt_end expected=fp", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "helper access to packet: test21, wrong reg", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_csum_diff), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -- cgit v1.2.3-70-g09d2 From 77aa2563cb44a6241990cf4f082b55ee6f0a0623 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:03 +0200 Subject: selftests/bpf: verifier/helper_restricted.c converted to inline assembly Test verifier/helper_restricted.c automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-23-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../bpf/progs/verifier_helper_restricted.c | 279 +++++++++++++++++++++ .../selftests/bpf/verifier/helper_restricted.c | 196 --------------- 3 files changed, 281 insertions(+), 196 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_helper_restricted.c delete mode 100644 tools/testing/selftests/bpf/verifier/helper_restricted.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 1cd162daf150..02983d1de218 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -19,6 +19,7 @@ #include "verifier_div_overflow.skel.h" #include "verifier_helper_access_var_len.skel.h" #include "verifier_helper_packet_access.skel.h" +#include "verifier_helper_restricted.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -60,3 +61,4 @@ void test_verifier_div0(void) { RUN(verifier_div0); } void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); } void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); } void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); } +void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); } diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_restricted.c b/tools/testing/selftests/bpf/progs/verifier_helper_restricted.c new file mode 100644 index 000000000000..0ede0ccd090c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_helper_restricted.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/helper_restricted.c */ + +#include +#include +#include "bpf_misc.h" + +struct val { + int cnt; + struct bpf_spin_lock l; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + 
__type(key, int); + __type(value, struct val); +} map_spin_lock SEC(".maps"); + +struct timer { + struct bpf_timer t; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct timer); +} map_timer SEC(".maps"); + +SEC("kprobe") +__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_KPROBE") +__failure __msg("unknown func bpf_ktime_get_coarse_ns") +__naked void in_bpf_prog_type_kprobe_1(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_coarse_ns]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_coarse_ns) + : __clobber_all); +} + +SEC("tracepoint") +__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_TRACEPOINT") +__failure __msg("unknown func bpf_ktime_get_coarse_ns") +__naked void in_bpf_prog_type_tracepoint_1(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_coarse_ns]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_coarse_ns) + : __clobber_all); +} + +SEC("perf_event") +__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_PERF_EVENT") +__failure __msg("unknown func bpf_ktime_get_coarse_ns") +__naked void bpf_prog_type_perf_event_1(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_coarse_ns]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_coarse_ns) + : __clobber_all); +} + +SEC("raw_tracepoint") +__description("bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT") +__failure __msg("unknown func bpf_ktime_get_coarse_ns") +__naked void bpf_prog_type_raw_tracepoint_1(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_coarse_ns]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_coarse_ns) + : __clobber_all); +} + +SEC("kprobe") +__description("bpf_timer_init isn restricted in BPF_PROG_TYPE_KPROBE") +__failure __msg("tracing progs cannot use bpf_timer yet") +__naked void in_bpf_prog_type_kprobe_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_timer] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = %[map_timer] ll; \ + r3 = 1; \ +l0_%=: call %[bpf_timer_init]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_timer_init), + __imm_addr(map_timer) + : __clobber_all); +} + +SEC("perf_event") +__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_PERF_EVENT") +__failure __msg("tracing progs cannot use bpf_timer yet") +__naked void bpf_prog_type_perf_event_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_timer] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = %[map_timer] ll; \ + r3 = 1; \ +l0_%=: call %[bpf_timer_init]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_timer_init), + __imm_addr(map_timer) + : __clobber_all); +} + +SEC("tracepoint") +__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_TRACEPOINT") +__failure __msg("tracing progs cannot use bpf_timer yet") +__naked void in_bpf_prog_type_tracepoint_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_timer] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = %[map_timer] ll; \ + r3 = 1; \ +l0_%=: call %[bpf_timer_init]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_timer_init), + __imm_addr(map_timer) + : __clobber_all); +} + +SEC("raw_tracepoint") +__description("bpf_timer_init is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT") +__failure 
__msg("tracing progs cannot use bpf_timer yet") +__naked void bpf_prog_type_raw_tracepoint_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_timer] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = %[map_timer] ll; \ + r3 = 1; \ +l0_%=: call %[bpf_timer_init]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_timer_init), + __imm_addr(map_timer) + : __clobber_all); +} + +SEC("kprobe") +__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE") +__failure __msg("tracing progs cannot use bpf_spin_lock yet") +__naked void in_bpf_prog_type_kprobe_3(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + call %[bpf_spin_lock]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("tracepoint") +__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_TRACEPOINT") +__failure __msg("tracing progs cannot use bpf_spin_lock yet") +__naked void in_bpf_prog_type_tracepoint_3(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + call %[bpf_spin_lock]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("perf_event") +__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_PERF_EVENT") +__failure __msg("tracing progs cannot use bpf_spin_lock yet") +__naked void bpf_prog_type_perf_event_3(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + call %[bpf_spin_lock]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("raw_tracepoint") +__description("bpf_spin_lock is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT") +__failure __msg("tracing progs cannot use bpf_spin_lock yet") +__naked void bpf_prog_type_raw_tracepoint_3(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + call %[bpf_spin_lock]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/helper_restricted.c b/tools/testing/selftests/bpf/verifier/helper_restricted.c deleted file mode 100644 index a067b7098b97..000000000000 --- a/tools/testing/selftests/bpf/verifier/helper_restricted.c +++ /dev/null @@ -1,196 +0,0 @@ -{ - "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_KPROBE", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "unknown func bpf_ktime_get_coarse_ns", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_KPROBE, -}, -{ - "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_TRACEPOINT", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, 
- .errstr = "unknown func bpf_ktime_get_coarse_ns", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_PERF_EVENT", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "unknown func bpf_ktime_get_coarse_ns", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_PERF_EVENT, -}, -{ - "bpf_ktime_get_coarse_ns is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ktime_get_coarse_ns), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "unknown func bpf_ktime_get_coarse_ns", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT, -}, -{ - "bpf_timer_init isn restricted in BPF_PROG_TYPE_KPROBE", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_EMIT_CALL(BPF_FUNC_timer_init), - BPF_EXIT_INSN(), - }, - .fixup_map_timer = { 3, 8 }, - .errstr = "tracing progs cannot use bpf_timer yet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_KPROBE, -}, -{ - "bpf_timer_init is forbidden in BPF_PROG_TYPE_PERF_EVENT", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_EMIT_CALL(BPF_FUNC_timer_init), - BPF_EXIT_INSN(), - }, - .fixup_map_timer = { 3, 8 }, - .errstr = "tracing progs cannot use bpf_timer yet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_PERF_EVENT, -}, -{ - "bpf_timer_init is forbidden in BPF_PROG_TYPE_TRACEPOINT", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_EMIT_CALL(BPF_FUNC_timer_init), - BPF_EXIT_INSN(), - }, - .fixup_map_timer = { 3, 8 }, - .errstr = "tracing progs cannot use bpf_timer yet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "bpf_timer_init is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_EMIT_CALL(BPF_FUNC_timer_init), - BPF_EXIT_INSN(), - }, - .fixup_map_timer = { 3, 8 }, - .errstr = "tracing progs cannot use bpf_timer yet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT, -}, -{ - "bpf_spin_lock is forbidden in BPF_PROG_TYPE_KPROBE", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 
-8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_spin_lock), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .errstr = "tracing progs cannot use bpf_spin_lock yet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_KPROBE, -}, -{ - "bpf_spin_lock is forbidden in BPF_PROG_TYPE_TRACEPOINT", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_spin_lock), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .errstr = "tracing progs cannot use bpf_spin_lock yet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "bpf_spin_lock is forbidden in BPF_PROG_TYPE_PERF_EVENT", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_spin_lock), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .errstr = "tracing progs cannot use bpf_spin_lock yet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_PERF_EVENT, -}, -{ - "bpf_spin_lock is forbidden in BPF_PROG_TYPE_RAW_TRACEPOINT", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_spin_lock), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .errstr = "tracing progs cannot use bpf_spin_lock yet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT, -}, -- cgit v1.2.3-70-g09d2 From ecc424827b775860119f5a5e2c521d7485bcc74f Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:04 +0200 Subject: selftests/bpf: verifier/helper_value_access.c converted to inline assembly Test verifier/helper_value_access.c automatically converted to use inline assembly. 
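As with the earlier conversions, each struct bpf_test entry becomes a standalone __naked function: the eBPF instructions are written as inline assembly, and the metadata (.result, .errstr, .prog_type and the map fixups) moves into SEC(), __description, __success/__failure, __msg annotations plus __imm/__imm_addr asm operands. A minimal sketch of the converted form, modeled on the simplest entries added below (the function name and chosen test are illustrative only, not part of the patch):

SEC("tracepoint")
__description("sketch: negative size is rejected")	/* illustrative description */
__failure __msg("R2 min value is negative")		/* expected verifier message */
__naked void sketch_negative_size(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = -8;					\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
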
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-24-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../bpf/progs/verifier_helper_value_access.c | 1245 ++++++++++++++++++++ .../selftests/bpf/verifier/helper_value_access.c | 953 --------------- 3 files changed, 1247 insertions(+), 953 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_helper_value_access.c delete mode 100644 tools/testing/selftests/bpf/verifier/helper_value_access.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 02983d1de218..2c3745a1fdcb 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -20,6 +20,7 @@ #include "verifier_helper_access_var_len.skel.h" #include "verifier_helper_packet_access.skel.h" #include "verifier_helper_restricted.skel.h" +#include "verifier_helper_value_access.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -62,3 +63,4 @@ void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); } void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); } void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); } void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); } +void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); } diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c b/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c new file mode 100644 index 000000000000..692216c0ad3d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c @@ -0,0 +1,1245 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/helper_value_access.c */ + +#include +#include +#include "bpf_misc.h" + +struct other_val { + long long foo; + long long bar; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct other_val); +} map_hash_16b SEC(".maps"); + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("tracepoint") +__description("helper access to map: full range") +__success +__naked void access_to_map_full_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = %[sizeof_test_val]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(sizeof_test_val, sizeof(struct test_val)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: partial range") +__success +__naked void access_to_map_partial_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call 
%[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: empty range") +__failure __msg("invalid access to map value, value_size=48 off=0 size=0") +__naked void access_to_map_empty_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = 0; \ + call %[bpf_trace_printk]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_trace_printk), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: out-of-bound range") +__failure __msg("invalid access to map value, value_size=48 off=0 size=56") +__naked void map_out_of_bound_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) + 8) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: negative range") +__failure __msg("R2 min value is negative") +__naked void access_to_map_negative_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r2 = -8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const imm): full range") +__success +__naked void via_const_imm_full_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += %[test_val_foo]; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const imm): partial range") +__success +__naked void via_const_imm_partial_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += %[test_val_foo]; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const imm): empty range") +__failure __msg("invalid access to map value, value_size=48 off=4 size=0") +__naked 
void via_const_imm_empty_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += %[test_val_foo]; \ + r2 = 0; \ + call %[bpf_trace_printk]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_trace_printk), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const imm): out-of-bound range") +__failure __msg("invalid access to map value, value_size=48 off=4 size=52") +__naked void imm_out_of_bound_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += %[test_val_foo]; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 8), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const imm): negative range (> adjustment)") +__failure __msg("R2 min value is negative") +__naked void const_imm_negative_range_adjustment_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += %[test_val_foo]; \ + r2 = -8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const imm): negative range (< adjustment)") +__failure __msg("R2 min value is negative") +__naked void const_imm_negative_range_adjustment_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r1 += %[test_val_foo]; \ + r2 = -1; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const reg): full range") +__success +__naked void via_const_reg_full_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = %[test_val_foo]; \ + r1 += r3; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const reg): partial range") +__success +__naked void 
via_const_reg_partial_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = %[test_val_foo]; \ + r1 += r3; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const reg): empty range") +__failure __msg("R1 min value is outside of the allowed memory range") +__naked void via_const_reg_empty_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = 0; \ + r1 += r3; \ + r2 = 0; \ + call %[bpf_trace_printk]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_trace_printk), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const reg): out-of-bound range") +__failure __msg("invalid access to map value, value_size=48 off=4 size=52") +__naked void reg_out_of_bound_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = %[test_val_foo]; \ + r1 += r3; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 8), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const reg): negative range (> adjustment)") +__failure __msg("R2 min value is negative") +__naked void const_reg_negative_range_adjustment_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = %[test_val_foo]; \ + r1 += r3; \ + r2 = -8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via const reg): negative range (< adjustment)") +__failure __msg("R2 min value is negative") +__naked void const_reg_negative_range_adjustment_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = %[test_val_foo]; \ + r1 += r3; \ + r2 = -1; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via variable): full range") +__success +__naked void map_via_variable_full_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 
= 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 > %[test_val_foo] goto l0_%=; \ + r1 += r3; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo)), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via variable): partial range") +__success +__naked void map_via_variable_partial_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 > %[test_val_foo] goto l0_%=; \ + r1 += r3; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via variable): empty range") +__failure __msg("R1 min value is outside of the allowed memory range") +__naked void map_via_variable_empty_range(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 > %[test_val_foo] goto l0_%=; \ + r1 += r3; \ + r2 = 0; \ + call %[bpf_trace_printk]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_trace_printk), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via variable): no max check") +__failure __msg("R1 unbounded memory access") +__naked void via_variable_no_max_check_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + r1 += r3; \ + r2 = 1; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to adjusted map (via variable): wrong max check") +__failure __msg("invalid access to map value, value_size=48 off=4 size=45") +__naked void via_variable_wrong_max_check_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 > %[test_val_foo] goto l0_%=; \ + r1 += r3; \ + r2 = %[__imm_0]; \ + r3 = 0; \ + call %[bpf_probe_read_kernel]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_probe_read_kernel), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, sizeof(struct test_val) - offsetof(struct test_val, foo) + 1), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using <, good access") +__success +__naked 
void bounds_check_using_good_access_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 < 32 goto l1_%=; \ + r0 = 0; \ +l0_%=: exit; \ +l1_%=: r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using <, bad access") +__failure __msg("R1 unbounded memory access") +__naked void bounds_check_using_bad_access_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 < 32 goto l1_%=; \ + r1 += r3; \ +l0_%=: r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using <=, good access") +__success +__naked void bounds_check_using_good_access_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 <= 32 goto l1_%=; \ + r0 = 0; \ +l0_%=: exit; \ +l1_%=: r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using <=, bad access") +__failure __msg("R1 unbounded memory access") +__naked void bounds_check_using_bad_access_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 <= 32 goto l1_%=; \ + r1 += r3; \ +l0_%=: r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using s<, good access") +__success +__naked void check_using_s_good_access_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 s< 32 goto l1_%=; \ +l2_%=: r0 = 0; \ +l0_%=: exit; \ +l1_%=: if r3 s< 0 goto l2_%=; \ + r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using s<, good access 2") +__success +__naked void using_s_good_access_2_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 s< 32 goto l1_%=; \ +l2_%=: r0 = 0; \ +l0_%=: exit; \ +l1_%=: if r3 s< -3 goto l2_%=; \ + r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + 
__imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using s<, bad access") +__failure __msg("R1 min value is negative") +__naked void check_using_s_bad_access_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u64*)(r0 + 0); \ + if r3 s< 32 goto l1_%=; \ +l2_%=: r0 = 0; \ +l0_%=: exit; \ +l1_%=: if r3 s< -3 goto l2_%=; \ + r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using s<=, good access") +__success +__naked void check_using_s_good_access_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 s<= 32 goto l1_%=; \ +l2_%=: r0 = 0; \ +l0_%=: exit; \ +l1_%=: if r3 s<= 0 goto l2_%=; \ + r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using s<=, good access 2") +__success +__naked void using_s_good_access_2_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 s<= 32 goto l1_%=; \ +l2_%=: r0 = 0; \ +l0_%=: exit; \ +l1_%=: if r3 s<= -3 goto l2_%=; \ + r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("helper access to map: bounds check using s<=, bad access") +__failure __msg("R1 min value is negative") +__naked void check_using_s_bad_access_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + r3 = *(u64*)(r0 + 0); \ + if r3 s<= 32 goto l1_%=; \ +l2_%=: r0 = 0; \ +l0_%=: exit; \ +l1_%=: if r3 s<= -3 goto l2_%=; \ + r1 += r3; \ + r0 = 0; \ + *(u8*)(r1 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("map lookup helper access to map") +__success +__naked void lookup_helper_access_to_map(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b) + : __clobber_all); +} + +SEC("tracepoint") +__description("map update helper access to map") +__success +__naked void update_helper_access_to_map(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r4 = 0; \ + r3 = r0; \ + r2 = r0; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_update_elem]; \ +l0_%=: exit; \ +" : + : 
__imm(bpf_map_lookup_elem), + __imm(bpf_map_update_elem), + __imm_addr(map_hash_16b) + : __clobber_all); +} + +SEC("tracepoint") +__description("map update helper access to map: wrong size") +__failure __msg("invalid access to map value, value_size=8 off=0 size=16") +__naked void access_to_map_wrong_size(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r4 = 0; \ + r3 = r0; \ + r2 = r0; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_update_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_map_update_elem), + __imm_addr(map_hash_16b), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via const imm)") +__success +__naked void adjusted_map_via_const_imm(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r2 += %[other_val_bar]; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b), + __imm_const(other_val_bar, offsetof(struct other_val, bar)) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via const imm): out-of-bound 1") +__failure __msg("invalid access to map value, value_size=16 off=12 size=8") +__naked void imm_out_of_bound_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r2 += %[__imm_0]; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b), + __imm_const(__imm_0, sizeof(struct other_val) - 4) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via const imm): out-of-bound 2") +__failure __msg("invalid access to map value, value_size=16 off=-4 size=8") +__naked void imm_out_of_bound_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r2 += -4; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via const reg)") +__success +__naked void adjusted_map_via_const_reg(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r3 = %[other_val_bar]; \ + r2 += r3; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b), + __imm_const(other_val_bar, offsetof(struct other_val, bar)) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via const reg): out-of-bound 1") +__failure __msg("invalid access to map value, value_size=16 off=12 size=8") +__naked void reg_out_of_bound_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call 
%[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r3 = %[__imm_0]; \ + r2 += r3; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b), + __imm_const(__imm_0, sizeof(struct other_val) - 4) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via const reg): out-of-bound 2") +__failure __msg("invalid access to map value, value_size=16 off=-4 size=8") +__naked void reg_out_of_bound_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r3 = -4; \ + r2 += r3; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via variable)") +__success +__naked void to_adjusted_map_via_variable(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 > %[other_val_bar] goto l0_%=; \ + r2 += r3; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b), + __imm_const(other_val_bar, offsetof(struct other_val, bar)) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via variable): no max check") +__failure +__msg("R2 unbounded memory access, make sure to bounds check any such access") +__naked void via_variable_no_max_check_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r3 = *(u32*)(r0 + 0); \ + r2 += r3; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b) + : __clobber_all); +} + +SEC("tracepoint") +__description("map helper access to adjusted map (via variable): wrong max check") +__failure __msg("invalid access to map value, value_size=16 off=9 size=8") +__naked void via_variable_wrong_max_check_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = r0; \ + r3 = *(u32*)(r0 + 0); \ + if r3 > %[__imm_0] goto l0_%=; \ + r2 += r3; \ + r1 = %[map_hash_16b] ll; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b), + __imm_const(__imm_0, offsetof(struct other_val, bar) + 1) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/helper_value_access.c b/tools/testing/selftests/bpf/verifier/helper_value_access.c deleted file mode 100644 index 1c7882ddfa63..000000000000 --- a/tools/testing/selftests/bpf/verifier/helper_value_access.c +++ /dev/null @@ -1,953 +0,0 @@ -{ - "helper access to map: full range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - 
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to map: partial range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to map: empty range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_trace_printk), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=0 size=0", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to map: out-of-bound range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=0 size=56", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to map: negative range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, -8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R2 min value is negative", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via const imm): full range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_2, - sizeof(struct test_val) - offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via const imm): partial range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, 
BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via const imm): empty range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_trace_printk), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=4 size=0", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via const imm): out-of-bound range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_2, - sizeof(struct test_val) - offsetof(struct test_val, foo) + 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=4 size=52", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via const imm): negative range (> adjustment)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_2, -8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R2 min value is negative", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via const imm): negative range (< adjustment)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R2 min value is negative", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via const reg): full range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 
-8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, - sizeof(struct test_val) - offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via const reg): partial range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via const reg): empty range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_trace_printk), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R1 min value is outside of the allowed memory range", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via const reg): out-of-bound range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, - sizeof(struct test_val) - - offsetof(struct test_val, foo) + 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=4 size=52", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via const reg): negative range (> adjustment)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, -8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R2 min value is negative", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ 
- "helper access to adjusted map (via const reg): negative range (< adjustment)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, offsetof(struct test_val, foo)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R2 min value is negative", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via variable): full range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, - sizeof(struct test_val) - offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via variable): partial range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via variable): empty range", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_trace_printk), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R1 min value is outside of the allowed memory range", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via variable): no max check", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, 1), - 
BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R1 unbounded memory access", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to adjusted map (via variable): wrong max check", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct test_val, foo), 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, - sizeof(struct test_val) - - offsetof(struct test_val, foo) + 1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=4 size=45", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to map: bounds check using <, good access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to map: bounds check using <, bad access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = REJECT, - .errstr = "R1 unbounded memory access", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to map: bounds check using <=, good access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to map: bounds check using <=, bad access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - 
BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = REJECT, - .errstr = "R1 unbounded memory access", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to map: bounds check using s<, good access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to map: bounds check using s<, good access 2", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to map: bounds check using s<, bad access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = REJECT, - .errstr = "R1 min value is negative", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to map: bounds check using s<=, good access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - 
BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to map: bounds check using s<=, good access 2", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "helper access to map: bounds check using s<=, bad access", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .result = REJECT, - .errstr = "R1 min value is negative", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "map lookup helper access to map", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 8 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "map update helper access to map", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_update_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 10 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "map update helper access to map: wrong size", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_update_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .fixup_map_hash_16b = { 10 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=8 off=0 size=16", - .prog_type = 
BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "map helper access to adjusted map (via const imm)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, offsetof(struct other_val, bar)), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 9 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "map helper access to adjusted map (via const imm): out-of-bound 1", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, sizeof(struct other_val) - 4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 9 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=16 off=12 size=8", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "map helper access to adjusted map (via const imm): out-of-bound 2", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 9 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=16 off=-4 size=8", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "map helper access to adjusted map (via const reg)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, offsetof(struct other_val, bar)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 10 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "map helper access to adjusted map (via const reg): out-of-bound 1", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, sizeof(struct other_val) - 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 10 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=16 off=12 size=8", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "map helper access to adjusted map (via const reg): out-of-bound 2", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, 
BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, -4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 10 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=16 off=-4 size=8", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "map helper access to adjusted map (via variable)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct other_val, bar), 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 11 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "map helper access to adjusted map (via variable): no max check", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 10 }, - .result = REJECT, - .errstr = "R2 unbounded memory access, make sure to bounds check any such access", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "map helper access to adjusted map (via variable): wrong max check", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, offsetof(struct other_val, bar) + 1, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 3, 11 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=16 off=9 size=8", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -- cgit v1.2.3-70-g09d2 From 01481e67dd4d1c2c62eb6a506a5f4803ee50f8a6 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:05 +0200 Subject: selftests/bpf: verifier/int_ptr.c converted to inline assembly Test verifier/int_ptr.c automatically converted to use inline assembly. 
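The converted tests exercise the verifier's ARG_PTR_TO_LONG checks for
bpf_strtoul(): the last argument must point at an aligned, fully
initialized 8-byte stack slot, which is why the uninitialized,
half-uninitialized and misaligned variants expect "invalid indirect
read" / "misaligned stack access" verifier messages. As a rough,
hypothetical plain-C counterpart of what the naked-asm programs set up
(not part of this patch; the program name and return value are purely
illustrative), the accepted pattern is:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("cgroup/sysctl")
	int strtoul_res_on_stack(struct bpf_sysctl *ctx)
	{
		char buf[4] = "600";	/* same 0x00303036 bytes the tests store */
		unsigned long res = 0;	/* initialized, naturally aligned 8-byte slot */

		/* &res is a valid ARG_PTR_TO_LONG destination here */
		if (bpf_strtoul(buf, sizeof(buf), 0, &res) < 0)
			return 0;
		return res == 600;
	}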
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-25-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../testing/selftests/bpf/progs/verifier_int_ptr.c | 157 ++++++++++++++++++++ tools/testing/selftests/bpf/verifier/int_ptr.c | 161 --------------------- 3 files changed, 159 insertions(+), 161 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_int_ptr.c delete mode 100644 tools/testing/selftests/bpf/verifier/int_ptr.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 2c3745a1fdcb..d9180da30f1b 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -21,6 +21,7 @@ #include "verifier_helper_packet_access.skel.h" #include "verifier_helper_restricted.skel.h" #include "verifier_helper_value_access.skel.h" +#include "verifier_int_ptr.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -64,3 +65,4 @@ void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_ void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); } void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); } void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); } +void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); } diff --git a/tools/testing/selftests/bpf/progs/verifier_int_ptr.c b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c new file mode 100644 index 000000000000..b054f9c48143 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_int_ptr.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/int_ptr.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("cgroup/sysctl") +__description("ARG_PTR_TO_LONG uninitialized") +__failure __msg("invalid indirect read from stack R4 off -16+0 size 8") +__naked void arg_ptr_to_long_uninitialized(void) +{ + asm volatile (" \ + /* bpf_strtoul arg1 (buf) */ \ + r7 = r10; \ + r7 += -8; \ + r0 = 0x00303036; \ + *(u64*)(r7 + 0) = r0; \ + r1 = r7; \ + /* bpf_strtoul arg2 (buf_len) */ \ + r2 = 4; \ + /* bpf_strtoul arg3 (flags) */ \ + r3 = 0; \ + /* bpf_strtoul arg4 (res) */ \ + r7 += -8; \ + r4 = r7; \ + /* bpf_strtoul() */ \ + call %[bpf_strtoul]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_strtoul) + : __clobber_all); +} + +SEC("socket") +__description("ARG_PTR_TO_LONG half-uninitialized") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid indirect read from stack R4 off -16+4 size 8") +__retval(0) +__naked void ptr_to_long_half_uninitialized(void) +{ + asm volatile (" \ + /* bpf_strtoul arg1 (buf) */ \ + r7 = r10; \ + r7 += -8; \ + r0 = 0x00303036; \ + *(u64*)(r7 + 0) = r0; \ + r1 = r7; \ + /* bpf_strtoul arg2 (buf_len) */ \ + r2 = 4; \ + /* bpf_strtoul arg3 (flags) */ \ + r3 = 0; \ + /* bpf_strtoul arg4 (res) */ \ + r7 += -8; \ + *(u32*)(r7 + 0) = r0; \ + r4 = r7; \ + /* bpf_strtoul() */ \ + call %[bpf_strtoul]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_strtoul) + : __clobber_all); +} + +SEC("cgroup/sysctl") +__description("ARG_PTR_TO_LONG misaligned") +__failure __msg("misaligned stack access off (0x0; 0x0)+-20+0 size 8") +__naked void arg_ptr_to_long_misaligned(void) +{ + asm volatile (" \ + 
/* bpf_strtoul arg1 (buf) */ \ + r7 = r10; \ + r7 += -8; \ + r0 = 0x00303036; \ + *(u64*)(r7 + 0) = r0; \ + r1 = r7; \ + /* bpf_strtoul arg2 (buf_len) */ \ + r2 = 4; \ + /* bpf_strtoul arg3 (flags) */ \ + r3 = 0; \ + /* bpf_strtoul arg4 (res) */ \ + r7 += -12; \ + r0 = 0; \ + *(u32*)(r7 + 0) = r0; \ + *(u64*)(r7 + 4) = r0; \ + r4 = r7; \ + /* bpf_strtoul() */ \ + call %[bpf_strtoul]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_strtoul) + : __clobber_all); +} + +SEC("cgroup/sysctl") +__description("ARG_PTR_TO_LONG size < sizeof(long)") +__failure __msg("invalid indirect access to stack R4 off=-4 size=8") +__naked void to_long_size_sizeof_long(void) +{ + asm volatile (" \ + /* bpf_strtoul arg1 (buf) */ \ + r7 = r10; \ + r7 += -16; \ + r0 = 0x00303036; \ + *(u64*)(r7 + 0) = r0; \ + r1 = r7; \ + /* bpf_strtoul arg2 (buf_len) */ \ + r2 = 4; \ + /* bpf_strtoul arg3 (flags) */ \ + r3 = 0; \ + /* bpf_strtoul arg4 (res) */ \ + r7 += 12; \ + *(u32*)(r7 + 0) = r0; \ + r4 = r7; \ + /* bpf_strtoul() */ \ + call %[bpf_strtoul]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_strtoul) + : __clobber_all); +} + +SEC("cgroup/sysctl") +__description("ARG_PTR_TO_LONG initialized") +__success +__naked void arg_ptr_to_long_initialized(void) +{ + asm volatile (" \ + /* bpf_strtoul arg1 (buf) */ \ + r7 = r10; \ + r7 += -8; \ + r0 = 0x00303036; \ + *(u64*)(r7 + 0) = r0; \ + r1 = r7; \ + /* bpf_strtoul arg2 (buf_len) */ \ + r2 = 4; \ + /* bpf_strtoul arg3 (flags) */ \ + r3 = 0; \ + /* bpf_strtoul arg4 (res) */ \ + r7 += -8; \ + *(u64*)(r7 + 0) = r0; \ + r4 = r7; \ + /* bpf_strtoul() */ \ + call %[bpf_strtoul]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_strtoul) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/int_ptr.c b/tools/testing/selftests/bpf/verifier/int_ptr.c deleted file mode 100644 index 02d9e004260b..000000000000 --- a/tools/testing/selftests/bpf/verifier/int_ptr.c +++ /dev/null @@ -1,161 +0,0 @@ -{ - "ARG_PTR_TO_LONG uninitialized", - .insns = { - /* bpf_strtoul arg1 (buf) */ - BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), - BPF_MOV64_IMM(BPF_REG_0, 0x00303036), - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - - /* bpf_strtoul arg2 (buf_len) */ - BPF_MOV64_IMM(BPF_REG_2, 4), - - /* bpf_strtoul arg3 (flags) */ - BPF_MOV64_IMM(BPF_REG_3, 0), - - /* bpf_strtoul arg4 (res) */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_7), - - /* bpf_strtoul() */ - BPF_EMIT_CALL(BPF_FUNC_strtoul), - - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL, - .errstr = "invalid indirect read from stack R4 off -16+0 size 8", -}, -{ - "ARG_PTR_TO_LONG half-uninitialized", - .insns = { - /* bpf_strtoul arg1 (buf) */ - BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), - BPF_MOV64_IMM(BPF_REG_0, 0x00303036), - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - - /* bpf_strtoul arg2 (buf_len) */ - BPF_MOV64_IMM(BPF_REG_2, 4), - - /* bpf_strtoul arg3 (flags) */ - BPF_MOV64_IMM(BPF_REG_3, 0), - - /* bpf_strtoul arg4 (res) */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), - BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_7), - - /* bpf_strtoul() */ - BPF_EMIT_CALL(BPF_FUNC_strtoul), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result_unpriv = REJECT, - .errstr_unpriv = "invalid indirect read from stack 
R4 off -16+4 size 8", - /* in privileged mode reads from uninitialized stack locations are permitted */ - .result = ACCEPT, -}, -{ - "ARG_PTR_TO_LONG misaligned", - .insns = { - /* bpf_strtoul arg1 (buf) */ - BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), - BPF_MOV64_IMM(BPF_REG_0, 0x00303036), - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - - /* bpf_strtoul arg2 (buf_len) */ - BPF_MOV64_IMM(BPF_REG_2, 4), - - /* bpf_strtoul arg3 (flags) */ - BPF_MOV64_IMM(BPF_REG_3, 0), - - /* bpf_strtoul arg4 (res) */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -12), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_7), - - /* bpf_strtoul() */ - BPF_EMIT_CALL(BPF_FUNC_strtoul), - - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL, - .errstr = "misaligned stack access off (0x0; 0x0)+-20+0 size 8", -}, -{ - "ARG_PTR_TO_LONG size < sizeof(long)", - .insns = { - /* bpf_strtoul arg1 (buf) */ - BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -16), - BPF_MOV64_IMM(BPF_REG_0, 0x00303036), - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - - /* bpf_strtoul arg2 (buf_len) */ - BPF_MOV64_IMM(BPF_REG_2, 4), - - /* bpf_strtoul arg3 (flags) */ - BPF_MOV64_IMM(BPF_REG_3, 0), - - /* bpf_strtoul arg4 (res) */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 12), - BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_7), - - /* bpf_strtoul() */ - BPF_EMIT_CALL(BPF_FUNC_strtoul), - - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL, - .errstr = "invalid indirect access to stack R4 off=-4 size=8", -}, -{ - "ARG_PTR_TO_LONG initialized", - .insns = { - /* bpf_strtoul arg1 (buf) */ - BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), - BPF_MOV64_IMM(BPF_REG_0, 0x00303036), - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - - /* bpf_strtoul arg2 (buf_len) */ - BPF_MOV64_IMM(BPF_REG_2, 4), - - /* bpf_strtoul arg3 (flags) */ - BPF_MOV64_IMM(BPF_REG_3, 0), - - /* bpf_strtoul arg4 (res) */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_7), - - /* bpf_strtoul() */ - BPF_EMIT_CALL(BPF_FUNC_strtoul), - - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SYSCTL, -}, -- cgit v1.2.3-70-g09d2 From e297875580662f2fdcde1cba4ea28bf70ea2ca49 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:06 +0200 Subject: selftests/bpf: verifier/ld_ind.c converted to inline assembly Test verifier/ld_ind.c automatically converted to use inline assembly. 
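A mechanical note for readers of the diff (editorial, not from the original changelog): the clang BPF assembler has no mnemonic for BPF_LD_IND, so the converted tests emit a .8byte directive and substitute the raw encoding through the __imm_insn() helper from bpf_misc.h, roughly like this:

  /* r6 must carry the context before an LD_IND; the .8byte slot is
   * filled in with the encoding passed via __imm_insn(). */
  asm volatile ("r6 = r1; .8byte %[ld_ind]; r0 = 0; exit;"
                : : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000))
                : __clobber_all);
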
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-26-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../testing/selftests/bpf/progs/verifier_ld_ind.c | 110 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/ld_ind.c | 72 -------------- 3 files changed, 112 insertions(+), 72 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_ld_ind.c delete mode 100644 tools/testing/selftests/bpf/verifier/ld_ind.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index d9180da30f1b..d8d4464b6112 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -22,6 +22,7 @@ #include "verifier_helper_restricted.skel.h" #include "verifier_helper_value_access.skel.h" #include "verifier_int_ptr.skel.h" +#include "verifier_ld_ind.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -66,3 +67,4 @@ void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_acces void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); } void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); } void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); } +void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } diff --git a/tools/testing/selftests/bpf/progs/verifier_ld_ind.c b/tools/testing/selftests/bpf/progs/verifier_ld_ind.c new file mode 100644 index 000000000000..c925ba9a2e74 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_ld_ind.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/ld_ind.c */ + +#include +#include +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +SEC("socket") +__description("ld_ind: check calling conv, r1") +__failure __msg("R1 !read_ok") +__failure_unpriv +__naked void ind_check_calling_conv_r1(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 1; \ + .8byte %[ld_ind]; \ + r0 = r1; \ + exit; \ +" : + : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000)) + : __clobber_all); +} + +SEC("socket") +__description("ld_ind: check calling conv, r2") +__failure __msg("R2 !read_ok") +__failure_unpriv +__naked void ind_check_calling_conv_r2(void) +{ + asm volatile (" \ + r6 = r1; \ + r2 = 1; \ + .8byte %[ld_ind]; \ + r0 = r2; \ + exit; \ +" : + : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000)) + : __clobber_all); +} + +SEC("socket") +__description("ld_ind: check calling conv, r3") +__failure __msg("R3 !read_ok") +__failure_unpriv +__naked void ind_check_calling_conv_r3(void) +{ + asm volatile (" \ + r6 = r1; \ + r3 = 1; \ + .8byte %[ld_ind]; \ + r0 = r3; \ + exit; \ +" : + : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000)) + : __clobber_all); +} + +SEC("socket") +__description("ld_ind: check calling conv, r4") +__failure __msg("R4 !read_ok") +__failure_unpriv +__naked void ind_check_calling_conv_r4(void) +{ + asm volatile (" \ + r6 = r1; \ + r4 = 1; \ + .8byte %[ld_ind]; \ + r0 = r4; \ + exit; \ +" : + : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000)) + : __clobber_all); +} + +SEC("socket") +__description("ld_ind: check calling conv, r5") +__failure __msg("R5 !read_ok") +__failure_unpriv +__naked void ind_check_calling_conv_r5(void) +{ + asm volatile (" \ + r6 = r1; \ + r5 = 1; \ + 
.8byte %[ld_ind]; \ + r0 = r5; \ + exit; \ +" : + : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000)) + : __clobber_all); +} + +SEC("socket") +__description("ld_ind: check calling conv, r7") +__success __success_unpriv __retval(1) +__naked void ind_check_calling_conv_r7(void) +{ + asm volatile (" \ + r6 = r1; \ + r7 = 1; \ + .8byte %[ld_ind]; \ + r0 = r7; \ + exit; \ +" : + : __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/ld_ind.c b/tools/testing/selftests/bpf/verifier/ld_ind.c deleted file mode 100644 index 079734227538..000000000000 --- a/tools/testing/selftests/bpf/verifier/ld_ind.c +++ /dev/null @@ -1,72 +0,0 @@ -{ - "ld_ind: check calling conv, r1", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_LD_IND(BPF_W, BPF_REG_1, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr = "R1 !read_ok", - .result = REJECT, -}, -{ - "ld_ind: check calling conv, r2", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_LD_IND(BPF_W, BPF_REG_2, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .errstr = "R2 !read_ok", - .result = REJECT, -}, -{ - "ld_ind: check calling conv, r3", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_LD_IND(BPF_W, BPF_REG_3, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), - BPF_EXIT_INSN(), - }, - .errstr = "R3 !read_ok", - .result = REJECT, -}, -{ - "ld_ind: check calling conv, r4", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_4, 1), - BPF_LD_IND(BPF_W, BPF_REG_4, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), - BPF_EXIT_INSN(), - }, - .errstr = "R4 !read_ok", - .result = REJECT, -}, -{ - "ld_ind: check calling conv, r5", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_5, 1), - BPF_LD_IND(BPF_W, BPF_REG_5, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), - BPF_EXIT_INSN(), - }, - .errstr = "R5 !read_ok", - .result = REJECT, -}, -{ - "ld_ind: check calling conv, r7", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_7, 1), - BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 1, -}, -- cgit v1.2.3-70-g09d2 From 583c7ce5be093277ef1691f35d9bec5ef9934e24 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:07 +0200 Subject: selftests/bpf: verifier/leak_ptr.c converted to inline assembly Test verifier/leak_ptr.c automatically converted to use inline assembly. 
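For clarity (editorial note, not part of the original changelog): the BPF_ATOMIC_OP(BPF_DW, BPF_ADD, dst, src, off) instructions used by these tests are spelled "lock *(u64 *)(dst + off) += src" in the inline-assembly form. A minimal sketch of the syntax:

  /* Initialize a stack slot, then atomically add r0 to it. Mirrors
   * BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8). */
  asm volatile ("r0 = 0; *(u64*)(r10 - 8) = r0; lock *(u64 *)(r10 - 8) += r0; exit;"
                ::: __clobber_all);
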
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-27-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_leak_ptr.c | 92 ++++++++++++++++++++++ tools/testing/selftests/bpf/verifier/leak_ptr.c | 67 ---------------- 3 files changed, 94 insertions(+), 67 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_leak_ptr.c delete mode 100644 tools/testing/selftests/bpf/verifier/leak_ptr.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index d8d4464b6112..f8b3b6beba3f 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -23,6 +23,7 @@ #include "verifier_helper_value_access.skel.h" #include "verifier_int_ptr.skel.h" #include "verifier_ld_ind.skel.h" +#include "verifier_leak_ptr.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -68,3 +69,4 @@ void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); } void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); } void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } +void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } diff --git a/tools/testing/selftests/bpf/progs/verifier_leak_ptr.c b/tools/testing/selftests/bpf/progs/verifier_leak_ptr.c new file mode 100644 index 000000000000..d153fbe50055 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_leak_ptr.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/leak_ptr.c */ + +#include +#include +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("leak pointer into ctx 1") +__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed") +__failure_unpriv __msg_unpriv("R2 leaks addr into mem") +__naked void leak_pointer_into_ctx_1(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r2 = %[map_hash_8b] ll; \ + lock *(u64 *)(r1 + %[__sk_buff_cb_0]) += r2; \ + exit; \ +" : + : __imm_addr(map_hash_8b), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("leak pointer into ctx 2") +__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed") +__failure_unpriv __msg_unpriv("R10 leaks addr into mem") +__naked void leak_pointer_into_ctx_2(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r1 + %[__sk_buff_cb_0]) = r0; \ + lock *(u64 *)(r1 + %[__sk_buff_cb_0]) += r10; \ + exit; \ +" : + : __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("leak pointer into ctx 3") +__success __failure_unpriv __msg_unpriv("R2 leaks addr into ctx") +__retval(0) +__naked void leak_pointer_into_ctx_3(void) +{ + asm volatile (" \ + r0 = 0; \ + r2 = %[map_hash_8b] ll; \ + *(u64*)(r1 + %[__sk_buff_cb_0]) = r2; \ + exit; \ +" : + : __imm_addr(map_hash_8b), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("leak pointer into map val") +__success __failure_unpriv __msg_unpriv("R6 leaks addr into 
mem") +__retval(0) +__naked void leak_pointer_into_map_val(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r3 = 0; \ + *(u64*)(r0 + 0) = r3; \ + lock *(u64 *)(r0 + 0) += r6; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/leak_ptr.c b/tools/testing/selftests/bpf/verifier/leak_ptr.c deleted file mode 100644 index 73f0dea95546..000000000000 --- a/tools/testing/selftests/bpf/verifier/leak_ptr.c +++ /dev/null @@ -1,67 +0,0 @@ -{ - "leak pointer into ctx 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2, - offsetof(struct __sk_buff, cb[0])), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 2 }, - .errstr_unpriv = "R2 leaks addr into mem", - .result_unpriv = REJECT, - .result = REJECT, - .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed", -}, -{ - "leak pointer into ctx 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_10, - offsetof(struct __sk_buff, cb[0])), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R10 leaks addr into mem", - .result_unpriv = REJECT, - .result = REJECT, - .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed", -}, -{ - "leak pointer into ctx 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, - offsetof(struct __sk_buff, cb[0])), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1 }, - .errstr_unpriv = "R2 leaks addr into ctx", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "leak pointer into map val", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr_unpriv = "R6 leaks addr into mem", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -- cgit v1.2.3-70-g09d2 From caf345cf12073eb5905b03c3c5f905cc0964dc6c Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:08 +0200 Subject: selftests/bpf: verifier/map_ptr.c converted to inline assembly Test verifier/map_ptr.c automatically converted to use inline assembly. 
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-28-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../testing/selftests/bpf/progs/verifier_map_ptr.c | 159 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/map_ptr.c | 99 ------------- 3 files changed, 161 insertions(+), 99 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_map_ptr.c delete mode 100644 tools/testing/selftests/bpf/verifier/map_ptr.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index f8b3b6beba3f..d2f3bff0e942 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -24,6 +24,7 @@ #include "verifier_int_ptr.skel.h" #include "verifier_ld_ind.skel.h" #include "verifier_leak_ptr.skel.h" +#include "verifier_map_ptr.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -70,3 +71,4 @@ void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); } void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } +void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } diff --git a/tools/testing/selftests/bpf/progs/verifier_map_ptr.c b/tools/testing/selftests/bpf/progs/verifier_map_ptr.c new file mode 100644 index 000000000000..11a079145966 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_map_ptr.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/map_ptr.c */ + +#include +#include +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +struct other_val { + long long foo; + long long bar; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct other_val); +} map_hash_16b SEC(".maps"); + +SEC("socket") +__description("bpf_map_ptr: read with negative offset rejected") +__failure __msg("R1 is bpf_array invalid negative access: off=-8") +__failure_unpriv +__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN") +__naked void read_with_negative_offset_rejected(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 = %[map_array_48b] ll; \ + r6 = *(u64*)(r1 - 8); \ + r0 = 1; \ + exit; \ +" : + : __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("bpf_map_ptr: write rejected") +__failure __msg("only read from bpf_array is supported") +__failure_unpriv +__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN") +__naked void bpf_map_ptr_write_rejected(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r10 - 8) = r0; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + *(u64*)(r1 + 0) = r2; \ + r0 = 1; \ + exit; \ +" : + : __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("bpf_map_ptr: read non-existent field rejected") +__failure +__msg("cannot access ptr member ops with moff 0 in struct bpf_map with off 1 size 4") +__failure_unpriv +__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN") 
+__flag(BPF_F_ANY_ALIGNMENT) +__naked void read_non_existent_field_rejected(void) +{ + asm volatile (" \ + r6 = 0; \ + r1 = %[map_array_48b] ll; \ + r6 = *(u32*)(r1 + 1); \ + r0 = 1; \ + exit; \ +" : + : __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("bpf_map_ptr: read ops field accepted") +__success __failure_unpriv +__msg_unpriv("access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN") +__retval(1) +__naked void ptr_read_ops_field_accepted(void) +{ + asm volatile (" \ + r6 = 0; \ + r1 = %[map_array_48b] ll; \ + r6 = *(u64*)(r1 + 0); \ + r0 = 1; \ + exit; \ +" : + : __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("bpf_map_ptr: r = 0, map_ptr = map_ptr + r") +__success __failure_unpriv +__msg_unpriv("R1 has pointer with unsupported alu operation") +__retval(0) +__naked void map_ptr_map_ptr_r(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r10 - 8) = r0; \ + r2 = r10; \ + r2 += -8; \ + r0 = 0; \ + r1 = %[map_hash_16b] ll; \ + r1 += r0; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b) + : __clobber_all); +} + +SEC("socket") +__description("bpf_map_ptr: r = 0, r = r + map_ptr") +__success __failure_unpriv +__msg_unpriv("R0 has pointer with unsupported alu operation") +__retval(0) +__naked void _0_r_r_map_ptr(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r10 - 8) = r0; \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + r0 = %[map_hash_16b] ll; \ + r1 += r0; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_16b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/map_ptr.c b/tools/testing/selftests/bpf/verifier/map_ptr.c deleted file mode 100644 index 17ee84dc7766..000000000000 --- a/tools/testing/selftests/bpf/verifier/map_ptr.c +++ /dev/null @@ -1,99 +0,0 @@ -{ - "bpf_map_ptr: read with negative offset rejected", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 1 }, - .result_unpriv = REJECT, - .errstr_unpriv = "access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", - .result = REJECT, - .errstr = "R1 is bpf_array invalid negative access: off=-8", -}, -{ - "bpf_map_ptr: write rejected", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result_unpriv = REJECT, - .errstr_unpriv = "access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", - .result = REJECT, - .errstr = "only read from bpf_array is supported", -}, -{ - "bpf_map_ptr: read non-existent field rejected", - .insns = { - BPF_MOV64_IMM(BPF_REG_6, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 1), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 1 }, - .result_unpriv = REJECT, - .errstr_unpriv = "access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", - .result = REJECT, - .errstr = "cannot access ptr member ops with moff 0 in struct bpf_map with off 1 size 4", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "bpf_map_ptr: read ops field accepted", - .insns = { - 
BPF_MOV64_IMM(BPF_REG_6, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 1 }, - .result_unpriv = REJECT, - .errstr_unpriv = "access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", - .result = ACCEPT, - .retval = 1, -}, -{ - "bpf_map_ptr: r = 0, map_ptr = map_ptr + r", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 4 }, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 has pointer with unsupported alu operation", - .result = ACCEPT, -}, -{ - "bpf_map_ptr: r = 0, r = r + map_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_LD_MAP_FD(BPF_REG_0, 0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 4 }, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 has pointer with unsupported alu operation", - .result = ACCEPT, -}, -- cgit v1.2.3-70-g09d2 From 05e474ecbb56a5fadea6b36b7aa7d8d237aea064 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:09 +0200 Subject: selftests/bpf: verifier/map_ret_val.c converted to inline assembly Test verifier/map_ret_val.c automatically converted to use inline assembly. 
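One more conversion detail (editorial note): a BPF_LD | BPF_DW | BPF_IMM instruction occupies two instruction slots, so the "invalid map_fd" test emits the pseudo map-fd load as a raw .8byte directive followed by a second, all-zero .8byte for the upper half. Schematically (this fragment is rejected at load time, as the test intends, because fd 0 is not a valid map):

  /* was: BPF_LD_MAP_FD(BPF_REG_1, 0), i.e. a pseudo load of map fd 0 */
  asm volatile (".8byte %[ld_map_fd]; .8byte 0; r0 = 0; exit;"
                : : __imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM,
                                                       BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 0))
                : __clobber_all);

The old .flags = F_LOAD_WITH_STRICT_ALIGNMENT entries likewise become __flag(BPF_F_STRICT_ALIGNMENT).
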
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-29-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_map_ret_val.c | 110 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/map_ret_val.c | 65 ------------ 3 files changed, 112 insertions(+), 65 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_map_ret_val.c delete mode 100644 tools/testing/selftests/bpf/verifier/map_ret_val.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index d2f3bff0e942..5131a73fd225 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -25,6 +25,7 @@ #include "verifier_ld_ind.skel.h" #include "verifier_leak_ptr.skel.h" #include "verifier_map_ptr.skel.h" +#include "verifier_map_ret_val.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -72,3 +73,4 @@ void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); } void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } +void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } diff --git a/tools/testing/selftests/bpf/progs/verifier_map_ret_val.c b/tools/testing/selftests/bpf/progs/verifier_map_ret_val.c new file mode 100644 index 000000000000..1639628b832d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_map_ret_val.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/map_ret_val.c */ + +#include +#include +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("invalid map_fd for function call") +__failure __msg("fd 0 is not pointing to valid bpf_map") +__failure_unpriv +__naked void map_fd_for_function_call(void) +{ + asm volatile (" \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + r2 = r10; \ + r2 += -8; \ + .8byte %[ld_map_fd]; \ + .8byte 0; \ + call %[bpf_map_delete_elem]; \ + exit; \ +" : + : __imm(bpf_map_delete_elem), + __imm_insn(ld_map_fd, BPF_RAW_INSN(BPF_LD | BPF_DW | BPF_IMM, BPF_REG_1, BPF_PSEUDO_MAP_FD, 0, 0)) + : __clobber_all); +} + +SEC("socket") +__description("don't check return value before access") +__failure __msg("R0 invalid mem access 'map_value_or_null'") +__failure_unpriv +__naked void check_return_value_before_access(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r1 = 0; \ + *(u64*)(r0 + 0) = r1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("access memory with incorrect alignment") +__failure __msg("misaligned value access") +__failure_unpriv +__flag(BPF_F_STRICT_ALIGNMENT) +__naked void access_memory_with_incorrect_alignment_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u64*)(r0 + 4) = r1; \ +l0_%=: exit; \ +" : + : 
__imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("sometimes access memory with incorrect alignment") +__failure __msg("R0 invalid mem access") +__msg_unpriv("R0 leaks addr") +__flag(BPF_F_STRICT_ALIGNMENT) +__naked void access_memory_with_incorrect_alignment_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u64*)(r0 + 0) = r1; \ + exit; \ +l0_%=: r1 = 1; \ + *(u64*)(r0 + 0) = r1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/map_ret_val.c b/tools/testing/selftests/bpf/verifier/map_ret_val.c deleted file mode 100644 index bdd0e8d18333..000000000000 --- a/tools/testing/selftests/bpf/verifier/map_ret_val.c +++ /dev/null @@ -1,65 +0,0 @@ -{ - "invalid map_fd for function call", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem), - BPF_EXIT_INSN(), - }, - .errstr = "fd 0 is not pointing to valid bpf_map", - .result = REJECT, -}, -{ - "don't check return value before access", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 invalid mem access 'map_value_or_null'", - .result = REJECT, -}, -{ - "access memory with incorrect alignment", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 4, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "misaligned value access", - .result = REJECT, - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, -}, -{ - "sometimes access memory with incorrect alignment", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 invalid mem access", - .errstr_unpriv = "R0 leaks addr", - .result = REJECT, - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, -}, -- cgit v1.2.3-70-g09d2 From ade3f08fc236ce8a5f00c1ffaf85ba42aa18ded4 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:10 +0200 Subject: selftests/bpf: verifier/masking.c converted to inline assembly Test verifier/masking.c automatically converted to use inline assembly. 
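All of the tests below exercise the same bounds-masking idiom: the value under test is ANDed with a mask that is all-ones when the value lies below the bound and zero otherwise. A C sketch of what the instruction sequence computes (editorial; the function name is invented, and it assumes 64-bit two's-complement arithmetic, an arithmetic right shift for signed types, and the operand ranges used by these tests, where bound - 1 has its top bit clear):

  /* Returns v unchanged when v <= bound - 1, and 0 otherwise. */
  static unsigned long long mask_below(unsigned long long v, unsigned long long bound)
  {
          unsigned long long m = bound - 1;             /* w2/r2 = bound - 1 */

          m -= v;                                       /* r2 -= r1 */
          m |= v;                                       /* r2 |= r1: top bit set iff v > bound - 1 */
          m = -m;                                       /* r2 = -r2 */
          m = (unsigned long long)((long long)m >> 63); /* r2 s>>= 63: 0 or all-ones */
          return v & m;                                 /* r1 &= r2 */
  }

The "in bounds" tests expect the original value back and the "out of bounds" tests expect 0, which is exactly what this mask produces.
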
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-30-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../testing/selftests/bpf/progs/verifier_masking.c | 410 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/masking.c | 322 ---------------- 3 files changed, 412 insertions(+), 322 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_masking.c delete mode 100644 tools/testing/selftests/bpf/verifier/masking.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 5131a73fd225..b23fcbe4f83b 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -26,6 +26,7 @@ #include "verifier_leak_ptr.skel.h" #include "verifier_map_ptr.skel.h" #include "verifier_map_ret_val.skel.h" +#include "verifier_masking.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -74,3 +75,4 @@ void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } +void test_verifier_masking(void) { RUN(verifier_masking); } diff --git a/tools/testing/selftests/bpf/progs/verifier_masking.c b/tools/testing/selftests/bpf/progs/verifier_masking.c new file mode 100644 index 000000000000..5732cc1b4c47 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_masking.c @@ -0,0 +1,410 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/masking.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("socket") +__description("masking, test out of bounds 1") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_1(void) +{ + asm volatile (" \ + w1 = 5; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 5 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 2") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_2(void) +{ + asm volatile (" \ + w1 = 1; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 1 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 3") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_3(void) +{ + asm volatile (" \ + w1 = 0xffffffff; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffffffff - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 4") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_4(void) +{ + asm volatile (" \ + w1 = 0xffffffff; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 1 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 5") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_5(void) +{ + asm volatile (" \ + w1 = -1; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 
s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 1 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 6") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_6(void) +{ + asm volatile (" \ + w1 = -1; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffffffff - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 7") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_7(void) +{ + asm volatile (" \ + r1 = 5; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 5 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 8") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_8(void) +{ + asm volatile (" \ + r1 = 1; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 1 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 9") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_9(void) +{ + asm volatile (" \ + r1 = 0xffffffff; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffffffff - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 10") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_10(void) +{ + asm volatile (" \ + r1 = 0xffffffff; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 1 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 11") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_11(void) +{ + asm volatile (" \ + r1 = -1; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 1 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test out of bounds 12") +__success __success_unpriv __retval(0) +__naked void test_out_of_bounds_12(void) +{ + asm volatile (" \ + r1 = -1; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffffffff - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 1") +__success __success_unpriv __retval(4) +__naked void masking_test_in_bounds_1(void) +{ + asm volatile (" \ + w1 = 4; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 5 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 2") +__success __success_unpriv __retval(0) +__naked void masking_test_in_bounds_2(void) +{ + asm volatile (" \ + w1 = 0; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffffffff - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 3") +__success __success_unpriv __retval(0xfffffffe) +__naked void masking_test_in_bounds_3(void) +{ + asm 
volatile (" \ + w1 = 0xfffffffe; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffffffff - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 4") +__success __success_unpriv __retval(0xabcde) +__naked void masking_test_in_bounds_4(void) +{ + asm volatile (" \ + w1 = 0xabcde; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 0xabcdef - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 5") +__success __success_unpriv __retval(0) +__naked void masking_test_in_bounds_5(void) +{ + asm volatile (" \ + w1 = 0; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 1 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 6") +__success __success_unpriv __retval(46) +__naked void masking_test_in_bounds_6(void) +{ + asm volatile (" \ + w1 = 46; \ + w2 = %[__imm_0]; \ + r2 -= r1; \ + r2 |= r1; \ + r2 = -r2; \ + r2 s>>= 63; \ + r1 &= r2; \ + r0 = r1; \ + exit; \ +" : + : __imm_const(__imm_0, 47 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 7") +__success __success_unpriv __retval(46) +__naked void masking_test_in_bounds_7(void) +{ + asm volatile (" \ + r3 = -46; \ + r3 *= -1; \ + w2 = %[__imm_0]; \ + r2 -= r3; \ + r2 |= r3; \ + r2 = -r2; \ + r2 s>>= 63; \ + r3 &= r2; \ + r0 = r3; \ + exit; \ +" : + : __imm_const(__imm_0, 47 - 1) + : __clobber_all); +} + +SEC("socket") +__description("masking, test in bounds 8") +__success __success_unpriv __retval(0) +__naked void masking_test_in_bounds_8(void) +{ + asm volatile (" \ + r3 = -47; \ + r3 *= -1; \ + w2 = %[__imm_0]; \ + r2 -= r3; \ + r2 |= r3; \ + r2 = -r2; \ + r2 s>>= 63; \ + r3 &= r2; \ + r0 = r3; \ + exit; \ +" : + : __imm_const(__imm_0, 47 - 1) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/masking.c b/tools/testing/selftests/bpf/verifier/masking.c deleted file mode 100644 index 6e1358c544fd..000000000000 --- a/tools/testing/selftests/bpf/verifier/masking.c +++ /dev/null @@ -1,322 +0,0 @@ -{ - "masking, test out of bounds 1", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 5), - BPF_MOV32_IMM(BPF_REG_2, 5 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "masking, test out of bounds 2", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 1), - BPF_MOV32_IMM(BPF_REG_2, 1 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "masking, test out of bounds 3", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0xffffffff), - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 
63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "masking, test out of bounds 4", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0xffffffff), - BPF_MOV32_IMM(BPF_REG_2, 1 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "masking, test out of bounds 5", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, -1), - BPF_MOV32_IMM(BPF_REG_2, 1 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "masking, test out of bounds 6", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, -1), - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "masking, test out of bounds 7", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 5), - BPF_MOV32_IMM(BPF_REG_2, 5 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "masking, test out of bounds 8", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_MOV32_IMM(BPF_REG_2, 1 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "masking, test out of bounds 9", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0xffffffff), - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "masking, test out of bounds 10", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0xffffffff), - BPF_MOV32_IMM(BPF_REG_2, 1 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "masking, test out of bounds 11", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_MOV32_IMM(BPF_REG_2, 1 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, 
BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "masking, test out of bounds 12", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "masking, test in bounds 1", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 4), - BPF_MOV32_IMM(BPF_REG_2, 5 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 4, -}, -{ - "masking, test in bounds 2", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "masking, test in bounds 3", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0xfffffffe), - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0xfffffffe, -}, -{ - "masking, test in bounds 4", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0xabcde), - BPF_MOV32_IMM(BPF_REG_2, 0xabcdef - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0xabcde, -}, -{ - "masking, test in bounds 5", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_MOV32_IMM(BPF_REG_2, 1 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "masking, test in bounds 6", - .insns = { - BPF_MOV32_IMM(BPF_REG_1, 46), - BPF_MOV32_IMM(BPF_REG_2, 47 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 46, -}, -{ - "masking, test in bounds 7", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, -46), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1), - BPF_MOV32_IMM(BPF_REG_2, 47 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, 
BPF_REG_3), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 46, -}, -{ - "masking, test in bounds 8", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, -47), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, -1), - BPF_MOV32_IMM(BPF_REG_2, 47 - 1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_3), - BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_3), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_2, 0), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_2, 63), - BPF_ALU64_REG(BPF_AND, BPF_REG_3, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -- cgit v1.2.3-70-g09d2 From 65428312e38d896ce101ee3489403cff320d9b74 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:11 +0200 Subject: selftests/bpf: verifier/meta_access.c converted to inline assembly Test verifier/meta_access.c automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-31-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_meta_access.c | 284 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/meta_access.c | 235 ----------------- 3 files changed, 286 insertions(+), 235 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_meta_access.c delete mode 100644 tools/testing/selftests/bpf/verifier/meta_access.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index b23fcbe4f83b..bd48a584a356 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -27,6 +27,7 @@ #include "verifier_map_ptr.skel.h" #include "verifier_map_ret_val.skel.h" #include "verifier_masking.skel.h" +#include "verifier_meta_access.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -76,3 +77,4 @@ void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } void test_verifier_masking(void) { RUN(verifier_masking); } +void test_verifier_meta_access(void) { RUN(verifier_meta_access); } diff --git a/tools/testing/selftests/bpf/progs/verifier_meta_access.c b/tools/testing/selftests/bpf/progs/verifier_meta_access.c new file mode 100644 index 000000000000..d81722fb5f19 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_meta_access.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/meta_access.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("xdp") +__description("meta access, test1") +__success __retval(0) +__naked void meta_access_test1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test2") +__failure __msg("invalid access to packet, off=-8") 
+__naked void meta_access_test2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r0 = r2; \ + r0 -= 8; \ + r4 = r2; \ + r4 += 8; \ + if r4 > r3 goto l0_%=; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test3") +__failure __msg("invalid access to packet") +__naked void meta_access_test3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test4") +__failure __msg("invalid access to packet") +__naked void meta_access_test4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r4 = *(u32*)(r1 + %[xdp_md_data]); \ + r0 = r4; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test5") +__failure __msg("R3 !read_ok") +__naked void meta_access_test5(void) +{ + asm volatile (" \ + r3 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r4 = *(u32*)(r1 + %[xdp_md_data]); \ + r0 = r3; \ + r0 += 8; \ + if r0 > r4 goto l0_%=; \ + r2 = -8; \ + call %[bpf_xdp_adjust_meta]; \ + r0 = *(u8*)(r3 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_xdp_adjust_meta), + __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test6") +__failure __msg("invalid access to packet") +__naked void meta_access_test6(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r0 = r3; \ + r0 += 8; \ + r4 = r2; \ + r4 += 8; \ + if r4 > r0 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test7") +__success __retval(0) +__naked void meta_access_test7(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r0 = r3; \ + r0 += 8; \ + r4 = r2; \ + r4 += 8; \ + if r4 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test8") +__success __retval(0) +__naked void meta_access_test8(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r4 = r2; \ + r4 += 0xFFFF; \ + if r4 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, 
data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test9") +__failure __msg("invalid access to packet") +__naked void meta_access_test9(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r4 = r2; \ + r4 += 0xFFFF; \ + r4 += 1; \ + if r4 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test10") +__failure __msg("invalid access to packet") +__naked void meta_access_test10(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r4 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r5 = 42; \ + r6 = 24; \ + *(u64*)(r10 - 8) = r5; \ + lock *(u64 *)(r10 - 8) += r6; \ + r5 = *(u64*)(r10 - 8); \ + if r5 > 100 goto l0_%=; \ + r3 += r5; \ + r5 = r3; \ + r6 = r2; \ + r6 += 8; \ + if r6 > r5 goto l0_%=; \ + r2 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test11") +__success __retval(0) +__naked void meta_access_test11(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r5 = 42; \ + r6 = 24; \ + *(u64*)(r10 - 8) = r5; \ + lock *(u64 *)(r10 - 8) += r6; \ + r5 = *(u64*)(r10 - 8); \ + if r5 > 100 goto l0_%=; \ + r2 += r5; \ + r5 = r2; \ + r6 = r2; \ + r6 += 8; \ + if r6 > r3 goto l0_%=; \ + r5 = *(u8*)(r5 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("meta access, test12") +__success __retval(0) +__naked void meta_access_test12(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r4 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r5 = r3; \ + r5 += 16; \ + if r5 > r4 goto l0_%=; \ + r0 = *(u8*)(r3 + 0); \ + r5 = r2; \ + r5 += 16; \ + if r5 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/meta_access.c b/tools/testing/selftests/bpf/verifier/meta_access.c deleted file mode 100644 index b45e8af41420..000000000000 --- a/tools/testing/selftests/bpf/verifier/meta_access.c +++ /dev/null @@ -1,235 +0,0 @@ -{ - "meta access, test1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "meta access, test2", - 
.insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 8), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet, off=-8", - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "meta access, test3", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "meta access, test4", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "meta access, test5", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_4, 3), - BPF_MOV64_IMM(BPF_REG_2, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_xdp_adjust_meta), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R3 !read_ok", - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "meta access, test6", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "meta access, test7", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "meta access, test8", - .insns = { - 
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "meta access, test9", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xFFFF), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "meta access, test10", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_IMM(BPF_REG_5, 42), - BPF_MOV64_IMM(BPF_REG_6, 24), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8), - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8), - BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_5), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_5, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "meta access, test11", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_IMM(BPF_REG_5, 42), - BPF_MOV64_IMM(BPF_REG_6, 24), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8), - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -8), - BPF_JMP_IMM(BPF_JGT, BPF_REG_5, 100, 6), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_5), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_5, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "meta access, test12", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16), - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 5), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 0), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 16), - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = 
BPF_PROG_TYPE_XDP, -}, -- cgit v1.2.3-70-g09d2 From 5a77a01f3320562db51dfdd24b2921768637daf2 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:12 +0200 Subject: selftests/bpf: verifier/raw_stack.c converted to inline assembly Test verifier/raw_stack.c automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-32-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_raw_stack.c | 371 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/raw_stack.c | 305 ----------------- 3 files changed, 373 insertions(+), 305 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_raw_stack.c delete mode 100644 tools/testing/selftests/bpf/verifier/raw_stack.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index bd48a584a356..4a73cac3f9ba 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -28,6 +28,7 @@ #include "verifier_map_ret_val.skel.h" #include "verifier_masking.skel.h" #include "verifier_meta_access.skel.h" +#include "verifier_raw_stack.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -78,3 +79,4 @@ void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } void test_verifier_masking(void) { RUN(verifier_masking); } void test_verifier_meta_access(void) { RUN(verifier_meta_access); } +void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); } diff --git a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c new file mode 100644 index 000000000000..efbfc3a4ad6a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/raw_stack.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("tc") +__description("raw_stack: no skb_load_bytes") +__failure __msg("invalid read from stack R6 off=-8 size=8") +__naked void stack_no_skb_load_bytes(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -8; \ + r3 = r6; \ + r4 = 8; \ + /* Call to skb_load_bytes() omitted. 
*/ \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, negative len") +__failure __msg("R4 min value is negative") +__naked void skb_load_bytes_negative_len(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -8; \ + r3 = r6; \ + r4 = -8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, negative len 2") +__failure __msg("R4 min value is negative") +__naked void load_bytes_negative_len_2(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -8; \ + r3 = r6; \ + r4 = %[__imm_0]; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes), + __imm_const(__imm_0, ~0) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, zero len") +__failure __msg("invalid zero-sized read") +__naked void skb_load_bytes_zero_len(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -8; \ + r3 = r6; \ + r4 = 0; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, no init") +__success __retval(0) +__naked void skb_load_bytes_no_init(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -8; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, init") +__success __retval(0) +__naked void stack_skb_load_bytes_init(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -8; \ + r3 = 0xcafe; \ + *(u64*)(r6 + 0) = r3; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, spilled regs around bounds") +__success __retval(0) +__naked void bytes_spilled_regs_around_bounds(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -16; \ + *(u64*)(r6 - 8) = r1; \ + *(u64*)(r6 + 8) = r1; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 - 8); \ + r2 = *(u64*)(r6 + 8); \ + r0 = *(u32*)(r0 + %[__sk_buff_mark]); \ + r2 = *(u32*)(r2 + %[__sk_buff_priority]); \ + r0 += r2; \ + exit; \ +" : + : __imm(bpf_skb_load_bytes), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, spilled regs corruption") +__failure __msg("R0 invalid mem access 'scalar'") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void load_bytes_spilled_regs_corruption(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r1; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + r0 = *(u32*)(r0 + %[__sk_buff_mark]); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, spilled regs corruption 2") +__failure __msg("R3 invalid mem access 'scalar'") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void bytes_spilled_regs_corruption_2(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -16; \ + *(u64*)(r6 - 8) = r1; \ + *(u64*)(r6 + 0) = r1; \ 
+ *(u64*)(r6 + 8) = r1; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 - 8); \ + r2 = *(u64*)(r6 + 8); \ + r3 = *(u64*)(r6 + 0); \ + r0 = *(u32*)(r0 + %[__sk_buff_mark]); \ + r2 = *(u32*)(r2 + %[__sk_buff_priority]); \ + r0 += r2; \ + r3 = *(u32*)(r3 + %[__sk_buff_pkt_type]); \ + r0 += r3; \ + exit; \ +" : + : __imm(bpf_skb_load_bytes), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)), + __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, spilled regs + data") +__success __retval(0) +__naked void load_bytes_spilled_regs_data(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -16; \ + *(u64*)(r6 - 8) = r1; \ + *(u64*)(r6 + 0) = r1; \ + *(u64*)(r6 + 8) = r1; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 - 8); \ + r2 = *(u64*)(r6 + 8); \ + r3 = *(u64*)(r6 + 0); \ + r0 = *(u32*)(r0 + %[__sk_buff_mark]); \ + r2 = *(u32*)(r2 + %[__sk_buff_priority]); \ + r0 += r2; \ + r0 += r3; \ + exit; \ +" : + : __imm(bpf_skb_load_bytes), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority)) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, invalid access 1") +__failure __msg("invalid indirect access to stack R3 off=-513 size=8") +__naked void load_bytes_invalid_access_1(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -513; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, invalid access 2") +__failure __msg("invalid indirect access to stack R3 off=-1 size=8") +__naked void load_bytes_invalid_access_2(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -1; \ + r3 = r6; \ + r4 = 8; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, invalid access 3") +__failure __msg("R4 min value is negative") +__naked void load_bytes_invalid_access_3(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += 0xffffffff; \ + r3 = r6; \ + r4 = 0xffffffff; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, invalid access 4") +__failure +__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'") +__naked void load_bytes_invalid_access_4(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -1; \ + r3 = r6; \ + r4 = 0x7fffffff; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, invalid access 5") +__failure +__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'") +__naked void load_bytes_invalid_access_5(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -512; \ + r3 = r6; \ + r4 = 0x7fffffff; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, invalid access 6") +__failure __msg("invalid zero-sized 
read") +__naked void load_bytes_invalid_access_6(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -512; \ + r3 = r6; \ + r4 = 0; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +SEC("tc") +__description("raw_stack: skb_load_bytes, large access") +__success __retval(0) +__naked void skb_load_bytes_large_access(void) +{ + asm volatile (" \ + r2 = 4; \ + r6 = r10; \ + r6 += -512; \ + r3 = r6; \ + r4 = 512; \ + call %[bpf_skb_load_bytes]; \ + r0 = *(u64*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_skb_load_bytes) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/raw_stack.c b/tools/testing/selftests/bpf/verifier/raw_stack.c deleted file mode 100644 index eb5ed936580b..000000000000 --- a/tools/testing/selftests/bpf/verifier/raw_stack.c +++ /dev/null @@ -1,305 +0,0 @@ -{ - "raw_stack: no skb_load_bytes", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - /* Call to skb_load_bytes() omitted. */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid read from stack R6 off=-8 size=8", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "raw_stack: skb_load_bytes, negative len", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R4 min value is negative", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "raw_stack: skb_load_bytes, negative len 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, ~0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R4 min value is negative", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "raw_stack: skb_load_bytes, zero len", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid zero-sized read", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "raw_stack: skb_load_bytes, no init", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "raw_stack: skb_load_bytes, init", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_ST_MEM(BPF_DW, 
BPF_REG_6, 0, 0xcafe), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "raw_stack: skb_load_bytes, spilled regs around bounds", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, - offsetof(struct __sk_buff, mark)), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, - offsetof(struct __sk_buff, priority)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "raw_stack: skb_load_bytes, spilled regs corruption", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R0 invalid mem access 'scalar'", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "raw_stack: skb_load_bytes, spilled regs corruption 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, - offsetof(struct __sk_buff, mark)), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, - offsetof(struct __sk_buff, priority)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3, - offsetof(struct __sk_buff, pkt_type)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R3 invalid mem access 'scalar'", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "raw_stack: skb_load_bytes, spilled regs + data", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 8), - 
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, - offsetof(struct __sk_buff, mark)), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, - offsetof(struct __sk_buff, priority)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "raw_stack: skb_load_bytes, invalid access 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid indirect access to stack R3 off=-513 size=8", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "raw_stack: skb_load_bytes, invalid access 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid indirect access to stack R3 off=-1 size=8", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "raw_stack: skb_load_bytes, invalid access 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R4 min value is negative", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "raw_stack: skb_load_bytes, invalid access 4", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "raw_stack: skb_load_bytes, invalid access 5", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "raw_stack: skb_load_bytes, invalid access 6", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid zero-sized read", - .prog_type 
= BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "raw_stack: skb_load_bytes, large access", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_4, 512), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_skb_load_bytes), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -- cgit v1.2.3-70-g09d2 From 18cdc2b531fbe13450c51132a5b511c670c77585 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:13 +0200 Subject: selftests/bpf: verifier/raw_tp_writable.c converted to inline assembly Test verifier/raw_tp_writable.c automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-33-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_raw_tp_writable.c | 50 ++++++++++++++++++++++ .../selftests/bpf/verifier/raw_tp_writable.c | 35 --------------- 3 files changed, 52 insertions(+), 35 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c delete mode 100644 tools/testing/selftests/bpf/verifier/raw_tp_writable.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 4a73cac3f9ba..f7488904f26e 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -29,6 +29,7 @@ #include "verifier_masking.skel.h" #include "verifier_meta_access.skel.h" #include "verifier_raw_stack.skel.h" +#include "verifier_raw_tp_writable.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -80,3 +81,4 @@ void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } void test_verifier_masking(void) { RUN(verifier_masking); } void test_verifier_meta_access(void) { RUN(verifier_meta_access); } void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); } +void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); } diff --git a/tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c b/tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c new file mode 100644 index 000000000000..14a0172e2141 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/raw_tp_writable.c */ + +#include +#include +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("raw_tracepoint.w") +__description("raw_tracepoint_writable: reject variable offset") +__failure +__msg("R6 invalid variable buffer offset: off=0, var_off=(0x0; 0xffffffff)") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void tracepoint_writable_reject_variable_offset(void) +{ + asm volatile (" \ + /* r6 is our tp buffer */ \ + r6 = *(u64*)(r1 + 0); \ + r1 = %[map_hash_8b] ll; \ + /* move the key (== 0) to r10-8 */ \ + w0 = 0; \ + r2 = r10; \ + r2 += -8; \ + *(u64*)(r2 + 0) = r0; \ + /* lookup in the map */ \ + call %[bpf_map_lookup_elem]; \ + /* exit clean if null */ \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: /* shift the buffer pointer to a variable 
location */\ + r0 = *(u32*)(r0 + 0); \ + r6 += r0; \ + /* clobber whatever's there */ \ + r7 = 4242; \ + *(u64*)(r6 + 0) = r7; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/raw_tp_writable.c b/tools/testing/selftests/bpf/verifier/raw_tp_writable.c deleted file mode 100644 index 2978fb5a769d..000000000000 --- a/tools/testing/selftests/bpf/verifier/raw_tp_writable.c +++ /dev/null @@ -1,35 +0,0 @@ -{ - "raw_tracepoint_writable: reject variable offset", - .insns = { - /* r6 is our tp buffer */ - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - - BPF_LD_MAP_FD(BPF_REG_1, 0), - /* move the key (== 0) to r10-8 */ - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - /* lookup in the map */ - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - - /* exit clean if null */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - - /* shift the buffer pointer to a variable location */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_0), - /* clobber whatever's there */ - BPF_MOV64_IMM(BPF_REG_7, 4242), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, 0), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1, }, - .prog_type = BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, - .errstr = "R6 invalid variable buffer offset: off=0, var_off=(0x0; 0xffffffff)", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -- cgit v1.2.3-70-g09d2 From b7e4203086eb4d85aa7bccd7f33c2835b34778d6 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:14 +0200 Subject: selftests/bpf: verifier/ringbuf.c converted to inline assembly Test verifier/ringbuf.c automatically converted to use inline assembly. 
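Beyond the mechanical instruction translation, the converted ringbuf tests also drop the old .fixup_map_ringbuf fd-patching: the map is declared directly in the prog's .maps section and referenced from the asm template through __imm_addr(). As a rough, non-authoritative sketch of the same reserve/submit flow in plain restricted C (the helper names and the map layout are taken from the converted file; the prog itself is purely illustrative and not part of this series):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_RINGBUF);
		__uint(max_entries, 4096);
	} map_ringbuf SEC(".maps");

	SEC("socket")
	int ringbuf_reserve_sketch(void *ctx)
	{
		__u64 *rec;

		/* reserve 8 bytes of ringbuf memory */
		rec = bpf_ringbuf_reserve(&map_ringbuf, sizeof(*rec), 0);
		if (!rec)
			return 0;

		*rec = 0;			/* reserved memory is writable */
		bpf_ringbuf_submit(rec, 0);	/* must be released exactly once */
		return 0;
	}

	char _license[] SEC("license") = "GPL";

The inline-assembly versions in verifier_ringbuf.c express the same sequence as a fixed instruction stream, so the exact verifier diagnostics (for example the zero-offset requirement when passing memory to the release helper) stay reproducible.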
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-34-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../testing/selftests/bpf/progs/verifier_ringbuf.c | 131 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/ringbuf.c | 95 --------------- 3 files changed, 133 insertions(+), 95 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_ringbuf.c delete mode 100644 tools/testing/selftests/bpf/verifier/ringbuf.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index f7488904f26e..df5fc6fe1647 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -30,6 +30,7 @@ #include "verifier_meta_access.skel.h" #include "verifier_raw_stack.skel.h" #include "verifier_raw_tp_writable.skel.h" +#include "verifier_ringbuf.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -82,3 +83,4 @@ void test_verifier_masking(void) { RUN(verifier_masking); } void test_verifier_meta_access(void) { RUN(verifier_meta_access); } void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); } void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); } +void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } diff --git a/tools/testing/selftests/bpf/progs/verifier_ringbuf.c b/tools/testing/selftests/bpf/progs/verifier_ringbuf.c new file mode 100644 index 000000000000..ae1d521f326c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_ringbuf.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/ringbuf.c */ + +#include +#include +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 4096); +} map_ringbuf SEC(".maps"); + +SEC("socket") +__description("ringbuf: invalid reservation offset 1") +__failure __msg("R1 must have zero offset when passed to release func") +__failure_unpriv +__naked void ringbuf_invalid_reservation_offset_1(void) +{ + asm volatile (" \ + /* reserve 8 byte ringbuf memory */ \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r1 = %[map_ringbuf] ll; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_ringbuf_reserve]; \ + /* store a pointer to the reserved memory in R6 */\ + r6 = r0; \ + /* check whether the reservation was successful */\ + if r0 == 0 goto l0_%=; \ + /* spill R6(mem) into the stack */ \ + *(u64*)(r10 - 8) = r6; \ + /* fill it back in R7 */ \ + r7 = *(u64*)(r10 - 8); \ + /* should be able to access *(R7) = 0 */ \ + r1 = 0; \ + *(u64*)(r7 + 0) = r1; \ + /* submit the reserved ringbuf memory */ \ + r1 = r7; \ + /* add invalid offset to reserved ringbuf memory */\ + r1 += 0xcafe; \ + r2 = 0; \ + call %[bpf_ringbuf_submit]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ringbuf_reserve), + __imm(bpf_ringbuf_submit), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +SEC("socket") +__description("ringbuf: invalid reservation offset 2") +__failure __msg("R7 min value is outside of the allowed memory range") +__failure_unpriv +__naked void ringbuf_invalid_reservation_offset_2(void) +{ + asm volatile (" \ + /* reserve 8 byte ringbuf memory */ \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r1 = %[map_ringbuf] ll; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_ringbuf_reserve]; \ + /* store a pointer to the reserved memory in R6 */\ + r6 = r0; \ + /* check whether 
the reservation was successful */\ + if r0 == 0 goto l0_%=; \ + /* spill R6(mem) into the stack */ \ + *(u64*)(r10 - 8) = r6; \ + /* fill it back in R7 */ \ + r7 = *(u64*)(r10 - 8); \ + /* add invalid offset to reserved ringbuf memory */\ + r7 += 0xcafe; \ + /* should be able to access *(R7) = 0 */ \ + r1 = 0; \ + *(u64*)(r7 + 0) = r1; \ + /* submit the reserved ringbuf memory */ \ + r1 = r7; \ + r2 = 0; \ + call %[bpf_ringbuf_submit]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ringbuf_reserve), + __imm(bpf_ringbuf_submit), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +SEC("xdp") +__description("ringbuf: check passing rb mem to helpers") +__success __retval(0) +__naked void passing_rb_mem_to_helpers(void) +{ + asm volatile (" \ + r6 = r1; \ + /* reserve 8 byte ringbuf memory */ \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r1 = %[map_ringbuf] ll; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_ringbuf_reserve]; \ + r7 = r0; \ + /* check whether the reservation was successful */\ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: /* pass allocated ring buffer memory to fib lookup */\ + r1 = r6; \ + r2 = r0; \ + r3 = 8; \ + r4 = 0; \ + call %[bpf_fib_lookup]; \ + /* submit the ringbuf memory */ \ + r1 = r7; \ + r2 = 0; \ + call %[bpf_ringbuf_submit]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_fib_lookup), + __imm(bpf_ringbuf_reserve), + __imm(bpf_ringbuf_submit), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/ringbuf.c b/tools/testing/selftests/bpf/verifier/ringbuf.c deleted file mode 100644 index 92e3f6a61a79..000000000000 --- a/tools/testing/selftests/bpf/verifier/ringbuf.c +++ /dev/null @@ -1,95 +0,0 @@ -{ - "ringbuf: invalid reservation offset 1", - .insns = { - /* reserve 8 byte ringbuf memory */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve), - /* store a pointer to the reserved memory in R6 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* check whether the reservation was successful */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - /* spill R6(mem) into the stack */ - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), - /* fill it back in R7 */ - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8), - /* should be able to access *(R7) = 0 */ - BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0), - /* submit the reserved ringbuf memory */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - /* add invalid offset to reserved ringbuf memory */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xcafe), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_ringbuf = { 1 }, - .result = REJECT, - .errstr = "R1 must have zero offset when passed to release func", -}, -{ - "ringbuf: invalid reservation offset 2", - .insns = { - /* reserve 8 byte ringbuf memory */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve), - /* store a pointer to the reserved memory in R6 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* check whether the reservation was successful */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - /* spill R6(mem) into the stack */ - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), - /* fill it back in R7 */ - BPF_LDX_MEM(BPF_DW, BPF_REG_7, 
BPF_REG_10, -8), - /* add invalid offset to reserved ringbuf memory */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 0xcafe), - /* should be able to access *(R7) = 0 */ - BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0), - /* submit the reserved ringbuf memory */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_ringbuf = { 1 }, - .result = REJECT, - .errstr = "R7 min value is outside of the allowed memory range", -}, -{ - "ringbuf: check passing rb mem to helpers", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - /* reserve 8 byte ringbuf memory */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* check whether the reservation was successful */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - /* pass allocated ring buffer memory to fib lookup */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 8), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_fib_lookup), - /* submit the ringbuf memory */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_ringbuf = { 2 }, - .prog_type = BPF_PROG_TYPE_XDP, - .result = ACCEPT, -}, -- cgit v1.2.3-70-g09d2 From f4fe3cfe6c3ac60a6ec086a291b1c0f59e7daaa2 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:15 +0200 Subject: selftests/bpf: verifier/spill_fill.c converted to inline assembly Test verifier/spill_fill.c automatically converted to use inline assembly. 
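For readers unfamiliar with the terminology: "spill" means storing a register into a BPF stack slot and "fill" means loading it back, and the verifier has to decide how much of the original type and bounds information survives the round trip. The converted tests exercise this directly in assembly; as a loose, purely illustrative C-level analogue (assumptions: a socket filter program and a volatile local to force an actual stack slot — none of this is part of the series), the same idea looks roughly like:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("socket")
	int spill_fill_sketch(struct __sk_buff *skb)
	{
		volatile __u32 slot;	/* stack slot the value is spilled to */
		__u32 len = skb->len;

		slot = len;		/* spill: store the scalar to the stack */
		len = slot;		/* fill: load it back */

		/* the verifier must still see a bounded scalar here, much as
		 * the asm tests check that bounds such as umax=40 survive a
		 * 32-bit spill/fill round trip
		 */
		return len > 40 ? 0 : len;
	}

	char _license[] SEC("license") = "GPL";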
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-35-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_spill_fill.c | 374 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/spill_fill.c | 345 ------------------- 3 files changed, 376 insertions(+), 345 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_spill_fill.c delete mode 100644 tools/testing/selftests/bpf/verifier/spill_fill.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index df5fc6fe1647..e2b131d2ba94 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -31,6 +31,7 @@ #include "verifier_raw_stack.skel.h" #include "verifier_raw_tp_writable.skel.h" #include "verifier_ringbuf.skel.h" +#include "verifier_spill_fill.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -84,3 +85,4 @@ void test_verifier_meta_access(void) { RUN(verifier_meta_access); } void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); } void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); } void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } +void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c new file mode 100644 index 000000000000..136e5530b72c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c @@ -0,0 +1,374 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/spill_fill.c */ + +#include +#include +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 4096); +} map_ringbuf SEC(".maps"); + +SEC("socket") +__description("check valid spill/fill") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(POINTER_VALUE) +__naked void check_valid_spill_fill(void) +{ + asm volatile (" \ + /* spill R1(ctx) into stack */ \ + *(u64*)(r10 - 8) = r1; \ + /* fill it back into R2 */ \ + r2 = *(u64*)(r10 - 8); \ + /* should be able to access R0 = *(R2 + 8) */ \ + /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */\ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check valid spill/fill, skb mark") +__success __success_unpriv __retval(0) +__naked void valid_spill_fill_skb_mark(void) +{ + asm volatile (" \ + r6 = r1; \ + *(u64*)(r10 - 8) = r6; \ + r0 = *(u64*)(r10 - 8); \ + r0 = *(u32*)(r0 + %[__sk_buff_mark]); \ + exit; \ +" : + : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("socket") +__description("check valid spill/fill, ptr to mem") +__success __success_unpriv __retval(0) +__naked void spill_fill_ptr_to_mem(void) +{ + asm volatile (" \ + /* reserve 8 byte ringbuf memory */ \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r1 = %[map_ringbuf] ll; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_ringbuf_reserve]; \ + /* store a pointer to the reserved memory in R6 */\ + r6 = r0; \ + /* check whether the reservation was successful */\ + if r0 == 0 goto l0_%=; \ + /* spill R6(mem) into the stack */ \ + *(u64*)(r10 - 8) = r6; \ + /* fill it back in R7 */ \ + r7 = *(u64*)(r10 - 8); \ + /* should be able to access *(R7) = 0 */ \ + r1 = 0; \ + 
*(u64*)(r7 + 0) = r1; \ + /* submit the reserved ringbuf memory */ \ + r1 = r7; \ + r2 = 0; \ + call %[bpf_ringbuf_submit]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ringbuf_reserve), + __imm(bpf_ringbuf_submit), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +SEC("socket") +__description("check with invalid reg offset 0") +__failure __msg("R0 pointer arithmetic on ringbuf_mem_or_null prohibited") +__failure_unpriv +__naked void with_invalid_reg_offset_0(void) +{ + asm volatile (" \ + /* reserve 8 byte ringbuf memory */ \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r1 = %[map_ringbuf] ll; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_ringbuf_reserve]; \ + /* store a pointer to the reserved memory in R6 */\ + r6 = r0; \ + /* add invalid offset to memory or NULL */ \ + r0 += 1; \ + /* check whether the reservation was successful */\ + if r0 == 0 goto l0_%=; \ + /* should not be able to access *(R7) = 0 */ \ + r1 = 0; \ + *(u32*)(r6 + 0) = r1; \ + /* submit the reserved ringbuf memory */ \ + r1 = r6; \ + r2 = 0; \ + call %[bpf_ringbuf_submit]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ringbuf_reserve), + __imm(bpf_ringbuf_submit), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +SEC("socket") +__description("check corrupted spill/fill") +__failure __msg("R0 invalid mem access 'scalar'") +__msg_unpriv("attempt to corrupt spilled") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void check_corrupted_spill_fill(void) +{ + asm volatile (" \ + /* spill R1(ctx) into stack */ \ + *(u64*)(r10 - 8) = r1; \ + /* mess up with R1 pointer on stack */ \ + r0 = 0x23; \ + *(u8*)(r10 - 7) = r0; \ + /* fill back into R0 is fine for priv. \ + * R0 now becomes SCALAR_VALUE. \ + */ \ + r0 = *(u64*)(r10 - 8); \ + /* Load from R0 should fail. */ \ + r0 = *(u64*)(r0 + 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check corrupted spill/fill, LSB") +__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled") +__retval(POINTER_VALUE) +__naked void check_corrupted_spill_fill_lsb(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r1; \ + r0 = 0xcafe; \ + *(u16*)(r10 - 8) = r0; \ + r0 = *(u64*)(r10 - 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("check corrupted spill/fill, MSB") +__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled") +__retval(POINTER_VALUE) +__naked void check_corrupted_spill_fill_msb(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r1; \ + r0 = 0x12345678; \ + *(u32*)(r10 - 4) = r0; \ + r0 = *(u64*)(r10 - 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("Spill and refill a u32 const scalar. 
Offset to skb->data") +__success __retval(0) +__naked void scalar_offset_to_skb_data_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + w4 = 20; \ + *(u32*)(r10 - 8) = r4; \ + r4 = *(u32*)(r10 - 8); \ + r0 = r2; \ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */ \ + r0 += r4; \ + /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\ + if r0 > r3 goto l0_%=; \ + /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */\ + r0 = *(u32*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("socket") +__description("Spill a u32 const, refill from another half of the uninit u32 from the stack") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid read from stack off -4+0 size 4") +__retval(0) +__naked void uninit_u32_from_the_stack(void) +{ + asm volatile (" \ + w4 = 20; \ + *(u32*)(r10 - 8) = r4; \ + /* r4 = *(u32 *)(r10 -4) fp-8=????rrrr*/ \ + r4 = *(u32*)(r10 - 4); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("Spill a u32 const scalar. Refill as u16. Offset to skb->data") +__failure __msg("invalid access to packet") +__naked void u16_offset_to_skb_data(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + w4 = 20; \ + *(u32*)(r10 - 8) = r4; \ + r4 = *(u16*)(r10 - 8); \ + r0 = r2; \ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\ + r0 += r4; \ + /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\ + if r0 > r3 goto l0_%=; \ + /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\ + r0 = *(u32*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("Spill u32 const scalars. Refill as u64. Offset to skb->data") +__failure __msg("invalid access to packet") +__naked void u64_offset_to_skb_data(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + w6 = 0; \ + w7 = 20; \ + *(u32*)(r10 - 4) = r6; \ + *(u32*)(r10 - 8) = r7; \ + r4 = *(u16*)(r10 - 8); \ + r0 = r2; \ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\ + r0 += r4; \ + /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\ + if r0 > r3 goto l0_%=; \ + /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\ + r0 = *(u32*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("Spill a u32 const scalar. Refill as u16 from fp-6. 
Offset to skb->data") +__failure __msg("invalid access to packet") +__naked void _6_offset_to_skb_data(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + w4 = 20; \ + *(u32*)(r10 - 8) = r4; \ + r4 = *(u16*)(r10 - 6); \ + r0 = r2; \ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\ + r0 += r4; \ + /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\ + if r0 > r3 goto l0_%=; \ + /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\ + r0 = *(u32*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data") +__failure __msg("invalid access to packet") +__naked void addr_offset_to_skb_data(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + w4 = 20; \ + *(u32*)(r10 - 8) = r4; \ + *(u32*)(r10 - 4) = r4; \ + r4 = *(u32*)(r10 - 4); \ + r0 = r2; \ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */\ + r0 += r4; \ + /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\ + if r0 > r3 goto l0_%=; \ + /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\ + r0 = *(u32*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("Spill and refill a umax=40 bounded scalar. Offset to skb->data") +__success __retval(0) +__naked void scalar_offset_to_skb_data_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r4 = *(u64*)(r1 + %[__sk_buff_tstamp]); \ + if r4 <= 40 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: /* *(u32 *)(r10 -8) = r4 R4=umax=40 */ \ + *(u32*)(r10 - 8) = r4; \ + /* r4 = (*u32 *)(r10 - 8) */ \ + r4 = *(u32*)(r10 - 8); \ + /* r2 += r4 R2=pkt R4=umax=40 */ \ + r2 += r4; \ + /* r0 = r2 R2=pkt,umax=40 R4=umax=40 */ \ + r0 = r2; \ + /* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */ \ + r2 += 20; \ + /* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */\ + if r2 > r3 goto l1_%=; \ + /* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */\ + r0 = *(u32*)(r0 + 0); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp)) + : __clobber_all); +} + +SEC("tc") +__description("Spill a u32 scalar at fp-4 and then at fp-8") +__success __retval(0) +__naked void and_then_at_fp_8(void) +{ + asm volatile (" \ + w4 = 4321; \ + *(u32*)(r10 - 4) = r4; \ + *(u32*)(r10 - 8) = r4; \ + r4 = *(u64*)(r10 - 8); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c deleted file mode 100644 index d1463bf4949a..000000000000 --- a/tools/testing/selftests/bpf/verifier/spill_fill.c +++ /dev/null @@ -1,345 +0,0 @@ -{ - "check valid spill/fill", - .insns = { - /* spill R1(ctx) into stack */ - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - /* fill it back into R2 */ - BPF_LDX_MEM(BPF_DW, 
BPF_REG_2, BPF_REG_10, -8), - /* should be able to access R0 = *(R2 + 8) */ - /* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */ - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 leaks addr", - .result = ACCEPT, - .result_unpriv = REJECT, - .retval = POINTER_VALUE, -}, -{ - "check valid spill/fill, skb mark", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = ACCEPT, -}, -{ - "check valid spill/fill, ptr to mem", - .insns = { - /* reserve 8 byte ringbuf memory */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve), - /* store a pointer to the reserved memory in R6 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* check whether the reservation was successful */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - /* spill R6(mem) into the stack */ - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), - /* fill it back in R7 */ - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8), - /* should be able to access *(R7) = 0 */ - BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0), - /* submit the reserved ringbuf memory */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_ringbuf = { 1 }, - .result = ACCEPT, - .result_unpriv = ACCEPT, -}, -{ - "check with invalid reg offset 0", - .insns = { - /* reserve 8 byte ringbuf memory */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve), - /* store a pointer to the reserved memory in R6 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* add invalid offset to memory or NULL */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - /* check whether the reservation was successful */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - /* should not be able to access *(R7) = 0 */ - BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0), - /* submit the reserved ringbuf memory */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_ringbuf = { 1 }, - .result = REJECT, - .errstr = "R0 pointer arithmetic on ringbuf_mem_or_null prohibited", -}, -{ - "check corrupted spill/fill", - .insns = { - /* spill R1(ctx) into stack */ - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - /* mess up with R1 pointer on stack */ - BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23), - /* fill back into R0 is fine for priv. - * R0 now becomes SCALAR_VALUE. - */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - /* Load from R0 should fail. 
*/ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "attempt to corrupt spilled", - .errstr = "R0 invalid mem access 'scalar'", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "check corrupted spill/fill, LSB", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "attempt to corrupt spilled", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = POINTER_VALUE, -}, -{ - "check corrupted spill/fill, MSB", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "attempt to corrupt spilled", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = POINTER_VALUE, -}, -{ - "Spill and refill a u32 const scalar. Offset to skb->data", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - /* r4 = 20 */ - BPF_MOV32_IMM(BPF_REG_4, 20), - /* *(u32 *)(r10 -8) = r4 */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), - /* r4 = *(u32 *)(r10 -8) */ - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8), - /* r0 = r2 */ - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */ - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "Spill a u32 const, refill from another half of the uninit u32 from the stack", - .insns = { - /* r4 = 20 */ - BPF_MOV32_IMM(BPF_REG_4, 20), - /* *(u32 *)(r10 -8) = r4 */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), - /* r4 = *(u32 *)(r10 -4) fp-8=????rrrr*/ - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result_unpriv = REJECT, - .errstr_unpriv = "invalid read from stack off -4+0 size 4", - /* in privileged mode reads from uninitialized stack locations are permitted */ - .result = ACCEPT, -}, -{ - "Spill a u32 const scalar. Refill as u16. Offset to skb->data", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - /* r4 = 20 */ - BPF_MOV32_IMM(BPF_REG_4, 20), - /* *(u32 *)(r10 -8) = r4 */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), - /* r4 = *(u16 *)(r10 -8) */ - BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8), - /* r0 = r2 */ - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */ - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "Spill u32 const scalars. Refill as u64. 
Offset to skb->data", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - /* r6 = 0 */ - BPF_MOV32_IMM(BPF_REG_6, 0), - /* r7 = 20 */ - BPF_MOV32_IMM(BPF_REG_7, 20), - /* *(u32 *)(r10 -4) = r6 */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4), - /* *(u32 *)(r10 -8) = r7 */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8), - /* r4 = *(u64 *)(r10 -8) */ - BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8), - /* r0 = r2 */ - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */ - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - /* r4 = 20 */ - BPF_MOV32_IMM(BPF_REG_4, 20), - /* *(u32 *)(r10 -8) = r4 */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), - /* r4 = *(u16 *)(r10 -6) */ - BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6), - /* r0 = r2 */ - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */ - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - /* r4 = 20 */ - BPF_MOV32_IMM(BPF_REG_4, 20), - /* *(u32 *)(r10 -8) = r4 */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), - /* *(u32 *)(r10 -4) = r4 */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4), - /* r4 = *(u32 *)(r10 -4), */ - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4), - /* r0 = r2 */ - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */ - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "Spill and refill a umax=40 bounded scalar. 
Offset to skb->data", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, - offsetof(struct __sk_buff, tstamp)), - BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - /* *(u32 *)(r10 -8) = r4 R4=umax=40 */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), - /* r4 = (*u32 *)(r10 - 8) */ - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8), - /* r2 += r4 R2=pkt R4=umax=40 */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4), - /* r0 = r2 R2=pkt,umax=40 R4=umax=40 */ - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - /* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20), - /* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */ - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1), - /* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "Spill a u32 scalar at fp-4 and then at fp-8", - .insns = { - /* r4 = 4321 */ - BPF_MOV32_IMM(BPF_REG_4, 4321), - /* *(u32 *)(r10 -4) = r4 */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4), - /* *(u32 *)(r10 -8) = r4 */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), - /* r4 = *(u64 *)(r10 -8) */ - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -- cgit v1.2.3-70-g09d2 From edff37b2f28f5c24f628981d0f26ca2fcd7e2ad5 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:16 +0200 Subject: selftests/bpf: verifier/stack_ptr.c converted to inline assembly Test verifier/stack_ptr.c automatically converted to use inline assembly. 
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-36-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_stack_ptr.c | 484 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/stack_ptr.c | 359 --------------- 3 files changed, 486 insertions(+), 359 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_stack_ptr.c delete mode 100644 tools/testing/selftests/bpf/verifier/stack_ptr.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index e2b131d2ba94..ce1ca8c0c02e 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -32,6 +32,7 @@ #include "verifier_raw_tp_writable.skel.h" #include "verifier_ringbuf.skel.h" #include "verifier_spill_fill.skel.h" +#include "verifier_stack_ptr.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -86,3 +87,4 @@ void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); } void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); } void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } +void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } diff --git a/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c b/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c new file mode 100644 index 000000000000..e0f77e3e7869 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_stack_ptr.c @@ -0,0 +1,484 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/stack_ptr.c */ + +#include +#include +#include +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +SEC("socket") +__description("PTR_TO_STACK store/load") +__success __success_unpriv __retval(0xfaceb00c) +__naked void ptr_to_stack_store_load(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -10; \ + r0 = 0xfaceb00c; \ + *(u64*)(r1 + 2) = r0; \ + r0 = *(u64*)(r1 + 2); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK store/load - bad alignment on off") +__failure __msg("misaligned stack access off (0x0; 0x0)+-8+2 size 8") +__failure_unpriv +__naked void load_bad_alignment_on_off(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -8; \ + r0 = 0xfaceb00c; \ + *(u64*)(r1 + 2) = r0; \ + r0 = *(u64*)(r1 + 2); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK store/load - bad alignment on reg") +__failure __msg("misaligned stack access off (0x0; 0x0)+-10+8 size 8") +__failure_unpriv +__naked void load_bad_alignment_on_reg(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -10; \ + r0 = 0xfaceb00c; \ + *(u64*)(r1 + 8) = r0; \ + r0 = *(u64*)(r1 + 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK store/load - out of bounds low") +__failure __msg("invalid write to stack R1 off=-79992 size=8") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void load_out_of_bounds_low(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -80000; \ + r0 = 0xfaceb00c; \ + 
*(u64*)(r1 + 8) = r0; \ + r0 = *(u64*)(r1 + 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK store/load - out of bounds high") +__failure __msg("invalid write to stack R1 off=0 size=8") +__failure_unpriv +__naked void load_out_of_bounds_high(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -8; \ + r0 = 0xfaceb00c; \ + *(u64*)(r1 + 8) = r0; \ + r0 = *(u64*)(r1 + 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check high 1") +__success __success_unpriv __retval(42) +__naked void to_stack_check_high_1(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -1; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check high 2") +__success __success_unpriv __retval(42) +__naked void to_stack_check_high_2(void) +{ + asm volatile (" \ + r1 = r10; \ + r0 = 42; \ + *(u8*)(r1 - 1) = r0; \ + r0 = *(u8*)(r1 - 1); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check high 3") +__success __failure_unpriv +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__retval(42) +__naked void to_stack_check_high_3(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += 0; \ + r0 = 42; \ + *(u8*)(r1 - 1) = r0; \ + r0 = *(u8*)(r1 - 1); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check high 4") +__failure __msg("invalid write to stack R1 off=0 size=1") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_high_4(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += 0; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check high 5") +__failure __msg("invalid write to stack R1") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_high_5(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += %[__imm_0]; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" : + : __imm_const(__imm_0, (1 << 29) - 1) + : __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check high 6") +__failure __msg("invalid write to stack") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_high_6(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += %[__imm_0]; \ + r0 = 42; \ + *(u8*)(r1 + %[shrt_max]) = r0; \ + r0 = *(u8*)(r1 + %[shrt_max]); \ + exit; \ +" : + : __imm_const(__imm_0, (1 << 29) - 1), + __imm_const(shrt_max, SHRT_MAX) + : __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check high 7") +__failure __msg("fp pointer offset") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_high_7(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += %[__imm_0]; \ + r1 += %[__imm_0]; \ + r0 = 42; \ + *(u8*)(r1 + %[shrt_max]) = r0; \ + r0 = *(u8*)(r1 + %[shrt_max]); \ + exit; \ +" : + : __imm_const(__imm_0, (1 << 29) - 1), + __imm_const(shrt_max, SHRT_MAX) + : __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check low 1") +__success __success_unpriv __retval(42) +__naked void to_stack_check_low_1(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -512; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check low 2") +__success __failure_unpriv +__msg_unpriv("R1 stack pointer arithmetic goes out of range") 
+__retval(42) +__naked void to_stack_check_low_2(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -513; \ + r0 = 42; \ + *(u8*)(r1 + 1) = r0; \ + r0 = *(u8*)(r1 + 1); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check low 3") +__failure __msg("invalid write to stack R1 off=-513 size=1") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_low_3(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -513; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check low 4") +__failure __msg("math between fp pointer") +__failure_unpriv +__naked void to_stack_check_low_4(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += %[int_min]; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" : + : __imm_const(int_min, INT_MIN) + : __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check low 5") +__failure __msg("invalid write to stack") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_low_5(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += %[__imm_0]; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" : + : __imm_const(__imm_0, -((1 << 29) - 1)) + : __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check low 6") +__failure __msg("invalid write to stack") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_low_6(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += %[__imm_0]; \ + r0 = 42; \ + *(u8*)(r1 %[shrt_min]) = r0; \ + r0 = *(u8*)(r1 %[shrt_min]); \ + exit; \ +" : + : __imm_const(__imm_0, -((1 << 29) - 1)), + __imm_const(shrt_min, SHRT_MIN) + : __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK check low 7") +__failure __msg("fp pointer offset") +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__naked void to_stack_check_low_7(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += %[__imm_0]; \ + r1 += %[__imm_0]; \ + r0 = 42; \ + *(u8*)(r1 %[shrt_min]) = r0; \ + r0 = *(u8*)(r1 %[shrt_min]); \ + exit; \ +" : + : __imm_const(__imm_0, -((1 << 29) - 1)), + __imm_const(shrt_min, SHRT_MIN) + : __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK mixed reg/k, 1") +__success __success_unpriv __retval(42) +__naked void stack_mixed_reg_k_1(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -3; \ + r2 = -3; \ + r1 += r2; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK mixed reg/k, 2") +__success __success_unpriv __retval(42) +__naked void stack_mixed_reg_k_2(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u64*)(r10 - 8) = r0; \ + r0 = 0; \ + *(u64*)(r10 - 16) = r0; \ + r1 = r10; \ + r1 += -3; \ + r2 = -3; \ + r1 += r2; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r5 = r10; \ + r0 = *(u8*)(r5 - 6); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK mixed reg/k, 3") +__success __success_unpriv __retval(-3) +__naked void stack_mixed_reg_k_3(void) +{ + asm volatile (" \ + r1 = r10; \ + r1 += -3; \ + r2 = -3; \ + r1 += r2; \ + r0 = 42; \ + *(u8*)(r1 + 0) = r0; \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("PTR_TO_STACK reg") +__success __success_unpriv __retval(42) +__naked void ptr_to_stack_reg(void) +{ + asm volatile (" \ + r1 = r10; \ + r2 = -3; \ + r1 += r2; \ + r0 = 42; \ + *(u8*)(r1 
+ 0) = r0; \ + r0 = *(u8*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("stack pointer arithmetic") +__success __success_unpriv __retval(0) +__naked void stack_pointer_arithmetic(void) +{ + asm volatile (" \ + r1 = 4; \ + goto l0_%=; \ +l0_%=: r7 = r10; \ + r7 += -10; \ + r7 += -10; \ + r2 = r7; \ + r2 += r1; \ + r0 = 0; \ + *(u32*)(r2 + 4) = r0; \ + r2 = r7; \ + r2 += 8; \ + r0 = 0; \ + *(u32*)(r2 + 4) = r0; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("store PTR_TO_STACK in R10 to array map using BPF_B") +__success __retval(42) +__naked void array_map_using_bpf_b(void) +{ + asm volatile (" \ + /* Load pointer to map. */ \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + r0 = 2; \ + exit; \ +l0_%=: r1 = r0; \ + /* Copy R10 to R9. */ \ + r9 = r10; \ + /* Pollute other registers with unaligned values. */\ + r2 = -1; \ + r3 = -1; \ + r4 = -1; \ + r5 = -1; \ + r6 = -1; \ + r7 = -1; \ + r8 = -1; \ + /* Store both R9 and R10 with BPF_B and read back. */\ + *(u8*)(r1 + 0) = r10; \ + r2 = *(u8*)(r1 + 0); \ + *(u8*)(r1 + 0) = r9; \ + r3 = *(u8*)(r1 + 0); \ + /* Should read back as same value. */ \ + if r2 == r3 goto l1_%=; \ + r0 = 1; \ + exit; \ +l1_%=: r0 = 42; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/stack_ptr.c b/tools/testing/selftests/bpf/verifier/stack_ptr.c deleted file mode 100644 index 8ab94d65f3d5..000000000000 --- a/tools/testing/selftests/bpf/verifier/stack_ptr.c +++ /dev/null @@ -1,359 +0,0 @@ -{ - "PTR_TO_STACK store/load", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10), - BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0xfaceb00c, -}, -{ - "PTR_TO_STACK store/load - bad alignment on off", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8", -}, -{ - "PTR_TO_STACK store/load - bad alignment on reg", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10), - BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8", -}, -{ - "PTR_TO_STACK store/load - out of bounds low", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000), - BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid write to stack R1 off=-79992 size=8", - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", -}, -{ - "PTR_TO_STACK store/load - out of bounds high", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid write to stack R1 off=0 size=8", -}, -{ - "PTR_TO_STACK 
check high 1", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, -}, -{ - "PTR_TO_STACK check high 2", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, -}, -{ - "PTR_TO_STACK check high 3", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0), - BPF_ST_MEM(BPF_B, BPF_REG_1, -1, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, -1), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = 42, -}, -{ - "PTR_TO_STACK check high 4", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .errstr = "invalid write to stack R1 off=0 size=1", - .result = REJECT, -}, -{ - "PTR_TO_STACK check high 5", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .errstr = "invalid write to stack R1", -}, -{ - "PTR_TO_STACK check high 6", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1), - BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .errstr = "invalid write to stack", -}, -{ - "PTR_TO_STACK check high 7", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, (1 << 29) - 1), - BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MAX, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MAX), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .errstr = "fp pointer offset", -}, -{ - "PTR_TO_STACK check low 1", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -512), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, -}, -{ - "PTR_TO_STACK check low 2", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513), - BPF_ST_MEM(BPF_B, BPF_REG_1, 1, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 1), - BPF_EXIT_INSN(), - }, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .result = ACCEPT, - .retval = 42, -}, -{ - "PTR_TO_STACK check low 3", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -513), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .errstr = "invalid write to stack R1 off=-513 size=1", - .result = REJECT, -}, -{ - "PTR_TO_STACK check low 4", - .insns = 
{ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, INT_MIN), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "math between fp pointer", -}, -{ - "PTR_TO_STACK check low 5", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .errstr = "invalid write to stack", -}, -{ - "PTR_TO_STACK check low 6", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)), - BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid write to stack", - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", -}, -{ - "PTR_TO_STACK check low 7", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -((1 << 29) - 1)), - BPF_ST_MEM(BPF_B, BPF_REG_1, SHRT_MIN, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, SHRT_MIN), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .errstr = "fp pointer offset", -}, -{ - "PTR_TO_STACK mixed reg/k, 1", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3), - BPF_MOV64_IMM(BPF_REG_2, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, -}, -{ - "PTR_TO_STACK mixed reg/k, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3), - BPF_MOV64_IMM(BPF_REG_2, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_5, -6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, -}, -{ - "PTR_TO_STACK mixed reg/k, 3", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -3), - BPF_MOV64_IMM(BPF_REG_2, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = -3, -}, -{ - "PTR_TO_STACK reg", - .insns = { - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_MOV64_IMM(BPF_REG_2, -3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 42), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 42, -}, -{ - "stack pointer arithmetic", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_JMP_IMM(BPF_JA, 0, 0, 0), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -10), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1), - BPF_ST_MEM(0, BPF_REG_2, 4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), - BPF_ST_MEM(0, BPF_REG_2, 4, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, -}, -{ - "store 
PTR_TO_STACK in R10 to array map using BPF_B", - .insns = { - /* Load pointer to map. */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - /* Copy R10 to R9. */ - BPF_MOV64_REG(BPF_REG_9, BPF_REG_10), - /* Pollute other registers with unaligned values. */ - BPF_MOV64_IMM(BPF_REG_2, -1), - BPF_MOV64_IMM(BPF_REG_3, -1), - BPF_MOV64_IMM(BPF_REG_4, -1), - BPF_MOV64_IMM(BPF_REG_5, -1), - BPF_MOV64_IMM(BPF_REG_6, -1), - BPF_MOV64_IMM(BPF_REG_7, -1), - BPF_MOV64_IMM(BPF_REG_8, -1), - /* Store both R9 and R10 with BPF_B and read back. */ - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_10, 0), - BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_1, 0), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_9, 0), - BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_1, 0), - /* Should read back as same value. */ - BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 42), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 42, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -- cgit v1.2.3-70-g09d2 From ab839a58194633bd1c5ff42e6a9de87a746a67aa Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:17 +0200 Subject: selftests/bpf: verifier/uninit.c converted to inline assembly Test verifier/uninit.c automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-37-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../testing/selftests/bpf/progs/verifier_uninit.c | 61 ++++++++++++++++++++++ tools/testing/selftests/bpf/verifier/uninit.c | 39 -------------- 3 files changed, 63 insertions(+), 39 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_uninit.c delete mode 100644 tools/testing/selftests/bpf/verifier/uninit.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index ce1ca8c0c02e..c6e69b3827dc 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -33,6 +33,7 @@ #include "verifier_ringbuf.skel.h" #include "verifier_spill_fill.skel.h" #include "verifier_stack_ptr.skel.h" +#include "verifier_uninit.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -88,3 +89,4 @@ void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); } void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } +void test_verifier_uninit(void) { RUN(verifier_uninit); } diff --git a/tools/testing/selftests/bpf/progs/verifier_uninit.c b/tools/testing/selftests/bpf/progs/verifier_uninit.c new file mode 100644 index 000000000000..7718cd7d19ce --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_uninit.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/uninit.c */ + +#include +#include +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +SEC("socket") +__description("read uninitialized 
register") +__failure __msg("R2 !read_ok") +__failure_unpriv +__naked void read_uninitialized_register(void) +{ + asm volatile (" \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("read invalid register") +__failure __msg("R15 is invalid") +__failure_unpriv +__naked void read_invalid_register(void) +{ + asm volatile (" \ + .8byte %[mov64_reg]; \ + exit; \ +" : + : __imm_insn(mov64_reg, BPF_MOV64_REG(BPF_REG_0, -1)) + : __clobber_all); +} + +SEC("socket") +__description("program doesn't init R0 before exit") +__failure __msg("R0 !read_ok") +__failure_unpriv +__naked void t_init_r0_before_exit(void) +{ + asm volatile (" \ + r2 = r1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("program doesn't init R0 before exit in all branches") +__failure __msg("R0 !read_ok") +__msg_unpriv("R1 pointer comparison") +__naked void before_exit_in_all_branches(void) +{ + asm volatile (" \ + if r1 >= 0 goto l0_%=; \ + r0 = 1; \ + r0 += 2; \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/uninit.c b/tools/testing/selftests/bpf/verifier/uninit.c deleted file mode 100644 index 987a5871ff1d..000000000000 --- a/tools/testing/selftests/bpf/verifier/uninit.c +++ /dev/null @@ -1,39 +0,0 @@ -{ - "read uninitialized register", - .insns = { - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .errstr = "R2 !read_ok", - .result = REJECT, -}, -{ - "read invalid register", - .insns = { - BPF_MOV64_REG(BPF_REG_0, -1), - BPF_EXIT_INSN(), - }, - .errstr = "R15 is invalid", - .result = REJECT, -}, -{ - "program doesn't init R0 before exit", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1), - BPF_EXIT_INSN(), - }, - .errstr = "R0 !read_ok", - .result = REJECT, -}, -{ - "program doesn't init R0 before exit in all branches", - .insns = { - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .errstr = "R0 !read_ok", - .errstr_unpriv = "R1 pointer comparison", - .result = REJECT, -}, -- cgit v1.2.3-70-g09d2 From 033914942da4696dcd9009ba88bc1bba06706549 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:18 +0200 Subject: selftests/bpf: verifier/value_adj_spill.c converted to inline assembly Test verifier/value_adj_spill.c automatically converted to use inline assembly. 
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-38-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_value_adj_spill.c | 78 ++++++++++++++++++++++ .../selftests/bpf/verifier/value_adj_spill.c | 43 ------------ 3 files changed, 80 insertions(+), 43 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c delete mode 100644 tools/testing/selftests/bpf/verifier/value_adj_spill.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index c6e69b3827dc..825c8583fecf 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -34,6 +34,7 @@ #include "verifier_spill_fill.skel.h" #include "verifier_stack_ptr.skel.h" #include "verifier_uninit.skel.h" +#include "verifier_value_adj_spill.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -90,3 +91,4 @@ void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } void test_verifier_uninit(void) { RUN(verifier_uninit); } +void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); } diff --git a/tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c b/tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c new file mode 100644 index 000000000000..d7a5ba9bbe6a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/value_adj_spill.c */ + +#include +#include +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("socket") +__description("map element value is preserved across register spilling") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void is_preserved_across_register_spilling(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 42; \ + *(u64*)(r0 + 0) = r1; \ + r1 = r10; \ + r1 += -184; \ + *(u64*)(r1 + 0) = r0; \ + r3 = *(u64*)(r1 + 0); \ + r1 = 42; \ + *(u64*)(r3 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value or null is marked on register spilling") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void is_marked_on_register_spilling(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + r1 = r10; \ + r1 += -152; \ + *(u64*)(r1 + 0) = r0; \ + if r0 == 0 goto l0_%=; \ + r3 = *(u64*)(r1 + 0); \ + r1 = 42; \ + *(u64*)(r3 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/value_adj_spill.c 
b/tools/testing/selftests/bpf/verifier/value_adj_spill.c deleted file mode 100644 index 7135e8021b81..000000000000 --- a/tools/testing/selftests/bpf/verifier/value_adj_spill.c +++ /dev/null @@ -1,43 +0,0 @@ -{ - "map element value is preserved across register spilling", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result = ACCEPT, - .result_unpriv = REJECT, -}, -{ - "map element value or null is marked on register spilling", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result = ACCEPT, - .result_unpriv = REJECT, -}, -- cgit v1.2.3-70-g09d2 From 8f59e87a3bc6a5618f0ed459f1d36838c16bcad7 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:19 +0200 Subject: selftests/bpf: verifier/value.c converted to inline assembly Test verifier/value.c automatically converted to use inline assembly. 
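The result and flag fields translate directly to bpf_misc.h annotations; for the "map element value with unaligned store" test the mapping is (excerpted from the hunks below):

  /* old */
	.errstr_unpriv = "R0 leaks addr",
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,

  /* new */
  __success __failure_unpriv __msg_unpriv("R0 leaks addr")
  __retval(0) __flag(BPF_F_ANY_ALIGNMENT)

In the converted tests F_NEEDS_EFFICIENT_UNALIGNED_ACCESS is expressed by loading the program with BPF_F_ANY_ALIGNMENT via __flag().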
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-39-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + tools/testing/selftests/bpf/progs/verifier_value.c | 158 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/value.c | 104 -------------- 3 files changed, 160 insertions(+), 104 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_value.c delete mode 100644 tools/testing/selftests/bpf/verifier/value.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 825c8583fecf..c77df746d650 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -35,6 +35,7 @@ #include "verifier_stack_ptr.skel.h" #include "verifier_uninit.skel.h" #include "verifier_value_adj_spill.skel.h" +#include "verifier_value.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -92,3 +93,4 @@ void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } void test_verifier_uninit(void) { RUN(verifier_uninit); } void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); } +void test_verifier_value(void) { RUN(verifier_value); } diff --git a/tools/testing/selftests/bpf/progs/verifier_value.c b/tools/testing/selftests/bpf/progs/verifier_value.c new file mode 100644 index 000000000000..b5af6b6f5acd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_value.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/value.c */ + +#include +#include +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("socket") +__description("map element value store of cleared call register") +__failure __msg("R1 !read_ok") +__failure_unpriv __msg_unpriv("R1 !read_ok") +__naked void store_of_cleared_call_register(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value with unaligned store") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void element_value_with_unaligned_store(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 += 3; \ + r1 = 42; \ + *(u64*)(r0 + 0) = r1; \ + r1 = 43; \ + *(u64*)(r0 + 2) = r1; \ + r1 = 44; \ + *(u64*)(r0 - 2) = r1; \ + r8 = r0; \ + r1 = 32; \ + *(u64*)(r8 + 0) = r1; \ + r1 = 33; \ + *(u64*)(r8 + 2) = r1; \ + r1 = 34; \ + *(u64*)(r8 - 2) = r1; \ + r8 += 5; \ + r1 = 22; \ + *(u64*)(r8 + 0) = r1; \ + r1 = 23; \ + *(u64*)(r8 + 4) = r1; \ + r1 = 24; \ + *(u64*)(r8 - 7) = r1; \ + r7 = r8; \ + r7 += 3; \ + r1 = 22; \ + *(u64*)(r7 + 0) = r1; \ + r1 = 23; \ + *(u64*)(r7 + 4) = r1; \ + r1 = 24; \ + *(u64*)(r7 - 4) 
= r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value with unaligned load") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void element_value_with_unaligned_load(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + if r1 >= %[max_entries] goto l0_%=; \ + r0 += 3; \ + r7 = *(u64*)(r0 + 0); \ + r7 = *(u64*)(r0 + 2); \ + r8 = r0; \ + r7 = *(u64*)(r8 + 0); \ + r7 = *(u64*)(r8 + 2); \ + r0 += 5; \ + r7 = *(u64*)(r0 + 0); \ + r7 = *(u64*)(r0 + 4); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(max_entries, MAX_ENTRIES) + : __clobber_all); +} + +SEC("socket") +__description("map element value is preserved across register spilling") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void is_preserved_across_register_spilling(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 += %[test_val_foo]; \ + r1 = 42; \ + *(u64*)(r0 + 0) = r1; \ + r1 = r10; \ + r1 += -184; \ + *(u64*)(r1 + 0) = r0; \ + r3 = *(u64*)(r1 + 0); \ + r1 = 42; \ + *(u64*)(r3 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/value.c b/tools/testing/selftests/bpf/verifier/value.c deleted file mode 100644 index 0e42592b1218..000000000000 --- a/tools/testing/selftests/bpf/verifier/value.c +++ /dev/null @@ -1,104 +0,0 @@ -{ - "map element value store of cleared call register", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R1 !read_ok", - .errstr = "R1 !read_ok", - .result = REJECT, - .result_unpriv = REJECT, -}, -{ - "map element value with unaligned store", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43), - BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32), - BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33), - BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5), - BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22), - BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23), - BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_8), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3), - BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22), - BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23), - BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - 
.errstr_unpriv = "R0 leaks addr", - .result = ACCEPT, - .result_unpriv = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "map element value with unaligned load", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result = ACCEPT, - .result_unpriv = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "map element value is preserved across register spilling", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, offsetof(struct test_val, foo)), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result = ACCEPT, - .result_unpriv = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -- cgit v1.2.3-70-g09d2 From d330528617b78ae893c405bf06c6c604bd2e6357 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:20 +0200 Subject: selftests/bpf: verifier/value_or_null.c converted to inline assembly Test verifier/value_or_null.c automatically converted to use inline assembly. 
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-40-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_value_or_null.c | 288 +++++++++++++++++++++ .../testing/selftests/bpf/verifier/value_or_null.c | 220 ---------------- 3 files changed, 290 insertions(+), 220 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_value_or_null.c delete mode 100644 tools/testing/selftests/bpf/verifier/value_or_null.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index c77df746d650..54eb21ef9fad 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -36,6 +36,7 @@ #include "verifier_uninit.skel.h" #include "verifier_value_adj_spill.skel.h" #include "verifier_value.skel.h" +#include "verifier_value_or_null.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -94,3 +95,4 @@ void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } void test_verifier_uninit(void) { RUN(verifier_uninit); } void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); } void test_verifier_value(void) { RUN(verifier_value); } +void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); } diff --git a/tools/testing/selftests/bpf/progs/verifier_value_or_null.c b/tools/testing/selftests/bpf/progs/verifier_value_or_null.c new file mode 100644 index 000000000000..8ff668a242eb --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_value_or_null.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/value_or_null.c */ + +#include +#include +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("tc") +__description("multiple registers share map_lookup_elem result") +__success __retval(0) +__naked void share_map_lookup_elem_result(void) +{ + asm volatile (" \ + r1 = 10; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r4 = r0; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("alu ops on ptr_to_map_value_or_null, 1") +__failure __msg("R4 pointer arithmetic on map_value_or_null") +__naked void map_value_or_null_1(void) +{ + asm volatile (" \ + r1 = 10; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r4 = r0; \ + r4 += -2; \ + r4 += 2; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("alu ops on ptr_to_map_value_or_null, 2") +__failure __msg("R4 pointer arithmetic on map_value_or_null") +__naked void map_value_or_null_2(void) +{ + asm volatile (" \ + r1 = 10; \ + 
*(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r4 = r0; \ + r4 &= -1; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("alu ops on ptr_to_map_value_or_null, 3") +__failure __msg("R4 pointer arithmetic on map_value_or_null") +__naked void map_value_or_null_3(void) +{ + asm volatile (" \ + r1 = 10; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r4 = r0; \ + r4 <<= 1; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("invalid memory access with multiple map_lookup_elem calls") +__failure __msg("R4 !read_ok") +__naked void multiple_map_lookup_elem_calls(void) +{ + asm volatile (" \ + r1 = 10; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + r8 = r1; \ + r7 = r2; \ + call %[bpf_map_lookup_elem]; \ + r4 = r0; \ + r1 = r8; \ + r2 = r7; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("valid indirect map_lookup_elem access with 2nd lookup in branch") +__success __retval(0) +__naked void with_2nd_lookup_in_branch(void) +{ + asm volatile (" \ + r1 = 10; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + r8 = r1; \ + r7 = r2; \ + call %[bpf_map_lookup_elem]; \ + r2 = 10; \ + if r2 != 0 goto l0_%=; \ + r1 = r8; \ + r2 = r7; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r4 = r0; \ + if r0 == 0 goto l1_%=; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("invalid map access from else condition") +__failure __msg("R0 unbounded memory access") +__failure_unpriv __msg_unpriv("R0 leaks addr") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void map_access_from_else_condition(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + if r1 >= %[__imm_0] goto l1_%=; \ + r1 += 1; \ +l1_%=: r1 <<= 2; \ + r0 += r1; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(__imm_0, MAX_ENTRIES-1), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tc") +__description("map lookup and null branch prediction") +__success __retval(0) +__naked void lookup_and_null_branch_prediction(void) +{ + asm volatile (" \ + r1 = 10; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r6 = r0; \ + if r6 == 0 goto l0_%=; \ + if r6 != 0 goto l0_%=; \ + r10 += 10; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("MAP_VALUE_OR_NULL check_ids() in regsafe()") +__failure __msg("R8 invalid mem access 'map_value_or_null'") +__failure_unpriv __msg_unpriv("") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void 
null_check_ids_in_regsafe(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + /* r9 = map_lookup_elem(...) */ \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r9 = r0; \ + /* r8 = map_lookup_elem(...) */ \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r8 = r0; \ + /* r7 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r7 = r0; \ + /* r6 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r6 = r0; \ + /* if r6 > r7 goto +1 ; no new information about the state is derived from\ + * ; this check, thus produced verifier states differ\ + * ; only in 'insn_idx' \ + * r9 = r8 ; optionally share ID between r9 and r8\ + */ \ + if r6 > r7 goto l0_%=; \ + r9 = r8; \ +l0_%=: /* if r9 == 0 goto */ \ + if r9 == 0 goto l1_%=; \ + /* read map value via r8, this is not always \ + * safe because r8 might be not equal to r9. \ + */ \ + r0 = *(u64*)(r8 + 0); \ +l1_%=: /* exit 0 */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/value_or_null.c b/tools/testing/selftests/bpf/verifier/value_or_null.c deleted file mode 100644 index 52a8bca14f03..000000000000 --- a/tools/testing/selftests/bpf/verifier/value_or_null.c +++ /dev/null @@ -1,220 +0,0 @@ -{ - "multiple registers share map_lookup_elem result", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS -}, -{ - "alu ops on ptr_to_map_value_or_null, 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr = "R4 pointer arithmetic on map_value_or_null", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS -}, -{ - "alu ops on ptr_to_map_value_or_null, 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_4, -1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr = "R4 pointer arithmetic on map_value_or_null", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS -}, -{ - "alu ops on ptr_to_map_value_or_null, 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr = "R4 pointer arithmetic on map_value_or_null", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS -}, -{ - "invalid memory access with multiple map_lookup_elem calls", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .result = REJECT, - .errstr = "R4 !read_ok", - .prog_type = BPF_PROG_TYPE_SCHED_CLS -}, -{ - "valid indirect map_lookup_elem access with 2nd lookup in branch", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_2, 10), - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS -}, -{ - "invalid map access from else condition", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 unbounded memory access", - .result = REJECT, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "map lookup and null branch prediction", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 2), - BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_10, 10), - BPF_EXIT_INSN(), - 
}, - .fixup_map_hash_8b = { 4 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "MAP_VALUE_OR_NULL check_ids() in regsafe()", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - /* r9 = map_lookup_elem(...) */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), - /* r8 = map_lookup_elem(...) */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - /* r7 = ktime_get_ns() */ - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* r6 = ktime_get_ns() */ - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* if r6 > r7 goto +1 ; no new information about the state is derived from - * ; this check, thus produced verifier states differ - * ; only in 'insn_idx' - * r9 = r8 ; optionally share ID between r9 and r8 - */ - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_8), - /* if r9 == 0 goto */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1), - /* read map value via r8, this is not always - * safe because r8 might be not equal to r9. - */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8, 0), - /* exit 0 */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .flags = BPF_F_TEST_STATE_FREQ, - .fixup_map_hash_8b = { 3, 9 }, - .result = REJECT, - .errstr = "R8 invalid mem access 'map_value_or_null'", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -- cgit v1.2.3-70-g09d2 From d15f5b68b63ad7f47e05aba33c4794fb3bfaf1af Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:21 +0200 Subject: selftests/bpf: verifier/var_off.c converted to inline assembly Test verifier/var_off.c automatically converted to use inline assembly. 
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-41-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../testing/selftests/bpf/progs/verifier_var_off.c | 349 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/var_off.c | 291 ----------------- 3 files changed, 351 insertions(+), 291 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_var_off.c delete mode 100644 tools/testing/selftests/bpf/verifier/var_off.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 54eb21ef9fad..44350e328da2 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -37,6 +37,7 @@ #include "verifier_value_adj_spill.skel.h" #include "verifier_value.skel.h" #include "verifier_value_or_null.skel.h" +#include "verifier_var_off.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -96,3 +97,4 @@ void test_verifier_uninit(void) { RUN(verifier_uninit); } void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); } void test_verifier_value(void) { RUN(verifier_value); } void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); } +void test_verifier_var_off(void) { RUN(verifier_var_off); } diff --git a/tools/testing/selftests/bpf/progs/verifier_var_off.c b/tools/testing/selftests/bpf/progs/verifier_var_off.c new file mode 100644 index 000000000000..83a90afba785 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_var_off.c @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/var_off.c */ + +#include +#include +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("lwt_in") +__description("variable-offset ctx access") +__failure __msg("variable ctx access var_off=(0x0; 0x4)") +__naked void variable_offset_ctx_access(void) +{ + asm volatile (" \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned */ \ + r2 &= 4; \ + /* add it to skb. We now have either &skb->len or\ + * &skb->pkt_type, but we don't know which \ + */ \ + r1 += r2; \ + /* dereference it */ \ + r0 = *(u32*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("cgroup/skb") +__description("variable-offset stack read, priv vs unpriv") +__success __failure_unpriv +__msg_unpriv("R2 variable stack access prohibited for !root") +__retval(0) +__naked void stack_read_priv_vs_unpriv(void) +{ + asm volatile (" \ + /* Fill the top 8 bytes of the stack */ \ + r0 = 0; \ + *(u64*)(r10 - 8) = r0; \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned */ \ + r2 &= 4; \ + r2 -= 8; \ + /* add it to fp. 
We now have either fp-4 or fp-8, but\ + * we don't know which \ + */ \ + r2 += r10; \ + /* dereference it for a stack read */ \ + r0 = *(u32*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("lwt_in") +__description("variable-offset stack read, uninitialized") +__failure __msg("invalid variable-offset read from stack R2") +__naked void variable_offset_stack_read_uninitialized(void) +{ + asm volatile (" \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned */ \ + r2 &= 4; \ + r2 -= 8; \ + /* add it to fp. We now have either fp-4 or fp-8, but\ + * we don't know which \ + */ \ + r2 += r10; \ + /* dereference it for a stack read */ \ + r0 = *(u32*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("variable-offset stack write, priv vs unpriv") +__success __failure_unpriv +/* Variable stack access is rejected for unprivileged. + */ +__msg_unpriv("R2 variable stack access prohibited for !root") +__retval(0) +__naked void stack_write_priv_vs_unpriv(void) +{ + asm volatile (" \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 8-byte aligned */ \ + r2 &= 8; \ + r2 -= 16; \ + /* Add it to fp. We now have either fp-8 or fp-16, but\ + * we don't know which \ + */ \ + r2 += r10; \ + /* Dereference it for a stack write */ \ + r0 = 0; \ + *(u64*)(r2 + 0) = r0; \ + /* Now read from the address we just wrote. This shows\ + * that, after a variable-offset write, a priviledged\ + * program can read the slots that were in the range of\ + * that write (even if the verifier doesn't actually know\ + * if the slot being read was really written to or not.\ + */ \ + r3 = *(u64*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("variable-offset stack write clobbers spilled regs") +__failure +/* In the priviledged case, dereferencing a spilled-and-then-filled + * register is rejected because the previous variable offset stack + * write might have overwritten the spilled pointer (i.e. we lose track + * of the spilled register when we analyze the write). + */ +__msg("R2 invalid mem access 'scalar'") +__failure_unpriv +/* The unprivileged case is not too interesting; variable + * stack access is rejected. + */ +__msg_unpriv("R2 variable stack access prohibited for !root") +__naked void stack_write_clobbers_spilled_regs(void) +{ + asm volatile (" \ + /* Dummy instruction; needed because we need to patch the next one\ + * and we can't patch the first instruction. \ + */ \ + r6 = 0; \ + /* Make R0 a map ptr */ \ + r0 = %[map_hash_8b] ll; \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 8-byte aligned */ \ + r2 &= 8; \ + r2 -= 16; \ + /* Add it to fp. We now have either fp-8 or fp-16, but\ + * we don't know which. \ + */ \ + r2 += r10; \ + /* Spill R0(map ptr) into stack */ \ + *(u64*)(r10 - 8) = r0; \ + /* Dereference the unknown value for a stack write */\ + r0 = 0; \ + *(u64*)(r2 + 0) = r0; \ + /* Fill the register back into R2 */ \ + r2 = *(u64*)(r10 - 8); \ + /* Try to dereference R2 for a memory load */ \ + r0 = *(u64*)(r2 + 8); \ + exit; \ +" : + : __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("sockops") +__description("indirect variable-offset stack access, unbounded") +__failure __msg("invalid unbounded variable-offset indirect access to stack R4") +__naked void variable_offset_stack_access_unbounded(void) +{ + asm volatile (" \ + r2 = 6; \ + r3 = 28; \ + /* Fill the top 16 bytes of the stack. 
*/ \ + r4 = 0; \ + *(u64*)(r10 - 16) = r4; \ + r4 = 0; \ + *(u64*)(r10 - 8) = r4; \ + /* Get an unknown value. */ \ + r4 = *(u64*)(r1 + %[bpf_sock_ops_bytes_received]);\ + /* Check the lower bound but don't check the upper one. */\ + if r4 s< 0 goto l0_%=; \ + /* Point the lower bound to initialized stack. Offset is now in range\ + * from fp-16 to fp+0x7fffffffffffffef, i.e. max value is unbounded.\ + */ \ + r4 -= 16; \ + r4 += r10; \ + r5 = 8; \ + /* Dereference it indirectly. */ \ + call %[bpf_getsockopt]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_getsockopt), + __imm_const(bpf_sock_ops_bytes_received, offsetof(struct bpf_sock_ops, bytes_received)) + : __clobber_all); +} + +SEC("lwt_in") +__description("indirect variable-offset stack access, max out of bound") +__failure __msg("invalid variable-offset indirect access to stack R2") +__naked void access_max_out_of_bound(void) +{ + asm volatile (" \ + /* Fill the top 8 bytes of the stack */ \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned */ \ + r2 &= 4; \ + r2 -= 8; \ + /* add it to fp. We now have either fp-4 or fp-8, but\ + * we don't know which \ + */ \ + r2 += r10; \ + /* dereference it indirectly */ \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("lwt_in") +__description("indirect variable-offset stack access, min out of bound") +__failure __msg("invalid variable-offset indirect access to stack R2") +__naked void access_min_out_of_bound(void) +{ + asm volatile (" \ + /* Fill the top 8 bytes of the stack */ \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned */ \ + r2 &= 4; \ + r2 -= 516; \ + /* add it to fp. We now have either fp-516 or fp-512, but\ + * we don't know which \ + */ \ + r2 += r10; \ + /* dereference it indirectly */ \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("lwt_in") +__description("indirect variable-offset stack access, min_off < min_initialized") +__failure __msg("invalid indirect read from stack R2 var_off") +__naked void access_min_off_min_initialized(void) +{ + asm volatile (" \ + /* Fill only the top 8 bytes of the stack. */ \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned. */ \ + r2 &= 4; \ + r2 -= 16; \ + /* Add it to fp. We now have either fp-12 or fp-16, but we don't know\ + * which. fp-16 size 8 is partially uninitialized stack.\ + */ \ + r2 += r10; \ + /* Dereference it indirectly. */ \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("indirect variable-offset stack access, priv vs unpriv") +__success __failure_unpriv +__msg_unpriv("R2 variable stack access prohibited for !root") +__retval(0) +__naked void stack_access_priv_vs_unpriv(void) +{ + asm volatile (" \ + /* Fill the top 16 bytes of the stack. */ \ + r2 = 0; \ + *(u64*)(r10 - 16) = r2; \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + /* Get an unknown value. */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned. */ \ + r2 &= 4; \ + r2 -= 16; \ + /* Add it to fp. 
We now have either fp-12 or fp-16, we don't know\ + * which, but either way it points to initialized stack.\ + */ \ + r2 += r10; \ + /* Dereference it indirectly. */ \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("lwt_in") +__description("indirect variable-offset stack access, ok") +__success __retval(0) +__naked void variable_offset_stack_access_ok(void) +{ + asm volatile (" \ + /* Fill the top 16 bytes of the stack. */ \ + r2 = 0; \ + *(u64*)(r10 - 16) = r2; \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + /* Get an unknown value. */ \ + r2 = *(u32*)(r1 + 0); \ + /* Make it small and 4-byte aligned. */ \ + r2 &= 4; \ + r2 -= 16; \ + /* Add it to fp. We now have either fp-12 or fp-16, we don't know\ + * which, but either way it points to initialized stack.\ + */ \ + r2 += r10; \ + /* Dereference it indirectly. */ \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/var_off.c b/tools/testing/selftests/bpf/verifier/var_off.c deleted file mode 100644 index b183e26c03f1..000000000000 --- a/tools/testing/selftests/bpf/verifier/var_off.c +++ /dev/null @@ -1,291 +0,0 @@ -{ - "variable-offset ctx access", - .insns = { - /* Get an unknown value */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* Make it small and 4-byte aligned */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), - /* add it to skb. We now have either &skb->len or - * &skb->pkt_type, but we don't know which - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - /* dereference it */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .errstr = "variable ctx access var_off=(0x0; 0x4)", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_IN, -}, -{ - "variable-offset stack read, priv vs unpriv", - .insns = { - /* Fill the top 8 bytes of the stack */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - /* Get an unknown value */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* Make it small and 4-byte aligned */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8), - /* add it to fp. We now have either fp-4 or fp-8, but - * we don't know which - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), - /* dereference it for a stack read */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R2 variable stack access prohibited for !root", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "variable-offset stack read, uninitialized", - .insns = { - /* Get an unknown value */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* Make it small and 4-byte aligned */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8), - /* add it to fp. 
We now have either fp-4 or fp-8, but - * we don't know which - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), - /* dereference it for a stack read */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid variable-offset read from stack R2", - .prog_type = BPF_PROG_TYPE_LWT_IN, -}, -{ - "variable-offset stack write, priv vs unpriv", - .insns = { - /* Get an unknown value */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* Make it small and 8-byte aligned */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16), - /* Add it to fp. We now have either fp-8 or fp-16, but - * we don't know which - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), - /* Dereference it for a stack write */ - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - /* Now read from the address we just wrote. This shows - * that, after a variable-offset write, a priviledged - * program can read the slots that were in the range of - * that write (even if the verifier doesn't actually know - * if the slot being read was really written to or not. - */ - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - /* Variable stack access is rejected for unprivileged. - */ - .errstr_unpriv = "R2 variable stack access prohibited for !root", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "variable-offset stack write clobbers spilled regs", - .insns = { - /* Dummy instruction; needed because we need to patch the next one - * and we can't patch the first instruction. - */ - BPF_MOV64_IMM(BPF_REG_6, 0), - /* Make R0 a map ptr */ - BPF_LD_MAP_FD(BPF_REG_0, 0), - /* Get an unknown value */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* Make it small and 8-byte aligned */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16), - /* Add it to fp. We now have either fp-8 or fp-16, but - * we don't know which. - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), - /* Spill R0(map ptr) into stack */ - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - /* Dereference the unknown value for a stack write */ - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - /* Fill the register back into R2 */ - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), - /* Try to dereference R2 for a memory load */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1 }, - /* The unprivileged case is not too interesting; variable - * stack access is rejected. - */ - .errstr_unpriv = "R2 variable stack access prohibited for !root", - .result_unpriv = REJECT, - /* In the priviledged case, dereferencing a spilled-and-then-filled - * register is rejected because the previous variable offset stack - * write might have overwritten the spilled pointer (i.e. we lose track - * of the spilled register when we analyze the write). - */ - .errstr = "R2 invalid mem access 'scalar'", - .result = REJECT, -}, -{ - "indirect variable-offset stack access, unbounded", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 6), - BPF_MOV64_IMM(BPF_REG_3, 28), - /* Fill the top 16 bytes of the stack. */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - /* Get an unknown value. */ - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, offsetof(struct bpf_sock_ops, - bytes_received)), - /* Check the lower bound but don't check the upper one. */ - BPF_JMP_IMM(BPF_JSLT, BPF_REG_4, 0, 4), - /* Point the lower bound to initialized stack. 
Offset is now in range - * from fp-16 to fp+0x7fffffffffffffef, i.e. max value is unbounded. - */ - BPF_ALU64_IMM(BPF_SUB, BPF_REG_4, 16), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_10), - BPF_MOV64_IMM(BPF_REG_5, 8), - /* Dereference it indirectly. */ - BPF_EMIT_CALL(BPF_FUNC_getsockopt), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid unbounded variable-offset indirect access to stack R4", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SOCK_OPS, -}, -{ - "indirect variable-offset stack access, max out of bound", - .insns = { - /* Fill the top 8 bytes of the stack */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - /* Get an unknown value */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* Make it small and 4-byte aligned */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8), - /* add it to fp. We now have either fp-4 or fp-8, but - * we don't know which - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), - /* dereference it indirectly */ - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "invalid variable-offset indirect access to stack R2", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_IN, -}, -{ - "indirect variable-offset stack access, min out of bound", - .insns = { - /* Fill the top 8 bytes of the stack */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - /* Get an unknown value */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* Make it small and 4-byte aligned */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 516), - /* add it to fp. We now have either fp-516 or fp-512, but - * we don't know which - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), - /* dereference it indirectly */ - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "invalid variable-offset indirect access to stack R2", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_IN, -}, -{ - "indirect variable-offset stack access, min_off < min_initialized", - .insns = { - /* Fill only the top 8 bytes of the stack. */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - /* Get an unknown value */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* Make it small and 4-byte aligned. */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16), - /* Add it to fp. We now have either fp-12 or fp-16, but we don't know - * which. fp-16 size 8 is partially uninitialized stack. - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), - /* Dereference it indirectly. */ - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 5 }, - .errstr = "invalid indirect read from stack R2 var_off", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_IN, -}, -{ - "indirect variable-offset stack access, priv vs unpriv", - .insns = { - /* Fill the top 16 bytes of the stack. */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - /* Get an unknown value. */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* Make it small and 4-byte aligned. */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16), - /* Add it to fp. 
We now have either fp-12 or fp-16, we don't know - * which, but either way it points to initialized stack. - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), - /* Dereference it indirectly. */ - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 6 }, - .errstr_unpriv = "R2 variable stack access prohibited for !root", - .result_unpriv = REJECT, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "indirect variable-offset stack access, ok", - .insns = { - /* Fill the top 16 bytes of the stack. */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - /* Get an unknown value. */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* Make it small and 4-byte aligned. */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 16), - /* Add it to fp. We now have either fp-12 or fp-16, we don't know - * which, but either way it points to initialized stack. - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), - /* Dereference it indirectly. */ - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 6 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_IN, -}, -- cgit v1.2.3-70-g09d2 From a8036aea2d4f412c98a5fdbc0c987fa8a3c023ed Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:22 +0200 Subject: selftests/bpf: verifier/xadd.c converted to inline assembly Test verifier/xadd.c automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-42-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + tools/testing/selftests/bpf/progs/verifier_xadd.c | 124 ++++++++++++++++++++++ tools/testing/selftests/bpf/verifier/xadd.c | 97 ----------------- 3 files changed, 126 insertions(+), 97 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_xadd.c delete mode 100644 tools/testing/selftests/bpf/verifier/xadd.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 44350e328da2..cd56fe520145 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -38,6 +38,7 @@ #include "verifier_value.skel.h" #include "verifier_value_or_null.skel.h" #include "verifier_var_off.skel.h" +#include "verifier_xadd.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -98,3 +99,4 @@ void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); } void test_verifier_value(void) { RUN(verifier_value); } void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); } void test_verifier_var_off(void) { RUN(verifier_var_off); } +void test_verifier_xadd(void) { RUN(verifier_xadd); } diff --git a/tools/testing/selftests/bpf/progs/verifier_xadd.c b/tools/testing/selftests/bpf/progs/verifier_xadd.c new file mode 100644 index 000000000000..05a0a55adb45 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_xadd.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/xadd.c */ + +#include +#include +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, 
long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("tc") +__description("xadd/w check unaligned stack") +__failure __msg("misaligned stack access off") +__naked void xadd_w_check_unaligned_stack(void) +{ + asm volatile (" \ + r0 = 1; \ + *(u64*)(r10 - 8) = r0; \ + lock *(u32 *)(r10 - 7) += w0; \ + r0 = *(u64*)(r10 - 8); \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("xadd/w check unaligned map") +__failure __msg("misaligned value access off") +__naked void xadd_w_check_unaligned_map(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = 1; \ + lock *(u32 *)(r0 + 3) += w1; \ + r0 = *(u32*)(r0 + 3); \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("xdp") +__description("xadd/w check unaligned pkt") +__failure __msg("BPF_ATOMIC stores into R2 pkt is not allowed") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void xadd_w_check_unaligned_pkt(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 < r3 goto l0_%=; \ + r0 = 99; \ + goto l1_%=; \ +l0_%=: r0 = 1; \ + r1 = 0; \ + *(u32*)(r2 + 0) = r1; \ + r1 = 0; \ + *(u32*)(r2 + 3) = r1; \ + lock *(u32 *)(r2 + 1) += w0; \ + lock *(u32 *)(r2 + 2) += w0; \ + r0 = *(u32*)(r2 + 1); \ +l1_%=: exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("xadd/w check whether src/dst got mangled, 1") +__success __retval(3) +__naked void src_dst_got_mangled_1(void) +{ + asm volatile (" \ + r0 = 1; \ + r6 = r0; \ + r7 = r10; \ + *(u64*)(r10 - 8) = r0; \ + lock *(u64 *)(r10 - 8) += r0; \ + lock *(u64 *)(r10 - 8) += r0; \ + if r6 != r0 goto l0_%=; \ + if r7 != r10 goto l0_%=; \ + r0 = *(u64*)(r10 - 8); \ + exit; \ +l0_%=: r0 = 42; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("xadd/w check whether src/dst got mangled, 2") +__success __retval(3) +__naked void src_dst_got_mangled_2(void) +{ + asm volatile (" \ + r0 = 1; \ + r6 = r0; \ + r7 = r10; \ + *(u32*)(r10 - 8) = r0; \ + lock *(u32 *)(r10 - 8) += w0; \ + lock *(u32 *)(r10 - 8) += w0; \ + if r6 != r0 goto l0_%=; \ + if r7 != r10 goto l0_%=; \ + r0 = *(u32*)(r10 - 8); \ + exit; \ +l0_%=: r0 = 42; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/xadd.c b/tools/testing/selftests/bpf/verifier/xadd.c deleted file mode 100644 index b96ef3526815..000000000000 --- a/tools/testing/selftests/bpf/verifier/xadd.c +++ /dev/null @@ -1,97 +0,0 @@ -{ - "xadd/w check unaligned stack", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -7), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "misaligned stack access off", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "xadd/w check unaligned map", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_1, 1), 
- BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 3), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = REJECT, - .errstr = "misaligned value access off", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "xadd/w check unaligned pkt", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_0, 99), - BPF_JMP_IMM(BPF_JA, 0, 0, 6), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), - BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0), - BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 1), - BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_2, BPF_REG_0, 2), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "BPF_ATOMIC stores into R2 pkt is not allowed", - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "xadd/w check whether src/dst got mangled, 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8), - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_0, -8), - BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3), - BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 42), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 3, -}, -{ - "xadd/w check whether src/dst got mangled, 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8), - BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8), - BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_10, BPF_REG_0, -8), - BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3), - BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 42), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 3, -}, -- cgit v1.2.3-70-g09d2 From ffb515c933a9e8000e50b03f76569ffb6ef4d39d Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Sat, 25 Mar 2023 04:55:23 +0200 Subject: selftests/bpf: verifier/xdp.c converted to inline assembly Test verifier/xdp.c automatically converted to use inline assembly. 
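Once registered in prog_tests/verifier.c as in the diff below, the converted test is exercised by the test_progs runner rather than by test_verifier; assuming the usual selftests/bpf build, an individual conversion can be checked with something like ./test_progs -t verifier_xdp.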
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230325025524.144043-43-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 ++ tools/testing/selftests/bpf/progs/verifier_xdp.c | 24 +++++++++++++++++++++++ tools/testing/selftests/bpf/verifier/xdp.c | 14 ------------- 3 files changed, 26 insertions(+), 14 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_xdp.c delete mode 100644 tools/testing/selftests/bpf/verifier/xdp.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index cd56fe520145..a774d5b193f1 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -39,6 +39,7 @@ #include "verifier_value_or_null.skel.h" #include "verifier_var_off.skel.h" #include "verifier_xadd.skel.h" +#include "verifier_xdp.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -100,3 +101,4 @@ void test_verifier_value(void) { RUN(verifier_value); } void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); } void test_verifier_var_off(void) { RUN(verifier_var_off); } void test_verifier_xadd(void) { RUN(verifier_xadd); } +void test_verifier_xdp(void) { RUN(verifier_xdp); } diff --git a/tools/testing/selftests/bpf/progs/verifier_xdp.c b/tools/testing/selftests/bpf/progs/verifier_xdp.c new file mode 100644 index 000000000000..50768ed179b3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_xdp.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/xdp.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("xdp") +__description("XDP, using ifindex from netdev") +__success __retval(1) +__naked void xdp_using_ifindex_from_netdev(void) +{ + asm volatile (" \ + r0 = 0; \ + r2 = *(u32*)(r1 + %[xdp_md_ingress_ifindex]); \ + if r2 < 1 goto l0_%=; \ + r0 = 1; \ +l0_%=: exit; \ +" : + : __imm_const(xdp_md_ingress_ifindex, offsetof(struct xdp_md, ingress_ifindex)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/xdp.c b/tools/testing/selftests/bpf/verifier/xdp.c deleted file mode 100644 index 5ac390508139..000000000000 --- a/tools/testing/selftests/bpf/verifier/xdp.c +++ /dev/null @@ -1,14 +0,0 @@ -{ - "XDP, using ifindex from netdev", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, ingress_ifindex)), - BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 1, 1), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .retval = 1, -}, -- cgit v1.2.3-70-g09d2 From d8db84d71c0e539f7ce902e2fe297e535ba4d46c Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 22 Mar 2023 14:52:45 -0700 Subject: selftests/bpf: Test task storage when local_storage->smap is NULL The current sk storage test ensures the memory free works when the local_storage->smap is NULL. This patch adds a task storage test to ensure the memory free code path works when local_storage->smap is NULL. 
Signed-off-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20230322215246.1675516-5-martin.lau@linux.dev Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/test_local_storage.c | 7 +-- tools/testing/selftests/bpf/progs/local_storage.c | 56 ++++++++++++++++------ 2 files changed, 46 insertions(+), 17 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/test_local_storage.c b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c index 563a9c746b7b..bcf2e1905ed7 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_local_storage.c +++ b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c @@ -23,7 +23,7 @@ struct storage { /* Fork and exec the provided rm binary and return the exit code of the * forked process and its pid. */ -static int run_self_unlink(int *monitored_pid, const char *rm_path) +static int run_self_unlink(struct local_storage *skel, const char *rm_path) { int child_pid, child_status, ret; int null_fd; @@ -35,7 +35,7 @@ static int run_self_unlink(int *monitored_pid, const char *rm_path) dup2(null_fd, STDERR_FILENO); close(null_fd); - *monitored_pid = getpid(); + skel->bss->monitored_pid = getpid(); /* Use the copied /usr/bin/rm to delete itself * /tmp/copy_of_rm /tmp/copy_of_rm. */ @@ -44,6 +44,7 @@ static int run_self_unlink(int *monitored_pid, const char *rm_path) exit(errno); } else if (child_pid > 0) { waitpid(child_pid, &child_status, 0); + ASSERT_EQ(skel->data->task_storage_result, 0, "task_storage_result"); return WEXITSTATUS(child_status); } @@ -133,7 +134,7 @@ void test_test_local_storage(void) * unlink its executable. This operation should be denied by the loaded * LSM program. */ - err = run_self_unlink(&skel->bss->monitored_pid, tmp_exec_path); + err = run_self_unlink(skel, tmp_exec_path); if (!ASSERT_EQ(err, EPERM, "run_self_unlink")) goto close_prog_rmdir; diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c index c8ba7207f5a5..bc8ea56671a1 100644 --- a/tools/testing/selftests/bpf/progs/local_storage.c +++ b/tools/testing/selftests/bpf/progs/local_storage.c @@ -16,6 +16,7 @@ char _license[] SEC("license") = "GPL"; int monitored_pid = 0; int inode_storage_result = -1; int sk_storage_result = -1; +int task_storage_result = -1; struct local_storage { struct inode *exec_inode; @@ -50,26 +51,57 @@ struct { __type(value, struct local_storage); } task_storage_map SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct local_storage); +} task_storage_map2 SEC(".maps"); + SEC("lsm/inode_unlink") int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim) { __u32 pid = bpf_get_current_pid_tgid() >> 32; + struct bpf_local_storage *local_storage; struct local_storage *storage; + struct task_struct *task; bool is_self_unlink; if (pid != monitored_pid) return 0; - storage = bpf_task_storage_get(&task_storage_map, - bpf_get_current_task_btf(), 0, 0); - if (storage) { - /* Don't let an executable delete itself */ - is_self_unlink = storage->exec_inode == victim->d_inode; - if (is_self_unlink) - return -EPERM; - } + task = bpf_get_current_task_btf(); + if (!task) + return 0; - return 0; + task_storage_result = -1; + + storage = bpf_task_storage_get(&task_storage_map, task, 0, 0); + if (!storage) + return 0; + + /* Don't let an executable delete itself */ + is_self_unlink = storage->exec_inode == victim->d_inode; + + storage = 
bpf_task_storage_get(&task_storage_map2, task, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!storage || storage->value) + return 0; + + if (bpf_task_storage_delete(&task_storage_map, task)) + return 0; + + /* Ensure that the task_storage_map is disconnected from the storage. + * The storage memory should not be freed back to the + * bpf_mem_alloc. + */ + local_storage = task->bpf_storage; + if (!local_storage || local_storage->smap) + return 0; + + task_storage_result = 0; + + return is_self_unlink ? -EPERM : 0; } SEC("lsm.s/inode_rename") @@ -139,11 +171,7 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address, if (bpf_sk_storage_delete(&sk_storage_map, sock->sk)) return 0; - /* Ensure that the sk_storage_map is disconnected from the storage. - * The storage memory should not be freed back to the - * bpf_mem_alloc of the sk_bpf_storage_map because - * sk_bpf_storage_map may have been gone. - */ + /* Ensure that the sk_storage_map is disconnected from the storage. */ if (!sock->sk->sk_bpf_storage || sock->sk->sk_bpf_storage->smap) return 0; -- cgit v1.2.3-70-g09d2 From cbe9d93d58b16b5912498ea42b5173022fff7c04 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 22 Mar 2023 14:52:46 -0700 Subject: selftests/bpf: Add bench for task storage creation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds a task storage benchmark to the existing local-storage-create benchmark. For task storage, ./bench --storage-type task --batch-size 32: bpf_ma: Summary: creates 30.456 ± 0.507k/s ( 30.456k/prod), 6.08 kmallocs/create no bpf_ma: Summary: creates 31.962 ± 0.486k/s ( 31.962k/prod), 6.13 kmallocs/create ./bench --storage-type task --batch-size 64: bpf_ma: Summary: creates 30.197 ± 1.476k/s ( 30.197k/prod), 6.08 kmallocs/create no bpf_ma: Summary: creates 31.103 ± 0.297k/s ( 31.103k/prod), 6.13 kmallocs/create Signed-off-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20230322215246.1675516-6-martin.lau@linux.dev Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/bench.c | 2 + .../bpf/benchs/bench_local_storage_create.c | 151 +++++++++++++++++++-- .../bpf/progs/bench_local_storage_create.c | 25 ++++ 3 files changed, 164 insertions(+), 14 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index dc3827c1f139..d9c080ac1796 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -278,6 +278,7 @@ extern struct argp bench_local_storage_argp; extern struct argp bench_local_storage_rcu_tasks_trace_argp; extern struct argp bench_strncmp_argp; extern struct argp bench_hashmap_lookup_argp; +extern struct argp bench_local_storage_create_argp; static const struct argp_child bench_parsers[] = { { &bench_ringbufs_argp, 0, "Ring buffers benchmark", 0 }, @@ -288,6 +289,7 @@ static const struct argp_child bench_parsers[] = { { &bench_local_storage_rcu_tasks_trace_argp, 0, "local_storage RCU Tasks Trace slowdown benchmark", 0 }, { &bench_hashmap_lookup_argp, 0, "Hashmap lookup benchmark", 0 }, + { &bench_local_storage_create_argp, 0, "local-storage-create benchmark", 0 }, {}, }; diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c index f8b2a640ccbe..abb0321d4f34 100644 --- a/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c +++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c @@ -3,19 
+3,71 @@ #include #include +#include +#include #include "bench.h" #include "bench_local_storage_create.skel.h" -#define BATCH_SZ 32 - struct thread { - int fds[BATCH_SZ]; + int *fds; + pthread_t *pthds; + int *pthd_results; }; static struct bench_local_storage_create *skel; static struct thread *threads; -static long socket_errs; +static long create_owner_errs; +static int storage_type = BPF_MAP_TYPE_SK_STORAGE; +static int batch_sz = 32; + +enum { + ARG_BATCH_SZ = 9000, + ARG_STORAGE_TYPE = 9001, +}; + +static const struct argp_option opts[] = { + { "batch-size", ARG_BATCH_SZ, "BATCH_SIZE", 0, + "The number of storage creations in each batch" }, + { "storage-type", ARG_STORAGE_TYPE, "STORAGE_TYPE", 0, + "The type of local storage to test (socket or task)" }, + {}, +}; + +static error_t parse_arg(int key, char *arg, struct argp_state *state) +{ + int ret; + + switch (key) { + case ARG_BATCH_SZ: + ret = atoi(arg); + if (ret < 1) { + fprintf(stderr, "invalid batch-size\n"); + argp_usage(state); + } + batch_sz = ret; + break; + case ARG_STORAGE_TYPE: + if (!strcmp(arg, "task")) { + storage_type = BPF_MAP_TYPE_TASK_STORAGE; + } else if (!strcmp(arg, "socket")) { + storage_type = BPF_MAP_TYPE_SK_STORAGE; + } else { + fprintf(stderr, "invalid storage-type (socket or task)\n"); + argp_usage(state); + } + break; + default: + return ARGP_ERR_UNKNOWN; + } + + return 0; +} + +const struct argp bench_local_storage_create_argp = { + .options = opts, + .parser = parse_arg, +}; static void validate(void) { @@ -28,6 +80,8 @@ static void validate(void) static void setup(void) { + int i; + skel = bench_local_storage_create__open_and_load(); if (!skel) { fprintf(stderr, "error loading skel\n"); @@ -35,10 +89,16 @@ static void setup(void) } skel->bss->bench_pid = getpid(); - - if (!bpf_program__attach(skel->progs.socket_post_create)) { - fprintf(stderr, "Error attaching bpf program\n"); - exit(1); + if (storage_type == BPF_MAP_TYPE_SK_STORAGE) { + if (!bpf_program__attach(skel->progs.socket_post_create)) { + fprintf(stderr, "Error attaching bpf program\n"); + exit(1); + } + } else { + if (!bpf_program__attach(skel->progs.fork)) { + fprintf(stderr, "Error attaching bpf program\n"); + exit(1); + } } if (!bpf_program__attach(skel->progs.kmalloc)) { @@ -52,6 +112,29 @@ static void setup(void) fprintf(stderr, "cannot alloc thread_res\n"); exit(1); } + + for (i = 0; i < env.producer_cnt; i++) { + struct thread *t = &threads[i]; + + if (storage_type == BPF_MAP_TYPE_SK_STORAGE) { + t->fds = malloc(batch_sz * sizeof(*t->fds)); + if (!t->fds) { + fprintf(stderr, "cannot alloc t->fds\n"); + exit(1); + } + } else { + t->pthds = malloc(batch_sz * sizeof(*t->pthds)); + if (!t->pthds) { + fprintf(stderr, "cannot alloc t->pthds\n"); + exit(1); + } + t->pthd_results = malloc(batch_sz * sizeof(*t->pthd_results)); + if (!t->pthd_results) { + fprintf(stderr, "cannot alloc t->pthd_results\n"); + exit(1); + } + } + } } static void measure(struct bench_res *res) @@ -65,20 +148,20 @@ static void *consumer(void *input) return NULL; } -static void *producer(void *input) +static void *sk_producer(void *input) { struct thread *t = &threads[(long)(input)]; int *fds = t->fds; int i; while (true) { - for (i = 0; i < BATCH_SZ; i++) { + for (i = 0; i < batch_sz; i++) { fds[i] = socket(AF_INET6, SOCK_DGRAM, 0); if (fds[i] == -1) - atomic_inc(&socket_errs); + atomic_inc(&create_owner_errs); } - for (i = 0; i < BATCH_SZ; i++) { + for (i = 0; i < batch_sz; i++) { if (fds[i] != -1) close(fds[i]); } @@ -87,6 +170,42 @@ static void *producer(void 
*input) return NULL; } +static void *thread_func(void *arg) +{ + return NULL; +} + +static void *task_producer(void *input) +{ + struct thread *t = &threads[(long)(input)]; + pthread_t *pthds = t->pthds; + int *pthd_results = t->pthd_results; + int i; + + while (true) { + for (i = 0; i < batch_sz; i++) { + pthd_results[i] = pthread_create(&pthds[i], NULL, thread_func, NULL); + if (pthd_results[i]) + atomic_inc(&create_owner_errs); + } + + for (i = 0; i < batch_sz; i++) { + if (!pthd_results[i]) + pthread_join(pthds[i], NULL);; + } + } + + return NULL; +} + +static void *producer(void *input) +{ + if (storage_type == BPF_MAP_TYPE_SK_STORAGE) + return sk_producer(input); + else + return task_producer(input); +} + static void report_progress(int iter, struct bench_res *res, long delta_ns) { double creates_per_sec, kmallocs_per_create; @@ -123,14 +242,18 @@ static void report_final(struct bench_res res[], int res_cnt) printf("Summary: creates %8.3lf \u00B1 %5.3lfk/s (%7.3lfk/prod), ", creates_mean, creates_stddev, creates_mean / env.producer_cnt); printf("%4.2lf kmallocs/create\n", (double)total_kmallocs / total_creates); - if (socket_errs || skel->bss->create_errs) - printf("socket() errors %ld create_errs %ld\n", socket_errs, + if (create_owner_errs || skel->bss->create_errs) + printf("%s() errors %ld create_errs %ld\n", + storage_type == BPF_MAP_TYPE_SK_STORAGE ? + "socket" : "pthread_create", + create_owner_errs, skel->bss->create_errs); } /* Benchmark performance of creating bpf local storage */ const struct bench bench_local_storage_create = { .name = "local-storage-create", + .argp = &bench_local_storage_create_argp, .validate = validate, .setup = setup, .producer_thread = producer, diff --git a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c index 2814bab54d28..7c851c9d5e47 100644 --- a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c +++ b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c @@ -22,6 +22,13 @@ struct { __type(value, struct storage); } sk_storage_map SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct storage); +} task_storage_map SEC(".maps"); + SEC("raw_tp/kmalloc") int BPF_PROG(kmalloc, unsigned long call_site, const void *ptr, size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, @@ -32,6 +39,24 @@ int BPF_PROG(kmalloc, unsigned long call_site, const void *ptr, return 0; } +SEC("tp_btf/sched_process_fork") +int BPF_PROG(fork, struct task_struct *parent, struct task_struct *child) +{ + struct storage *stg; + + if (parent->tgid != bench_pid) + return 0; + + stg = bpf_task_storage_get(&task_storage_map, child, NULL, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (stg) + __sync_fetch_and_add(&create_cnts, 1); + else + __sync_fetch_and_add(&create_errs, 1); + + return 0; +} + SEC("lsm.s/socket_post_create") int BPF_PROG(socket_post_create, struct socket *sock, int family, int type, int protocol, int kern) -- cgit v1.2.3-70-g09d2 From a504d246d2129b6fe40059d372288ffb36e4588b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 24 Mar 2023 11:17:57 -0700 Subject: selftests: tls: add a test for queuing data before setting the ULP Other tests set up the connection fully on both ends before communicating any data. Add a test which will queue up TLS records to TCP before the TLS ULP is installed. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/tls.c | 45 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index 2cbb12736596..e699548d4247 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -1820,4 +1820,49 @@ TEST(tls_v6ops) { close(sfd); } +TEST(prequeue) { + struct tls_crypto_info_keys tls12; + char buf[20000], buf2[20000]; + struct sockaddr_in addr; + int sfd, cfd, ret, fd; + socklen_t len; + + len = sizeof(addr); + memrnd(buf, sizeof(buf)); + + tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_256, &tls12); + + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = htonl(INADDR_ANY); + addr.sin_port = 0; + + fd = socket(AF_INET, SOCK_STREAM, 0); + sfd = socket(AF_INET, SOCK_STREAM, 0); + + ASSERT_EQ(bind(sfd, &addr, sizeof(addr)), 0); + ASSERT_EQ(listen(sfd, 10), 0); + ASSERT_EQ(getsockname(sfd, &addr, &len), 0); + ASSERT_EQ(connect(fd, &addr, sizeof(addr)), 0); + ASSERT_GE(cfd = accept(sfd, &addr, &len), 0); + close(sfd); + + ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")); + if (ret) { + ASSERT_EQ(errno, ENOENT); + SKIP(return, "no TLS support"); + } + + ASSERT_EQ(setsockopt(fd, SOL_TLS, TLS_TX, &tls12, tls12.len), 0); + EXPECT_EQ(send(fd, buf, sizeof(buf), MSG_DONTWAIT), sizeof(buf)); + + ASSERT_EQ(setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")), 0); + ASSERT_EQ(setsockopt(cfd, SOL_TLS, TLS_RX, &tls12, tls12.len), 0); + EXPECT_EQ(recv(cfd, buf2, sizeof(buf2), MSG_WAITALL), sizeof(buf2)); + + EXPECT_EQ(memcmp(buf, buf2, sizeof(buf)), 0); + + close(fd); + close(cfd); +} + TEST_HARNESS_MAIN -- cgit v1.2.3-70-g09d2 From 7283137a7622292076dd8b7f3b8b2bb203ce5a14 Mon Sep 17 00:00:00 2001 From: Hengqi Chen Date: Sun, 26 Mar 2023 09:53:41 +0000 Subject: selftests/bpf: Don't assume page size is 4096 The verifier test creates BPF ringbuf maps using hard-coded 4096 as max_entries. Some tests will fail if the page size of the running kernel is not 4096. Use getpagesize() instead. Signed-off-by: Hengqi Chen Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230326095341.816023-1-hengqi.chen@gmail.com --- tools/testing/selftests/bpf/test_verifier.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 5b90eef09ade..e4657c5bc3f1 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -1079,7 +1079,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, } if (*fixup_map_ringbuf) { map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0, - 0, 4096); + 0, getpagesize()); do { prog[*fixup_map_ringbuf].imm = map_fds[20]; fixup_map_ringbuf++; -- cgit v1.2.3-70-g09d2 From 6e9e141a7a28520a1cd13c96ad9127860e32ffbb Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 28 Mar 2023 05:08:12 +0300 Subject: selftests/bpf: Verifier/xdp_direct_packet_access.c converted to inline assembly Test verifier/xdp_direct_packet_access.c automatically converted to use inline assembly. Original test would be removed in the next patch. 
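To make the conversion concrete, here is one of the checks in both encodings; the macro form below is the "pkt_data' > pkt_end, corner case, good access" entry from the file removed in the next patch, and the asm form is its counterpart added by this patch. Both complete files appear in the diffs of this and the following commit; the snippet is shown only for illustration, with whitespace condensed:

	/* old style: verifier/xdp_direct_packet_access.c */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

	/* new style: progs/verifier_xdp_direct_packet_access.c */
	SEC("xdp")
	__description("XDP pkt read, pkt_data' > pkt_end, corner case, good access")
	__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
	__naked void end_corner_case_good_access_1(void)
	{
		asm volatile ("					\
		r2 = *(u32*)(r1 + %[xdp_md_data]);		\
		r3 = *(u32*)(r1 + %[xdp_md_data_end]);		\
		r1 = r2;					\
		r1 += 8;					\
		if r1 > r3 goto l0_%=;				\
		r0 = *(u64*)(r1 - 8);				\
	l0_%=:	r0 = 0;						\
		exit;						\
	"	:
		: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
		  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
		: __clobber_all);
	}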
Signed-off-by: Eduard Zingerman Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230328020813.392560-2-eddyz87@gmail.com --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../bpf/progs/verifier_xdp_direct_packet_access.c | 1722 ++++++++++++++++++++ 2 files changed, 1724 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index a774d5b193f1..efc8cf2e18d0 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -40,6 +40,7 @@ #include "verifier_var_off.skel.h" #include "verifier_xadd.skel.h" #include "verifier_xdp.skel.h" +#include "verifier_xdp_direct_packet_access.skel.h" __maybe_unused static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) @@ -102,3 +103,4 @@ void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); } void test_verifier_var_off(void) { RUN(verifier_var_off); } void test_verifier_xadd(void) { RUN(verifier_xadd); } void test_verifier_xdp(void) { RUN(verifier_xdp); } +void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); } diff --git a/tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c new file mode 100644 index 000000000000..df2dfd1b15d1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c @@ -0,0 +1,1722 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("xdp") +__description("XDP pkt read, pkt_end mangling, bad access 1") +__failure __msg("R3 pointer arithmetic on pkt_end") +__naked void end_mangling_bad_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + r3 += 8; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end mangling, bad access 2") +__failure __msg("R3 pointer arithmetic on pkt_end") +__naked void end_mangling_bad_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + r3 -= 8; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' > pkt_end, corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void end_corner_case_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' > pkt_end, bad access 
1") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_end_bad_access_1_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 4); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' > pkt_end, bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_end_bad_access_2_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' > pkt_end, corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 9; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 9); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 7); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end > pkt_data', good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void end_pkt_data_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u32*)(r1 - 5); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end > pkt_data', corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 6; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 6); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end > pkt_data', bad access 2") +__failure __msg("R1 offset is outside of the packet") 
+__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_2_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 > r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end > pkt_data', corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_corner_case_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 7); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end > pkt_data', corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' < pkt_end, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_pkt_end_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u32*)(r1 - 5); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 6; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 6); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' < pkt_end, bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_end_bad_access_2_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 < r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' < pkt_end, corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void end_corner_case_good_access_2(void) 
+{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 7); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' < pkt_end, corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end < pkt_data', corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_corner_case_good_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end < pkt_data', bad access 1") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_1_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 4); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end < pkt_data', bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_2_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 < r1 goto l0_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end < pkt_data', corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 9; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 9); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end < pkt_data', corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + 
%[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 7); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' >= pkt_end, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_pkt_end_good_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 >= r3 goto l0_%=; \ + r0 = *(u32*)(r1 - 5); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_5(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 6; \ + if r1 >= r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 6); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' >= pkt_end, bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_end_bad_access_2_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 >= r3 goto l0_%=; \ +l0_%=: r0 = *(u32*)(r1 - 5); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' >= pkt_end, corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void end_corner_case_good_access_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r1 >= r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 7); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_5(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 >= r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end >= pkt_data', corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_corner_case_good_access_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 >= r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ +l1_%=: r0 = 0; \ + exit; 
\ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end >= pkt_data', bad access 1") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_1_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 >= r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 4); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end >= pkt_data', bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_2_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 >= r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end >= pkt_data', corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_6(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 9; \ + if r3 >= r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 9); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_6(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r3 >= r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 7); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' <= pkt_end, corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void end_corner_case_good_access_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 <= r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' <= pkt_end, bad access 1") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_end_bad_access_1_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 <= r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 4); \ +l1_%=: r0 = 0; \ + 
exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' <= pkt_end, bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_end_bad_access_2_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r1 <= r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_7(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 9; \ + if r1 <= r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 9); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_7(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r1 <= r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 7); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end <= pkt_data', good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void end_pkt_data_good_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 <= r1 goto l0_%=; \ + r0 = *(u32*)(r1 - 5); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_8(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 6; \ + if r3 <= r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 6); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end <= pkt_data', bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_2_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 <= r1 goto l0_%=; \ +l0_%=: r0 = *(u32*)(r1 - 5); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct 
xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end <= pkt_data', corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_corner_case_good_access_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 7; \ + if r3 <= r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 7); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_end <= pkt_data', corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_8(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 8; \ + if r3 <= r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' > pkt_data, corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_corner_case_good_access_5(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' > pkt_data, bad access 1") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_1_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 4); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' > pkt_data, bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_2_5(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 > r3 goto l0_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_9(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 9; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 9); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP 
pkt read, pkt_meta' > pkt_data, corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_9(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 7; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 7); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data > pkt_meta', good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_pkt_meta_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u32*)(r1 - 5); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_10(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 6; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 6); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data > pkt_meta', bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_meta_bad_access_2_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 > r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data > pkt_meta', corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void meta_corner_case_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 7; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 7); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data > pkt_meta', corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_10(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 > r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, 
pkt_meta' < pkt_data, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void meta_pkt_data_good_access_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u32*)(r1 - 5); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_11(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 6; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 6); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' < pkt_data, bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_2_6(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 < r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' < pkt_data, corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_corner_case_good_access_6(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 7; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 7); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_11(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 < r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data < pkt_meta', corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void meta_corner_case_good_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data < pkt_meta', bad access 1") 
+__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_meta_bad_access_1_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 4); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data < pkt_meta', bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_meta_bad_access_2_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 < r1 goto l0_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data < pkt_meta', corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_12(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 9; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 9); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_12(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 7; \ + if r3 < r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 7); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' >= pkt_data, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void meta_pkt_data_good_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 >= r3 goto l0_%=; \ + r0 = *(u32*)(r1 - 5); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_13(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 6; \ + if r1 >= r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 6); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' >= pkt_data, bad access 2") +__failure __msg("R1 offset is outside of the packet") 
+__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_2_7(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 >= r3 goto l0_%=; \ +l0_%=: r0 = *(u32*)(r1 - 5); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' >= pkt_data, corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_corner_case_good_access_7(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 7; \ + if r1 >= r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 7); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_13(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 >= r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data >= pkt_meta', corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void meta_corner_case_good_access_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 >= r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data >= pkt_meta', bad access 1") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_meta_bad_access_1_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 >= r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 4); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data >= pkt_meta', bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_meta_bad_access_2_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 >= r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_14(void) +{ + 
asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 9; \ + if r3 >= r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 9); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_14(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 7; \ + if r3 >= r1 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 7); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' <= pkt_data, corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_corner_case_good_access_8(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 <= r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' <= pkt_data, bad access 1") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_1_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 <= r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 4); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' <= pkt_data, bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_data_bad_access_2_8(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r1 <= r3 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_15(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 9; \ + if r1 <= r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 9); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) 
+__naked void corner_case_1_bad_access_15(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 7; \ + if r1 <= r3 goto l0_%=; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r1 - 7); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data <= pkt_meta', good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void data_pkt_meta_good_access_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 <= r1 goto l0_%=; \ + r0 = *(u32*)(r1 - 5); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_bad_access_16(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 6; \ + if r3 <= r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 6); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data <= pkt_meta', bad access 2") +__failure __msg("R1 offset is outside of the packet") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void pkt_meta_bad_access_2_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 <= r1 goto l0_%=; \ +l0_%=: r0 = *(u32*)(r1 - 5); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data <= pkt_meta', corner case, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void meta_corner_case_good_access_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 7; \ + if r3 <= r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 7); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +SEC("xdp") +__description("XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void corner_case_1_good_access_16(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data_meta]); \ + r3 = *(u32*)(r1 + %[xdp_md_data]); \ + r1 = r2; \ + r1 += 8; \ + if r3 <= r1 goto l0_%=; \ + r0 = *(u64*)(r1 - 8); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From c63a7d8bbb54a904f3ab8ff0aae39cd571b2c39c Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 28 Mar 2023 05:08:13 +0300 Subject: selftests/bpf: Remove 
verifier/xdp_direct_packet_access.c, converted to progs/verifier_xdp_direct_packet_access.c Removing verifier/xdp_direct_packet_access.c.c as it was automatically converted to use inline assembly in the previous commit. It is available in progs/verifier_xdp_direct_packet_access.c.c. Signed-off-by: Eduard Zingerman Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230328020813.392560-3-eddyz87@gmail.com --- .../bpf/verifier/xdp_direct_packet_access.c | 1468 -------------------- 1 file changed, 1468 deletions(-) delete mode 100644 tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c deleted file mode 100644 index b4ec228eb95d..000000000000 --- a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c +++ /dev/null @@ -1,1468 +0,0 @@ -{ - "XDP pkt read, pkt_end mangling, bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R3 pointer arithmetic on pkt_end", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "XDP pkt read, pkt_end mangling, bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R3 pointer arithmetic on pkt_end", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "XDP pkt read, pkt_data' > pkt_end, corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' > pkt_end, bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' > pkt_end, bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, 
data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' > pkt_end, corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end > pkt_data', good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end > pkt_data', corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end > pkt_data', bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end > pkt_data', corner case, good access", - .insns = { - 
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end > pkt_data', corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' < pkt_end, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' < pkt_end, bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' < pkt_end, corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - 
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' < pkt_end, corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end < pkt_data', corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end < pkt_data', bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end < pkt_data', bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end < pkt_data', corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end < pkt_data', corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", 
- .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' >= pkt_end, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), - BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' >= pkt_end, bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' >= pkt_end, corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end >= pkt_data', corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), 
- }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end >= pkt_data', bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end >= pkt_data', bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end >= pkt_data', corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' <= pkt_end, corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' <= pkt_end, bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - 
BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' <= pkt_end, bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), - BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end <= pkt_data', good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end <= pkt_data', bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - 
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end <= pkt_data', corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_end <= pkt_data', corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' > pkt_data, corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' > pkt_data, bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' > pkt_data, bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - 
offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data > pkt_meta', good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data > pkt_meta', bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data > pkt_meta', corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, 
-{ - "XDP pkt read, pkt_data > pkt_meta', corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' < pkt_data, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' < pkt_data, bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' < pkt_data, corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - 
BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data < pkt_meta', corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data < pkt_meta', bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data < pkt_meta', bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data < pkt_meta', corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' >= pkt_data, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_W, 
BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), - BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' >= pkt_data, bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' >= pkt_data, corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data >= pkt_meta', corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data >= pkt_meta', bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), - 
BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data >= pkt_meta', bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' <= pkt_data, corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' <= pkt_data, bad access 1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' <= pkt_data, bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - 
offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), - BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data <= pkt_meta', good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data <= pkt_meta', bad access 2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R1 offset is outside of the packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = 
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data <= pkt_meta', corner case, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct xdp_md, data_meta)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -- cgit v1.2.3-70-g09d2 From 24265c2c91ad6aae9446e18472566cd83e92b602 Mon Sep 17 00:00:00 2001 From: Bobby Eshleman Date: Mon, 27 Mar 2023 22:16:06 +0000 Subject: testing/vsock: add vsock_perf to gitignore This adds the vsock_perf binary to the gitignore file. Fixes: 8abbffd27ced ("test/vsock: vsock_perf utility") Signed-off-by: Bobby Eshleman Reviewed-by: Arseniy Krasnov Reviewed-by: Stefano Garzarella Link: https://lore.kernel.org/r/20230327-vsock-add-vsock-perf-to-ignore-v1-1-f28a84f3606b@bytedance.com Signed-off-by: Jakub Kicinski --- tools/testing/vsock/.gitignore | 1 + 1 file changed, 1 insertion(+) (limited to 'tools/testing') diff --git a/tools/testing/vsock/.gitignore b/tools/testing/vsock/.gitignore index 87ca2731cff9..a8adcfdc292b 100644 --- a/tools/testing/vsock/.gitignore +++ b/tools/testing/vsock/.gitignore @@ -2,3 +2,4 @@ *.d vsock_test vsock_diag_test +vsock_perf -- cgit v1.2.3-70-g09d2 From c7c605c982d63b2140280d005a9df321d74f81dc Mon Sep 17 00:00:00 2001 From: Bobby Eshleman Date: Mon, 27 Mar 2023 19:11:52 +0000 Subject: selftests/bpf: add vsock to vmtest.sh Add vsock loopback to the test kernel. This allows sockmap for vsock to be tested. Signed-off-by: Bobby Eshleman Acked-by: Stefano Garzarella Signed-off-by: David S. 
Miller --- tools/testing/selftests/bpf/config.aarch64 | 2 ++ tools/testing/selftests/bpf/config.s390x | 3 +++ tools/testing/selftests/bpf/config.x86_64 | 3 +++ 3 files changed, 8 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/config.aarch64 b/tools/testing/selftests/bpf/config.aarch64 index 1f0437644186..253821494884 100644 --- a/tools/testing/selftests/bpf/config.aarch64 +++ b/tools/testing/selftests/bpf/config.aarch64 @@ -176,6 +176,8 @@ CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y CONFIG_VIRTIO_MMIO=y CONFIG_VIRTIO_NET=y CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_VSOCKETS_COMMON=y CONFIG_VLAN_8021Q=y CONFIG_VSOCKETS=y +CONFIG_VSOCKETS_LOOPBACK=y CONFIG_XFRM_USER=y diff --git a/tools/testing/selftests/bpf/config.s390x b/tools/testing/selftests/bpf/config.s390x index d49f6170e7bd..2ba92167be35 100644 --- a/tools/testing/selftests/bpf/config.s390x +++ b/tools/testing/selftests/bpf/config.s390x @@ -140,5 +140,8 @@ CONFIG_VIRTIO_BALLOON=y CONFIG_VIRTIO_BLK=y CONFIG_VIRTIO_NET=y CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_VSOCKETS_COMMON=y CONFIG_VLAN_8021Q=y +CONFIG_VSOCKETS=y +CONFIG_VSOCKETS_LOOPBACK=y CONFIG_XFRM_USER=y diff --git a/tools/testing/selftests/bpf/config.x86_64 b/tools/testing/selftests/bpf/config.x86_64 index dd97d61d325c..b650b2e617b8 100644 --- a/tools/testing/selftests/bpf/config.x86_64 +++ b/tools/testing/selftests/bpf/config.x86_64 @@ -234,7 +234,10 @@ CONFIG_VIRTIO_BLK=y CONFIG_VIRTIO_CONSOLE=y CONFIG_VIRTIO_NET=y CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_VSOCKETS_COMMON=y CONFIG_VLAN_8021Q=y +CONFIG_VSOCKETS=y +CONFIG_VSOCKETS_LOOPBACK=y CONFIG_X86_ACPI_CPUFREQ=y CONFIG_X86_CPUID=y CONFIG_X86_MSR=y -- cgit v1.2.3-70-g09d2 From d61bd8c1fd02cfc8aed00a58c20bd58c72549e3a Mon Sep 17 00:00:00 2001 From: Bobby Eshleman Date: Mon, 27 Mar 2023 19:11:53 +0000 Subject: selftests/bpf: add a test case for vsock sockmap Add a test case testing the redirection from connectible AF_VSOCK sockets to connectible AF_UNIX sockets. Signed-off-by: Bobby Eshleman Acked-by: Stefano Garzarella Signed-off-by: David S. 
Miller --- .../selftests/bpf/prog_tests/sockmap_listen.c | 163 +++++++++++++++++++++ 1 file changed, 163 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 567e07c19ecc..8f09e1ea3ba7 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -251,6 +252,16 @@ static void init_addr_loopback6(struct sockaddr_storage *ss, socklen_t *len) *len = sizeof(*addr6); } +static void init_addr_loopback_vsock(struct sockaddr_storage *ss, socklen_t *len) +{ + struct sockaddr_vm *addr = memset(ss, 0, sizeof(*ss)); + + addr->svm_family = AF_VSOCK; + addr->svm_port = VMADDR_PORT_ANY; + addr->svm_cid = VMADDR_CID_LOCAL; + *len = sizeof(*addr); +} + static void init_addr_loopback(int family, struct sockaddr_storage *ss, socklen_t *len) { @@ -261,6 +272,9 @@ static void init_addr_loopback(int family, struct sockaddr_storage *ss, case AF_INET6: init_addr_loopback6(ss, len); return; + case AF_VSOCK: + init_addr_loopback_vsock(ss, len); + return; default: FAIL("unsupported address family %d", family); } @@ -1478,6 +1492,8 @@ static const char *family_str(sa_family_t family) return "IPv6"; case AF_UNIX: return "Unix"; + case AF_VSOCK: + return "VSOCK"; default: return "unknown"; } @@ -1689,6 +1705,151 @@ static void test_unix_redir(struct test_sockmap_listen *skel, struct bpf_map *ma unix_skb_redir_to_connected(skel, map, sotype); } +/* Returns two connected loopback vsock sockets */ +static int vsock_socketpair_connectible(int sotype, int *v0, int *v1) +{ + struct sockaddr_storage addr; + socklen_t len = sizeof(addr); + int s, p, c; + + s = socket_loopback(AF_VSOCK, sotype); + if (s < 0) + return -1; + + c = xsocket(AF_VSOCK, sotype | SOCK_NONBLOCK, 0); + if (c == -1) + goto close_srv; + + if (getsockname(s, sockaddr(&addr), &len) < 0) + goto close_cli; + + if (connect(c, sockaddr(&addr), len) < 0 && errno != EINPROGRESS) { + FAIL_ERRNO("connect"); + goto close_cli; + } + + len = sizeof(addr); + p = accept_timeout(s, sockaddr(&addr), &len, IO_TIMEOUT_SEC); + if (p < 0) + goto close_cli; + + *v0 = p; + *v1 = c; + + return 0; + +close_cli: + close(c); +close_srv: + close(s); + + return -1; +} + +static void vsock_unix_redir_connectible(int sock_mapfd, int verd_mapfd, + enum redir_mode mode, int sotype) +{ + const char *log_prefix = redir_mode_str(mode); + char a = 'a', b = 'b'; + int u0, u1, v0, v1; + int sfd[2]; + unsigned int pass; + int err, n; + u32 key; + + zero_verdict_count(verd_mapfd); + + if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_NONBLOCK, 0, sfd)) + return; + + u0 = sfd[0]; + u1 = sfd[1]; + + err = vsock_socketpair_connectible(sotype, &v0, &v1); + if (err) { + FAIL("vsock_socketpair_connectible() failed"); + goto close_uds; + } + + err = add_to_sockmap(sock_mapfd, u0, v0); + if (err) { + FAIL("add_to_sockmap failed"); + goto close_vsock; + } + + n = write(v1, &a, sizeof(a)); + if (n < 0) + FAIL_ERRNO("%s: write", log_prefix); + if (n == 0) + FAIL("%s: incomplete write", log_prefix); + if (n < 1) + goto out; + + n = recv(mode == REDIR_INGRESS ? 
u0 : u1, &b, sizeof(b), MSG_DONTWAIT); + if (n < 0) + FAIL("%s: recv() err, errno=%d", log_prefix, errno); + if (n == 0) + FAIL("%s: incomplete recv", log_prefix); + if (b != a) + FAIL("%s: vsock socket map failed, %c != %c", log_prefix, a, b); + + key = SK_PASS; + err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass); + if (err) + goto out; + if (pass != 1) + FAIL("%s: want pass count 1, have %d", log_prefix, pass); +out: + key = 0; + bpf_map_delete_elem(sock_mapfd, &key); + key = 1; + bpf_map_delete_elem(sock_mapfd, &key); + +close_vsock: + close(v0); + close(v1); + +close_uds: + close(u0); + close(u1); +} + +static void vsock_unix_skb_redir_connectible(struct test_sockmap_listen *skel, + struct bpf_map *inner_map, + int sotype) +{ + int verdict = bpf_program__fd(skel->progs.prog_skb_verdict); + int verdict_map = bpf_map__fd(skel->maps.verdict_map); + int sock_map = bpf_map__fd(inner_map); + int err; + + err = xbpf_prog_attach(verdict, sock_map, BPF_SK_SKB_VERDICT, 0); + if (err) + return; + + skel->bss->test_ingress = false; + vsock_unix_redir_connectible(sock_map, verdict_map, REDIR_EGRESS, sotype); + skel->bss->test_ingress = true; + vsock_unix_redir_connectible(sock_map, verdict_map, REDIR_INGRESS, sotype); + + xbpf_prog_detach2(verdict, sock_map, BPF_SK_SKB_VERDICT); +} + +static void test_vsock_redir(struct test_sockmap_listen *skel, struct bpf_map *map) +{ + const char *family_name, *map_name; + char s[MAX_TEST_NAME]; + + family_name = family_str(AF_VSOCK); + map_name = map_type_str(map); + snprintf(s, sizeof(s), "%s %s %s", map_name, family_name, __func__); + if (!test__start_subtest(s)) + return; + + vsock_unix_skb_redir_connectible(skel, map, SOCK_STREAM); + vsock_unix_skb_redir_connectible(skel, map, SOCK_SEQPACKET); +} + static void test_reuseport(struct test_sockmap_listen *skel, struct bpf_map *map, int family, int sotype) { @@ -2060,12 +2221,14 @@ void serial_test_sockmap_listen(void) run_tests(skel, skel->maps.sock_map, AF_INET6); test_unix_redir(skel, skel->maps.sock_map, SOCK_DGRAM); test_unix_redir(skel, skel->maps.sock_map, SOCK_STREAM); + test_vsock_redir(skel, skel->maps.sock_map); skel->bss->test_sockmap = false; run_tests(skel, skel->maps.sock_hash, AF_INET); run_tests(skel, skel->maps.sock_hash, AF_INET6); test_unix_redir(skel, skel->maps.sock_hash, SOCK_DGRAM); test_unix_redir(skel, skel->maps.sock_hash, SOCK_STREAM); + test_vsock_redir(skel, skel->maps.sock_hash); test_sockmap_listen__destroy(skel); } -- cgit v1.2.3-70-g09d2 From 9095ce97bf8a4f5123dc9dbfd1cabcf2a78f5883 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Mon, 27 Mar 2023 12:22:24 +0200 Subject: selftests: mptcp: add mptcp_info tests This patch adds the mptcp_info fields tests in endpoint_tests(). Add a new function chk_mptcp_info() to check the given number of the given mptcp_info field. Link: https://github.com/multipath-tcp/mptcp_net-next/issues/330 Signed-off-by: Geliang Tang Reviewed-by: Matthieu Baerts Signed-off-by: Matthieu Baerts Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/mptcp/mptcp_join.sh | 47 ++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index 42e3bd1a05f5..fafd19ec7e1f 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -1719,6 +1719,46 @@ chk_subflow_nr() fi } +chk_mptcp_info() +{ + local nr_info=$1 + local info + local cnt1 + local cnt2 + local dump_stats + + if [[ $nr_info = "subflows_"* ]]; then + info="subflows" + nr_info=${nr_info:9} + else + echo "[fail] unsupported argument: $nr_info" + fail_test + return 1 + fi + + printf "%-${nr_blank}s %-30s" " " "mptcp_info $info=$nr_info" + + cnt1=$(ss -N $ns1 -inmHM | grep "$info:" | + sed -n 's/.*\('"$info"':\)\([[:digit:]]*\).*$/\2/p;q') + [ -z "$cnt1" ] && cnt1=0 + cnt2=$(ss -N $ns2 -inmHM | grep "$info:" | + sed -n 's/.*\('"$info"':\)\([[:digit:]]*\).*$/\2/p;q') + [ -z "$cnt2" ] && cnt2=0 + if [ "$cnt1" != "$nr_info" ] || [ "$cnt2" != "$nr_info" ]; then + echo "[fail] got $cnt1:$cnt2 $info expected $nr_info" + fail_test + dump_stats=1 + else + echo "[ ok ]" + fi + + if [ "$dump_stats" = 1 ]; then + ss -N $ns1 -inmHM + ss -N $ns2 -inmHM + dump_stats + fi +} + chk_link_usage() { local ns=$1 @@ -3118,13 +3158,18 @@ endpoint_tests() run_tests $ns1 $ns2 10.0.1.1 4 0 0 speed_20 2>/dev/null & wait_mpj $ns2 + chk_subflow_nr needtitle "before delete" 2 + chk_mptcp_info subflows_1 + pm_nl_del_endpoint $ns2 2 10.0.2.2 sleep 0.5 - chk_subflow_nr needtitle "after delete" 1 + chk_subflow_nr "" "after delete" 1 + chk_mptcp_info subflows_0 pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow wait_mpj $ns2 chk_subflow_nr "" "after re-add" 2 + chk_mptcp_info subflows_1 kill_tests_wait fi } -- cgit v1.2.3-70-g09d2 From 4239561b69feb94e52e43d93685cc46fb9dbcae5 Mon Sep 17 00:00:00 2001 From: Yixin Shen Date: Wed, 29 Mar 2023 07:35:58 +0000 Subject: selftests/bpf: test a BPF CC writing app_limited Test whether a TCP CC implemented in BPF is allowed to write app_limited in struct tcp_sock. This is already allowed for the built-in TCP CC. 
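For orientation, a sketch of the kind of write this exercises, i.e. a struct_ops congestion-control op storing into tp->app_limited. The op name is made up and the value computation is simplified; the real change is the hunk below.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("struct_ops/sketch_cong_control")
void BPF_PROG(sketch_cong_control, struct sock *sk, const struct rate_sample *rs)
{
	struct tcp_sock *tp = (struct tcp_sock *)sk;

	/* never store 0: app_limited == 0 means "not app-limited" */
	tp->app_limited = (tp->delivered + tp->packets_out) ?: 1;
}
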
Signed-off-by: Yixin Shen Link: https://lore.kernel.org/r/20230329073558.8136-3-bobankhshen@gmail.com Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c index 43447704cf0e..0724a79cec78 100644 --- a/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c +++ b/tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c @@ -16,6 +16,16 @@ static inline struct tcp_sock *tcp_sk(const struct sock *sk) return (struct tcp_sock *)sk; } +static inline unsigned int tcp_left_out(const struct tcp_sock *tp) +{ + return tp->sacked_out + tp->lost_out; +} + +static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp) +{ + return tp->packets_out - tcp_left_out(tp) + tp->retrans_out; +} + SEC("struct_ops/write_sk_pacing_init") void BPF_PROG(write_sk_pacing_init, struct sock *sk) { @@ -31,11 +41,12 @@ SEC("struct_ops/write_sk_pacing_cong_control") void BPF_PROG(write_sk_pacing_cong_control, struct sock *sk, const struct rate_sample *rs) { - const struct tcp_sock *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); unsigned long rate = ((tp->snd_cwnd * tp->mss_cache * USEC_PER_SEC) << 3) / (tp->srtt_us ?: 1U << 3); sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate); + tp->app_limited = (tp->delivered + tcp_packets_in_flight(tp)) ?: 1; } SEC("struct_ops/write_sk_pacing_ssthresh") -- cgit v1.2.3-70-g09d2 From d6e6286a12e7b8a4ddc66237c4ccf6f531ef1c82 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 27 Mar 2023 11:52:00 -0700 Subject: libbpf: disassociate section handler on explicit bpf_program__set_type() call If user explicitly overrides programs's type with bpf_program__set_type() API call, we need to disassociate whatever SEC_DEF handler libbpf determined initially based on program's SEC() definition, as it's not goind to be valid anymore and could lead to crashes and/or confusing failures. Also, fix up bpf_prog_test_load() helper in selftests/bpf, which is force-setting program type (even if that's completely unnecessary; this is quite a legacy piece of code), and thus should expect auto-attach to not work, yet one of the tests explicitly relies on auto-attach for testing. Instead, force-set program type only if it differs from the desired one. 
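A caller-side sketch of that pattern; "prog.bpf.o" and "handler" are made-up names, only the libbpf calls are the real API.

#include <bpf/libbpf.h>

int load_and_attach(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;

	obj = bpf_object__open_file("prog.bpf.o", NULL);
	if (!obj)
		return -1;

	prog = bpf_object__find_program_by_name(obj, "handler");
	if (!prog)
		return -1;

	/* force-set the type only when it actually differs; otherwise the
	 * SEC()-derived handler is dropped and auto-attach stops working */
	if (bpf_program__type(prog) != BPF_PROG_TYPE_SCHED_CLS)
		bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);

	if (bpf_object__load(obj))
		return -1;

	link = bpf_program__attach(prog);
	return link ? 0 : -1;
}
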
Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230327185202.1929145-2-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/libbpf.c | 1 + tools/testing/selftests/bpf/testing_helpers.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 15737d7b5a28..49cd304ae3bc 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -8468,6 +8468,7 @@ int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type) return libbpf_err(-EBUSY); prog->type = type; + prog->sec_def = NULL; return 0; } diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c index 6c44153755e6..ecfea13f938b 100644 --- a/tools/testing/selftests/bpf/testing_helpers.c +++ b/tools/testing/selftests/bpf/testing_helpers.c @@ -195,7 +195,7 @@ int bpf_prog_test_load(const char *file, enum bpf_prog_type type, goto err_out; } - if (type != BPF_PROG_TYPE_UNSPEC) + if (type != BPF_PROG_TYPE_UNSPEC && bpf_program__type(prog) != type) bpf_program__set_type(prog, type); flags = bpf_program__flags(prog) | BPF_F_TEST_RND_HI32; -- cgit v1.2.3-70-g09d2 From b3c63d7ad81ad6f43921d59af18fc25c64327a74 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 27 Mar 2023 11:52:01 -0700 Subject: veristat: add -d debug mode option to see debug libbpf log Add -d option to allow requesting libbpf debug logs from veristat. Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230327185202.1929145-3-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/veristat.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index 83231456d3c5..263df32fbda8 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -135,6 +135,7 @@ static struct env { char **filenames; int filename_cnt; bool verbose; + bool debug; bool quiet; int log_level; enum resfmt out_fmt; @@ -169,7 +170,7 @@ static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va { if (!env.verbose) return 0; - if (level == LIBBPF_DEBUG /* && !env.verbose */) + if (level == LIBBPF_DEBUG && !env.debug) return 0; return vfprintf(stderr, format, args); } @@ -186,6 +187,7 @@ static const struct argp_option opts[] = { { NULL, 'h', NULL, OPTION_HIDDEN, "Show the full help" }, { "verbose", 'v', NULL, 0, "Verbose mode" }, { "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" }, + { "debug", 'd', NULL, 0, "Debug mode (turns on libbpf debug logging)" }, { "quiet", 'q', NULL, 0, "Quiet mode" }, { "emit", 'e', "SPEC", 0, "Specify stats to be emitted" }, { "sort", 's', "SPEC", 0, "Specify sort order" }, @@ -212,6 +214,10 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) case 'v': env.verbose = true; break; + case 'd': + env.debug = true; + env.verbose = true; + break; case 'q': env.quiet = true; break; -- cgit v1.2.3-70-g09d2 From fa7cc90620870e4444bb5184c08148495b1627c6 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 27 Mar 2023 11:52:02 -0700 Subject: veristat: guess and substitue underlying program type for freplace (EXT) progs SEC("freplace") (i.e., BPF_PROG_TYPE_EXT) programs are not loadable as is through veristat, as kernel expects actual program's FD during BPF_PROG_LOAD time, which veristat has no way 
of knowing. Unfortunately, freplace programs are a pretty important class of programs, especially when dealing with XDP chaining solutions, which rely on EXT programs. So let's do our best and teach veristat to try to guess the original program type, based on program's context argument type. And if guessing process succeeds, we manually override freplace/EXT with guessed program type using bpf_program__set_type() setter to increase chances of proper BPF verification. We rely on BTF and maintain a simple lookup table. This process is obviously not 100% bulletproof, as valid program might not use context and thus wouldn't have to specify correct type. Also, __sk_buff is very ambiguous and is the context type across many different program types. We pick BPF_PROG_TYPE_CGROUP_SKB for now, which seems to work fine in practice so far. Similarly, some program types require specifying attach type, and so we pick one out of possible few variants. Best effort at its best. But this makes veristat even more widely applicable. Signed-off-by: Andrii Nakryiko Tested-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230327185202.1929145-4-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/veristat.c | 121 +++++++++++++++++++++++++++++++-- 1 file changed, 117 insertions(+), 4 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index 263df32fbda8..055df1abd7ca 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -778,7 +779,62 @@ static int parse_verif_log(char * const buf, size_t buf_sz, struct verif_stats * return 0; } -static void fixup_obj(struct bpf_object *obj) +static int guess_prog_type_by_ctx_name(const char *ctx_name, + enum bpf_prog_type *prog_type, + enum bpf_attach_type *attach_type) +{ + /* We need to guess program type based on its declared context type. + * This guess can't be perfect as many different program types might + * share the same context type. So we can only hope to reasonably + * well guess this and get lucky. + * + * Just in case, we support both UAPI-side type names and + * kernel-internal names. 
+ */ + static struct { + const char *uapi_name; + const char *kern_name; + enum bpf_prog_type prog_type; + enum bpf_attach_type attach_type; + } ctx_map[] = { + /* __sk_buff is most ambiguous, for now we assume cgroup_skb */ + { "__sk_buff", "sk_buff", BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_INGRESS }, + { "bpf_sock", "sock", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND }, + { "bpf_sock_addr", "bpf_sock_addr_kern", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND }, + { "bpf_sock_ops", "bpf_sock_ops_kern", BPF_PROG_TYPE_SOCK_OPS, BPF_CGROUP_SOCK_OPS }, + { "sk_msg_md", "sk_msg", BPF_PROG_TYPE_SK_MSG, BPF_SK_MSG_VERDICT }, + { "bpf_cgroup_dev_ctx", "bpf_cgroup_dev_ctx", BPF_PROG_TYPE_CGROUP_DEVICE, BPF_CGROUP_DEVICE }, + { "bpf_sysctl", "bpf_sysctl_kern", BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_CGROUP_SYSCTL }, + { "bpf_sockopt", "bpf_sockopt_kern", BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT }, + { "sk_reuseport_md", "sk_reuseport_kern", BPF_PROG_TYPE_SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE }, + { "bpf_sk_lookup", "bpf_sk_lookup_kern", BPF_PROG_TYPE_SK_LOOKUP, BPF_SK_LOOKUP }, + { "xdp_md", "xdp_buff", BPF_PROG_TYPE_XDP, BPF_XDP }, + /* tracing types with no expected attach type */ + { "bpf_user_pt_regs_t", "pt_regs", BPF_PROG_TYPE_KPROBE }, + { "bpf_perf_event_data", "bpf_perf_event_data_kern", BPF_PROG_TYPE_PERF_EVENT }, + /* raw_tp programs use u64[] from kernel side, we don't want + * to match on that, probably; so NULL for kern-side type + */ + { "bpf_raw_tracepoint_args", NULL, BPF_PROG_TYPE_RAW_TRACEPOINT }, + }; + int i; + + if (!ctx_name) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(ctx_map); i++) { + if (strcmp(ctx_map[i].uapi_name, ctx_name) == 0 || + (ctx_map[i].kern_name && strcmp(ctx_map[i].kern_name, ctx_name) == 0)) { + *prog_type = ctx_map[i].prog_type; + *attach_type = ctx_map[i].attach_type; + return 0; + } + } + + return -ESRCH; +} + +static void fixup_obj(struct bpf_object *obj, struct bpf_program *prog, const char *filename) { struct bpf_map *map; @@ -798,18 +854,75 @@ static void fixup_obj(struct bpf_object *obj) bpf_map__set_max_entries(map, 1); } } + + /* SEC(freplace) programs can't be loaded with veristat as is, + * but we can try guessing their target program's expected type by + * looking at the type of program's first argument and substituting + * corresponding program type + */ + if (bpf_program__type(prog) == BPF_PROG_TYPE_EXT) { + const struct btf *btf = bpf_object__btf(obj); + const char *prog_name = bpf_program__name(prog); + enum bpf_prog_type prog_type; + enum bpf_attach_type attach_type; + const struct btf_type *t; + const char *ctx_name; + int id; + + if (!btf) + goto skip_freplace_fixup; + + id = btf__find_by_name_kind(btf, prog_name, BTF_KIND_FUNC); + t = btf__type_by_id(btf, id); + t = btf__type_by_id(btf, t->type); + if (!btf_is_func_proto(t) || btf_vlen(t) != 1) + goto skip_freplace_fixup; + + /* context argument is a pointer to a struct/typedef */ + t = btf__type_by_id(btf, btf_params(t)[0].type); + while (t && btf_is_mod(t)) + t = btf__type_by_id(btf, t->type); + if (!t || !btf_is_ptr(t)) + goto skip_freplace_fixup; + t = btf__type_by_id(btf, t->type); + while (t && btf_is_mod(t)) + t = btf__type_by_id(btf, t->type); + if (!t) + goto skip_freplace_fixup; + + ctx_name = btf__name_by_offset(btf, t->name_off); + + if (guess_prog_type_by_ctx_name(ctx_name, &prog_type, &attach_type) == 0) { + bpf_program__set_type(prog, prog_type); + bpf_program__set_expected_attach_type(prog, attach_type); + + if (!env.quiet) { + 
printf("Using guessed program type '%s' for %s/%s...\n", + libbpf_bpf_prog_type_str(prog_type), + filename, prog_name); + } + } else { + if (!env.quiet) { + printf("Failed to guess program type for freplace program with context type name '%s' for %s/%s. Consider using canonical type names to help veristat...\n", + ctx_name, filename, prog_name); + } + } + } +skip_freplace_fixup: + return; } static int process_prog(const char *filename, struct bpf_object *obj, struct bpf_program *prog) { const char *prog_name = bpf_program__name(prog); + const char *base_filename = basename(filename); size_t buf_sz = sizeof(verif_log_buf); char *buf = verif_log_buf; struct verif_stats *stats; int err = 0; void *tmp; - if (!should_process_file_prog(basename(filename), bpf_program__name(prog))) { + if (!should_process_file_prog(base_filename, bpf_program__name(prog))) { env.progs_skipped++; return 0; } @@ -835,12 +948,12 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf verif_log_buf[0] = '\0'; /* increase chances of successful BPF object loading */ - fixup_obj(obj); + fixup_obj(obj, prog, base_filename); err = bpf_object__load(obj); env.progs_processed++; - stats->file_name = strdup(basename(filename)); + stats->file_name = strdup(base_filename); stats->prog_name = strdup(bpf_program__name(prog)); stats->stats[VERDICT] = err == 0; /* 1 - success, 0 - failure */ parse_verif_log(buf, buf_sz, stats); -- cgit v1.2.3-70-g09d2 From 4ca13d1002f37c10038ff4ed3cfdc70dbe049d60 Mon Sep 17 00:00:00 2001 From: Xu Kuohai Date: Tue, 28 Mar 2023 21:10:48 -0400 Subject: selftests/bpf: Rewrite two infinite loops in bound check cases The two infinite loops in bound check cases added by commit 1a3148fc171f ("selftests/bpf: Check when bounds are not in the 32-bit range") increased the execution time of test_verifier from about 6 seconds to about 9 seconds. Rewrite these two infinite loops to finite loops to get rid of this extra time cost. 
Signed-off-by: Xu Kuohai Link: https://lore.kernel.org/r/20230329011048.1721937-1-xukuohai@huaweicloud.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/verifier/bounds.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c index 74b1917d4208..43942ce8cf15 100644 --- a/tools/testing/selftests/bpf/verifier/bounds.c +++ b/tools/testing/selftests/bpf/verifier/bounds.c @@ -784,22 +784,26 @@ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 13), BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), BPF_LD_IMM64(BPF_REG_0, 0x7fffffffffffff10), BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_LD_IMM64(BPF_REG_2, 0x8000000000000fff), BPF_LD_IMM64(BPF_REG_0, 0x8000000000000000), BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_2, 3), /* r1 signed range is [S64_MIN, S64_MAX] */ - BPF_JMP_REG(BPF_JSLT, BPF_REG_0, BPF_REG_1, -2), + BPF_JMP_REG(BPF_JSLT, BPF_REG_0, BPF_REG_1, -3), + + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr = "BPF program is too large", - .result = REJECT, + .result = ACCEPT, .prog_type = BPF_PROG_TYPE_XDP, }, { @@ -856,21 +860,25 @@ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 6), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 10), BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), BPF_MOV32_IMM(BPF_REG_0, 0x7fffff10), BPF_ALU32_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), + BPF_MOV32_IMM(BPF_REG_2, 0x80000fff), BPF_MOV32_IMM(BPF_REG_0, 0x80000000), BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 1), + BPF_JMP32_REG(BPF_JSGT, BPF_REG_0, BPF_REG_2, 3), /* r1 signed range is [S32_MIN, S32_MAX] */ - BPF_JMP32_REG(BPF_JSLT, BPF_REG_0, BPF_REG_1, -2), + BPF_JMP32_REG(BPF_JSLT, BPF_REG_0, BPF_REG_1, -3), + + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr = "BPF program is too large", - .result = REJECT, + .result = ACCEPT, .prog_type = BPF_PROG_TYPE_XDP, }, -- cgit v1.2.3-70-g09d2 From d816129530e77b905b492631651eb09a18789692 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 30 Mar 2023 12:01:15 -0700 Subject: veristat: change guess for __sk_buff from CGROUP_SKB to SCHED_CLS SCHED_CLS seems to be a better option as a default guess for freplace programs that have __sk_buff as a context type. 
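Illustrative only, the kind of program affected: an freplace prog whose declared context is __sk_buff. The name is made up, and a real freplace program must mirror a global function in its target object; with this change veristat now guesses SCHED_CLS rather than CGROUP_SKB for it.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("freplace")
int sketch_skb_func(struct __sk_buff *skb)
{
	return 0;
}

char _license[] SEC("license") = "GPL";
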
Reported-by: Eduard Zingerman Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230330190115.3942962-1-andrii@kernel.org Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/veristat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index 055df1abd7ca..7888c03ba631 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -798,7 +798,7 @@ static int guess_prog_type_by_ctx_name(const char *ctx_name, enum bpf_attach_type attach_type; } ctx_map[] = { /* __sk_buff is most ambiguous, for now we assume cgroup_skb */ - { "__sk_buff", "sk_buff", BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_INGRESS }, + { "__sk_buff", "sk_buff", BPF_PROG_TYPE_SCHED_CLS }, { "bpf_sock", "sock", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND }, { "bpf_sock_addr", "bpf_sock_addr_kern", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND }, { "bpf_sock_ops", "bpf_sock_ops_kern", BPF_PROG_TYPE_SOCK_OPS, BPF_CGROUP_SOCK_OPS }, -- cgit v1.2.3-70-g09d2 From 67efbd57bc6e57de276b964f023f8f947bc52460 Mon Sep 17 00:00:00 2001 From: David Vernet Date: Thu, 30 Mar 2023 09:52:03 -0500 Subject: selftests/bpf: Add testcases for ptr_*_or_null_ in bpf_kptr_xchg The second argument of the bpf_kptr_xchg() helper function is ARG_PTR_TO_BTF_ID_OR_NULL. A recent patch fixed a bug whereby the verifier would fail with an internal error message if a program invoked the helper with a PTR_TO_BTF_ID | PTR_MAYBE_NULL register. This testcase adds some testcases to ensure that it fails gracefully moving forward. Before the fix, these testcases would have failed an error resembling the following: ; p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); 99: (7b) *(u64 *)(r10 -16) = r7 ; frame1: ... 100: (bf) r1 = r10 ; frame1: ... 101: (07) r1 += -16 ; frame1: ... ; p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); 102: (85) call bpf_kfunc_call_test_acquire#13908 ; frame1: R0_w=ptr_or_null_prog_test_ref_kfunc... ; p = bpf_kptr_xchg(&v->ref_ptr, p); 103: (bf) r1 = r6 ; frame1: ... 104: (bf) r2 = r0 ; frame1: R0_w=ptr_or_null_prog_test_ref_kfunc... 
105: (85) call bpf_kptr_xchg#194 verifier internal error: invalid PTR_TO_BTF_ID register for type match Signed-off-by: David Vernet Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230330145203.80506-2-void@manifault.com --- .../testing/selftests/bpf/progs/cpumask_failure.c | 25 ++++++++++++++++++++++ tools/testing/selftests/bpf/progs/map_kptr_fail.c | 23 ++++++++++++++++++++ 2 files changed, 48 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/cpumask_failure.c b/tools/testing/selftests/bpf/progs/cpumask_failure.c index db4f94e72b61..a9bf6ea336cf 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_failure.c +++ b/tools/testing/selftests/bpf/progs/cpumask_failure.c @@ -165,3 +165,28 @@ int BPF_PROG(test_global_mask_no_null_check, struct task_struct *task, u64 clone return 0; } + +SEC("tp_btf/task_newtask") +__failure __msg("Possibly NULL pointer passed to helper arg2") +int BPF_PROG(test_global_mask_rcu_no_null_check, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *prev, *curr; + + curr = bpf_cpumask_create(); + if (!curr) + return 0; + + prev = bpf_kptr_xchg(&global_mask, curr); + if (prev) + bpf_cpumask_release(prev); + + bpf_rcu_read_lock(); + curr = global_mask; + /* PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU passed to bpf_kptr_xchg() */ + prev = bpf_kptr_xchg(&global_mask, curr); + bpf_rcu_read_unlock(); + if (prev) + bpf_cpumask_release(prev); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c index 08f9ec18c345..15bf3127dba3 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c @@ -20,6 +20,7 @@ struct array_map { } array_map SEC(".maps"); extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; +extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; extern struct prog_test_ref_kfunc * bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym; @@ -442,4 +443,26 @@ int kptr_get_ref_state(struct __sk_buff *ctx) return 0; } +SEC("?tc") +__failure __msg("Possibly NULL pointer passed to helper arg2") +int kptr_xchg_possibly_null(struct __sk_buff *ctx) +{ + struct prog_test_ref_kfunc *p; + struct map_value *v; + int key = 0; + + v = bpf_map_lookup_elem(&array_map, &key); + if (!v) + return 0; + + p = bpf_kfunc_call_test_acquire(&(unsigned long){0}); + + /* PTR_TO_BTF_ID | PTR_MAYBE_NULL passed to bpf_kptr_xchg() */ + p = bpf_kptr_xchg(&v->ref_ptr, p); + if (p) + bpf_kfunc_call_test_release(p); + + return 0; +} + char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From 46e9acb7ae2a36643b8c43f5107c072cbbc48572 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 29 Mar 2023 17:24:53 +0200 Subject: selftests: rtnetlink: Fix do_test_address_proto() This selftest was introduced recently in the commit cited below. It misses several check_err() invocations to actually verify that the previous command succeeded. When these are added, the first one fails, because besides the addresses added by hand, there can be a link-local address added by the kernel. Adjust the check to expect at least three addresses instead of exactly three, and add the missing check_err's. Furthermore, the explanatory comments assume that the address with no protocol is $addr2, when in fact it is $addr3. Update the comments. 
Fixes: 6a414fd77f61 ("selftests: rtnetlink: Add an address proto test") Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Link: https://lore.kernel.org/r/53a579bc883e1bf2fe490d58427cf22c2d1aa21f.1680102695.git.petrm@nvidia.com Signed-off-by: Jakub Kicinski --- tools/testing/selftests/net/rtnetlink.sh | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 3b15c686c03f..383ac6fc037d 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -1302,19 +1302,23 @@ do_test_address_proto() count=$(address_count) check_err $? - (( count == 3 )) # $addr, $addr2 and $addr3 + (( count >= 3 )) # $addr, $addr2 and $addr3 plus any kernel addresses + check_err $? count=$(address_count proto 0) check_err $? - (( count == 1 )) # just $addr2 + (( count == 1 )) # just $addr3 + check_err $? count=$(address_count proto 0x11) check_err $? - (( count == 2 )) # $addr and $addr2 + (( count == 2 )) # $addr and $addr3 + check_err $? count=$(address_count proto 0xab) check_err $? - (( count == 1 )) # just $addr2 + (( count == 1 )) # just $addr3 + check_err $? ip address del dev "$devdummy" "$addr" ip address del dev "$devdummy" "$addr2" -- cgit v1.2.3-70-g09d2 From 7f3f86402609315d6b98f6c76713e07b5a9562aa Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Wed, 29 Mar 2023 11:54:53 +0200 Subject: selftests: tc-testing: add "depends_on" property to skip tests currently, users can skip individual test cases by means of writing "skip": "yes" in the scenario file. Extend this functionality, introducing 'dependsOn': it's optional property like "skip", but the value contains a command (for example, a probe on iproute2 to check if it supports a specific feature). If such property is present, tdc executes that command and skips the test when the return value is non-zero. Reviewed-by: Pedro Tammela Signed-off-by: Davide Caratti Signed-off-by: Jakub Kicinski --- .../tc-testing/creating-testcases/AddingTestCases.txt | 2 ++ tools/testing/selftests/tc-testing/tdc.py | 13 +++++++++++++ 2 files changed, 15 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt b/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt index a28571aff0e1..ff956d8c99c5 100644 --- a/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt +++ b/tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt @@ -38,6 +38,8 @@ skip: A completely optional key, if the corresponding value is "yes" this test case will still appear in the results output but marked as skipped. This key can be placed anywhere inside the test case at the top level. +dependsOn: Same as 'skip', but the value is executed as a command. The test + is skipped when the command returns non-zero. category: A list of single-word descriptions covering what the command under test is testing. Example: filter, actions, u32, gact, etc. 
setup: The list of commands required to ensure the command under test diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py index 7bd94f8e490a..b98256f38447 100755 --- a/tools/testing/selftests/tc-testing/tdc.py +++ b/tools/testing/selftests/tc-testing/tdc.py @@ -369,6 +369,19 @@ def run_one_test(pm, args, index, tidx): pm.call_post_execute() return res + if 'dependsOn' in tidx: + if (args.verbose > 0): + print('probe command for test skip') + (p, procout) = exec_cmd(args, pm, 'execute', tidx['dependsOn']) + if p: + if (p.returncode != 0): + res = TestResult(tidx['id'], tidx['name']) + res.set_result(ResultState.skip) + res.set_errormsg('probe command: test skipped.') + pm.call_pre_case(tidx, test_skip=True) + pm.call_post_execute() + return res + # populate NAMES with TESTID for this test NAMES['TESTID'] = tidx['id'] -- cgit v1.2.3-70-g09d2 From b8617f8eed84e12c26f2220733280187466a0615 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Wed, 29 Mar 2023 11:54:54 +0200 Subject: selftests: tc-testing: add tunnel_key "nofrag" test case # ./tdc.py -e 6bda -l 6bda: (actions, tunnel_key) Add tunnel_key action with nofrag option Acked-by: Jamal Hadi Salim Signed-off-by: Davide Caratti Signed-off-by: Jakub Kicinski --- .../tc-testing/tc-tests/actions/tunnel_key.json | 25 ++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json index b40ee602918a..b5b47fbf6c00 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json +++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json @@ -983,5 +983,30 @@ "teardown": [ "$TC actions flush action tunnel_key" ] + }, + { + "id": "6bda", + "name": "Add tunnel_key action with nofrag option", + "category": [ + "actions", + "tunnel_key" + ], + "dependsOn": "$TC actions add action tunnel_key help 2>&1 | grep -q nofrag", + "setup": [ + [ + "$TC action flush action tunnel_key", + 0, + 1, + 255 + ] + ], + "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 1111 nofrag index 222", + "expExitCode": "0", + "verifyCmd": "$TC actions get action tunnel_key index 222", + "matchPattern": "action order [0-9]+: tunnel_key.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 1111.*csum.*nofrag pipe.*index 222", + "matchCount": "1", + "teardown": [ + "$TC actions flush action tunnel_key" + ] } ] -- cgit v1.2.3-70-g09d2 From 533a89b1940f527c6819f046e2aefb60df8a13d3 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Wed, 29 Mar 2023 11:54:55 +0200 Subject: selftests: forwarding: add tunnel_key "nofrag" test case Add a selftest that configures metadata tunnel encapsulation using the TC "tunnel_key" action: it includes a test case for setting "nofrag" flag. 
Example output: # selftests: net/forwarding: tc_tunnel_key.sh # TEST: tunnel_key nofrag (skip_hw) [ OK ] # INFO: Could not test offloaded functionality ok 1 selftests: net/forwarding: tc_tunnel_key.sh Signed-off-by: Davide Caratti Signed-off-by: Jakub Kicinski --- tools/testing/selftests/net/forwarding/Makefile | 1 + .../selftests/net/forwarding/tc_tunnel_key.sh | 161 +++++++++++++++++++++ 2 files changed, 162 insertions(+) create mode 100755 tools/testing/selftests/net/forwarding/tc_tunnel_key.sh (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile index 91201ab3c4fc..236f6b796a52 100644 --- a/tools/testing/selftests/net/forwarding/Makefile +++ b/tools/testing/selftests/net/forwarding/Makefile @@ -85,6 +85,7 @@ TEST_PROGS = bridge_igmp.sh \ tc_mpls_l2vpn.sh \ tc_police.sh \ tc_shblocks.sh \ + tc_tunnel_key.sh \ tc_vlan_modify.sh \ vxlan_asymmetric_ipv6.sh \ vxlan_asymmetric.sh \ diff --git a/tools/testing/selftests/net/forwarding/tc_tunnel_key.sh b/tools/testing/selftests/net/forwarding/tc_tunnel_key.sh new file mode 100755 index 000000000000..5ac184d51809 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/tc_tunnel_key.sh @@ -0,0 +1,161 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +ALL_TESTS="tunnel_key_nofrag_test" + +NUM_NETIFS=4 +source tc_common.sh +source lib.sh + +tcflags="skip_hw" + +h1_create() +{ + simple_if_init $h1 192.0.2.1/24 + forwarding_enable + mtu_set $h1 1500 + tunnel_create h1-et vxlan 192.0.2.1 192.0.2.2 dev $h1 dstport 0 external + tc qdisc add dev h1-et clsact + mtu_set h1-et 1230 + mtu_restore $h1 + mtu_set $h1 1000 +} + +h1_destroy() +{ + tc qdisc del dev h1-et clsact + tunnel_destroy h1-et + forwarding_restore + mtu_restore $h1 + simple_if_fini $h1 192.0.2.1/24 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.2/24 +} + +h2_destroy() +{ + simple_if_fini $h2 192.0.2.2/24 +} + +switch_create() +{ + simple_if_init $swp1 192.0.2.2/24 + tc qdisc add dev $swp1 clsact + simple_if_init $swp2 192.0.2.1/24 +} + +switch_destroy() +{ + simple_if_fini $swp2 192.0.2.1/24 + tc qdisc del dev $swp1 clsact + simple_if_fini $swp1 192.0.2.2/24 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + h1mac=$(mac_get $h1) + h2mac=$(mac_get $h2) + + swp1origmac=$(mac_get $swp1) + swp2origmac=$(mac_get $swp2) + ip link set $swp1 address $h2mac + ip link set $swp2 address $h1mac + + vrf_prepare + + h1_create + h2_create + switch_create + + if ! 
tc action add action tunnel_key help 2>&1 | grep -q nofrag; then + log_test "SKIP: iproute doesn't support nofrag" + exit $ksft_skip + fi +} + +cleanup() +{ + pre_cleanup + + switch_destroy + h2_destroy + h1_destroy + + vrf_cleanup + + ip link set $swp2 address $swp2origmac + ip link set $swp1 address $swp1origmac +} + +tunnel_key_nofrag_test() +{ + RET=0 + local i + + tc filter add dev $swp1 ingress protocol ip pref 100 handle 100 \ + flower ip_flags nofrag action drop + tc filter add dev $swp1 ingress protocol ip pref 101 handle 101 \ + flower ip_flags firstfrag action drop + tc filter add dev $swp1 ingress protocol ip pref 102 handle 102 \ + flower ip_flags nofirstfrag action drop + + # test 'nofrag' set + tc filter add dev h1-et egress protocol all pref 1 handle 1 matchall $tcflags \ + action tunnel_key set src_ip 192.0.2.1 dst_ip 192.0.2.2 id 42 nofrag index 10 + $MZ h1-et -c 1 -p 930 -a 00:aa:bb:cc:dd:ee -b 00:ee:dd:cc:bb:aa -t ip -q + tc_check_packets "dev $swp1 ingress" 100 1 + check_err $? "packet smaller than MTU was not tunneled" + + $MZ h1-et -c 1 -p 931 -a 00:aa:bb:cc:dd:ee -b 00:ee:dd:cc:bb:aa -t ip -q + tc_check_packets "dev $swp1 ingress" 100 1 + check_err $? "packet bigger than MTU matched nofrag (nofrag was set)" + tc_check_packets "dev $swp1 ingress" 101 0 + check_err $? "packet bigger than MTU matched firstfrag (nofrag was set)" + tc_check_packets "dev $swp1 ingress" 102 0 + check_err $? "packet bigger than MTU matched nofirstfrag (nofrag was set)" + + # test 'nofrag' cleared + tc actions change action tunnel_key set src_ip 192.0.2.1 dst_ip 192.0.2.2 id 42 index 10 + $MZ h1-et -c 1 -p 931 -a 00:aa:bb:cc:dd:ee -b 00:ee:dd:cc:bb:aa -t ip -q + tc_check_packets "dev $swp1 ingress" 100 1 + check_err $? "packet bigger than MTU matched nofrag (nofrag was unset)" + tc_check_packets "dev $swp1 ingress" 101 1 + check_err $? "packet bigger than MTU didn't match firstfrag (nofrag was unset) " + tc_check_packets "dev $swp1 ingress" 102 1 + check_err $? "packet bigger than MTU didn't match nofirstfrag (nofrag was unset) " + + for i in 100 101 102; do + tc filter del dev $swp1 ingress protocol ip pref $i handle $i flower + done + tc filter del dev h1-et egress pref 1 handle 1 matchall + + log_test "tunnel_key nofrag ($tcflags)" +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +tc_offload_check +if [[ $? -ne 0 ]]; then + log_info "Could not test offloaded functionality" +else + tcflags="skip_sw" + tests_run +fi + +exit $EXIT_STATUS -- cgit v1.2.3-70-g09d2 From 328bafc9a373da5f268d82533dd8f2e66526d168 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 31 Mar 2023 11:31:55 +0200 Subject: selftests/bpf: Add err.h header Moving error macros from profiler.inc.h to new err.h header. It will be used in following changes. Also adding PTR_ERR macro that will be used in following changes. 
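A sketch only of how a program can consume a kernel-style error-encoded pointer with the new helpers; ptr is whatever value the caller got back in ERR_PTR form.

#include "err.h"

static long classify(const void *ptr)
{
	if (IS_ERR_OR_NULL(ptr))
		return ptr ? PTR_ERR(ptr) : -1;	/* negative errno, or -1 for NULL */
	return 0;				/* valid pointer */
}
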
Acked-by: Andrii Nakryiko Signed-off-by: Jiri Olsa Link: https://lore.kernel.org/r/20230331093157.1749137-2-jolsa@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/err.h | 18 ++++++++++++++++++ tools/testing/selftests/bpf/progs/profiler.inc.h | 3 +-- 2 files changed, 19 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/err.h (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/err.h b/tools/testing/selftests/bpf/progs/err.h new file mode 100644 index 000000000000..d66d283d9e59 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/err.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ERR_H__ +#define __ERR_H__ + +#define MAX_ERRNO 4095 +#define IS_ERR_VALUE(x) (unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO + +static inline int IS_ERR_OR_NULL(const void *ptr) +{ + return !ptr || IS_ERR_VALUE((unsigned long)ptr); +} + +static inline long PTR_ERR(const void *ptr) +{ + return (long) ptr; +} + +#endif /* __ERR_H__ */ diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h index 875513866032..f799d87e8700 100644 --- a/tools/testing/selftests/bpf/progs/profiler.inc.h +++ b/tools/testing/selftests/bpf/progs/profiler.inc.h @@ -6,6 +6,7 @@ #include #include "profiler.h" +#include "err.h" #ifndef NULL #define NULL 0 @@ -16,7 +17,6 @@ #define O_DIRECTORY 00200000 #define __O_TMPFILE 020000000 #define O_TMPFILE (__O_TMPFILE | O_DIRECTORY) -#define MAX_ERRNO 4095 #define S_IFMT 00170000 #define S_IFSOCK 0140000 #define S_IFLNK 0120000 @@ -34,7 +34,6 @@ #define S_ISBLK(m) (((m)&S_IFMT) == S_IFBLK) #define S_ISFIFO(m) (((m)&S_IFMT) == S_IFIFO) #define S_ISSOCK(m) (((m)&S_IFMT) == S_IFSOCK) -#define IS_ERR_VALUE(x) (unsigned long)(void*)(x) >= (unsigned long)-MAX_ERRNO #define KILL_DATA_ARRAY_SIZE 8 -- cgit v1.2.3-70-g09d2 From 88dc8b3605b38a440fba45edcc53a6c7a98eee3b Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 31 Mar 2023 11:31:56 +0200 Subject: selftests/bpf: Add read_build_id function Adding read_build_id function that parses out build id from specified binary. It will replace extract_build_id and also be used in following changes. 
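A sketch of a caller of the new helper; "./urandom_read" is simply the binary the existing selftests read the build id from.

#include <stdio.h>
#include <linux/bpf.h>		/* BPF_BUILD_ID_SIZE */
#include "trace_helpers.h"

static int print_build_id(void)
{
	char build_id[BPF_BUILD_ID_SIZE];
	int i, sz;

	sz = read_build_id("./urandom_read", build_id, sizeof(build_id));
	if (sz < 0)
		return sz;	/* error value from the helper */

	for (i = 0; i < sz; i++)
		printf("%02x", build_id[i] & 0xff);
	printf("\n");
	return 0;
}
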
Signed-off-by: Jiri Olsa Link: https://lore.kernel.org/r/20230331093157.1749137-3-jolsa@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/trace_helpers.c | 82 +++++++++++++++++++++++++++++ tools/testing/selftests/bpf/trace_helpers.h | 5 ++ 2 files changed, 87 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c index 934bf28fc888..9b070cdf44ac 100644 --- a/tools/testing/selftests/bpf/trace_helpers.c +++ b/tools/testing/selftests/bpf/trace_helpers.c @@ -11,6 +11,9 @@ #include #include #include "trace_helpers.h" +#include +#include +#include #define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe" #define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe" @@ -234,3 +237,82 @@ ssize_t get_rel_offset(uintptr_t addr) fclose(f); return -EINVAL; } + +static int +parse_build_id_buf(const void *note_start, Elf32_Word note_size, char *build_id) +{ + Elf32_Word note_offs = 0; + + while (note_offs + sizeof(Elf32_Nhdr) < note_size) { + Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs); + + if (nhdr->n_type == 3 && nhdr->n_namesz == sizeof("GNU") && + !strcmp((char *)(nhdr + 1), "GNU") && nhdr->n_descsz > 0 && + nhdr->n_descsz <= BPF_BUILD_ID_SIZE) { + memcpy(build_id, note_start + note_offs + + ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), nhdr->n_descsz); + memset(build_id + nhdr->n_descsz, 0, BPF_BUILD_ID_SIZE - nhdr->n_descsz); + return (int) nhdr->n_descsz; + } + + note_offs = note_offs + sizeof(Elf32_Nhdr) + + ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4); + } + + return -ENOENT; +} + +/* Reads binary from *path* file and returns it in the *build_id* buffer + * with *size* which is expected to be at least BPF_BUILD_ID_SIZE bytes. + * Returns size of build id on success. On error the error value is + * returned. 
+ */ +int read_build_id(const char *path, char *build_id, size_t size) +{ + int fd, err = -EINVAL; + Elf *elf = NULL; + GElf_Ehdr ehdr; + size_t max, i; + + if (size < BPF_BUILD_ID_SIZE) + return -EINVAL; + + fd = open(path, O_RDONLY | O_CLOEXEC); + if (fd < 0) + return -errno; + + (void)elf_version(EV_CURRENT); + + elf = elf_begin(fd, ELF_C_READ_MMAP, NULL); + if (!elf) + goto out; + if (elf_kind(elf) != ELF_K_ELF) + goto out; + if (!gelf_getehdr(elf, &ehdr)) + goto out; + + for (i = 0; i < ehdr.e_phnum; i++) { + GElf_Phdr mem, *phdr; + char *data; + + phdr = gelf_getphdr(elf, i, &mem); + if (!phdr) + goto out; + if (phdr->p_type != PT_NOTE) + continue; + data = elf_rawfile(elf, &max); + if (!data) + goto out; + if (phdr->p_offset + phdr->p_memsz > max) + goto out; + err = parse_build_id_buf(data + phdr->p_offset, phdr->p_memsz, build_id); + if (err > 0) + break; + } + +out: + if (elf) + elf_end(elf); + close(fd); + return err; +} diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h index 53efde0e2998..876f3e711df6 100644 --- a/tools/testing/selftests/bpf/trace_helpers.h +++ b/tools/testing/selftests/bpf/trace_helpers.h @@ -4,6 +4,9 @@ #include +#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) +#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) + struct ksym { long addr; char *name; @@ -23,4 +26,6 @@ void read_trace_pipe(void); ssize_t get_uprobe_offset(const void *addr); ssize_t get_rel_offset(uintptr_t addr); +int read_build_id(const char *path, char *build_id, size_t size); + #endif -- cgit v1.2.3-70-g09d2 From dcc46f51d770bde625e4845cac42e808b3302b62 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 31 Mar 2023 11:31:57 +0200 Subject: selftests/bpf: Replace extract_build_id with read_build_id Replacing extract_build_id with read_build_id that parses out build id directly from elf without using readelf tool. Acked-by: Andrii Nakryiko Signed-off-by: Jiri Olsa Link: https://lore.kernel.org/r/20230331093157.1749137-4-jolsa@kernel.org Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/stacktrace_build_id.c | 19 ++++++---------- .../bpf/prog_tests/stacktrace_build_id_nmi.c | 17 ++++++--------- tools/testing/selftests/bpf/test_progs.c | 25 ---------------------- tools/testing/selftests/bpf/test_progs.h | 1 - 4 files changed, 13 insertions(+), 49 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c index 9ad09a6c538a..b7ba5cd47d96 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c @@ -7,13 +7,12 @@ void test_stacktrace_build_id(void) int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd; struct test_stacktrace_build_id *skel; - int err, stack_trace_len; + int err, stack_trace_len, build_id_size; __u32 key, prev_key, val, duration = 0; - char buf[256]; - int i, j; + char buf[BPF_BUILD_ID_SIZE]; struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; int build_id_matches = 0; - int retry = 1; + int i, retry = 1; retry: skel = test_stacktrace_build_id__open_and_load(); @@ -52,9 +51,10 @@ retry: "err %d errno %d\n", err, errno)) goto cleanup; - err = extract_build_id(buf, 256); + build_id_size = read_build_id("urandom_read", buf, sizeof(buf)); + err = build_id_size < 0 ? 
build_id_size : 0; - if (CHECK(err, "get build_id with readelf", + if (CHECK(err, "read_build_id", "err %d errno %d\n", err, errno)) goto cleanup; @@ -64,8 +64,6 @@ retry: goto cleanup; do { - char build_id[64]; - err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs); if (CHECK(err, "lookup_elem from stackmap", "err %d, errno %d\n", err, errno)) @@ -73,10 +71,7 @@ retry: for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i) if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID && id_offs[i].offset != 0) { - for (j = 0; j < 20; ++j) - sprintf(build_id + 2 * j, "%02x", - id_offs[i].build_id[j] & 0xff); - if (strstr(buf, build_id) != NULL) + if (memcmp(buf, id_offs[i].build_id, build_id_size) == 0) build_id_matches = 1; } prev_key = key; diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c index f4ea1a215ce4..47558b0d7f66 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c @@ -28,11 +28,10 @@ void test_stacktrace_build_id_nmi(void) .config = PERF_COUNT_HW_CPU_CYCLES, }; __u32 key, prev_key, val, duration = 0; - char buf[256]; - int i, j; + char buf[BPF_BUILD_ID_SIZE]; struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; - int build_id_matches = 0; - int retry = 1; + int build_id_matches = 0, build_id_size; + int i, retry = 1; attr.sample_freq = read_perf_max_sample_freq(); @@ -94,7 +93,8 @@ retry: "err %d errno %d\n", err, errno)) goto cleanup; - err = extract_build_id(buf, 256); + build_id_size = read_build_id("urandom_read", buf, sizeof(buf)); + err = build_id_size < 0 ? build_id_size : 0; if (CHECK(err, "get build_id with readelf", "err %d errno %d\n", err, errno)) @@ -106,8 +106,6 @@ retry: goto cleanup; do { - char build_id[64]; - err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key), id_offs, sizeof(id_offs), 0); if (CHECK(err, "lookup_elem from stackmap", @@ -116,10 +114,7 @@ retry: for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i) if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID && id_offs[i].offset != 0) { - for (j = 0; j < 20; ++j) - sprintf(build_id + 2 * j, "%02x", - id_offs[i].build_id[j] & 0xff); - if (strstr(buf, build_id) != NULL) + if (memcmp(buf, id_offs[i].build_id, build_id_size) == 0) build_id_matches = 1; } prev_key = key; diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index d903e6a72a96..ea82921110da 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -629,31 +629,6 @@ out: return err; } -int extract_build_id(char *build_id, size_t size) -{ - FILE *fp; - char *line = NULL; - size_t len = 0; - - fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r"); - if (fp == NULL) - return -1; - - if (getline(&line, &len, fp) == -1) - goto err; - pclose(fp); - - if (len > size) - len = size; - memcpy(build_id, line, len); - build_id[len] = '\0'; - free(line); - return 0; -err: - pclose(fp); - return -1; -} - static int finit_module(int fd, const char *param_values, int flags) { return syscall(__NR_finit_module, fd, param_values, flags); diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 4b06b8347cd4..10ba43250668 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -405,7 +405,6 @@ static inline void *u64_to_ptr(__u64 ptr) int bpf_find_map(const char *test, struct bpf_object *obj, const char 
*name); int compare_map_keys(int map1_fd, int map2_fd); int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len); -int extract_build_id(char *build_id, size_t size); int kern_sync_rcu(void); int trigger_module_test_read(int read_sz); int trigger_module_test_write(int write_sz); -- cgit v1.2.3-70-g09d2 From 9af0f555ae4add25f0950753fb410c509aa71f50 Mon Sep 17 00:00:00 2001 From: James Hilliard Date: Fri, 31 Mar 2023 01:58:42 -0600 Subject: selftests/bpf: Fix conflicts with built-in functions in bench_local_storage_create The fork function in gcc is considered a built in function due to being used by libgcov when building with gnu extensions. Rename fork to sched_process_fork to prevent this conflict. See details: https://github.com/gcc-mirror/gcc/commit/d1c38823924506d389ca58d02926ace21bdf82fa https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82457 Fixes the following error: In file included from progs/bench_local_storage_create.c:6: progs/bench_local_storage_create.c:43:14: error: conflicting types for built-in function 'fork'; expected 'int(void)' [-Werror=builtin-declaration-mismatch] 43 | int BPF_PROG(fork, struct task_struct *parent, struct task_struct *child) | ^~~~ Fixes: cbe9d93d58b1 ("selftests/bpf: Add bench for task storage creation") Signed-off-by: James Hilliard Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230331075848.1642814-1-james.hilliard1@gmail.com --- tools/testing/selftests/bpf/benchs/bench_local_storage_create.c | 2 +- tools/testing/selftests/bpf/progs/bench_local_storage_create.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c index abb0321d4f34..cff703f90e95 100644 --- a/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c +++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c @@ -95,7 +95,7 @@ static void setup(void) exit(1); } } else { - if (!bpf_program__attach(skel->progs.fork)) { + if (!bpf_program__attach(skel->progs.sched_process_fork)) { fprintf(stderr, "Error attaching bpf program\n"); exit(1); } diff --git a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c index 7c851c9d5e47..e4bfbba6c193 100644 --- a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c +++ b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c @@ -40,7 +40,7 @@ int BPF_PROG(kmalloc, unsigned long call_site, const void *ptr, } SEC("tp_btf/sched_process_fork") -int BPF_PROG(fork, struct task_struct *parent, struct task_struct *child) +int BPF_PROG(sched_process_fork, struct task_struct *parent, struct task_struct *child) { struct storage *stg; -- cgit v1.2.3-70-g09d2 From 3ed85ae80283885ef8491d07cdcd7124328bed35 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 31 Mar 2023 15:24:02 -0700 Subject: veristat: relicense veristat.c as dual GPL-2.0-only or BSD-2-Clause licensed Dual-license veristat.c to dual GPL-2.0-only or BSD-2-Clause license. This is needed to mirror it to Github to make it convenient for distro packagers to package veristat as a separate package. Veristat grew into a useful tool by itself, and there are already a bunch of users relying on veristat as generic BPF loading and verification helper tool. 
So making it easy to packagers by providing Github mirror just like we do for bpftool and libbpf is the next step to get veristat into the hands of users. Apart from few typo fixes, I'm the sole contributor to veristat.c so far, so no extra Acks should be needed for relicensing. Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230331222405.3468634-2-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/veristat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index 7888c03ba631..612ca52c6fba 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ #define _GNU_SOURCE #include -- cgit v1.2.3-70-g09d2 From 71c8c39f517787af19d9f35fe60463f7eec914e1 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 31 Mar 2023 15:24:03 -0700 Subject: veristat: improve version reporting For packaging version of the tool is important, so add a simple way to specify veristat version for upstream mirror at Github. Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230331222405.3468634-3-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/veristat.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index 612ca52c6fba..daac72b76508 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -140,6 +140,7 @@ static struct env { bool quiet; int log_level; enum resfmt out_fmt; + bool show_version; bool comparison_mode; bool replay_mode; @@ -176,16 +177,22 @@ static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va return vfprintf(stderr, format, args); } -const char *argp_program_version = "veristat"; +#ifndef VERISTAT_VERSION +#define VERISTAT_VERSION "" +#endif + +const char *argp_program_version = "veristat v" VERISTAT_VERSION; const char *argp_program_bug_address = ""; const char argp_program_doc[] = "veristat BPF verifier stats collection and comparison tool.\n" "\n" "USAGE: veristat [...]\n" -" OR: veristat -C \n"; +" OR: veristat -C \n" +" OR: veristat -R \n"; static const struct argp_option opts[] = { { NULL, 'h', NULL, OPTION_HIDDEN, "Show the full help" }, + { "version", 'V', NULL, 0, "Print version" }, { "verbose", 'v', NULL, 0, "Verbose mode" }, { "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" }, { "debug", 'd', NULL, 0, "Debug mode (turns on libbpf debug logging)" }, @@ -212,6 +219,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) case 'h': argp_state_help(state, stderr, ARGP_HELP_STD_HELP); break; + case 'V': + env.show_version = true; + break; case 'v': env.verbose = true; break; @@ -1991,6 +2001,11 @@ int main(int argc, char **argv) if (argp_parse(&argp, argc, argv, 0, NULL, NULL)) return 1; + if (env.show_version) { + printf("%s\n", argp_program_version); + return 0; + } + if (env.verbose && env.quiet) { fprintf(stderr, "Verbose and quiet modes are incompatible, please specify just one or neither!\n\n"); argp_help(&argp, stderr, ARGP_HELP_USAGE, "veristat"); -- cgit v1.2.3-70-g09d2 From 
e3b65c0c1a5b8ed06818b7eeb0c44165ea817d52 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 31 Mar 2023 15:24:04 -0700 Subject: veristat: avoid using kernel-internal headers Drop linux/compiler.h include, which seems to be needed for ARRAY_SIZE macro only. Redefine own version of ARRAY_SIZE instead. Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230331222405.3468634-4-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/veristat.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index daac72b76508..e592d05bccb2 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -4,7 +4,6 @@ #include #include #include -#include #include #include #include @@ -20,6 +19,10 @@ #include #include +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#endif + enum stat_id { VERDICT, DURATION, -- cgit v1.2.3-70-g09d2 From ebf390c9d0136e01f327439c012ab5741971e72d Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 31 Mar 2023 15:24:05 -0700 Subject: veristat: small fixed found in -O2 mode Fix few potentially unitialized variables uses, found while building veristat.c in release (-O2) mode. Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230331222405.3468634-5-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/veristat.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index e592d05bccb2..53d7ec168268 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -810,7 +810,7 @@ static int guess_prog_type_by_ctx_name(const char *ctx_name, enum bpf_prog_type prog_type; enum bpf_attach_type attach_type; } ctx_map[] = { - /* __sk_buff is most ambiguous, for now we assume cgroup_skb */ + /* __sk_buff is most ambiguous, we assume TC program */ { "__sk_buff", "sk_buff", BPF_PROG_TYPE_SCHED_CLS }, { "bpf_sock", "sock", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND }, { "bpf_sock_addr", "bpf_sock_addr_kern", BPF_PROG_TYPE_CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND }, @@ -1045,6 +1045,7 @@ static int process_obj(const char *filename) goto cleanup; } + lprog = NULL; bpf_object__for_each_program(tprog, tobj) { const char *tprog_name = bpf_program__name(tprog); @@ -1855,6 +1856,7 @@ static int handle_comparison_mode(void) one_more_time: output_comp_headers(cur_fmt); + last_idx = -1; for (i = 0; i < env.join_stat_cnt; i++) { const struct verif_stats_join *join = &env.join_stats[i]; -- cgit v1.2.3-70-g09d2 From d02c48fa113953aba0b330ec6c35f50c7d1d7986 Mon Sep 17 00:00:00 2001 From: David Vernet Date: Fri, 31 Mar 2023 14:57:31 -0500 Subject: bpf: Make struct task_struct an RCU-safe type struct task_struct objects are a bit interesting in terms of how their lifetime is protected by refcounts. task structs have two refcount fields: 1. refcount_t usage: Protects the memory backing the task struct. When this refcount drops to 0, the task is immediately freed, without waiting for an RCU grace period to elapse. This is the field that most callers in the kernel currently use to ensure that a task remains valid while it's being referenced, and is what's currently tracked with bpf_task_acquire() and bpf_task_release(). 2. 
refcount_t rcu_users: A refcount field which, when it drops to 0, schedules an RCU callback that drops a reference held on the 'usage' field above (which is acquired when the task is first created). This field therefore provides a form of RCU protection on the task by ensuring that at least one 'usage' refcount will be held until an RCU grace period has elapsed. The qualifier "a form of" is important here, as a task can remain valid after task->rcu_users has dropped to 0 and the subsequent RCU gp has elapsed. In terms of BPF, we want to use task->rcu_users to protect tasks that function as referenced kptrs, and to allow tasks stored as referenced kptrs in maps to be accessed with RCU protection. Let's first determine whether we can safely use task->rcu_users to protect tasks stored in maps. All of the bpf_task* kfuncs can only be called from tracepoint, struct_ops, or BPF_PROG_TYPE_SCHED_CLS program types. For tracepoint and struct_ops programs, the struct task_struct passed to a program handler will always be trusted, so it will always be safe to call bpf_task_acquire() with any task passed to a program. Note, however, that we must update bpf_task_acquire() to be KF_RET_NULL, as it is possible that the task has exited by the time the program is invoked, even if the pointer is still currently valid because the main kernel holds a task->usage refcount. For BPF_PROG_TYPE_SCHED_CLS, tasks should never be passed as an argument to any program handlers, so it should not be relevant. The second question is whether it's safe to use RCU to access a task that was acquired with bpf_task_acquire() and stored in a map. Because bpf_task_acquire() now uses task->rcu_users, it follows that if the task is present in the map, it must have had at least one task->rcu_users refcount by the time the current RCU cs was started. Therefore, it's safe to access that task until the end of the current RCU cs. With all that said, this patch makes struct task_struct an RCU-protected object. In doing so, we also change bpf_task_acquire() to be KF_ACQUIRE | KF_RCU | KF_RET_NULL, and adjust any selftests as necessary. A subsequent patch will remove bpf_task_kptr_get() and bpf_task_acquire_not_zero(). 
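To illustrate the usage pattern this enables, the sketch below shows how a BPF program is expected to call the updated kfunc: acquire, NULL-check, then release. This is a simplified example written for this log rather than code from the patch; the kfunc declarations mirror the ones in the selftests' task_kfunc_common.h, and the program and section names are illustrative only.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Kfunc declarations, as in the selftests' task_kfunc_common.h. */
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(task_acquire_sketch, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* bpf_task_acquire() is now KF_RET_NULL: the task may already have
	 * dropped its last rcu_users reference, so the result must be
	 * NULL-checked before it is used or released.
	 */
	acquired = bpf_task_acquire(task);
	if (!acquired)
		return 0;

	/* ... use the referenced task here ... */

	bpf_task_release(acquired);
	return 0;
}

char _license[] SEC("license") = "GPL";

Tasks stored in maps as kptrs can additionally be read back under bpf_rcu_read_lock()/bpf_rcu_read_unlock() and re-acquired the same way, which is exactly what the updated selftests below do.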
Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230331195733.699708-2-void@manifault.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/helpers.c | 11 +-- kernel/bpf/verifier.c | 1 + .../testing/selftests/bpf/prog_tests/task_kfunc.c | 1 + .../selftests/bpf/progs/task_kfunc_common.h | 5 ++ .../selftests/bpf/progs/task_kfunc_failure.c | 80 +++++++++++++++++++--- .../selftests/bpf/progs/task_kfunc_success.c | 26 ++++++- 6 files changed, 108 insertions(+), 16 deletions(-) (limited to 'tools/testing') diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 8980f6859443..e71a4a54ce99 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -2013,7 +2014,9 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) */ __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p) { - return get_task_struct(p); + if (refcount_inc_not_zero(&p->rcu_users)) + return p; + return NULL; } /** @@ -2089,7 +2092,7 @@ __bpf_kfunc struct task_struct *bpf_task_kptr_get(struct task_struct **pp) */ __bpf_kfunc void bpf_task_release(struct task_struct *p) { - put_task_struct(p); + put_task_struct_rcu_user(p); } #ifdef CONFIG_CGROUPS @@ -2199,7 +2202,7 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) rcu_read_lock(); p = find_task_by_pid_ns(pid, &init_pid_ns); if (p) - bpf_task_acquire(p); + p = bpf_task_acquire(p); rcu_read_unlock(); return p; @@ -2371,7 +2374,7 @@ BTF_ID_FLAGS(func, bpf_list_push_front) BTF_ID_FLAGS(func, bpf_list_push_back) BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL) -BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_acquire_not_zero, KF_ACQUIRE | KF_RCU | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 52738f9dcb15..92ae4e8ab87b 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4600,6 +4600,7 @@ BTF_SET_START(rcu_protected_types) BTF_ID(struct, prog_test_ref_kfunc) BTF_ID(struct, cgroup) BTF_ID(struct, bpf_cpumask) +BTF_ID(struct, task_struct) BTF_SET_END(rcu_protected_types) static bool rcu_protected_object(const struct btf *btf, u32 btf_id) diff --git a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c index f79fa5bc9a8d..330133ece3f6 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c +++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c @@ -78,6 +78,7 @@ static const char * const success_tests[] = { "test_task_from_pid_arg", "test_task_from_pid_current", "test_task_from_pid_invalid", + "task_kfunc_acquire_trusted_walked", }; void test_task_kfunc(void) diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_common.h b/tools/testing/selftests/bpf/progs/task_kfunc_common.h index 4c2a4b0e3a25..bf0d1da9aff8 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_common.h +++ b/tools/testing/selftests/bpf/progs/task_kfunc_common.h @@ -24,6 +24,8 @@ struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; struct task_struct *bpf_task_kptr_get(struct task_struct **pp) __ksym; void bpf_task_release(struct task_struct *p) __ksym; struct task_struct *bpf_task_from_pid(s32 pid) __ksym; +void 
bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; static inline struct __tasks_kfunc_map_value *tasks_kfunc_map_value_lookup(struct task_struct *p) { @@ -60,6 +62,9 @@ static inline int tasks_kfunc_map_insert(struct task_struct *p) } acquired = bpf_task_acquire(p); + if (!acquired) + return -ENOENT; + old = bpf_kptr_xchg(&v->task, acquired); if (old) { bpf_task_release(old); diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c index 2c374a7ffece..63aef547da87 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c @@ -40,6 +40,9 @@ int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_f /* Can't invoke bpf_task_acquire() on an untrusted pointer. */ acquired = bpf_task_acquire(v->task); + if (!acquired) + return 0; + bpf_task_release(acquired); return 0; @@ -53,38 +56,49 @@ int BPF_PROG(task_kfunc_acquire_fp, struct task_struct *task, u64 clone_flags) /* Can't invoke bpf_task_acquire() on a random frame pointer. */ acquired = bpf_task_acquire((struct task_struct *)&stack_task); + if (!acquired) + return 0; + bpf_task_release(acquired); return 0; } SEC("kretprobe/free_task") -__failure __msg("reg type unsupported for arg#0 function") +__failure __msg("calling kernel function bpf_task_acquire is not allowed") int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired; + /* Can't call bpf_task_acquire() or bpf_task_release() in an untrusted prog. */ acquired = bpf_task_acquire(task); - /* Can't release a bpf_task_acquire()'d task without a NULL check. */ + if (!acquired) + return 0; bpf_task_release(acquired); return 0; } -SEC("tp_btf/task_newtask") -__failure __msg("R1 must be referenced or trusted") -int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags) +SEC("kretprobe/free_task") +__failure __msg("calling kernel function bpf_task_acquire is not allowed") +int BPF_PROG(task_kfunc_acquire_unsafe_kretprobe_rcu, struct task_struct *task, u64 clone_flags) { struct task_struct *acquired; - /* Can't invoke bpf_task_acquire() on a trusted pointer obtained from walking a struct. */ - acquired = bpf_task_acquire(task->group_leader); - bpf_task_release(acquired); + bpf_rcu_read_lock(); + if (!task) { + bpf_rcu_read_unlock(); + return 0; + } + /* Can't call bpf_task_acquire() or bpf_task_release() in an untrusted prog. */ + acquired = bpf_task_acquire(task); + if (acquired) + bpf_task_release(acquired); + bpf_rcu_read_unlock(); return 0; } - SEC("tp_btf/task_newtask") __failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(task_kfunc_acquire_null, struct task_struct *task, u64 clone_flags) @@ -137,6 +151,8 @@ int BPF_PROG(task_kfunc_get_non_kptr_acquired, struct task_struct *task, u64 clo struct task_struct *kptr, *acquired; acquired = bpf_task_acquire(task); + if (!acquired) + return 0; /* Cannot use bpf_task_kptr_get() on a non-kptr, even if it was acquired. 
*/ kptr = bpf_task_kptr_get(&acquired); @@ -185,6 +201,19 @@ int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_fla return 0; } +SEC("tp_btf/task_newtask") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +int BPF_PROG(task_kfunc_acquire_release_no_null_check, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired; + + acquired = bpf_task_acquire(task); + /* Can't invoke bpf_task_release() on an acquired task without a NULL check. */ + bpf_task_release(acquired); + + return 0; +} + SEC("tp_btf/task_newtask") __failure __msg("Unreleased reference") int BPF_PROG(task_kfunc_get_unreleased, struct task_struct *task, u64 clone_flags) @@ -256,12 +285,13 @@ int BPF_PROG(task_kfunc_release_null, struct task_struct *task, u64 clone_flags) return -ENOENT; acquired = bpf_task_acquire(task); + if (!acquired) + return -EEXIST; old = bpf_kptr_xchg(&v->task, acquired); /* old cannot be passed to bpf_task_release() without a NULL check. */ bpf_task_release(old); - bpf_task_release(old); return 0; } @@ -298,6 +328,9 @@ int BPF_PROG(task_kfunc_from_lsm_task_free, struct task_struct *task) /* the argument of lsm task_free hook is untrusted. */ acquired = bpf_task_acquire(task); + if (!acquired) + return 0; + bpf_task_release(acquired); return 0; } @@ -337,3 +370,30 @@ int BPF_PROG(task_access_comm4, struct task_struct *task, const char *buf, bool bpf_strncmp(task->comm, 16, "foo"); return 0; } + +SEC("tp_btf/task_newtask") +__failure __msg("R1 must be referenced or trusted") +int BPF_PROG(task_kfunc_release_in_map, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *local; + struct __tasks_kfunc_map_value *v; + + if (tasks_kfunc_map_insert(task)) + return 0; + + v = tasks_kfunc_map_value_lookup(task); + if (!v) + return 0; + + bpf_rcu_read_lock(); + local = v->task; + if (!local) { + bpf_rcu_read_unlock(); + return 0; + } + /* Can't release a kptr that's still stored in a map. */ + bpf_task_release(local); + bpf_rcu_read_unlock(); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c index cfa7f12b84e8..a75304a5e860 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_success.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c @@ -47,7 +47,10 @@ static int test_acquire_release(struct task_struct *task) } acquired = bpf_task_acquire(task); - bpf_task_release(acquired); + if (acquired) + bpf_task_release(acquired); + else + err = 6; return 0; } @@ -166,7 +169,10 @@ int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 cl current = bpf_get_current_task_btf(); acquired = bpf_task_acquire(current); - bpf_task_release(acquired); + if (acquired) + bpf_task_release(acquired); + else + err = 1; return 0; } @@ -241,3 +247,19 @@ int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_fla return 0; } + +SEC("tp_btf/task_newtask") +int BPF_PROG(task_kfunc_acquire_trusted_walked, struct task_struct *task, u64 clone_flags) +{ + struct task_struct *acquired; + + /* task->group_leader is listed as a trusted, non-NULL field of task struct. 
*/ + acquired = bpf_task_acquire(task->group_leader); + if (acquired) + bpf_task_release(acquired); + else + err = 1; + + + return 0; +} -- cgit v1.2.3-70-g09d2 From f85671c6ef46d490a90dac719e0c0e0adbacfd9b Mon Sep 17 00:00:00 2001 From: David Vernet Date: Fri, 31 Mar 2023 14:57:32 -0500 Subject: bpf: Remove now-defunct task kfuncs In commit 22df776a9a86 ("tasks: Extract rcu_users out of union"), the 'refcount_t rcu_users' field was extracted out of a union with the 'struct rcu_head rcu' field. This allows us to safely perform a refcount_inc_not_zero() on task->rcu_users when acquiring a reference on a task struct. A prior patch leveraged this by making struct task_struct an RCU-protected object in the verifier, and by bpf_task_acquire() to use the task->rcu_users field for synchronization. Now that we can use RCU to protect tasks, we no longer need bpf_task_kptr_get(), or bpf_task_acquire_not_zero(). bpf_task_kptr_get() is truly completely unnecessary, as we can just use RCU to get the object. bpf_task_acquire_not_zero() is now equivalent to bpf_task_acquire(). In addition to these changes, this patch also updates the associated selftests to no longer use these kfuncs. Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230331195733.699708-3-void@manifault.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/helpers.c | 69 -------------------- .../testing/selftests/bpf/prog_tests/task_kfunc.c | 2 +- tools/testing/selftests/bpf/progs/rcu_read_lock.c | 9 +-- .../selftests/bpf/progs/task_kfunc_common.h | 1 - .../selftests/bpf/progs/task_kfunc_failure.c | 73 ---------------------- .../selftests/bpf/progs/task_kfunc_success.c | 22 +++---- 6 files changed, 14 insertions(+), 162 deletions(-) (limited to 'tools/testing') diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index e71a4a54ce99..6be16db9f188 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2019,73 +2019,6 @@ __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p) return NULL; } -/** - * bpf_task_acquire_not_zero - Acquire a reference to a rcu task object. A task - * acquired by this kfunc which is not stored in a map as a kptr, must be - * released by calling bpf_task_release(). - * @p: The task on which a reference is being acquired. - */ -__bpf_kfunc struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p) -{ - /* For the time being this function returns NULL, as it's not currently - * possible to safely acquire a reference to a task with RCU protection - * using get_task_struct() and put_task_struct(). This is due to the - * slightly odd mechanics of p->rcu_users, and how task RCU protection - * works. - * - * A struct task_struct is refcounted by two different refcount_t - * fields: - * - * 1. p->usage: The "true" refcount field which tracks a task's - * lifetime. The task is freed as soon as this - * refcount drops to 0. - * - * 2. p->rcu_users: An "RCU users" refcount field which is statically - * initialized to 2, and is co-located in a union with - * a struct rcu_head field (p->rcu). p->rcu_users - * essentially encapsulates a single p->usage - * refcount, and when p->rcu_users goes to 0, an RCU - * callback is scheduled on the struct rcu_head which - * decrements the p->usage refcount. - * - * There are two important implications to this task refcounting logic - * described above. 
The first is that - * refcount_inc_not_zero(&p->rcu_users) cannot be used anywhere, as - * after the refcount goes to 0, the RCU callback being scheduled will - * cause the memory backing the refcount to again be nonzero due to the - * fields sharing a union. The other is that we can't rely on RCU to - * guarantee that a task is valid in a BPF program. This is because a - * task could have already transitioned to being in the TASK_DEAD - * state, had its rcu_users refcount go to 0, and its rcu callback - * invoked in which it drops its single p->usage reference. At this - * point the task will be freed as soon as the last p->usage reference - * goes to 0, without waiting for another RCU gp to elapse. The only - * way that a BPF program can guarantee that a task is valid is in this - * scenario is to hold a p->usage refcount itself. - * - * Until we're able to resolve this issue, either by pulling - * p->rcu_users and p->rcu out of the union, or by getting rid of - * p->usage and just using p->rcu_users for refcounting, we'll just - * return NULL here. - */ - return NULL; -} - -/** - * bpf_task_kptr_get - Acquire a reference on a struct task_struct kptr. A task - * kptr acquired by this kfunc which is not subsequently stored in a map, must - * be released by calling bpf_task_release(). - * @pp: A pointer to a task kptr on which a reference is being acquired. - */ -__bpf_kfunc struct task_struct *bpf_task_kptr_get(struct task_struct **pp) -{ - /* We must return NULL here until we have clarity on how to properly - * leverage RCU for ensuring a task's lifetime. See the comment above - * in bpf_task_acquire_not_zero() for more details. - */ - return NULL; -} - /** * bpf_task_release - Release the reference acquired on a task. * @p: The task on which a reference is being released. 
@@ -2375,8 +2308,6 @@ BTF_ID_FLAGS(func, bpf_list_push_back) BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) -BTF_ID_FLAGS(func, bpf_task_acquire_not_zero, KF_ACQUIRE | KF_RCU | KF_RET_NULL) -BTF_ID_FLAGS(func, bpf_task_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE) BTF_ID_FLAGS(func, bpf_rbtree_add) diff --git a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c index 330133ece3f6..740d5f644b40 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c +++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c @@ -73,7 +73,7 @@ static const char * const success_tests[] = { "test_task_acquire_release_current", "test_task_acquire_leave_in_map", "test_task_xchg_release", - "test_task_get_release", + "test_task_map_acquire_release", "test_task_current_acquire_release", "test_task_from_pid_arg", "test_task_from_pid_current", diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c index 6a8c88e58df2..14fb01437fb8 100644 --- a/tools/testing/selftests/bpf/progs/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c @@ -23,7 +23,7 @@ struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; void bpf_key_put(struct bpf_key *key) __ksym; void bpf_rcu_read_lock(void) __ksym; void bpf_rcu_read_unlock(void) __ksym; -struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p) __ksym; +struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; void bpf_task_release(struct task_struct *p) __ksym; SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") @@ -159,13 +159,8 @@ int task_acquire(void *ctx) goto out; /* acquire a reference which can be used outside rcu read lock region */ - gparent = bpf_task_acquire_not_zero(gparent); + gparent = bpf_task_acquire(gparent); if (!gparent) - /* Until we resolve the issues with using task->rcu_users, we - * expect bpf_task_acquire_not_zero() to return a NULL task. - * See the comment at the definition of - * bpf_task_acquire_not_zero() for more details. 
- */ goto out; (void)bpf_task_storage_get(&map_a, gparent, 0, 0); diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_common.h b/tools/testing/selftests/bpf/progs/task_kfunc_common.h index bf0d1da9aff8..41f2d44f49cb 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_common.h +++ b/tools/testing/selftests/bpf/progs/task_kfunc_common.h @@ -21,7 +21,6 @@ struct hash_map { } __tasks_kfunc_map SEC(".maps"); struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; -struct task_struct *bpf_task_kptr_get(struct task_struct **pp) __ksym; void bpf_task_release(struct task_struct *p) __ksym; struct task_struct *bpf_task_from_pid(s32 pid) __ksym; void bpf_rcu_read_lock(void) __ksym; diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c index 63aef547da87..dcdea3127086 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c @@ -128,59 +128,6 @@ int BPF_PROG(task_kfunc_acquire_unreleased, struct task_struct *task, u64 clone_ return 0; } -SEC("tp_btf/task_newtask") -__failure __msg("arg#0 expected pointer to map value") -int BPF_PROG(task_kfunc_get_non_kptr_param, struct task_struct *task, u64 clone_flags) -{ - struct task_struct *kptr; - - /* Cannot use bpf_task_kptr_get() on a non-kptr, even on a valid task. */ - kptr = bpf_task_kptr_get(&task); - if (!kptr) - return 0; - - bpf_task_release(kptr); - - return 0; -} - -SEC("tp_btf/task_newtask") -__failure __msg("arg#0 expected pointer to map value") -int BPF_PROG(task_kfunc_get_non_kptr_acquired, struct task_struct *task, u64 clone_flags) -{ - struct task_struct *kptr, *acquired; - - acquired = bpf_task_acquire(task); - if (!acquired) - return 0; - - /* Cannot use bpf_task_kptr_get() on a non-kptr, even if it was acquired. */ - kptr = bpf_task_kptr_get(&acquired); - bpf_task_release(acquired); - if (!kptr) - return 0; - - bpf_task_release(kptr); - - return 0; -} - -SEC("tp_btf/task_newtask") -__failure __msg("arg#0 expected pointer to map value") -int BPF_PROG(task_kfunc_get_null, struct task_struct *task, u64 clone_flags) -{ - struct task_struct *kptr; - - /* Cannot use bpf_task_kptr_get() on a NULL pointer. */ - kptr = bpf_task_kptr_get(NULL); - if (!kptr) - return 0; - - bpf_task_release(kptr); - - return 0; -} - SEC("tp_btf/task_newtask") __failure __msg("Unreleased reference") int BPF_PROG(task_kfunc_xchg_unreleased, struct task_struct *task, u64 clone_flags) @@ -214,26 +161,6 @@ int BPF_PROG(task_kfunc_acquire_release_no_null_check, struct task_struct *task, return 0; } -SEC("tp_btf/task_newtask") -__failure __msg("Unreleased reference") -int BPF_PROG(task_kfunc_get_unreleased, struct task_struct *task, u64 clone_flags) -{ - struct task_struct *kptr; - struct __tasks_kfunc_map_value *v; - - v = insert_lookup_task(task); - if (!v) - return 0; - - kptr = bpf_task_kptr_get(&v->task); - if (!kptr) - return 0; - - /* Kptr acquired above is never released. 
*/ - - return 0; -} - SEC("tp_btf/task_newtask") __failure __msg("Possibly NULL pointer passed to trusted arg0") int BPF_PROG(task_kfunc_release_untrusted, struct task_struct *task, u64 clone_flags) diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_success.c b/tools/testing/selftests/bpf/progs/task_kfunc_success.c index a75304a5e860..b09371bba204 100644 --- a/tools/testing/selftests/bpf/progs/task_kfunc_success.c +++ b/tools/testing/selftests/bpf/progs/task_kfunc_success.c @@ -122,7 +122,7 @@ int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags) } SEC("tp_btf/task_newtask") -int BPF_PROG(test_task_get_release, struct task_struct *task, u64 clone_flags) +int BPF_PROG(test_task_map_acquire_release, struct task_struct *task, u64 clone_flags) { struct task_struct *kptr; struct __tasks_kfunc_map_value *v; @@ -143,18 +143,18 @@ int BPF_PROG(test_task_get_release, struct task_struct *task, u64 clone_flags) return 0; } - kptr = bpf_task_kptr_get(&v->task); - if (kptr) { - /* Until we resolve the issues with using task->rcu_users, we - * expect bpf_task_kptr_get() to return a NULL task. See the - * comment at the definition of bpf_task_acquire_not_zero() for - * more details. - */ - bpf_task_release(kptr); + bpf_rcu_read_lock(); + kptr = v->task; + if (!kptr) { err = 3; - return 0; + } else { + kptr = bpf_task_acquire(kptr); + if (!kptr) + err = 4; + else + bpf_task_release(kptr); } - + bpf_rcu_read_unlock(); return 0; } -- cgit v1.2.3-70-g09d2 From b5d54eb5899a7c2d478909041446499f379c716c Mon Sep 17 00:00:00 2001 From: Arseniy Krasnov Date: Mon, 3 Apr 2023 14:26:18 +0300 Subject: vsock/test: update expected return values This updates expected return values for invalid buffer test. Now such values are returned from transport, not from af_vsock.c. Signed-off-by: Arseniy Krasnov Reviewed-by: Stefano Garzarella Signed-off-by: Paolo Abeni --- tools/testing/vsock/vsock_test.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c index 12b97c92fbb2..ac1bd3ac1533 100644 --- a/tools/testing/vsock/vsock_test.c +++ b/tools/testing/vsock/vsock_test.c @@ -723,7 +723,7 @@ static void test_seqpacket_invalid_rec_buffer_server(const struct test_opts *opt exit(EXIT_FAILURE); } - if (errno != ENOMEM) { + if (errno != EFAULT) { perror("unexpected errno of 'broken_buf'"); exit(EXIT_FAILURE); } @@ -887,7 +887,7 @@ static void test_inv_buf_client(const struct test_opts *opts, bool stream) exit(EXIT_FAILURE); } - if (errno != ENOMEM) { + if (errno != EFAULT) { fprintf(stderr, "unexpected recv(2) errno %d\n", errno); exit(EXIT_FAILURE); } -- cgit v1.2.3-70-g09d2 From 8fc59c26d212c23d6fd5ad47a10651cf72d83b4a Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 3 Apr 2023 19:29:35 +0200 Subject: selftests/bpf: Add RESOLVE_BTFIDS dependency to bpf_testmod.ko bpf_testmod.ko sometimes fails to build from a clean checkout: BTF [M] linux/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.ko /bin/sh: 1: linux-build//tools/build/resolve_btfids/resolve_btfids: not found The reason is that RESOLVE_BTFIDS may not yet be built. Fix by adding a dependency. 
Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Acked-by: Jiri Olsa Link: https://lore.kernel.org/bpf/20230403172935.1553022-1-iii@linux.ibm.com --- tools/testing/selftests/bpf/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 4a8ef118fd9d..febd1dae6c88 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -201,7 +201,7 @@ $(OUTPUT)/sign-file: ../../../../scripts/sign-file.c $< -o $@ \ $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto) -$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch]) +$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch]) $(call msg,MOD,,$@) $(Q)$(RM) bpf_testmod/bpf_testmod.ko # force re-compilation $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_testmod -- cgit v1.2.3-70-g09d2 From 69f41a787761633b752d71166786eb642bad4913 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 3 Apr 2023 21:50:29 -0700 Subject: selftests/bpf: Add tracing tests for walking skb and req. Add tracing tests for walking skb->sk and req->sk. Signed-off-by: Alexei Starovoitov Signed-off-by: Andrii Nakryiko Acked-by: David Vernet Link: https://lore.kernel.org/bpf/20230404045029.82870-9-alexei.starovoitov@gmail.com --- .../selftests/bpf/progs/test_sk_storage_tracing.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c index 6dc1f28fc4b6..02e718f06e0f 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c +++ b/tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c @@ -92,4 +92,20 @@ int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern, return 0; } +SEC("tp_btf/tcp_retransmit_synack") +int BPF_PROG(tcp_retransmit_synack, struct sock* sk, struct request_sock* req) +{ + /* load only test */ + bpf_sk_storage_get(&sk_stg_map, sk, 0, 0); + bpf_sk_storage_get(&sk_stg_map, req->sk, 0, 0); + return 0; +} + +SEC("tp_btf/tcp_bad_csum") +int BPF_PROG(tcp_bad_csum, struct sk_buff* skb) +{ + bpf_sk_storage_get(&sk_stg_map, skb->sk, 0, 0); + return 0; +} + char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From 9af87166944b3ff33d1399f7a1924ef0175e96b2 Mon Sep 17 00:00:00 2001 From: Kal Conley Date: Mon, 3 Apr 2023 15:01:51 +0200 Subject: selftests: xsk: Add xskxceiver.h dependency to Makefile xskxceiver depends on xskxceiver.h so tell make about it. 
Signed-off-by: Kal Conley Link: https://lore.kernel.org/r/20230403130151.31195-1-kal.conley@dectris.com Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index febd1dae6c88..b5ffdd89b86f 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -612,7 +612,7 @@ $(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT) $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@ -$(OUTPUT)/xskxceiver: xskxceiver.c $(OUTPUT)/xsk.o $(OUTPUT)/xsk_xdp_progs.skel.h $(BPFOBJ) | $(OUTPUT) +$(OUTPUT)/xskxceiver: xskxceiver.c xskxceiver.h $(OUTPUT)/xsk.o $(OUTPUT)/xsk_xdp_progs.skel.h $(BPFOBJ) | $(OUTPUT) $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@ -- cgit v1.2.3-70-g09d2 From 7a2050df244e2c9a4e90882052b7907450ad10ed Mon Sep 17 00:00:00 2001 From: Kal Conley Date: Mon, 3 Apr 2023 16:50:46 +0200 Subject: selftests: xsk: Use correct UMEM size in testapp_invalid_desc Avoid UMEM_SIZE macro in testapp_invalid_desc which is incorrect when the frame size is not XSK_UMEM__DEFAULT_FRAME_SIZE. Also remove the macro since it's no longer being used. Fixes: 909f0e28207c ("selftests: xsk: Add tests for 2K frame size") Signed-off-by: Kal Conley Acked-by: Magnus Karlsson Link: https://lore.kernel.org/r/20230403145047.33065-2-kal.conley@dectris.com Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/xskxceiver.c | 9 +++++---- tools/testing/selftests/bpf/xskxceiver.h | 1 - 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index b65e0645b0cd..3956f5db84f3 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -1652,6 +1652,7 @@ static void testapp_single_pkt(struct test_spec *test) static void testapp_invalid_desc(struct test_spec *test) { + u64 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size; struct pkt pkts[] = { /* Zero packet address allowed */ {0, PKT_SIZE, 0, true}, @@ -1662,9 +1663,9 @@ static void testapp_invalid_desc(struct test_spec *test) /* Packet too large */ {0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false}, /* After umem ends */ - {UMEM_SIZE, PKT_SIZE, 0, false}, + {umem_size, PKT_SIZE, 0, false}, /* Straddle the end of umem */ - {UMEM_SIZE - PKT_SIZE / 2, PKT_SIZE, 0, false}, + {umem_size - PKT_SIZE / 2, PKT_SIZE, 0, false}, /* Straddle a page boundrary */ {0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false}, /* Straddle a 2K boundrary */ @@ -1682,8 +1683,8 @@ static void testapp_invalid_desc(struct test_spec *test) } if (test->ifobj_tx->shared_umem) { - pkts[4].addr += UMEM_SIZE; - pkts[5].addr += UMEM_SIZE; + pkts[4].addr += umem_size; + pkts[5].addr += umem_size; } pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index bdb4efedf3a9..cc24ab72f3ff 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -53,7 +53,6 @@ #define THREAD_TMOUT 3 #define DEFAULT_PKT_CNT (4 * 1024) #define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4) -#define UMEM_SIZE (DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE) #define RX_FULL_RXQSIZE 32 #define 
UMEM_HEADROOM_TEST_SIZE 128 #define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1) -- cgit v1.2.3-70-g09d2 From ccd1b2933f8cbc09a8667992425996f19bf62c15 Mon Sep 17 00:00:00 2001 From: Kal Conley Date: Mon, 3 Apr 2023 16:50:47 +0200 Subject: selftests: xsk: Add test case for packets at end of UMEM Add test case to testapp_invalid_desc for valid packets at the end of the UMEM. Signed-off-by: Kal Conley Acked-by: Magnus Karlsson Link: https://lore.kernel.org/r/20230403145047.33065-3-kal.conley@dectris.com Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/xskxceiver.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 3956f5db84f3..34a1f32fe752 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -1662,6 +1662,8 @@ static void testapp_invalid_desc(struct test_spec *test) {-2, PKT_SIZE, 0, false}, /* Packet too large */ {0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false}, + /* Up to end of umem allowed */ + {umem_size - PKT_SIZE, PKT_SIZE, 0, true}, /* After umem ends */ {umem_size, PKT_SIZE, 0, false}, /* Straddle the end of umem */ @@ -1675,16 +1677,17 @@ static void testapp_invalid_desc(struct test_spec *test) if (test->ifobj_tx->umem->unaligned_mode) { /* Crossing a page boundrary allowed */ - pkts[6].valid = true; + pkts[7].valid = true; } if (test->ifobj_tx->umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) { /* Crossing a 2K frame size boundrary not allowed */ - pkts[7].valid = false; + pkts[8].valid = false; } if (test->ifobj_tx->shared_umem) { pkts[4].addr += umem_size; pkts[5].addr += umem_size; + pkts[6].addr += umem_size; } pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); -- cgit v1.2.3-70-g09d2 From f2b50f17268390567bc0e95642170d88f336c8f4 Mon Sep 17 00:00:00 2001 From: Kal Conley Date: Wed, 5 Apr 2023 10:29:04 +0200 Subject: selftests: xsk: Disable IPv6 on VETH1 This change fixes flakiness in the BIDIRECTIONAL test: # [is_pkt_valid] expected length [60], got length [90] not ok 1 FAIL: SKB BUSY-POLL BIDIRECTIONAL When IPv6 is enabled, the interface will periodically send MLDv1 and MLDv2 packets. These packets can cause the BIDIRECTIONAL test to fail since it uses VETH0 for RX. For other tests, this was not a problem since they only receive on VETH1 and IPv6 was already disabled on VETH0. 
Fixes: a89052572ebb ("selftests/bpf: Xsk selftests framework") Signed-off-by: Kal Conley Link: https://lore.kernel.org/r/20230405082905.6303-1-kal.conley@dectris.com Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/test_xsk.sh | 1 + 1 file changed, 1 insertion(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh index b077cf58f825..377fb157a57c 100755 --- a/tools/testing/selftests/bpf/test_xsk.sh +++ b/tools/testing/selftests/bpf/test_xsk.sh @@ -116,6 +116,7 @@ setup_vethPairs() { ip link add ${VETH0} numtxqueues 4 numrxqueues 4 type veth peer name ${VETH1} numtxqueues 4 numrxqueues 4 if [ -f /proc/net/if_inet6 ]; then echo 1 > /proc/sys/net/ipv6/conf/${VETH0}/disable_ipv6 + echo 1 > /proc/sys/net/ipv6/conf/${VETH1}/disable_ipv6 fi if [[ $verbose -eq 1 ]]; then echo "setting up ${VETH1}" -- cgit v1.2.3-70-g09d2 From 68e7322142f5e731af222892d384d311835db0f1 Mon Sep 17 00:00:00 2001 From: Kal Conley Date: Mon, 3 Apr 2023 14:03:59 +0200 Subject: selftests: xsk: Deflakify STATS_RX_DROPPED test Fix flaky STATS_RX_DROPPED test. The receiver calls getsockopt after receiving the last (valid) packet which is not the final packet sent in the test (valid and invalid packets are sent in alternating fashion with the final packet being invalid). Since the last packet may or may not have been dropped already, both outcomes must be allowed. This issue could also be fixed by making sure the last packet sent is valid. This alternative is left as an exercise to the reader (or the benevolent maintainers of this file). This problem was quite visible on certain setups. On one machine this failure was observed 50% of the time. Also, remove a redundant assignment of pkt_stream->nb_pkts. This field is already initialized by __pkt_stream_alloc. Fixes: 27e934bec35b ("selftests: xsk: make stat tests not spin on getsockopt") Signed-off-by: Kal Conley Acked-by: Magnus Karlsson Link: https://lore.kernel.org/r/20230403120400.31018-1-kal.conley@dectris.com Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/xskxceiver.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 34a1f32fe752..1a4bdd5aa78c 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -633,7 +633,6 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb if (!pkt_stream) exit_with_error(ENOMEM); - pkt_stream->nb_pkts = nb_pkts; for (i = 0; i < nb_pkts; i++) { pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size, pkt_len); @@ -1141,7 +1140,14 @@ static int validate_rx_dropped(struct ifobject *ifobject) if (err) return TEST_FAILURE; - if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2) + /* The receiver calls getsockopt after receiving the last (valid) + * packet which is not the final packet sent in this test (valid and + * invalid packets are sent in alternating fashion with the final + * packet being invalid). Since the last packet may or may not have + * been dropped already, both outcomes must be allowed. 
+ */ + if (stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 || + stats.rx_dropped == ifobject->pkt_stream->nb_pkts / 2 - 1) return TEST_PASS; return TEST_FAILURE; -- cgit v1.2.3-70-g09d2 From 5af607a861d43ffff830fc1890033e579ec44799 Mon Sep 17 00:00:00 2001 From: YiFei Zhu Date: Wed, 5 Apr 2023 19:33:54 +0000 Subject: selftests/bpf: Wait for receive in cg_storage_multi test In some cases the loopback latency might be large enough, causing the assertion on invocations to be run before ingress prog getting executed. The assertion would fail and the test would flake. This can be reliably reproduced by arbitrarily increasing the loopback latency (thanks to [1]): tc qdisc add dev lo root handle 1: htb default 12 tc class add dev lo parent 1:1 classid 1:12 htb rate 20kbps ceil 20kbps tc qdisc add dev lo parent 1:12 netem delay 100ms Fix this by waiting on the receive end, instead of instantly returning to the assert. The call to read() will wait for the default SO_RCVTIMEO timeout of 3 seconds provided by start_server(). [1] https://gist.github.com/kstevens715/4598301 Reported-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/9c5c8b7e-1d89-a3af-5400-14fde81f4429@linux.dev/ Fixes: 3573f384014f ("selftests/bpf: Test CGROUP_STORAGE behavior on shared egress + ingress") Acked-by: Stanislav Fomichev Signed-off-by: YiFei Zhu Link: https://lore.kernel.org/r/20230405193354.1956209-1-zhuyifei@google.com Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c index 621c57222191..63ee892bc757 100644 --- a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c @@ -56,8 +56,9 @@ static bool assert_storage_noexist(struct bpf_map *map, const void *key) static bool connect_send(const char *cgroup_path) { - bool res = true; int server_fd = -1, client_fd = -1; + char message[] = "message"; + bool res = true; if (join_cgroup(cgroup_path)) goto out_clean; @@ -70,7 +71,10 @@ static bool connect_send(const char *cgroup_path) if (client_fd < 0) goto out_clean; - if (send(client_fd, "message", strlen("message"), 0) < 0) + if (send(client_fd, &message, sizeof(message), 0) < 0) + goto out_clean; + + if (read(server_fd, &message, sizeof(message)) < 0) goto out_clean; res = false; -- cgit v1.2.3-70-g09d2 From 905a9eb5f636f3312964b162362e4d6ca4e37378 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 5 Apr 2023 07:15:56 +0000 Subject: selftests/net: fix typo in tcp_mmap kernel test robot reported the following warning: All warnings (new ones prefixed by >>): tcp_mmap.c: In function 'child_thread': >> tcp_mmap.c:211:61: warning: 'lu' may be used uninitialized in this function [-Wmaybe-uninitialized] 211 | zc.length = min(chunk_size, FILE_SZ - lu); We want to read FILE_SZ bytes, so the correct expression should be (FILE_SZ - total) Fixes: 5c5945dc695c ("selftests/net: Add SHA256 computation over data sent in tcp_mmap") Reported-by: kernel test robot Link: https://lore.kernel.org/oe-kbuild-all/202304042104.UFIuevBp-lkp@intel.com/ Signed-off-by: Eric Dumazet Cc: Xiaoyan Li Cc: Kuniyuki Iwashima Link: https://lore.kernel.org/r/20230405071556.1019623-1-edumazet@google.com Signed-off-by: Paolo Abeni --- tools/testing/selftests/net/tcp_mmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
(limited to 'tools/testing') diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c index 607cc9ad8d1b..6e59b1461dcc 100644 --- a/tools/testing/selftests/net/tcp_mmap.c +++ b/tools/testing/selftests/net/tcp_mmap.c @@ -208,7 +208,7 @@ void *child_thread(void *arg) memset(&zc, 0, sizeof(zc)); zc.address = (__u64)((unsigned long)addr); - zc.length = min(chunk_size, FILE_SZ - lu); + zc.length = min(chunk_size, FILE_SZ - total); res = getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len); -- cgit v1.2.3-70-g09d2 From c0801598e5430d9da2d406ed32fcedbef23977fc Mon Sep 17 00:00:00 2001 From: Kal Conley Date: Thu, 6 Apr 2023 01:59:19 +0200 Subject: selftests: xsk: Add test UNALIGNED_INV_DESC_4K1_FRAME_SIZE Add unaligned descriptor test for frame size of 4001. Using an odd frame size ensures that the end of the UMEM is not near a page boundary. This allows testing descriptors that staddle the end of the UMEM but not a page. This test used to fail without the previous commit ("xsk: Fix unaligned descriptor validation"). Signed-off-by: Kal Conley Link: https://lore.kernel.org/r/20230405235920.7305-3-kal.conley@dectris.com Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/xskxceiver.c | 24 ++++++++++++++++++++++++ tools/testing/selftests/bpf/xskxceiver.h | 1 + 2 files changed, 25 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 1a4bdd5aa78c..5a9691e942de 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -69,6 +69,7 @@ */ #define _GNU_SOURCE +#include #include #include #include @@ -1876,6 +1877,29 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_ test->ifobj_rx->umem->unaligned_mode = true; testapp_invalid_desc(test); break; + case TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME: { + u64 page_size, umem_size; + + if (!hugepages_present(test->ifobj_tx)) { + ksft_test_result_skip("No 2M huge pages present.\n"); + return; + } + test_spec_set_name(test, "UNALIGNED_INV_DESC_4K1_FRAME_SIZE"); + /* Odd frame size so the UMEM doesn't end near a page boundary. */ + test->ifobj_tx->umem->frame_size = 4001; + test->ifobj_rx->umem->frame_size = 4001; + test->ifobj_tx->umem->unaligned_mode = true; + test->ifobj_rx->umem->unaligned_mode = true; + /* This test exists to test descriptors that staddle the end of + * the UMEM but not a page. 
+ */ + page_size = sysconf(_SC_PAGESIZE); + umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size; + assert(umem_size % page_size > PKT_SIZE); + assert(umem_size % page_size < page_size - PKT_SIZE); + testapp_invalid_desc(test); + break; + } case TEST_TYPE_UNALIGNED: if (!testapp_unaligned(test)) return; diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index cc24ab72f3ff..919327807a4e 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -78,6 +78,7 @@ enum test_type { TEST_TYPE_ALIGNED_INV_DESC, TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME, TEST_TYPE_UNALIGNED_INV_DESC, + TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME, TEST_TYPE_HEADROOM, TEST_TYPE_TEARDOWN, TEST_TYPE_BIDI, -- cgit v1.2.3-70-g09d2 From aec08d677b4d0adeb7412fa98547cf07bfce6fea Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 6 Apr 2023 09:45:00 -0700 Subject: selftests/bpf: Add tests for non-constant cond_op NE/EQ bound deduction Add various tests for code pattern ' NE/EQ ' implemented in the previous verifier patch. Without the verifier patch, these new tests will fail. Signed-off-by: Yonghong Song Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230406164500.1045715-1-yhs@fb.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../progs/verifier_bounds_deduction_non_const.c | 179 +++++++++++++++++++++ 2 files changed, 181 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index efc8cf2e18d0..73dff693d411 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -7,6 +7,7 @@ #include "verifier_array_access.skel.h" #include "verifier_basic_stack.skel.h" #include "verifier_bounds_deduction.skel.h" +#include "verifier_bounds_deduction_non_const.skel.h" #include "verifier_bounds_mix_sign_unsign.skel.h" #include "verifier_cfg.skel.h" #include "verifier_cgroup_inv_retcode.skel.h" @@ -70,6 +71,7 @@ void test_verifier_and(void) { RUN(verifier_and); } void test_verifier_array_access(void) { RUN(verifier_array_access); } void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); } void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); } +void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); } void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); } void test_verifier_cfg(void) { RUN(verifier_cfg); } void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); } diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c new file mode 100644 index 000000000000..fe570d866139 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "bpf_misc.h" + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, == , 1") +__success __retval(0) +__naked void deducing_bounds_from_non_const_1(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 < 3 goto l0_%=; \ + r2 = 2; \ + if r0 == r2 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; 
\ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, == , 2") +__success __retval(0) +__naked void deducing_bounds_from_non_const_2(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 > 3 goto l0_%=; \ + r2 = 4; \ + if r0 == r2 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, != , 1") +__success __retval(0) +__naked void deducing_bounds_from_non_const_3(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 < 3 goto l0_%=; \ + r2 = 2; \ + if r0 != r2 goto l0_%=; \ + goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, != , 2") +__success __retval(0) +__naked void deducing_bounds_from_non_const_4(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 > 3 goto l0_%=; \ + r2 = 4; \ + if r0 != r2 goto l0_%=; \ + goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, == , 1") +__success __retval(0) +__naked void deducing_bounds_from_non_const_5(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 < 4 goto l0_%=; \ + w2 = 3; \ + if w0 == w2 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, == , 2") +__success __retval(0) +__naked void deducing_bounds_from_non_const_6(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 > 4 goto l0_%=; \ + w2 = 5; \ + if w0 == w2 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, != , 1") +__success __retval(0) +__naked void deducing_bounds_from_non_const_7(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 < 3 goto l0_%=; \ + w2 = 2; \ + if w0 != w2 goto l0_%=; \ + goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, != , 2") +__success __retval(0) +__naked void deducing_bounds_from_non_const_8(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 > 3 goto l0_%=; \ + w2 = 4; \ + if w0 != w2 goto l0_%=; \ + goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From 953d9f5beaf75e88c69a13d70ce424cd606a29f5 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 6 Apr 2023 09:45:05 -0700 Subject: bpf: Improve handling of pattern ' ' in verifier Currently, the verifier does not handle ' ' well. For example, ... 10: (79) r1 = *(u64 *)(r10 -16) ; R1_w=scalar() R10=fp0 11: (b7) r2 = 0 ; R2_w=0 12: (2d) if r2 > r1 goto pc+2 13: (b7) r0 = 0 14: (95) exit 15: (65) if r1 s> 0x1 goto pc+3 16: (0f) r0 += r1 ... 
At insn 12, the verifier decides both the true and false branches are possible, but actually only the false branch is possible. Currently, the verifier already supports patterns '<non_const> <cond_op> <const>'. Add support for patterns '<const> <cond_op> <non_const>' in a similar way. Also fix selftest 'verifier_bounds_mix_sign_unsign/bounds checks mixing signed and unsigned, variant 10' due to this change. Signed-off-by: Yonghong Song Acked-by: Dave Marchevsky Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230406164505.1046801-1-yhs@fb.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 12 ++++++++++++ .../selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5c6b90e384a5..3660b573048a 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -13356,6 +13356,18 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, src_reg->var_off.value, opcode, is_jmp32); + } else if (dst_reg->type == SCALAR_VALUE && + is_jmp32 && tnum_is_const(tnum_subreg(dst_reg->var_off))) { + pred = is_branch_taken(src_reg, + tnum_subreg(dst_reg->var_off).value, + flip_opcode(opcode), + is_jmp32); + } else if (dst_reg->type == SCALAR_VALUE && + !is_jmp32 && tnum_is_const(dst_reg->var_off)) { + pred = is_branch_taken(src_reg, + dst_reg->var_off.value, + flip_opcode(opcode), + is_jmp32); } else if (reg_is_pkt_pointer_any(dst_reg) && reg_is_pkt_pointer_any(src_reg) && !is_jmp32) { diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c b/tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c index 91a66357896a..4f40144748a5 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c +++ b/tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c @@ -354,7 +354,7 @@ __naked void signed_and_unsigned_variant_10(void) call %[bpf_map_lookup_elem]; \ if r0 == 0 goto l0_%=; \ r1 = *(u64*)(r10 - 16); \ - r2 = 0; \ + r2 = -1; \ if r2 > r1 goto l1_%=; \ r0 = 0; \ exit; \ -- cgit v1.2.3-70-g09d2 From 23a88fae9f20d47bb3aed99b1e08d0d6cf65cf0c Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 6 Apr 2023 09:45:10 -0700 Subject: selftests/bpf: Add verifier tests for code pattern '<const> <cond_op> <non_const>' Add various tests for code pattern '<const> <cond_op> <non_const>' to exercise the previous verifier patch. The following is the veristat change in number of processed insns, comparing the previous patch vs. this patch: File Program Insns (A) Insns (B) Insns (DIFF) ----------------------------------------------------- ---------------------------------------------------- --------- --------- ------------- test_seg6_loop.bpf.linked3.o __add_egr_x 12423 12314 -109 (-0.88%) Only one program is affected, with a minor change. 
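As a rough C-level illustration of the pattern these tests exercise (a sketch written for this log, not code from either patch; the function and variable names are made up), consider a compare where the constant sits on the left-hand side and the unknown scalar on the right. Note that the selftests use naked inline asm precisely because a C compiler would usually fold such a compare away before the verifier ever sees it; the C form below only shows the shape of the branch that can now be pruned.

#include <linux/types.h>

/* 'zero' ends up in a register holding the constant 0 and 'len' is an
 * unknown scalar. The unsigned test '0 > len' can never be true, so with
 * the new '<const> <cond_op> <non_const>' handling the verifier marks the
 * branch as never taken and prunes the dead path instead of walking both
 * sides.
 */
static __u64 clamp_sketch(__u64 len)
{
	__u64 zero = 0;

	if (zero > len)		/* constant on the left, unknown scalar on the right */
		len = 0;	/* dead code, now recognized as unreachable */
	return len;
}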
Signed-off-by: Yonghong Song Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230406164510.1047757-1-yhs@fb.com Signed-off-by: Alexei Starovoitov --- .../progs/verifier_bounds_deduction_non_const.c | 460 +++++++++++++++++++++ 1 file changed, 460 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c index fe570d866139..823f727cf210 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c +++ b/tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c @@ -176,4 +176,464 @@ l1_%=: \ : __clobber_all); } +SEC("socket") +__description("check deducing bounds from non-const, jmp64, > , 1") +__success __retval(0) +__naked void deducing_bounds_from_non_const_9(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + r2 = 0; \ + if r2 > r0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, > , 2") +__success __retval(0) +__naked void deducing_bounds_from_non_const_10(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 < 4 goto l0_%=; \ + r2 = 4; \ + if r2 > r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, >= ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_11(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 < 4 goto l0_%=; \ + r2 = 3; \ + if r2 >= r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, < ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_12(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 > 4 goto l0_%=; \ + r2 = 4; \ + if r2 < r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, <= ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_13(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 >= 4 goto l0_%=; \ + r2 = 4; \ + if r2 <= r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, == ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_14(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 < 3 goto l0_%=; \ + r2 = 2; \ + if r2 == r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, s> ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_15(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 s< 4 goto l0_%=; \ + r2 = 4; \ + if r2 s> r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, 
jmp64, s>= ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_16(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 s< 4 goto l0_%=; \ + r2 = 3; \ + if r2 s>= r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, s< ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_17(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 s> 4 goto l0_%=; \ + r2 = 4; \ + if r2 s< r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, s<= ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_18(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 s> 4 goto l0_%=; \ + r2 = 5; \ + if r2 s<= r0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp64, != ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_19(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 < 3 goto l0_%=; \ + r2 = 2; \ + if r2 != r0 goto l0_%=; \ + goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, > , 1") +__success __retval(0) +__naked void deducing_bounds_from_non_const_20(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + w2 = 0; \ + if w2 > w0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, > , 2") +__success __retval(0) +__naked void deducing_bounds_from_non_const_21(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 < 4 goto l0_%=; \ + w2 = 4; \ + if w2 > w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, >= ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_22(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 < 4 goto l0_%=; \ + w2 = 3; \ + if w2 >= w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, < ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_23(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 > 4 goto l0_%=; \ + w2 = 4; \ + if w2 < w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, <= ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_24(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 >= 4 goto l0_%=; \ + w2 = 4; \ + if w2 <= w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") 
+__description("check deducing bounds from non-const, jmp32, == ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_25(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 < 4 goto l0_%=; \ + w2 = 3; \ + if w2 == w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, s> ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_26(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 s< 4 goto l0_%=; \ + w2 = 4; \ + if w2 s> w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, s>= ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_27(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 s< 4 goto l0_%=; \ + w2 = 3; \ + if w2 s>= w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, s< ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_28(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 s> 4 goto l0_%=; \ + w2 = 5; \ + if w2 s< w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, s<= ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_29(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 s>= 4 goto l0_%=; \ + w2 = 4; \ + if w2 s<= w0 goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check deducing bounds from non-const, jmp32, != ") +__success __retval(0) +__naked void deducing_bounds_from_non_const_30(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if w0 < 3 goto l0_%=; \ + w2 = 2; \ + if w2 != w0 goto l0_%=; \ + goto l1_%=; \ +l0_%=: \ + r0 = 0; \ + exit; \ +l1_%=: \ + r0 -= r1; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From a9fda7a0b0331250c4af006f1862752dbefcab9c Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Wed, 5 Apr 2023 16:25:12 +0200 Subject: selftests: forwarding: hw_stats_l3: Detect failure to install counters Running this test makes little sense if the enabled l3_stats are not actually reported as "used". This can signify a failure of a driver to install the necessary counters, or simply lack of support for enabling in-HW counters on a given netdevice. It is generally impossible to tell from the outside which it is. But more likely than not, if somebody is running this on veth pairs, they do not intend to actually test that a certain piece of HW can install in-HW counters for the veth. It is more likely they are e.g. running the test by mistake. Therefore detect that the counter has not been actually installed. In that case, if the netdevice is one end of a veth pair, SKIP. Otherwise FAIL. 
Suggested-by: Hangbin Liu Signed-off-by: Petr Machata Reviewed-by: Danielle Ratson Tested-by: Hangbin Liu Link: https://lore.kernel.org/r/a86817961903cca5cb0aebf2b2a06294b8aa7dea.1680704172.git.petrm@nvidia.com Signed-off-by: Jakub Kicinski --- tools/testing/selftests/net/forwarding/hw_stats_l3.sh | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/forwarding/hw_stats_l3.sh b/tools/testing/selftests/net/forwarding/hw_stats_l3.sh index 9c1f76e108af..432fe8469851 100755 --- a/tools/testing/selftests/net/forwarding/hw_stats_l3.sh +++ b/tools/testing/selftests/net/forwarding/hw_stats_l3.sh @@ -319,6 +319,19 @@ trap cleanup EXIT setup_prepare setup_wait -tests_run +used=$(ip -j stats show dev $rp1.200 group offload subgroup hw_stats_info | + jq '.[].info.l3_stats.used') +kind=$(ip -j -d link show dev $rp1 | + jq -r '.[].linkinfo.info_kind') +if [[ $used != true ]]; then + if [[ $kind == veth ]]; then + log_test_skip "l3_stats not offloaded on veth interface" + EXIT_STATUS=$ksft_skip + else + RET=1 log_test "l3_stats not offloaded" + fi +else + tests_run +fi exit $EXIT_STATUS -- cgit v1.2.3-70-g09d2 From 3ebf5212bf042954666b19fe4ff5a98911b08128 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 7 Apr 2023 12:01:30 -0700 Subject: selftests/bpf: Use PERF_COUNT_HW_CPU_CYCLES event for get_branch_snapshot perf_event with type=PERF_TYPE_RAW and config=0x1b00 turned out to be not reliable in ensuring LBR is active. Thus, test_progs:get_branch_snapshot is not reliable in some systems. Replace it with PERF_COUNT_HW_CPU_CYCLES event, which gives more consistent results. Signed-off-by: Song Liu Signed-off-by: Andrii Nakryiko Acked-by: Jiri Olsa Link: https://lore.kernel.org/bpf/20230407190130.2093736-1-song@kernel.org --- tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c index 3948da12a528..0394a1156d99 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c +++ b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c @@ -37,8 +37,8 @@ static int create_perf_events(void) /* create perf event */ attr.size = sizeof(attr); - attr.type = PERF_TYPE_RAW; - attr.config = 0x1b00; + attr.type = PERF_TYPE_HARDWARE; + attr.config = PERF_COUNT_HW_CPU_CYCLES; attr.sample_type = PERF_SAMPLE_BRANCH_STACK; attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY; -- cgit v1.2.3-70-g09d2 From 5855b0999de4213bf51d856a345c4b53f2304e33 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 7 Apr 2023 18:41:25 +0300 Subject: selftests/bpf: Prevent infinite loop in veristat when base file is too short The following example forces veristat to loop indefinitely: $ cat two-ok file_name,prog_name,verdict,total_states file-a,a,success,12 file-b,b,success,67 $ cat add-failure file_name,prog_name,verdict,total_states file-a,a,success,12 file-b,b,success,67 file-b,c,failure,32 $ veristat -C two-ok add-failure The loop is caused by handle_comparison_mode() not checking if `base` variable points to `fallback_stats` prior advancing joined results using `base`. 
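To see why the missing check hangs, here is a minimal stand-alone sketch of the same two-pointer join (plain ints instead of veristat's per-file/per-program stats; FALLBACK is a made-up stand-in for &fallback_stats, whose empty name compares before any real entry):

```
#include <limits.h>
#include <stdio.h>

/* FALLBACK stands in for veristat's &fallback_stats: an exhausted input
 * yields a key that compares before every real entry (like an empty name).
 */
#define FALLBACK INT_MIN

static void join(const int *base, int nbase, const int *comp, int ncomp)
{
	int i = 0, j = 0;

	while (i < nbase || j < ncomp) {
		int b = i < nbase ? base[i] : FALLBACK;
		int c = j < ncomp ? comp[j] : FALLBACK;

		if (b == c) {			/* entry present on both sides */
			printf("both: %d\n", b);
			i++; j++;
		} else if (b != FALLBACK && (c == FALLBACK || b < c)) {
			/* without "b != FALLBACK" this branch fires forever once
			 * base is exhausted, since FALLBACK compares below any c
			 */
			printf("base only: %d\n", b);
			i++;
		} else {
			printf("comparison only: %d\n", c);
			j++;
		}
	}
}

int main(void)
{
	int base[] = { 1, 3 };		/* shorter "base" file */
	int comp[] = { 1, 3, 7 };	/* comparison file has one extra program */

	join(base, 2, comp, 3);
	return 0;
}
```

The actual fix below additionally guards the comparison-side branch the same way and turns the impossible remaining case into an explicit error return.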
Signed-off-by: Eduard Zingerman Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230407154125.896927-1-eddyz87@gmail.com --- tools/testing/selftests/bpf/veristat.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index 53d7ec168268..e05954e20bba 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -1824,18 +1824,22 @@ static int handle_comparison_mode(void) join->stats_b = comp; i++; j++; - } else if (comp == &fallback_stats || r < 0) { + } else if (base != &fallback_stats && (comp == &fallback_stats || r < 0)) { join->file_name = base->file_name; join->prog_name = base->prog_name; join->stats_a = base; join->stats_b = NULL; i++; - } else { + } else if (comp != &fallback_stats && (base == &fallback_stats || r > 0)) { join->file_name = comp->file_name; join->prog_name = comp->prog_name; join->stats_a = NULL; join->stats_b = comp; j++; + } else { + fprintf(stderr, "%s:%d: should never reach here i=%i, j=%i", + __FILE__, __LINE__, i, j); + return -EINVAL; + } env.join_stat_cnt += 1; } -- cgit v1.2.3-70-g09d2 From c4d3b488a90be95f4f9413dc7eae5fc113d15fe9 Mon Sep 17 00:00:00 2001 From: Manu Bretelle Date: Fri, 7 Apr 2023 19:29:19 -0700 Subject: selftests/bpf: Reset err when symbol name already exist in kprobe_multi_test When trying to add a name to the hashmap, an error code of EEXIST is returned and we continue as names are possibly duplicated in the sys file. If the last name in the file is a duplicate, we will continue to the next iteration of the while loop, and exit the loop with a value of err set to EEXIST and enter the error label with err set, which causes the test to fail when it should not. This change resets err to 0 before continuing into the next iteration; this way, if there is no more data to read from the file we iterate through, err will be set to 0.
Behaviour prior to this change: ``` test_kprobe_multi_bench_attach:FAIL:get_syms unexpected error: -17 (errno 2) All error logs: test_kprobe_multi_bench_attach:FAIL:get_syms unexpected error: -17 (errno 2) Summary: 0/1 PASSED, 0 SKIPPED, 1 FAILED ``` After this change: ``` Summary: 1/2 PASSED, 0 SKIPPED, 0 FAILED ``` Signed-off-by: Manu Bretelle Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230408022919.54601-1-chantr4@gmail.com --- tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c index 22be0a9a5a0a..2173c4bb555e 100644 --- a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -381,8 +381,10 @@ static int get_syms(char ***symsp, size_t *cntp, bool kernel) continue; err = hashmap__add(map, name, 0); - if (err == -EEXIST) + if (err == -EEXIST) { + err = 0; continue; + } if (err) goto error; -- cgit v1.2.3-70-g09d2 From 1216640938035e63bdbd32438e91c9bcc1fd8ee1 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 6 Apr 2023 16:41:49 -0700 Subject: bpf: Switch BPF verifier log to be a rotating log by default Currently, if user-supplied log buffer to collect BPF verifier log turns out to be too small to contain full log, bpf() syscall returns -ENOSPC, fails BPF program verification/load, and preserves first N-1 bytes of the verifier log (where N is the size of user-supplied buffer). This is problematic in a bunch of common scenarios, especially when working with real-world BPF programs that tend to be pretty complex as far as verification goes and require big log buffers. Typically, it's when debugging tricky cases at log level 2 (verbose). Also, when BPF program is successfully validated, log level 2 is the only way to actually see verifier state progression and all the important details. Even with log level 1, it's possible to get -ENOSPC even if the final verifier log fits in log buffer, if there is a code path that's deep enough to fill up entire log, even if normally it would be reset later on (there is a logic to chop off successfully validated portions of BPF verifier log). In short, it's not always possible to pre-size log buffer. Also, what's worse, in practice, the end of the log most often is way more important than the beginning, but verifier stops emitting log as soon as initial log buffer is filled up. This patch switches BPF verifier log behavior to effectively behave as rotating log. That is, if user-supplied log buffer turns out to be too short, verifier will keep overwriting previously written log, effectively treating user's log buffer as a ring buffer. -ENOSPC is still going to be returned at the end, to notify user that log contents was truncated, but the important last N bytes of the log would be returned, which might be all that user really needs. This consistent -ENOSPC behavior, regardless of rotating or fixed log behavior, allows to prevent backwards compatibility breakage. The only user-visible change is which portion of verifier log user ends up seeing *if buffer is too small*. Given contents of verifier log itself is not an ABI, there is no breakage due to this behavior change. Specialized tools that rely on specific contents of verifier log in -ENOSPC scenario are expected to be easily adapted to accommodate old and new behaviors. 
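As a rough illustration of that rotating behaviour, here is a user-space sketch only (not the kernel code below, which, as described later, rotates the user buffer in place rather than copying into a separate output buffer):

```
#include <stdio.h>
#include <string.h>

/* Keep appending into a fixed buffer; once it overflows, only the last
 * (size - 1) bytes are retained (one byte stays reserved for the final '\0').
 */
struct rolog {
	char *buf;
	size_t size;				/* like log->len_total */
	unsigned long long start_pos, end_pos;	/* logical positions */
};

static void rolog_append(struct rolog *log, const char *msg)
{
	size_t n = strlen(msg);

	for (size_t i = 0; i < n; i++)
		log->buf[(log->end_pos + i) % log->size] = msg[i];
	log->end_pos += n;
	if (log->end_pos - log->start_pos > log->size - 1)
		log->start_pos = log->end_pos - (log->size - 1);
}

static void rolog_finalize(const struct rolog *log, char *out)
{
	size_t n = log->end_pos - log->start_pos;

	for (size_t i = 0; i < n; i++)
		out[i] = log->buf[(log->start_pos + i) % log->size];
	out[n] = '\0';
}

int main(void)
{
	char buf[16], out[16];
	struct rolog log = { .buf = buf, .size = sizeof(buf) };

	rolog_append(&log, "0: r1 = r0\n");
	rolog_append(&log, "1: exit\n");
	rolog_finalize(&log, out);
	printf("%s", out);	/* only the last 15 bytes of the log survive */
	return 0;
}
```

The kernel keeps the same start_pos/end_pos bookkeeping, but stages data through log->kbuf when copying to user memory and performs the final rotation in place.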
Importantly, though, to preserve good user experience and not require every user-space application to adopt to this new behavior, before exiting to user-space verifier will rotate log (in place) to make it start at the very beginning of user buffer as a continuous zero-terminated string. The contents will be a chopped off N-1 last bytes of full verifier log, of course. Given beginning of log is sometimes important as well, we add BPF_LOG_FIXED (which equals 8) flag to force old behavior, which allows tools like veristat to request first part of verifier log, if necessary. BPF_LOG_FIXED flag is also a simple and straightforward way to check if BPF verifier supports rotating behavior. On the implementation side, conceptually, it's all simple. We maintain 64-bit logical start and end positions. If we need to truncate the log, start position will be adjusted accordingly to lag end position by N bytes. We then use those logical positions to calculate their matching actual positions in user buffer and handle wrap around the end of the buffer properly. Finally, right before returning from bpf_check(), we rotate user log buffer contents in-place as necessary, to make log contents contiguous. See comments in relevant functions for details. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Reviewed-by: Lorenz Bauer Link: https://lore.kernel.org/bpf/20230406234205.323208-4-andrii@kernel.org --- include/linux/bpf_verifier.h | 33 +++- kernel/bpf/btf.c | 3 +- kernel/bpf/log.c | 198 ++++++++++++++++++++- kernel/bpf/verifier.c | 19 +- tools/testing/selftests/bpf/prog_tests/log_fixup.c | 1 + 5 files changed, 228 insertions(+), 26 deletions(-) (limited to 'tools/testing') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 83dff25545ee..4c926227f612 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -491,25 +491,42 @@ struct bpf_insn_aux_data { #define BPF_VERIFIER_TMP_LOG_SIZE 1024 struct bpf_verifier_log { - u32 level; - char kbuf[BPF_VERIFIER_TMP_LOG_SIZE]; + /* Logical start and end positions of a "log window" of the verifier log. + * start_pos == 0 means we haven't truncated anything. + * Once truncation starts to happen, start_pos + len_total == end_pos, + * except during log reset situations, in which (end_pos - start_pos) + * might get smaller than len_total (see bpf_vlog_reset()). + * Generally, (end_pos - start_pos) gives number of useful data in + * user log buffer. 
+ */ + u64 start_pos; + u64 end_pos; char __user *ubuf; - u32 len_used; + u32 level; u32 len_total; + char kbuf[BPF_VERIFIER_TMP_LOG_SIZE]; }; #define BPF_LOG_LEVEL1 1 #define BPF_LOG_LEVEL2 2 #define BPF_LOG_STATS 4 +#define BPF_LOG_FIXED 8 #define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2) -#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS) +#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED) #define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */ #define BPF_LOG_MIN_ALIGNMENT 8U #define BPF_LOG_ALIGNMENT 40U +static inline u32 bpf_log_used(const struct bpf_verifier_log *log) +{ + return log->end_pos - log->start_pos; +} + static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log) { - return log->len_used >= log->len_total - 1; + if (log->level & BPF_LOG_FIXED) + return bpf_log_used(log) >= log->len_total - 1; + return false; } static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) @@ -596,7 +613,7 @@ struct bpf_verifier_env { u32 scratched_regs; /* Same as scratched_regs but for stack slots */ u64 scratched_stack_slots; - u32 prev_log_len, prev_insn_print_len; + u64 prev_log_pos, prev_insn_print_pos; /* buffer used in reg_type_str() to generate reg_type string */ char type_str_buf[TYPE_STR_BUF_LEN]; }; @@ -608,7 +625,9 @@ __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, const char *fmt, ...); __printf(2, 3) void bpf_log(struct bpf_verifier_log *log, const char *fmt, ...); -void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos); +void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos); +void bpf_vlog_finalize(struct bpf_verifier_log *log); +bool bpf_vlog_truncated(const struct bpf_verifier_log *log); static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env) { diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 593c45a294d0..20a05b8932db 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -5593,7 +5593,8 @@ static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size, } } - if (log->level && bpf_verifier_log_full(log)) { + bpf_vlog_finalize(log); + if (log->level && bpf_vlog_truncated(log)) { err = -ENOSPC; goto errout_meta; } diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c index 1974891fc324..92b1c8ad6601 100644 --- a/kernel/bpf/log.c +++ b/kernel/bpf/log.c @@ -8,6 +8,7 @@ #include #include #include +#include bool bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log) { @@ -32,23 +33,202 @@ void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, return; } - n = min(log->len_total - log->len_used - 1, n); - log->kbuf[n] = '\0'; - if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) - log->len_used += n; - else - log->ubuf = NULL; + if (log->level & BPF_LOG_FIXED) { + n = min(log->len_total - bpf_log_used(log) - 1, n); + log->kbuf[n] = '\0'; + n += 1; + + if (copy_to_user(log->ubuf + log->end_pos, log->kbuf, n)) + goto fail; + + log->end_pos += n - 1; /* don't count terminating '\0' */ + } else { + u64 new_end, new_start, cur_pos; + u32 buf_start, buf_end, new_n; + + n += 1; + + new_end = log->end_pos + n; + if (new_end - log->start_pos >= log->len_total) + new_start = new_end - log->len_total; + else + new_start = log->start_pos; + new_n = min(n, log->len_total); + cur_pos = new_end - new_n; + + div_u64_rem(cur_pos, log->len_total, &buf_start); + div_u64_rem(new_end, log->len_total, &buf_end); + /* new_end and buf_end are exclusive indices, so if buf_end is + * exactly zero, then it actually points right to 
the end of + * ubuf and there is no wrap around + */ + if (buf_end == 0) + buf_end = log->len_total; + + /* if buf_start > buf_end, we wrapped around; + * if buf_start == buf_end, then we fill ubuf completely; we + * can't have buf_start == buf_end to mean that there is + * nothing to write, because we always write at least + * something, even if terminal '\0' + */ + if (buf_start < buf_end) { + /* message fits within contiguous chunk of ubuf */ + if (copy_to_user(log->ubuf + buf_start, + log->kbuf + n - new_n, + buf_end - buf_start)) + goto fail; + } else { + /* message wraps around the end of ubuf, copy in two chunks */ + if (copy_to_user(log->ubuf + buf_start, + log->kbuf + n - new_n, + log->len_total - buf_start)) + goto fail; + if (copy_to_user(log->ubuf, + log->kbuf + n - buf_end, + buf_end)) + goto fail; + } + + log->start_pos = new_start; + log->end_pos = new_end - 1; /* don't count terminating '\0' */ + } + + return; +fail: + log->ubuf = NULL; } -void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos) +void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos) { char zero = 0; + u32 pos; + + if (WARN_ON_ONCE(new_pos > log->end_pos)) + return; if (!bpf_verifier_log_needed(log)) return; - log->len_used = new_pos; - if (put_user(zero, log->ubuf + new_pos)) + /* if position to which we reset is beyond current log window, + * then we didn't preserve any useful content and should adjust + * start_pos to end up with an empty log (start_pos == end_pos) + */ + log->end_pos = new_pos; + if (log->end_pos < log->start_pos) + log->start_pos = log->end_pos; + div_u64_rem(new_pos, log->len_total, &pos); + if (put_user(zero, log->ubuf + pos)) + log->ubuf = NULL; +} + +static void bpf_vlog_reverse_kbuf(char *buf, int len) +{ + int i, j; + + for (i = 0, j = len - 1; i < j; i++, j--) + swap(buf[i], buf[j]); +} + +static int bpf_vlog_reverse_ubuf(struct bpf_verifier_log *log, int start, int end) +{ + /* we split log->kbuf into two equal parts for both ends of array */ + int n = sizeof(log->kbuf) / 2, nn; + char *lbuf = log->kbuf, *rbuf = log->kbuf + n; + + /* Read ubuf's section [start, end) two chunks at a time, from left + * and right side; within each chunk, swap all the bytes; after that + * reverse the order of lbuf and rbuf and write result back to ubuf. + * This way we'll end up with swapped contents of specified + * [start, end) ubuf segment. + */ + while (end - start > 1) { + nn = min(n, (end - start ) / 2); + + if (copy_from_user(lbuf, log->ubuf + start, nn)) + return -EFAULT; + if (copy_from_user(rbuf, log->ubuf + end - nn, nn)) + return -EFAULT; + + bpf_vlog_reverse_kbuf(lbuf, nn); + bpf_vlog_reverse_kbuf(rbuf, nn); + + /* we write lbuf to the right end of ubuf, while rbuf to the + * left one to end up with properly reversed overall ubuf + */ + if (copy_to_user(log->ubuf + start, rbuf, nn)) + return -EFAULT; + if (copy_to_user(log->ubuf + end - nn, lbuf, nn)) + return -EFAULT; + + start += nn; + end -= nn; + } + + return 0; +} + +bool bpf_vlog_truncated(const struct bpf_verifier_log *log) +{ + if (log->level & BPF_LOG_FIXED) + return bpf_log_used(log) >= log->len_total - 1; + else + return log->start_pos > 0; +} + +void bpf_vlog_finalize(struct bpf_verifier_log *log) +{ + u32 sublen; + int err; + + if (!log || !log->level || !log->ubuf) + return; + if ((log->level & BPF_LOG_FIXED) || log->level == BPF_LOG_KERNEL) + return; + + /* If we never truncated log, there is nothing to move around. 
*/ + if (log->start_pos == 0) + return; + + /* Otherwise we need to rotate log contents to make it start from the + * buffer beginning and be a continuous zero-terminated string. Note + * that if log->start_pos != 0 then we definitely filled up entire log + * buffer with no gaps, and we just need to shift buffer contents to + * the left by (log->start_pos % log->len_total) bytes. + * + * Unfortunately, user buffer could be huge and we don't want to + * allocate temporary kernel memory of the same size just to shift + * contents in a straightforward fashion. Instead, we'll be clever and + * do in-place array rotation. This is a leetcode-style problem, which + * could be solved by three rotations. + * + * Let's say we have log buffer that has to be shifted left by 7 bytes + * (spaces and vertical bar is just for demonstrative purposes): + * E F G H I J K | A B C D + * + * First, we reverse entire array: + * D C B A | K J I H G F E + * + * Then we rotate first 4 bytes (DCBA) and separately last 7 bytes + * (KJIHGFE), resulting in a properly rotated array: + * A B C D | E F G H I J K + * + * We'll utilize log->kbuf to read user memory chunk by chunk, swap + * bytes, and write them back. Doing it byte-by-byte would be + * unnecessarily inefficient. Altogether we are going to read and + * write each byte twice, for total 4 memory copies between kernel and + * user space. + */ + + /* length of the chopped off part that will be the beginning; + * len(ABCD) in the example above + */ + div_u64_rem(log->start_pos, log->len_total, &sublen); + sublen = log->len_total - sublen; + + err = bpf_vlog_reverse_ubuf(log, 0, log->len_total); + err = err ?: bpf_vlog_reverse_ubuf(log, 0, sublen); + err = err ?: bpf_vlog_reverse_ubuf(log, sublen, log->len_total); + if (err) log->ubuf = NULL; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 745ae0cd01d4..a476bb319685 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1439,10 +1439,10 @@ static inline u32 vlog_alignment(u32 pos) static void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state) { - if (env->prev_log_len && env->prev_log_len == env->log.len_used) { + if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) { /* remove new line character */ - bpf_vlog_reset(&env->log, env->prev_log_len - 1); - verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_len), ' '); + bpf_vlog_reset(&env->log, env->prev_log_pos - 1); + verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' '); } else { verbose(env, "%d:", env->insn_idx); } @@ -1750,7 +1750,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, elem->insn_idx = insn_idx; elem->prev_insn_idx = prev_insn_idx; elem->next = env->head; - elem->log_pos = env->log.len_used; + elem->log_pos = env->log.end_pos; env->head = elem; env->stack_size++; err = copy_verifier_state(&elem->st, cur); @@ -2286,7 +2286,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, elem->insn_idx = insn_idx; elem->prev_insn_idx = prev_insn_idx; elem->next = env->head; - elem->log_pos = env->log.len_used; + elem->log_pos = env->log.end_pos; env->head = elem; env->stack_size++; if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { @@ -15638,11 +15638,11 @@ static int do_check(struct bpf_verifier_env *env) print_insn_state(env, state->frame[state->curframe]); verbose_linfo(env, env->insn_idx, "; "); - env->prev_log_len = env->log.len_used; + env->prev_log_pos = env->log.end_pos; verbose(env, "%d: ", 
env->insn_idx); print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); - env->prev_insn_print_len = env->log.len_used - env->prev_log_len; - env->prev_log_len = env->log.len_used; + env->prev_insn_print_pos = env->log.end_pos - env->prev_log_pos; + env->prev_log_pos = env->log.end_pos; } if (bpf_prog_is_offloaded(env->prog->aux)) { @@ -18860,7 +18860,8 @@ skip_full_check: print_verification_stats(env); env->prog->aux->verified_insns = env->insn_processed; - if (log->level && bpf_verifier_log_full(log)) + bpf_vlog_finalize(log); + if (log->level && bpf_vlog_truncated(log)) ret = -ENOSPC; if (log->level && !log->ubuf) { ret = -EFAULT; diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c index 239e1c5753b0..bc27170bdeb0 100644 --- a/tools/testing/selftests/bpf/prog_tests/log_fixup.c +++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c @@ -24,6 +24,7 @@ static void bad_core_relo(size_t log_buf_size, enum trunc_type trunc_type) bpf_program__set_autoload(skel->progs.bad_relo, true); memset(log_buf, 0, sizeof(log_buf)); bpf_program__set_log_buf(skel->progs.bad_relo, log_buf, log_buf_size ?: sizeof(log_buf)); + bpf_program__set_log_level(skel->progs.bad_relo, 1 | 8); /* BPF_LOG_FIXED to force truncation */ err = test_log_fixup__load(skel); if (!ASSERT_ERR(err, "load_fail")) -- cgit v1.2.3-70-g09d2 From d0d75c67c45abd3930967dcafc82fd4505400665 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 6 Apr 2023 16:41:51 -0700 Subject: veristat: Add more veristat control over verifier log options Add --log-size to be able to customize log buffer sent to bpf() syscall for BPF program verification logging. Add --log-fixed to enforce BPF_LOG_FIXED behavior for BPF verifier log. This is useful in unlikely event that beginning of truncated verifier log is more important than the end of it (which with rotating verifier log behavior is the default now). 
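In libbpf terms the two options boil down to the log_level bits set per program; a sketch (setup_verifier_log() is a made-up helper, not part of veristat):

```
#include <stdbool.h>
#include <stddef.h>
#include <bpf/libbpf.h>

/* Hypothetical helper mirroring what veristat does per program:
 * bit 4 requests verifier stats, bit 8 is BPF_LOG_FIXED (disable rotation).
 */
static int setup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz,
			      int log_level, bool log_fixed)
{
	int err = bpf_program__set_log_buf(prog, buf, buf_sz);

	if (err)
		return err;
	return bpf_program__set_log_level(prog, log_level | 4 | (log_fixed ? 8 : 0));
}
```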
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230406234205.323208-6-andrii@kernel.org --- tools/testing/selftests/bpf/veristat.c | 44 +++++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 9 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index e05954e20bba..1db7185181da 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -141,12 +141,15 @@ static struct env { bool verbose; bool debug; bool quiet; - int log_level; enum resfmt out_fmt; bool show_version; bool comparison_mode; bool replay_mode; + int log_level; + int log_size; + bool log_fixed; + struct verif_stats *prog_stats; int prog_stat_cnt; @@ -193,12 +196,19 @@ const char argp_program_doc[] = " OR: veristat -C \n" " OR: veristat -R \n"; +enum { + OPT_LOG_FIXED = 1000, + OPT_LOG_SIZE = 1001, +}; + static const struct argp_option opts[] = { { NULL, 'h', NULL, OPTION_HIDDEN, "Show the full help" }, { "version", 'V', NULL, 0, "Print version" }, { "verbose", 'v', NULL, 0, "Verbose mode" }, - { "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" }, { "debug", 'd', NULL, 0, "Debug mode (turns on libbpf debug logging)" }, + { "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" }, + { "log-fixed", OPT_LOG_FIXED, NULL, 0, "Disable verifier log rotation" }, + { "log-size", OPT_LOG_SIZE, "BYTES", 0, "Customize verifier log size (default to 16MB)" }, { "quiet", 'q', NULL, 0, "Quiet mode" }, { "emit", 'e', "SPEC", 0, "Specify stats to be emitted" }, { "sort", 's', "SPEC", 0, "Specify sort order" }, @@ -263,6 +273,17 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) argp_usage(state); } break; + case OPT_LOG_FIXED: + env.log_fixed = true; + break; + case OPT_LOG_SIZE: + errno = 0; + env.log_size = strtol(arg, NULL, 10); + if (errno) { + fprintf(stderr, "invalid log size: %s\n", arg); + argp_usage(state); + } + break; case 'C': env.comparison_mode = true; break; @@ -929,8 +950,8 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf { const char *prog_name = bpf_program__name(prog); const char *base_filename = basename(filename); - size_t buf_sz = sizeof(verif_log_buf); - char *buf = verif_log_buf; + char *buf; + int buf_sz, log_level; struct verif_stats *stats; int err = 0; void *tmp; @@ -948,18 +969,23 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf memset(stats, 0, sizeof(*stats)); if (env.verbose) { - buf_sz = 16 * 1024 * 1024; + buf_sz = env.log_size ? env.log_size : 16 * 1024 * 1024; buf = malloc(buf_sz); if (!buf) return -ENOMEM; - bpf_program__set_log_buf(prog, buf, buf_sz); - bpf_program__set_log_level(prog, env.log_level | 4); /* stats + log */ + /* ensure we always request stats */ + log_level = env.log_level | 4 | (env.log_fixed ? 8 : 0); } else { - bpf_program__set_log_buf(prog, buf, buf_sz); - bpf_program__set_log_level(prog, 4); /* only verifier stats */ + buf = verif_log_buf; + buf_sz = sizeof(verif_log_buf); + /* request only verifier stats */ + log_level = 4 | (env.log_fixed ? 
8 : 0); } verif_log_buf[0] = '\0'; + bpf_program__set_log_buf(prog, buf, buf_sz); + bpf_program__set_log_level(prog, log_level); + /* increase chances of successful BPF object loading */ fixup_obj(obj, prog, base_filename); -- cgit v1.2.3-70-g09d2 From b1a7a480a1120d4f70305f5e8859f527e0efe4a5 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 6 Apr 2023 16:41:52 -0700 Subject: selftests/bpf: Add fixed vs rotating verifier log tests Add selftests validating BPF_LOG_FIXED behavior, which used to be the only behavior, and now default rotating BPF verifier log, which returns just up to last N bytes of full verifier log, instead of returning -ENOSPC. To stress test correctness of in-kernel verifier log logic, we force it to truncate program's verifier log to all lengths from 1 all the way to its full size (about 450 bytes today). This was a useful stress test while developing the feature. For both fixed and rotating log modes we expect -ENOSPC if log contents doesn't fit in user-supplied log buffer. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Lorenz Bauer Link: https://lore.kernel.org/bpf/20230406234205.323208-7-andrii@kernel.org --- .../selftests/bpf/prog_tests/verifier_log.c | 179 +++++++++++++++++++++ 1 file changed, 179 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/verifier_log.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier_log.c b/tools/testing/selftests/bpf/prog_tests/verifier_log.c new file mode 100644 index 000000000000..3284108a6ce8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/verifier_log.c @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include +#include + +#include "test_log_buf.skel.h" + + +static bool check_prog_load(int prog_fd, bool expect_err, const char *tag) +{ + if (expect_err) { + if (!ASSERT_LT(prog_fd, 0, tag)) { + close(prog_fd); + return false; + } + } else /* !expect_err */ { + if (!ASSERT_GT(prog_fd, 0, tag)) + return false; + } + return true; +} + +static void verif_log_subtest(const char *name, bool expect_load_error, int log_level) +{ + LIBBPF_OPTS(bpf_prog_load_opts, opts); + struct { + /* strategically placed before others to avoid accidental modification by kernel */ + char filler[1024]; + char buf[1024]; + /* strategically placed after buf[] to catch more accidental corruptions */ + char reference[1024]; + } logs; + char *exp_log, prog_name[16], op_name[32]; + struct test_log_buf *skel; + struct bpf_program *prog; + const struct bpf_insn *insns; + size_t insn_cnt, fixed_log_sz; + int i, mode, err, prog_fd; + + skel = test_log_buf__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bpf_object__for_each_program(prog, skel->obj) { + if (strcmp(bpf_program__name(prog), name) == 0) + bpf_program__set_autoload(prog, true); + else + bpf_program__set_autoload(prog, false); + } + + err = test_log_buf__load(skel); + if (!expect_load_error && !ASSERT_OK(err, "unexpected_load_failure")) + goto cleanup; + if (expect_load_error && !ASSERT_ERR(err, "unexpected_load_success")) + goto cleanup; + + insns = bpf_program__insns(skel->progs.good_prog); + insn_cnt = bpf_program__insn_cnt(skel->progs.good_prog); + + opts.log_buf = logs.reference; + opts.log_size = sizeof(logs.reference); + opts.log_level = log_level | 8 /* BPF_LOG_FIXED */; + prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_fixed", + "GPL", insns, insn_cnt, &opts); + if (!check_prog_load(prog_fd, 
expect_load_error, "fixed_buf_prog_load")) + goto cleanup; + close(prog_fd); + + fixed_log_sz = strlen(logs.reference) + 1; + if (!ASSERT_GT(fixed_log_sz, 50, "fixed_log_sz")) + goto cleanup; + memset(logs.reference + fixed_log_sz, 0, sizeof(logs.reference) - fixed_log_sz); + + /* validate BPF_LOG_FIXED works as verifier log used to work, that is: + * we get -ENOSPC and beginning of the full verifier log. This only + * works for log_level 2 and log_level 1 + failed program. For log + * level 2 we don't reset log at all. For log_level 1 + failed program + * we don't get to verification stats output. With log level 1 + * for successful program final result will be just verifier stats. + * But if provided too short log buf, kernel will NULL-out log->ubuf + * and will stop emitting further log. This means we'll never see + * predictable verifier stats. + * Long story short, we do the following -ENOSPC test only for + * predictable combinations. + */ + if (log_level >= 2 || expect_load_error) { + opts.log_buf = logs.buf; + opts.log_level = log_level | 8; /* fixed-length log */ + opts.log_size = 25; + + prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_fixed50", + "GPL", insns, insn_cnt, &opts); + if (!ASSERT_EQ(prog_fd, -ENOSPC, "unexpected_log_fixed_prog_load_result")) { + if (prog_fd >= 0) + close(prog_fd); + goto cleanup; + } + if (!ASSERT_EQ(strlen(logs.buf), 24, "log_fixed_25")) + goto cleanup; + if (!ASSERT_STRNEQ(logs.buf, logs.reference, 24, op_name)) + goto cleanup; + } + + /* validate rolling verifier log logic: try all variations of log buf + * length to force various truncation scenarios + */ + opts.log_buf = logs.buf; + + /* rotating mode, then fixed mode */ + for (mode = 1; mode >= 0; mode--) { + /* prefill logs.buf with 'A's to detect any write beyond allowed length */ + memset(logs.filler, 'A', sizeof(logs.filler)); + logs.filler[sizeof(logs.filler) - 1] = '\0'; + memset(logs.buf, 'A', sizeof(logs.buf)); + logs.buf[sizeof(logs.buf) - 1] = '\0'; + + for (i = 1; i < fixed_log_sz; i++) { + opts.log_size = i; + opts.log_level = log_level | (mode ? 0 : 8 /* BPF_LOG_FIXED */); + + snprintf(prog_name, sizeof(prog_name), + "log_%s_%d", mode ? "roll" : "fixed", i); + prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, prog_name, + "GPL", insns, insn_cnt, &opts); + + snprintf(op_name, sizeof(op_name), + "log_%s_prog_load_%d", mode ? "roll" : "fixed", i); + if (!ASSERT_EQ(prog_fd, -ENOSPC, op_name)) { + if (prog_fd >= 0) + close(prog_fd); + goto cleanup; + } + + snprintf(op_name, sizeof(op_name), + "log_%s_strlen_%d", mode ? "roll" : "fixed", i); + ASSERT_EQ(strlen(logs.buf), i - 1, op_name); + + if (mode) + exp_log = logs.reference + fixed_log_sz - i; + else + exp_log = logs.reference; + + snprintf(op_name, sizeof(op_name), + "log_%s_contents_%d", mode ? "roll" : "fixed", i); + if (!ASSERT_STRNEQ(logs.buf, exp_log, i - 1, op_name)) { + printf("CMP:%d\nS1:'%s'\nS2:'%s'\n", + strncmp(logs.buf, exp_log, i - 1), + logs.buf, exp_log); + goto cleanup; + } + + /* check that unused portions of logs.buf is not overwritten */ + snprintf(op_name, sizeof(op_name), + "log_%s_unused_%d", mode ? 
"roll" : "fixed", i); + if (!ASSERT_STREQ(logs.buf + i, logs.filler + i, op_name)) { + printf("CMP:%d\nS1:'%s'\nS2:'%s'\n", + strcmp(logs.buf + i, logs.filler + i), + logs.buf + i, logs.filler + i); + goto cleanup; + } + } + } + +cleanup: + test_log_buf__destroy(skel); +} + +void test_verifier_log(void) +{ + if (test__start_subtest("good_prog-level1")) + verif_log_subtest("good_prog", false, 1); + if (test__start_subtest("good_prog-level2")) + verif_log_subtest("good_prog", false, 2); + if (test__start_subtest("bad_prog-level1")) + verif_log_subtest("bad_prog", true, 1); + if (test__start_subtest("bad_prog-level2")) + verif_log_subtest("bad_prog", true, 2); +} -- cgit v1.2.3-70-g09d2 From 5787540827a9e2cdecf38166e648b2924a57443f Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 6 Apr 2023 16:42:03 -0700 Subject: selftests/bpf: Add tests to validate log_true_size feature Add additional test cases validating that log_true_size is consistent between fixed and rotating log modes, and that log_true_size can be used *exactly* without causing -ENOSPC, while using just 1 byte shorter log buffer would cause -ENOSPC. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Lorenz Bauer Link: https://lore.kernel.org/bpf/20230406234205.323208-18-andrii@kernel.org --- .../selftests/bpf/prog_tests/verifier_log.c | 92 ++++++++++++++++++---- 1 file changed, 76 insertions(+), 16 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier_log.c b/tools/testing/selftests/bpf/prog_tests/verifier_log.c index 3284108a6ce8..2ec82fc60c03 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier_log.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier_log.c @@ -18,25 +18,41 @@ static bool check_prog_load(int prog_fd, bool expect_err, const char *tag) if (!ASSERT_GT(prog_fd, 0, tag)) return false; } + if (prog_fd >= 0) + close(prog_fd); return true; } +static struct { + /* strategically placed before others to avoid accidental modification by kernel */ + char filler[1024]; + char buf[1024]; + /* strategically placed after buf[] to catch more accidental corruptions */ + char reference[1024]; +} logs; +static const struct bpf_insn *insns; +static size_t insn_cnt; + +static int load_prog(struct bpf_prog_load_opts *opts, bool expect_load_error) +{ + int prog_fd; + + prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_prog", + "GPL", insns, insn_cnt, opts); + check_prog_load(prog_fd, expect_load_error, "prog_load"); + + return prog_fd; +} + static void verif_log_subtest(const char *name, bool expect_load_error, int log_level) { LIBBPF_OPTS(bpf_prog_load_opts, opts); - struct { - /* strategically placed before others to avoid accidental modification by kernel */ - char filler[1024]; - char buf[1024]; - /* strategically placed after buf[] to catch more accidental corruptions */ - char reference[1024]; - } logs; char *exp_log, prog_name[16], op_name[32]; struct test_log_buf *skel; struct bpf_program *prog; - const struct bpf_insn *insns; - size_t insn_cnt, fixed_log_sz; - int i, mode, err, prog_fd; + size_t fixed_log_sz; + __u32 log_true_sz_fixed, log_true_sz_rolling; + int i, mode, err, prog_fd, res; skel = test_log_buf__open(); if (!ASSERT_OK_PTR(skel, "skel_open")) @@ -61,11 +77,7 @@ static void verif_log_subtest(const char *name, bool expect_load_error, int log_ opts.log_buf = logs.reference; opts.log_size = sizeof(logs.reference); opts.log_level = log_level | 8 /* BPF_LOG_FIXED */; - prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, 
"log_fixed", - "GPL", insns, insn_cnt, &opts); - if (!check_prog_load(prog_fd, expect_load_error, "fixed_buf_prog_load")) - goto cleanup; - close(prog_fd); + load_prog(&opts, expect_load_error); fixed_log_sz = strlen(logs.reference) + 1; if (!ASSERT_GT(fixed_log_sz, 50, "fixed_log_sz")) @@ -89,7 +101,7 @@ static void verif_log_subtest(const char *name, bool expect_load_error, int log_ opts.log_level = log_level | 8; /* fixed-length log */ opts.log_size = 25; - prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_fixed50", + prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_fixed25", "GPL", insns, insn_cnt, &opts); if (!ASSERT_EQ(prog_fd, -ENOSPC, "unexpected_log_fixed_prog_load_result")) { if (prog_fd >= 0) @@ -162,6 +174,54 @@ static void verif_log_subtest(const char *name, bool expect_load_error, int log_ } } + /* (FIXED) get actual log size */ + opts.log_buf = logs.buf; + opts.log_level = log_level | 8; /* BPF_LOG_FIXED */ + opts.log_size = sizeof(logs.buf); + res = load_prog(&opts, expect_load_error); + ASSERT_NEQ(res, -ENOSPC, "prog_load_res_fixed"); + + log_true_sz_fixed = opts.log_true_size; + ASSERT_GT(log_true_sz_fixed, 0, "log_true_sz_fixed"); + + /* (ROLLING) get actual log size */ + opts.log_buf = logs.buf; + opts.log_level = log_level; + opts.log_size = sizeof(logs.buf); + res = load_prog(&opts, expect_load_error); + ASSERT_NEQ(res, -ENOSPC, "prog_load_res_rolling"); + + log_true_sz_rolling = opts.log_true_size; + ASSERT_EQ(log_true_sz_rolling, log_true_sz_fixed, "log_true_sz_eq"); + + /* (FIXED) expect -ENOSPC for one byte short log */ + opts.log_buf = logs.buf; + opts.log_level = log_level | 8; /* BPF_LOG_FIXED */ + opts.log_size = log_true_sz_fixed - 1; + res = load_prog(&opts, true /* should fail */); + ASSERT_EQ(res, -ENOSPC, "prog_load_res_too_short_fixed"); + + /* (FIXED) expect *not* -ENOSPC with exact log_true_size buffer */ + opts.log_buf = logs.buf; + opts.log_level = log_level | 8; /* BPF_LOG_FIXED */ + opts.log_size = log_true_sz_fixed; + res = load_prog(&opts, expect_load_error); + ASSERT_NEQ(res, -ENOSPC, "prog_load_res_just_right_fixed"); + + /* (ROLLING) expect -ENOSPC for one byte short log */ + opts.log_buf = logs.buf; + opts.log_level = log_level; + opts.log_size = log_true_sz_rolling - 1; + res = load_prog(&opts, true /* should fail */); + ASSERT_EQ(res, -ENOSPC, "prog_load_res_too_short_rolling"); + + /* (ROLLING) expect *not* -ENOSPC with exact log_true_size buffer */ + opts.log_buf = logs.buf; + opts.log_level = log_level; + opts.log_size = log_true_sz_rolling; + res = load_prog(&opts, expect_load_error); + ASSERT_NEQ(res, -ENOSPC, "prog_load_res_just_right_rolling"); + cleanup: test_log_buf__destroy(skel); } -- cgit v1.2.3-70-g09d2 From be983f44274f575e42025130e3c62b8718b0a29a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 6 Apr 2023 16:42:04 -0700 Subject: selftests/bpf: Add testing of log_buf==NULL condition for BPF_PROG_LOAD Add few extra test conditions to validate that it's ok to pass log_buf==NULL and log_size==0 to BPF_PROG_LOAD command with the intent to get log_true_size without providing a buffer. Test that log_buf==NULL condition *does not* return -ENOSPC. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Lorenz Bauer Link: https://lore.kernel.org/bpf/20230406234205.323208-19-andrii@kernel.org --- .../selftests/bpf/prog_tests/verifier_log.c | 23 ++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier_log.c b/tools/testing/selftests/bpf/prog_tests/verifier_log.c index 2ec82fc60c03..9ae0ac6e3b25 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier_log.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier_log.c @@ -178,26 +178,47 @@ static void verif_log_subtest(const char *name, bool expect_load_error, int log_ opts.log_buf = logs.buf; opts.log_level = log_level | 8; /* BPF_LOG_FIXED */ opts.log_size = sizeof(logs.buf); + opts.log_true_size = 0; res = load_prog(&opts, expect_load_error); ASSERT_NEQ(res, -ENOSPC, "prog_load_res_fixed"); log_true_sz_fixed = opts.log_true_size; ASSERT_GT(log_true_sz_fixed, 0, "log_true_sz_fixed"); + /* (FIXED, NULL) get actual log size */ + opts.log_buf = NULL; + opts.log_level = log_level | 8; /* BPF_LOG_FIXED */ + opts.log_size = 0; + opts.log_true_size = 0; + res = load_prog(&opts, expect_load_error); + ASSERT_NEQ(res, -ENOSPC, "prog_load_res_fixed_null"); + ASSERT_EQ(opts.log_true_size, log_true_sz_fixed, "log_sz_fixed_null_eq"); + /* (ROLLING) get actual log size */ opts.log_buf = logs.buf; opts.log_level = log_level; opts.log_size = sizeof(logs.buf); + opts.log_true_size = 0; res = load_prog(&opts, expect_load_error); ASSERT_NEQ(res, -ENOSPC, "prog_load_res_rolling"); log_true_sz_rolling = opts.log_true_size; ASSERT_EQ(log_true_sz_rolling, log_true_sz_fixed, "log_true_sz_eq"); + /* (ROLLING, NULL) get actual log size */ + opts.log_buf = NULL; + opts.log_level = log_level; + opts.log_size = 0; + opts.log_true_size = 0; + res = load_prog(&opts, expect_load_error); + ASSERT_NEQ(res, -ENOSPC, "prog_load_res_rolling_null"); + ASSERT_EQ(opts.log_true_size, log_true_sz_rolling, "log_true_sz_null_eq"); + /* (FIXED) expect -ENOSPC for one byte short log */ opts.log_buf = logs.buf; opts.log_level = log_level | 8; /* BPF_LOG_FIXED */ opts.log_size = log_true_sz_fixed - 1; + opts.log_true_size = 0; res = load_prog(&opts, true /* should fail */); ASSERT_EQ(res, -ENOSPC, "prog_load_res_too_short_fixed"); @@ -205,6 +226,7 @@ static void verif_log_subtest(const char *name, bool expect_load_error, int log_ opts.log_buf = logs.buf; opts.log_level = log_level | 8; /* BPF_LOG_FIXED */ opts.log_size = log_true_sz_fixed; + opts.log_true_size = 0; res = load_prog(&opts, expect_load_error); ASSERT_NEQ(res, -ENOSPC, "prog_load_res_just_right_fixed"); @@ -219,6 +241,7 @@ static void verif_log_subtest(const char *name, bool expect_load_error, int log_ opts.log_buf = logs.buf; opts.log_level = log_level; opts.log_size = log_true_sz_rolling; + opts.log_true_size = 0; res = load_prog(&opts, expect_load_error); ASSERT_NEQ(res, -ENOSPC, "prog_load_res_just_right_rolling"); -- cgit v1.2.3-70-g09d2 From 054b6c7866c7a2537fffd4aa12d88aac47db60f9 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 6 Apr 2023 16:42:05 -0700 Subject: selftests/bpf: Add verifier log tests for BPF_BTF_LOAD command Add verifier log tests for BPF_BTF_LOAD command, which are very similar, conceptually, to BPF_PROG_LOAD tests. These are two separate commands dealing with verbose verifier log, so should be both tested separately. Test that log_buf==NULL condition *does not* return -ENOSPC. 
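The BTF side looks the same in miniature (sketch; btf_data/btf_data_sz are assumed to hold a raw BTF blob, e.g. from btf__raw_data()):

```
#include <errno.h>
#include <bpf/bpf.h>

static int btf_load_with_exact_log(const void *btf_data, __u32 btf_data_sz,
				   char *buf, __u32 buf_sz)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts, .log_level = 1);
	int fd;

	/* log_buf == NULL, log_size == 0: only log_true_size is reported */
	fd = bpf_btf_load(btf_data, btf_data_sz, &opts);
	if (fd >= 0)
		return fd;
	if (opts.log_true_size > buf_sz)
		return -ENOSPC;	/* caller's buffer cannot hold the full log */

	/* exact-fit retry: a buffer even one byte shorter would get -ENOSPC */
	opts.log_buf = buf;
	opts.log_size = opts.log_true_size;
	return bpf_btf_load(btf_data, btf_data_sz, &opts);
}
```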
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Lorenz Bauer Link: https://lore.kernel.org/bpf/20230406234205.323208-20-andrii@kernel.org --- .../selftests/bpf/prog_tests/verifier_log.c | 188 +++++++++++++++++++++ 1 file changed, 188 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier_log.c b/tools/testing/selftests/bpf/prog_tests/verifier_log.c index 9ae0ac6e3b25..475092a78deb 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier_log.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier_log.c @@ -249,6 +249,190 @@ cleanup: test_log_buf__destroy(skel); } +static const void *btf_data; +static u32 btf_data_sz; + +static int load_btf(struct bpf_btf_load_opts *opts, bool expect_err) +{ + int fd; + + fd = bpf_btf_load(btf_data, btf_data_sz, opts); + if (fd >= 0) + close(fd); + if (expect_err) + ASSERT_LT(fd, 0, "btf_load_failure"); + else /* !expect_err */ + ASSERT_GT(fd, 0, "btf_load_success"); + return fd; +} + +static void verif_btf_log_subtest(bool bad_btf) +{ + LIBBPF_OPTS(bpf_btf_load_opts, opts); + struct btf *btf; + struct btf_type *t; + char *exp_log, op_name[32]; + size_t fixed_log_sz; + __u32 log_true_sz_fixed, log_true_sz_rolling; + int i, res; + + /* prepare simple BTF contents */ + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "btf_new_empty")) + return; + res = btf__add_int(btf, "whatever", 4, 0); + if (!ASSERT_GT(res, 0, "btf_add_int_id")) + goto cleanup; + if (bad_btf) { + /* btf__add_int() doesn't allow bad value of size, so we'll just + * force-cast btf_type pointer and manually override size to invalid + * 3 if we need to simulate failure + */ + t = (void *)btf__type_by_id(btf, res); + if (!ASSERT_OK_PTR(t, "int_btf_type")) + goto cleanup; + t->size = 3; + } + + btf_data = btf__raw_data(btf, &btf_data_sz); + if (!ASSERT_OK_PTR(btf_data, "btf_data")) + goto cleanup; + + load_btf(&opts, bad_btf); + + opts.log_buf = logs.reference; + opts.log_size = sizeof(logs.reference); + opts.log_level = 1 | 8 /* BPF_LOG_FIXED */; + load_btf(&opts, bad_btf); + + fixed_log_sz = strlen(logs.reference) + 1; + if (!ASSERT_GT(fixed_log_sz, 50, "fixed_log_sz")) + goto cleanup; + memset(logs.reference + fixed_log_sz, 0, sizeof(logs.reference) - fixed_log_sz); + + /* validate BPF_LOG_FIXED truncation works as verifier log used to work */ + opts.log_buf = logs.buf; + opts.log_level = 1 | 8; /* fixed-length log */ + opts.log_size = 25; + res = load_btf(&opts, true); + ASSERT_EQ(res, -ENOSPC, "half_log_fd"); + ASSERT_EQ(strlen(logs.buf), 24, "log_fixed_25"); + ASSERT_STRNEQ(logs.buf, logs.reference, 24, op_name); + + /* validate rolling verifier log logic: try all variations of log buf + * length to force various truncation scenarios + */ + opts.log_buf = logs.buf; + opts.log_level = 1; /* rolling log */ + + /* prefill logs.buf with 'A's to detect any write beyond allowed length */ + memset(logs.filler, 'A', sizeof(logs.filler)); + logs.filler[sizeof(logs.filler) - 1] = '\0'; + memset(logs.buf, 'A', sizeof(logs.buf)); + logs.buf[sizeof(logs.buf) - 1] = '\0'; + + for (i = 1; i < fixed_log_sz; i++) { + opts.log_size = i; + + snprintf(op_name, sizeof(op_name), "log_roll_btf_load_%d", i); + res = load_btf(&opts, true); + if (!ASSERT_EQ(res, -ENOSPC, op_name)) + goto cleanup; + + exp_log = logs.reference + fixed_log_sz - i; + snprintf(op_name, sizeof(op_name), "log_roll_contents_%d", i); + if (!ASSERT_STREQ(logs.buf, exp_log, op_name)) { + printf("CMP:%d\nS1:'%s'\nS2:'%s'\n", + strcmp(logs.buf, exp_log), + logs.buf, 
exp_log); + goto cleanup; + } + + /* check that unused portions of logs.buf are not overwritten */ + snprintf(op_name, sizeof(op_name), "log_roll_unused_tail_%d", i); + if (!ASSERT_STREQ(logs.buf + i, logs.filler + i, op_name)) { + printf("CMP:%d\nS1:'%s'\nS2:'%s'\n", + strcmp(logs.buf + i, logs.filler + i), + logs.buf + i, logs.filler + i); + goto cleanup; + } + } + + /* (FIXED) get actual log size */ + opts.log_buf = logs.buf; + opts.log_level = 1 | 8; /* BPF_LOG_FIXED */ + opts.log_size = sizeof(logs.buf); + opts.log_true_size = 0; + res = load_btf(&opts, bad_btf); + ASSERT_NEQ(res, -ENOSPC, "btf_load_res_fixed"); + + log_true_sz_fixed = opts.log_true_size; + ASSERT_GT(log_true_sz_fixed, 0, "log_true_sz_fixed"); + + /* (FIXED, NULL) get actual log size */ + opts.log_buf = NULL; + opts.log_level = 1 | 8; /* BPF_LOG_FIXED */ + opts.log_size = 0; + opts.log_true_size = 0; + res = load_btf(&opts, bad_btf); + ASSERT_NEQ(res, -ENOSPC, "btf_load_res_fixed_null"); + ASSERT_EQ(opts.log_true_size, log_true_sz_fixed, "log_sz_fixed_null_eq"); + + /* (ROLLING) get actual log size */ + opts.log_buf = logs.buf; + opts.log_level = 1; + opts.log_size = sizeof(logs.buf); + opts.log_true_size = 0; + res = load_btf(&opts, bad_btf); + ASSERT_NEQ(res, -ENOSPC, "btf_load_res_rolling"); + + log_true_sz_rolling = opts.log_true_size; + ASSERT_EQ(log_true_sz_rolling, log_true_sz_fixed, "log_true_sz_eq"); + + /* (ROLLING, NULL) get actual log size */ + opts.log_buf = NULL; + opts.log_level = 1; + opts.log_size = 0; + opts.log_true_size = 0; + res = load_btf(&opts, bad_btf); + ASSERT_NEQ(res, -ENOSPC, "btf_load_res_rolling_null"); + ASSERT_EQ(opts.log_true_size, log_true_sz_rolling, "log_true_sz_null_eq"); + + /* (FIXED) expect -ENOSPC for one byte short log */ + opts.log_buf = logs.buf; + opts.log_level = 1 | 8; /* BPF_LOG_FIXED */ + opts.log_size = log_true_sz_fixed - 1; + opts.log_true_size = 0; + res = load_btf(&opts, true); + ASSERT_EQ(res, -ENOSPC, "btf_load_res_too_short_fixed"); + + /* (FIXED) expect *not* -ENOSPC with exact log_true_size buffer */ + opts.log_buf = logs.buf; + opts.log_level = 1 | 8; /* BPF_LOG_FIXED */ + opts.log_size = log_true_sz_fixed; + opts.log_true_size = 0; + res = load_btf(&opts, bad_btf); + ASSERT_NEQ(res, -ENOSPC, "btf_load_res_just_right_fixed"); + + /* (ROLLING) expect -ENOSPC for one byte short log */ + opts.log_buf = logs.buf; + opts.log_level = 1; + opts.log_size = log_true_sz_rolling - 1; + res = load_btf(&opts, true); + ASSERT_EQ(res, -ENOSPC, "btf_load_res_too_short_rolling"); + + /* (ROLLING) expect *not* -ENOSPC with exact log_true_size buffer */ + opts.log_buf = logs.buf; + opts.log_level = 1; + opts.log_size = log_true_sz_rolling; + opts.log_true_size = 0; + res = load_btf(&opts, bad_btf); + ASSERT_NEQ(res, -ENOSPC, "btf_load_res_just_right_rolling"); + +cleanup: + btf__free(btf); +} + void test_verifier_log(void) { if (test__start_subtest("good_prog-level1")) @@ -259,4 +443,8 @@ void test_verifier_log(void) verif_log_subtest("bad_prog", true, 1); if (test__start_subtest("bad_prog-level2")) verif_log_subtest("bad_prog", true, 2); + if (test__start_subtest("bad_btf")) + verif_btf_log_subtest(true /* bad btf */); + if (test__start_subtest("good_btf")) + verif_btf_log_subtest(false /* !bad btf */); } -- cgit v1.2.3-70-g09d2 From 75dcef8d3609d0b1d3497d6ed4809096513e0b83 Mon Sep 17 00:00:00 2001 From: Feng Zhou Date: Mon, 10 Apr 2023 16:59:08 +0800 Subject: selftests/bpf: Add test to access u32 ptr argument in tracing program Adding verifier test for accessing u32 pointer 
argument in tracing programs. The test program loads the 1st argument of the bpf_fentry_test9 function, which is a u32 pointer, and checks that the verifier allows that. Co-developed-by: Chengming Zhou Signed-off-by: Chengming Zhou Signed-off-by: Feng Zhou Signed-off-by: Daniel Borkmann Acked-by: Jiri Olsa Link: https://lore.kernel.org/bpf/20230410085908.98493-3-zhoufeng.zf@bytedance.com --- net/bpf/test_run.c | 8 +++++++- tools/testing/selftests/bpf/verifier/btf_ctx_access.c | 13 +++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index f1652f5fbd2e..68bdfc041a7b 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -541,6 +541,11 @@ int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg) return (long)arg->a; } +__bpf_kfunc u32 bpf_fentry_test9(u32 *a) +{ + return *a; +} + __bpf_kfunc int bpf_modify_return_test(int a, int *b) { *b += 1; @@ -855,7 +860,8 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog, bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 || bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 || bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 || - bpf_fentry_test8(&arg) != 0) + bpf_fentry_test8(&arg) != 0 || + bpf_fentry_test9(&retval) != 0) goto out; break; case BPF_MODIFY_RETURN: diff --git a/tools/testing/selftests/bpf/verifier/btf_ctx_access.c b/tools/testing/selftests/bpf/verifier/btf_ctx_access.c index 6340db6b46dc..0484d3de040d 100644 --- a/tools/testing/selftests/bpf/verifier/btf_ctx_access.c +++ b/tools/testing/selftests/bpf/verifier/btf_ctx_access.c @@ -10,3 +10,16 @@ .expected_attach_type = BPF_TRACE_FENTRY, .kfunc = "bpf_modify_return_test", }, + +{ + "btf_ctx_access u32 pointer accept", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), /* load 1st argument value (u32 pointer) */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACING, + .expected_attach_type = BPF_TRACE_FENTRY, + .kfunc = "bpf_fentry_test9", +}, -- cgit v1.2.3-70-g09d2 From 1d71283987c729dceccce834a864c27301ba155e Mon Sep 17 00:00:00 2001 From: David Vernet Date: Mon, 10 Apr 2023 23:16:31 -0500 Subject: bpf: Make bpf_cgroup_acquire() KF_RCU | KF_RET_NULL struct cgroup is already an RCU-safe type in the verifier. We can therefore update bpf_cgroup_acquire() to be KF_RCU | KF_RET_NULL, and subsequently remove bpf_cgroup_kptr_get(). This patch does the first of these by updating bpf_cgroup_acquire() to be KF_RCU | KF_RET_NULL, and also updates selftests accordingly. Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230411041633.179404-1-void@manifault.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/helpers.c | 5 ++-- .../selftests/bpf/progs/cgrp_kfunc_common.h | 5 ++++ .../selftests/bpf/progs/cgrp_kfunc_failure.c | 35 ++++++++++++++++++---- .../selftests/bpf/progs/cgrp_kfunc_success.c | 5 +++- 4 files changed, 40 insertions(+), 10 deletions(-) (limited to 'tools/testing') diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index b6a5cda5bb59..71f0604bdc97 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2037,8 +2037,7 @@ __bpf_kfunc void bpf_task_release(struct task_struct *p) */ __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) { - cgroup_get(cgrp); - return cgrp; + return cgroup_tryget(cgrp) ?
cgrp : NULL; } /** @@ -2314,7 +2313,7 @@ BTF_ID_FLAGS(func, bpf_rbtree_add) BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL) #ifdef CONFIG_CGROUPS -BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_cgroup_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL) diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h index d0b7cd0d09d7..b0e279f4652b 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h @@ -61,6 +61,11 @@ static inline int cgrps_kfunc_map_insert(struct cgroup *cgrp) } acquired = bpf_cgroup_acquire(cgrp); + if (!acquired) { + bpf_map_delete_elem(&__cgrps_kfunc_map, &id); + return -ENOENT; + } + old = bpf_kptr_xchg(&v->cgrp, acquired); if (old) { bpf_cgroup_release(old); diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c index 48b2034cadb3..49347f12de39 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c @@ -41,6 +41,23 @@ int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path /* Can't invoke bpf_cgroup_acquire() on an untrusted pointer. */ acquired = bpf_cgroup_acquire(v->cgrp); + if (acquired) + bpf_cgroup_release(acquired); + + return 0; +} + +SEC("tp_btf/cgroup_mkdir") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +int BPF_PROG(cgrp_kfunc_acquire_no_null_check, struct cgroup *cgrp, const char *path) +{ + struct cgroup *acquired; + + acquired = bpf_cgroup_acquire(cgrp); + /* + * Can't invoke bpf_cgroup_release() without checking the return value + * of bpf_cgroup_acquire(). + */ bpf_cgroup_release(acquired); return 0; @@ -54,7 +71,8 @@ int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path) /* Can't invoke bpf_cgroup_acquire() on a random frame pointer. */ acquired = bpf_cgroup_acquire((struct cgroup *)&stack_cgrp); - bpf_cgroup_release(acquired); + if (acquired) + bpf_cgroup_release(acquired); return 0; } @@ -67,7 +85,8 @@ int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp) /* Can't acquire an untrusted struct cgroup * pointer. */ acquired = bpf_cgroup_acquire(cgrp); - bpf_cgroup_release(acquired); + if (acquired) + bpf_cgroup_release(acquired); return 0; } @@ -80,7 +99,8 @@ int BPF_PROG(cgrp_kfunc_acquire_trusted_walked, struct cgroup *cgrp, const char /* Can't invoke bpf_cgroup_acquire() on a pointer obtained from walking a trusted cgroup. */ acquired = bpf_cgroup_acquire(cgrp->old_dom_cgrp); - bpf_cgroup_release(acquired); + if (acquired) + bpf_cgroup_release(acquired); return 0; } @@ -93,9 +113,8 @@ int BPF_PROG(cgrp_kfunc_acquire_null, struct cgroup *cgrp, const char *path) /* Can't invoke bpf_cgroup_acquire() on a NULL pointer. */ acquired = bpf_cgroup_acquire(NULL); - if (!acquired) - return 0; - bpf_cgroup_release(acquired); + if (acquired) + bpf_cgroup_release(acquired); return 0; } @@ -137,6 +156,8 @@ int BPF_PROG(cgrp_kfunc_get_non_kptr_acquired, struct cgroup *cgrp, const char * struct cgroup *kptr, *acquired; acquired = bpf_cgroup_acquire(cgrp); + if (!acquired) + return 0; /* Cannot use bpf_cgroup_kptr_get() on a non-map-value, even if the kptr was acquired. 
*/ kptr = bpf_cgroup_kptr_get(&acquired); @@ -256,6 +277,8 @@ int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path) return -ENOENT; acquired = bpf_cgroup_acquire(cgrp); + if (!acquired) + return -ENOENT; old = bpf_kptr_xchg(&v->cgrp, acquired); diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c index 030aff700084..e9dbd1af05a7 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c @@ -38,7 +38,10 @@ int BPF_PROG(test_cgrp_acquire_release_argument, struct cgroup *cgrp, const char return 0; acquired = bpf_cgroup_acquire(cgrp); - bpf_cgroup_release(acquired); + if (!acquired) + err = 1; + else + bpf_cgroup_release(acquired); return 0; } -- cgit v1.2.3-70-g09d2 From 6499fe6edc4fd5b91aed4d5cd84bd113e1c58d5f Mon Sep 17 00:00:00 2001 From: David Vernet Date: Mon, 10 Apr 2023 23:16:32 -0500 Subject: bpf: Remove bpf_cgroup_kptr_get() kfunc Now that bpf_cgroup_acquire() is KF_RCU | KF_RET_NULL, bpf_cgroup_kptr_get() is redundant. Let's remove it, and update selftests to instead use bpf_cgroup_acquire() where appropriate. The next patch will update the BPF documentation to not mention bpf_cgroup_kptr_get(). Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230411041633.179404-2-void@manifault.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/helpers.c | 32 ---------- .../selftests/bpf/progs/cgrp_kfunc_common.h | 3 +- .../selftests/bpf/progs/cgrp_kfunc_failure.c | 68 +++------------------- .../selftests/bpf/progs/cgrp_kfunc_success.c | 10 ++-- 4 files changed, 14 insertions(+), 99 deletions(-) (limited to 'tools/testing') diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 71f0604bdc97..f04e60a4847f 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2040,37 +2040,6 @@ __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) return cgroup_tryget(cgrp) ? cgrp : NULL; } -/** - * bpf_cgroup_kptr_get - Acquire a reference on a struct cgroup kptr. A cgroup - * kptr acquired by this kfunc which is not subsequently stored in a map, must - * be released by calling bpf_cgroup_release(). - * @cgrpp: A pointer to a cgroup kptr on which a reference is being acquired. - */ -__bpf_kfunc struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp) -{ - struct cgroup *cgrp; - - rcu_read_lock(); - /* Another context could remove the cgroup from the map and release it - * at any time, including after we've done the lookup above. This is - * safe because we're in an RCU read region, so the cgroup is - * guaranteed to remain valid until at least the rcu_read_unlock() - * below. - */ - cgrp = READ_ONCE(*cgrpp); - - if (cgrp && !cgroup_tryget(cgrp)) - /* If the cgroup had been removed from the map and freed as - * described above, cgroup_tryget() will return false. The - * cgroup will be freed at some point after the current RCU gp - * has ended, so just return NULL to the user. - */ - cgrp = NULL; - rcu_read_unlock(); - - return cgrp; -} - /** * bpf_cgroup_release - Release the reference acquired on a cgroup. 
* If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to @@ -2314,7 +2283,6 @@ BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL) #ifdef CONFIG_CGROUPS BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) -BTF_ID_FLAGS(func, bpf_cgroup_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL) diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h index b0e279f4652b..22914a70db54 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h @@ -21,10 +21,11 @@ struct hash_map { } __cgrps_kfunc_map SEC(".maps"); struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym; -struct cgroup *bpf_cgroup_kptr_get(struct cgroup **pp) __ksym; void bpf_cgroup_release(struct cgroup *p) __ksym; struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym; struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; +void bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; static inline struct __cgrps_kfunc_map_value *cgrps_kfunc_map_value_lookup(struct cgroup *cgrp) { diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c index 49347f12de39..0fa564a5cc5b 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c @@ -133,59 +133,6 @@ int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *pat return 0; } -SEC("tp_btf/cgroup_mkdir") -__failure __msg("arg#0 expected pointer to map value") -int BPF_PROG(cgrp_kfunc_get_non_kptr_param, struct cgroup *cgrp, const char *path) -{ - struct cgroup *kptr; - - /* Cannot use bpf_cgroup_kptr_get() on a non-kptr, even on a valid cgroup. */ - kptr = bpf_cgroup_kptr_get(&cgrp); - if (!kptr) - return 0; - - bpf_cgroup_release(kptr); - - return 0; -} - -SEC("tp_btf/cgroup_mkdir") -__failure __msg("arg#0 expected pointer to map value") -int BPF_PROG(cgrp_kfunc_get_non_kptr_acquired, struct cgroup *cgrp, const char *path) -{ - struct cgroup *kptr, *acquired; - - acquired = bpf_cgroup_acquire(cgrp); - if (!acquired) - return 0; - - /* Cannot use bpf_cgroup_kptr_get() on a non-map-value, even if the kptr was acquired. */ - kptr = bpf_cgroup_kptr_get(&acquired); - bpf_cgroup_release(acquired); - if (!kptr) - return 0; - - bpf_cgroup_release(kptr); - - return 0; -} - -SEC("tp_btf/cgroup_mkdir") -__failure __msg("arg#0 expected pointer to map value") -int BPF_PROG(cgrp_kfunc_get_null, struct cgroup *cgrp, const char *path) -{ - struct cgroup *kptr; - - /* Cannot use bpf_cgroup_kptr_get() on a NULL pointer. 
*/ - kptr = bpf_cgroup_kptr_get(NULL); - if (!kptr) - return 0; - - bpf_cgroup_release(kptr); - - return 0; -} - SEC("tp_btf/cgroup_mkdir") __failure __msg("Unreleased reference") int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path) @@ -207,8 +154,8 @@ int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path) } SEC("tp_btf/cgroup_mkdir") -__failure __msg("Unreleased reference") -int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path) +__failure __msg("must be referenced or trusted") +int BPF_PROG(cgrp_kfunc_rcu_get_release, struct cgroup *cgrp, const char *path) { struct cgroup *kptr; struct __cgrps_kfunc_map_value *v; @@ -217,11 +164,12 @@ int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path) if (!v) return 0; - kptr = bpf_cgroup_kptr_get(&v->cgrp); - if (!kptr) - return 0; - - /* Kptr acquired above is never released. */ + bpf_rcu_read_lock(); + kptr = v->cgrp; + if (kptr) + /* Can't release a cgroup kptr stored in a map. */ + bpf_cgroup_release(kptr); + bpf_rcu_read_unlock(); return 0; } diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c index e9dbd1af05a7..5354455a01be 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c +++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c @@ -126,13 +126,11 @@ int BPF_PROG(test_cgrp_get_release, struct cgroup *cgrp, const char *path) return 0; } - kptr = bpf_cgroup_kptr_get(&v->cgrp); - if (!kptr) { + bpf_rcu_read_lock(); + kptr = v->cgrp; + if (!kptr) err = 3; - return 0; - } - - bpf_cgroup_release(kptr); + bpf_rcu_read_unlock(); return 0; } -- cgit v1.2.3-70-g09d2 From d9688f898c08c8f96fb0e7879262877ffd319bfd Mon Sep 17 00:00:00 2001 From: Christian Ehrig Date: Fri, 7 Apr 2023 15:38:55 +0200 Subject: selftests/bpf: Test FOU kfuncs for externally controlled ipip devices Add tests for FOU and GUE encapsulation via the bpf_skb_{set,get}_fou_encap kfuncs, using ipip devices in collect-metadata mode. These tests make sure that we can successfully set and obtain FOU and GUE encap parameters using ingress / egress BPF tc-hooks. 
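For orientation, a minimal egress program built around the new kfuncs looks roughly like the sketch below. This is illustrative only: it assumes the same includes and local definitions (struct bpf_fou_encap, the FOU_BPF_ENCAP_* enum, and the __ksym kfunc declarations) as the test_tunnel_kern.c hunk further down, where the complete test programs live.

SEC("tc")
int fou_encap_sketch(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = { .tunnel_ttl = 64 };
	struct bpf_fou_encap encap = { .dport = bpf_htons(5555) };

	/* outer destination for the collect-md ipip device */
	key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0) < 0)
		return TC_ACT_SHOT;
	/* request a GUE header with destination port 5555 */
	if (bpf_skb_set_fou_encap(skb, &encap, FOU_BPF_ENCAP_GUE) < 0)
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}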
Signed-off-by: Christian Ehrig Link: https://lore.kernel.org/r/040193566ddbdb0b53eb359f7ac7bbd316f338b5.1680874078.git.cehrig@cloudflare.com Signed-off-by: Alexei Starovoitov --- .../testing/selftests/bpf/prog_tests/test_tunnel.c | 153 ++++++++++++++++++++- .../testing/selftests/bpf/progs/test_tunnel_kern.c | 117 ++++++++++++++++ 2 files changed, 268 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c index 47f1d482fe39..d149ab98798d 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c +++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c @@ -89,6 +89,9 @@ #define IP6VXLAN_TUNL_DEV0 "ip6vxlan00" #define IP6VXLAN_TUNL_DEV1 "ip6vxlan11" +#define IPIP_TUNL_DEV0 "ipip00" +#define IPIP_TUNL_DEV1 "ipip11" + #define PING_ARGS "-i 0.01 -c 3 -w 10 -q" static int config_device(void) @@ -188,6 +191,79 @@ static void delete_ip6vxlan_tunnel(void) SYS_NOFAIL("ip link delete dev %s", IP6VXLAN_TUNL_DEV1); } +enum ipip_encap { + NONE = 0, + FOU = 1, + GUE = 2, +}; + +static int set_ipip_encap(const char *ipproto, const char *type) +{ + SYS(fail, "ip -n at_ns0 fou add port 5555 %s", ipproto); + SYS(fail, "ip -n at_ns0 link set dev %s type ipip encap %s", + IPIP_TUNL_DEV0, type); + SYS(fail, "ip -n at_ns0 link set dev %s type ipip encap-dport 5555", + IPIP_TUNL_DEV0); + + return 0; +fail: + return -1; +} + +static int add_ipip_tunnel(enum ipip_encap encap) +{ + int err; + const char *ipproto, *type; + + switch (encap) { + case FOU: + ipproto = "ipproto 4"; + type = "fou"; + break; + case GUE: + ipproto = "gue"; + type = ipproto; + break; + default: + ipproto = NULL; + type = ipproto; + } + + /* at_ns0 namespace */ + SYS(fail, "ip -n at_ns0 link add dev %s type ipip local %s remote %s", + IPIP_TUNL_DEV0, IP4_ADDR_VETH0, IP4_ADDR1_VETH1); + + if (type && ipproto) { + err = set_ipip_encap(ipproto, type); + if (!ASSERT_OK(err, "set_ipip_encap")) + goto fail; + } + + SYS(fail, "ip -n at_ns0 link set dev %s up", IPIP_TUNL_DEV0); + SYS(fail, "ip -n at_ns0 addr add dev %s %s/24", + IPIP_TUNL_DEV0, IP4_ADDR_TUNL_DEV0); + + /* root namespace */ + if (type && ipproto) + SYS(fail, "ip fou add port 5555 %s", ipproto); + SYS(fail, "ip link add dev %s type ipip external", IPIP_TUNL_DEV1); + SYS(fail, "ip link set dev %s up", IPIP_TUNL_DEV1); + SYS(fail, "ip addr add dev %s %s/24", IPIP_TUNL_DEV1, + IP4_ADDR_TUNL_DEV1); + + return 0; +fail: + return -1; +} + +static void delete_ipip_tunnel(void) +{ + SYS_NOFAIL("ip -n at_ns0 link delete dev %s", IPIP_TUNL_DEV0); + SYS_NOFAIL("ip -n at_ns0 fou del port 5555 2> /dev/null"); + SYS_NOFAIL("ip link delete dev %s", IPIP_TUNL_DEV1); + SYS_NOFAIL("ip fou del port 5555 2> /dev/null"); +} + static int test_ping(int family, const char *addr) { SYS(fail, "%s %s %s > /dev/null", ping_command(family), PING_ARGS, addr); @@ -386,10 +462,80 @@ done: test_tunnel_kern__destroy(skel); } -#define RUN_TEST(name) \ +static void test_ipip_tunnel(enum ipip_encap encap) +{ + struct test_tunnel_kern *skel = NULL; + struct nstoken *nstoken; + int set_src_prog_fd, get_src_prog_fd; + int ifindex = -1; + int err; + DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, + .attach_point = BPF_TC_INGRESS); + + /* add ipip tunnel */ + err = add_ipip_tunnel(encap); + if (!ASSERT_OK(err, "add_ipip_tunnel")) + goto done; + + /* load and attach bpf prog to tunnel dev tc hook point */ + skel = test_tunnel_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, 
"test_tunnel_kern__open_and_load")) + goto done; + ifindex = if_nametoindex(IPIP_TUNL_DEV1); + if (!ASSERT_NEQ(ifindex, 0, "ipip11 ifindex")) + goto done; + tc_hook.ifindex = ifindex; + + switch (encap) { + case FOU: + get_src_prog_fd = bpf_program__fd( + skel->progs.ipip_encap_get_tunnel); + set_src_prog_fd = bpf_program__fd( + skel->progs.ipip_fou_set_tunnel); + break; + case GUE: + get_src_prog_fd = bpf_program__fd( + skel->progs.ipip_encap_get_tunnel); + set_src_prog_fd = bpf_program__fd( + skel->progs.ipip_gue_set_tunnel); + break; + default: + get_src_prog_fd = bpf_program__fd( + skel->progs.ipip_get_tunnel); + set_src_prog_fd = bpf_program__fd( + skel->progs.ipip_set_tunnel); + } + + if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd")) + goto done; + if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd")) + goto done; + if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd)) + goto done; + + /* ping from root namespace test */ + err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0); + if (!ASSERT_OK(err, "test_ping")) + goto done; + + /* ping from at_ns0 namespace test */ + nstoken = open_netns("at_ns0"); + err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV1); + if (!ASSERT_OK(err, "test_ping")) + goto done; + close_netns(nstoken); + +done: + /* delete ipip tunnel */ + delete_ipip_tunnel(); + if (skel) + test_tunnel_kern__destroy(skel); +} + +#define RUN_TEST(name, ...) \ ({ \ if (test__start_subtest(#name)) { \ - test_ ## name(); \ + test_ ## name(__VA_ARGS__); \ } \ }) @@ -400,6 +546,9 @@ static void *test_tunnel_run_tests(void *arg) RUN_TEST(vxlan_tunnel); RUN_TEST(ip6vxlan_tunnel); + RUN_TEST(ipip_tunnel, NONE); + RUN_TEST(ipip_tunnel, FOU); + RUN_TEST(ipip_tunnel, GUE); cleanup(); diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c index 9ab2d55ab7c0..f66af753bbbb 100644 --- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c @@ -52,6 +52,21 @@ struct vxlan_metadata { __u32 gbp; }; +struct bpf_fou_encap { + __be16 sport; + __be16 dport; +}; + +enum bpf_fou_encap_type { + FOU_BPF_ENCAP_FOU, + FOU_BPF_ENCAP_GUE, +}; + +int bpf_skb_set_fou_encap(struct __sk_buff *skb_ctx, + struct bpf_fou_encap *encap, int type) __ksym; +int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx, + struct bpf_fou_encap *encap) __ksym; + struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 1); @@ -749,6 +764,108 @@ int ipip_get_tunnel(struct __sk_buff *skb) return TC_ACT_OK; } +SEC("tc") +int ipip_gue_set_tunnel(struct __sk_buff *skb) +{ + struct bpf_tunnel_key key = {}; + struct bpf_fou_encap encap = {}; + void *data = (void *)(long)skb->data; + struct iphdr *iph = data; + void *data_end = (void *)(long)skb->data_end; + int ret; + + if (data + sizeof(*iph) > data_end) { + log_err(1); + return TC_ACT_SHOT; + } + + key.tunnel_ttl = 64; + if (iph->protocol == IPPROTO_ICMP) + key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */ + + ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); + if (ret < 0) { + log_err(ret); + return TC_ACT_SHOT; + } + + encap.sport = 0; + encap.dport = bpf_htons(5555); + + ret = bpf_skb_set_fou_encap(skb, &encap, FOU_BPF_ENCAP_GUE); + if (ret < 0) { + log_err(ret); + return TC_ACT_SHOT; + } + + return TC_ACT_OK; +} + +SEC("tc") +int ipip_fou_set_tunnel(struct __sk_buff *skb) +{ + struct bpf_tunnel_key key = {}; + struct bpf_fou_encap encap = {}; + void *data = (void *)(long)skb->data; + struct iphdr *iph = data; + void *data_end = (void 
*)(long)skb->data_end; + int ret; + + if (data + sizeof(*iph) > data_end) { + log_err(1); + return TC_ACT_SHOT; + } + + key.tunnel_ttl = 64; + if (iph->protocol == IPPROTO_ICMP) + key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */ + + ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); + if (ret < 0) { + log_err(ret); + return TC_ACT_SHOT; + } + + encap.sport = 0; + encap.dport = bpf_htons(5555); + + ret = bpf_skb_set_fou_encap(skb, &encap, FOU_BPF_ENCAP_FOU); + if (ret < 0) { + log_err(ret); + return TC_ACT_SHOT; + } + + return TC_ACT_OK; +} + +SEC("tc") +int ipip_encap_get_tunnel(struct __sk_buff *skb) +{ + int ret; + struct bpf_tunnel_key key = {}; + struct bpf_fou_encap encap = {}; + + ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); + if (ret < 0) { + log_err(ret); + return TC_ACT_SHOT; + } + + ret = bpf_skb_get_fou_encap(skb, &encap); + if (ret < 0) { + log_err(ret); + return TC_ACT_SHOT; + } + + if (bpf_ntohs(encap.dport) != 5555) + return TC_ACT_SHOT; + + bpf_printk("%d remote ip 0x%x, sport %d, dport %d\n", ret, + key.remote_ipv4, bpf_ntohs(encap.sport), + bpf_ntohs(encap.dport)); + return TC_ACT_OK; +} + SEC("tc") int ipip6_set_tunnel(struct __sk_buff *skb) { -- cgit v1.2.3-70-g09d2 From 5a674611d116a5fc28c5429beea2b78c6e2933ef Mon Sep 17 00:00:00 2001 From: Lorenz Bauer Date: Thu, 13 Apr 2023 10:47:40 +0100 Subject: selftests/bpf: Fix use of uninitialized op_name in log tests One of the test assertions uses an uninitialized op_name, which leads to some headscratching if it fails. Use a string constant instead. Fixes: b1a7a480a112 ("selftests/bpf: Add fixed vs rotating verifier log tests") Signed-off-by: Lorenz Bauer Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230413094740.18041-1-lmb@isovalent.com --- tools/testing/selftests/bpf/prog_tests/verifier_log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier_log.c b/tools/testing/selftests/bpf/prog_tests/verifier_log.c index 475092a78deb..8337c6bc5b95 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier_log.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier_log.c @@ -110,7 +110,7 @@ static void verif_log_subtest(const char *name, bool expect_load_error, int log_ } if (!ASSERT_EQ(strlen(logs.buf), 24, "log_fixed_25")) goto cleanup; - if (!ASSERT_STRNEQ(logs.buf, logs.reference, 24, op_name)) + if (!ASSERT_STRNEQ(logs.buf, logs.reference, 24, "log_fixed_contents_25")) goto cleanup; } -- cgit v1.2.3-70-g09d2 From de6d014a09bf12a9a8959d60c0a1d4a41d394a89 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Wed, 12 Apr 2023 14:04:21 -0700 Subject: selftests/bpf: Use read_perf_max_sample_freq() in perf_event_stackmap Currently, the perf_event sample period in perf_event_stackmap is set so low that the test fails randomly. Fix this by using the max sample frequency from read_perf_max_sample_freq(). Move read_perf_max_sample_freq() to testing_helpers.c. Replace the CHECK() with if-printf, as CHECK is not available in testing_helpers.c.
Fixes: 1da4864c2b20 ("selftests/bpf: Add callchain_stackid") Signed-off-by: Song Liu Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230412210423.900851-2-song@kernel.org --- .../selftests/bpf/prog_tests/perf_event_stackmap.c | 3 ++- .../bpf/prog_tests/stacktrace_build_id_nmi.c | 15 --------------- tools/testing/selftests/bpf/testing_helpers.c | 20 ++++++++++++++++++++ tools/testing/selftests/bpf/testing_helpers.h | 2 ++ 4 files changed, 24 insertions(+), 16 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c index 33144c9432ae..f4aad35afae1 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c @@ -63,7 +63,8 @@ void test_perf_event_stackmap(void) PERF_SAMPLE_BRANCH_NO_FLAGS | PERF_SAMPLE_BRANCH_NO_CYCLES | PERF_SAMPLE_BRANCH_CALL_STACK, - .sample_period = 5000, + .freq = 1, + .sample_freq = read_perf_max_sample_freq(), .size = sizeof(struct perf_event_attr), }; struct perf_event_stackmap *skel; diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c index 47558b0d7f66..5db9eec24b5b 100644 --- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c @@ -2,21 +2,6 @@ #include #include "test_stacktrace_build_id.skel.h" -static __u64 read_perf_max_sample_freq(void) -{ - __u64 sample_freq = 5000; /* fallback to 5000 on error */ - FILE *f; - __u32 duration = 0; - - f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r"); - if (f == NULL) - return sample_freq; - CHECK(fscanf(f, "%llu", &sample_freq) != 1, "Get max sample rate", - "return default value: 5000,err %d\n", -errno); - fclose(f); - return sample_freq; -} - void test_stacktrace_build_id_nmi(void) { int control_map_fd, stackid_hmap_fd, stackmap_fd; diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c index ecfea13f938b..0b5e0829e5be 100644 --- a/tools/testing/selftests/bpf/testing_helpers.c +++ b/tools/testing/selftests/bpf/testing_helpers.c @@ -229,3 +229,23 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, return bpf_prog_load(type, NULL, license, insns, insns_cnt, &opts); } + +__u64 read_perf_max_sample_freq(void) +{ + __u64 sample_freq = 5000; /* fallback to 5000 on error */ + FILE *f; + + f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r"); + if (f == NULL) { + printf("Failed to open /proc/sys/kernel/perf_event_max_sample_rate: err %d\n" + "return default value: 5000\n", -errno); + return sample_freq; + } + if (fscanf(f, "%llu", &sample_freq) != 1) { + printf("Failed to parse /proc/sys/kernel/perf_event_max_sample_rate: err %d\n" + "return default value: 5000\n", -errno); + } + + fclose(f); + return sample_freq; +} diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h index 6ec00bf79cb5..eb8790f928e4 100644 --- a/tools/testing/selftests/bpf/testing_helpers.h +++ b/tools/testing/selftests/bpf/testing_helpers.h @@ -20,3 +20,5 @@ struct test_filter_set; int parse_test_list(const char *s, struct test_filter_set *test_set, bool is_glob_pattern); + +__u64 read_perf_max_sample_freq(void); -- cgit v1.2.3-70-g09d2 From c1e07a80cf23d3a6e96172bc9a73bfa912a9fcbc Mon Sep 17 00:00:00 2001 From: 
Song Liu Date: Wed, 12 Apr 2023 14:04:22 -0700 Subject: selftests/bpf: Fix leaked bpf_link in get_stackid_cannot_attach skel->links.oncpu is leaked in one case. This causes the perf_branches test to fail when it runs after get_stackid_cannot_attach: ./test_progs -t get_stackid_cannot_attach,perf_branches 84 get_stackid_cannot_attach:OK test_perf_branches_common:PASS:test_perf_branches_load 0 nsec test_perf_branches_common:PASS:attach_perf_event 0 nsec test_perf_branches_common:PASS:set_affinity 0 nsec check_good_sample:FAIL:output not valid no valid sample from prog 146/1 perf_branches/perf_branches_hw:FAIL 146/2 perf_branches/perf_branches_no_hw:OK 146 perf_branches:FAIL All error logs: test_perf_branches_common:PASS:test_perf_branches_load 0 nsec test_perf_branches_common:PASS:attach_perf_event 0 nsec test_perf_branches_common:PASS:set_affinity 0 nsec check_good_sample:FAIL:output not valid no valid sample from prog 146/1 perf_branches/perf_branches_hw:FAIL 146 perf_branches:FAIL Summary: 1/1 PASSED, 0 SKIPPED, 1 FAILED Fix this by adding the missing bpf_link__destroy(). Fixes: 346938e9380c ("selftests/bpf: Add get_stackid_cannot_attach") Signed-off-by: Song Liu Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230412210423.900851-3-song@kernel.org --- tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c | 1 + 1 file changed, 1 insertion(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c index 5308de1ed478..2715c68301f5 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c @@ -65,6 +65,7 @@ void test_get_stackid_cannot_attach(void) skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu, pmu_fd); ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event_callchain"); + bpf_link__destroy(skel->links.oncpu); close(pmu_fd); /* add exclude_callchain_kernel, attach should fail */ -- cgit v1.2.3-70-g09d2 From 2995f9a8d427b9ff6f3cf4e85c0f9d4456ef324d Mon Sep 17 00:00:00 2001 From: Song Liu Date: Wed, 12 Apr 2023 14:04:23 -0700 Subject: selftests/bpf: Keep the loop in bpf_testmod_loop_test Some compilers (for example clang-15) optimize bpf_testmod_loop_test and remove the loop: gcc version (gdb) disassemble bpf_testmod_loop_test Dump of assembler code for function bpf_testmod_loop_test: 0x0000000000000570 <+0>: callq 0x575 0x0000000000000575 <+5>: xor %eax,%eax 0x0000000000000577 <+7>: test %edi,%edi 0x0000000000000579 <+9>: jle 0x587 0x000000000000057b <+11>: xor %edx,%edx 0x000000000000057d <+13>: add %edx,%eax 0x000000000000057f <+15>: add $0x1,%edx 0x0000000000000582 <+18>: cmp %edx,%edi 0x0000000000000584 <+20>: jne 0x57d 0x0000000000000586 <+22>: retq 0x0000000000000587 <+23>: retq clang-15 version (gdb) disassemble bpf_testmod_loop_test Dump of assembler code for function bpf_testmod_loop_test: 0x0000000000000450 <+0>: nopl 0x0(%rax,%rax,1) 0x0000000000000455 <+5>: test %edi,%edi 0x0000000000000457 <+7>: jle 0x46b 0x0000000000000459 <+9>: lea -0x1(%rdi),%eax 0x000000000000045c <+12>: lea -0x2(%rdi),%ecx 0x000000000000045f <+15>: imul %rax,%rcx 0x0000000000000463 <+19>: shr %rcx 0x0000000000000466 <+22>: lea -0x1(%rdi,%rcx,1),%eax 0x000000000000046a <+26>: retq 0x000000000000046b <+27>: xor %eax,%eax 0x000000000000046d <+29>: retq Note: The jne instruction is removed in the clang-15 version.
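In other words, clang-15 emits the loop's closed form: for n > 0 the lea/imul/shr sequence above computes (n - 1) + (n - 1) * (n - 2) / 2, which equals n * (n - 1) / 2, the sum of 0 .. n-1. A C sketch of what the optimized function is equivalent to:

	/* no loop, and none of the branches the LBR test relies on */
	if (n <= 0)
		return 0;
	return (n - 1) + (n - 1) * (n - 2) / 2;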
Force the compiler to keep the loop by making sum volatile. Signed-off-by: Song Liu Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230412210423.900851-4-song@kernel.org --- tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 7999476b9446..c5ad39bbe9af 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -130,7 +130,11 @@ bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) { noinline int bpf_testmod_loop_test(int n) { - int i, sum = 0; + /* Make sum volatile, so smart compilers, such as clang, will not + * optimize the code by removing the loop. + */ + volatile int sum = 0; + int i; /* the primary goal of this test is to test LBR. Create a lot of * branches in the function, so we can catch it easily. -- cgit v1.2.3-70-g09d2 From ee5059a64dbad4806a3c11babd0dbed5a5d04ead Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 12 Apr 2023 10:06:55 -0700 Subject: selftests/bpf: Remove stand-alone test_verifier_log test binary test_prog's prog_tests/verifier_log.c is superseding the stand-alone test_verifier_log test. It covers the same checks and adds more, and is also integrated into the test_progs test runner. Just remove test_verifier_log.c. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230412170655.1866831-1-andrii@kernel.org --- tools/testing/selftests/bpf/Makefile | 2 +- tools/testing/selftests/bpf/test_verifier_log.c | 175 ------------------------ 2 files changed, 1 insertion(+), 176 deletions(-) delete mode 100644 tools/testing/selftests/bpf/test_verifier_log.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index b5ffdd89b86f..c49e5403ad0e 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -36,7 +36,7 @@ endif # Order correspond to 'make run_tests' order TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ - test_verifier_log test_dev_cgroup \ + test_dev_cgroup \ test_sock test_sockmap get_cgroup_id_user \ test_cgroup_storage \ test_tcpnotify_user test_sysctl \ diff --git a/tools/testing/selftests/bpf/test_verifier_log.c b/tools/testing/selftests/bpf/test_verifier_log.c deleted file mode 100644 index 70feda97cee5..000000000000 --- a/tools/testing/selftests/bpf/test_verifier_log.c +++ /dev/null @@ -1,175 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#define LOG_SIZE (1 << 20) - -#define err(str...)
printf("ERROR: " str) - -static const struct bpf_insn code_sample[] = { - /* We need a few instructions to pass the min log length */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), -}; - -static inline __u64 ptr_to_u64(const void *ptr) -{ - return (__u64) (unsigned long) ptr; -} - -static int load(char *log, size_t log_len, int log_level) -{ - union bpf_attr attr; - - bzero(&attr, sizeof(attr)); - attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; - attr.insn_cnt = (__u32)(sizeof(code_sample) / sizeof(struct bpf_insn)); - attr.insns = ptr_to_u64(code_sample); - attr.license = ptr_to_u64("GPL"); - attr.log_buf = ptr_to_u64(log); - attr.log_size = log_len; - attr.log_level = log_level; - - return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr)); -} - -static void check_ret(int ret, int exp_errno) -{ - if (ret > 0) { - close(ret); - err("broken sample loaded successfully!?\n"); - exit(1); - } - - if (!ret || errno != exp_errno) { - err("Program load returned: ret:%d/errno:%d, expected ret:%d/errno:%d\n", - ret, errno, -1, exp_errno); - exit(1); - } -} - -static void check_ones(const char *buf, size_t len, const char *msg) -{ - while (len--) - if (buf[len] != 1) { - err("%s", msg); - exit(1); - } -} - -static void test_log_good(char *log, size_t buf_len, size_t log_len, - size_t exp_len, int exp_errno, const char *full_log) -{ - size_t len; - int ret; - - memset(log, 1, buf_len); - - ret = load(log, log_len, 1); - check_ret(ret, exp_errno); - - len = strnlen(log, buf_len); - if (len == buf_len) { - err("verifier did not NULL terminate the log\n"); - exit(1); - } - if (exp_len && len != exp_len) { - err("incorrect log length expected:%zd have:%zd\n", - exp_len, len); - exit(1); - } - - if (strchr(log, 1)) { - err("verifier leaked a byte through\n"); - exit(1); - } - - check_ones(log + len + 1, buf_len - len - 1, - "verifier wrote bytes past NULL termination\n"); - - if (memcmp(full_log, log, LOG_SIZE)) { - err("log did not match expected output\n"); - exit(1); - } -} - -static void test_log_bad(char *log, size_t log_len, int log_level) -{ - int ret; - - ret = load(log, log_len, log_level); - check_ret(ret, EINVAL); - if (log) - check_ones(log, LOG_SIZE, - "verifier touched log with bad parameters\n"); -} - -int main(int argc, char **argv) -{ - char full_log[LOG_SIZE]; - char log[LOG_SIZE]; - size_t want_len; - int i; - - memset(log, 1, LOG_SIZE); - - /* Use libbpf 1.0 API mode */ - libbpf_set_strict_mode(LIBBPF_STRICT_ALL); - - /* Test incorrect attr */ - printf("Test log_level 0...\n"); - test_log_bad(log, LOG_SIZE, 0); - - printf("Test log_size < 128...\n"); - test_log_bad(log, 15, 1); - - printf("Test log_buff = NULL...\n"); - test_log_bad(NULL, LOG_SIZE, 1); - - /* Test with log big enough */ - printf("Test oversized buffer...\n"); - test_log_good(full_log, LOG_SIZE, LOG_SIZE, 0, EACCES, full_log); - - want_len = strlen(full_log); - - printf("Test exact buffer...\n"); - test_log_good(log, LOG_SIZE, want_len + 2, want_len, EACCES, full_log); - - printf("Test 
undersized buffers...\n"); - for (i = 0; i < 64; i++) { - full_log[want_len - i + 1] = 1; - full_log[want_len - i] = 0; - - test_log_good(log, LOG_SIZE, want_len + 1 - i, want_len - i, - ENOSPC, full_log); - } - - printf("test_verifier_log: OK\n"); - return 0; -} -- cgit v1.2.3-70-g09d2 From 4099be372faf7b3616634dfe6994b81b1edf1906 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 11 Apr 2023 20:46:47 -0700 Subject: selftests/bpf: Fix compiler warnings in bpf_testmod for kfuncs Add -Wmissing-prototypes ignore in bpf_testmod.c, similarly to what we do in kernel code proper. Reported-by: kernel test robot Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/oe-kbuild-all/202304080951.l14IDv3n-lkp@intel.com/ Link: https://lore.kernel.org/bpf/20230412034647.3968143-1-andrii@kernel.org --- tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index c5ad39bbe9af..fe847ebfb731 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -28,6 +28,10 @@ struct bpf_testmod_struct_arg_2 { long b; }; +__diag_push(); +__diag_ignore_all("-Wmissing-prototypes", + "Global functions as their definitions will be in bpf_testmod.ko BTF"); + noinline int bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) { bpf_testmod_test_struct_arg_result = a.a + a.b + b + c; @@ -175,6 +179,8 @@ noinline int bpf_testmod_fentry_test3(char a, int b, u64 c) return a + b + c; } +__diag_pop(); + int bpf_testmod_fentry_ok; noinline ssize_t -- cgit v1.2.3-70-g09d2 From c04135ab351badb1daf5868328581075ead691e0 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 13 Apr 2023 17:22:48 -0700 Subject: selftests/bpf: Fix merge conflict due to SYS() macro change. Fix merge conflict between bpf/bpf-next trees due to change of arguments in SYS() macro. Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c index c94eb63b7b77..498d3bdaa4b0 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c @@ -175,8 +175,8 @@ void test_xdp_do_redirect(void) goto out; /* Enable GRO */ - SYS("ethtool -K veth_src gro on"); - SYS("ethtool -K veth_dst gro on"); + SYS(out, "ethtool -K veth_src gro on"); + SYS(out, "ethtool -K veth_dst gro on"); err = bpf_xdp_query(ifindex_src, XDP_FLAGS_DRV_MODE, &query_opts); if (!ASSERT_OK(err, "veth_src bpf_xdp_query gro on")) -- cgit v1.2.3-70-g09d2 From 75860b52019cf93662c31596254599bf5f8df900 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 13 Apr 2023 19:54:17 -0700 Subject: selftests/bpf: Workaround for older vm_sockets.h. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some distros ship with older vm_sockets.h that doesn't have VMADDR_CID_LOCAL which causes selftests build to fail: /tmp/work/bpf/bpf/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c:261:18: error: ‘VMADDR_CID_LOCAL’ undeclared (first use in this function); did you mean ‘VMADDR_CID_HOST’? 
261 | addr->svm_cid = VMADDR_CID_LOCAL; | ^~~~~~~~~~~~~~~~ | VMADDR_CID_HOST Workaround this issue by defining it on demand. Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/sockmap_listen.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 8f09e1ea3ba7..141c1e5944ee 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -20,6 +20,11 @@ #include #include +/* workaround for older vm_sockets.h */ +#ifndef VMADDR_CID_LOCAL +#define VMADDR_CID_LOCAL 1 +#endif + #include #include -- cgit v1.2.3-70-g09d2 From 7c50b1cb76aca4540aa917db5f2a302acddcadff Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Sat, 15 Apr 2023 13:18:06 -0700 Subject: bpf: Add bpf_refcount_acquire kfunc Currently, BPF programs can interact with the lifetime of refcounted local kptrs in the following ways: bpf_obj_new - Initialize refcount to 1 as part of new object creation bpf_obj_drop - Decrement refcount and free object if it's 0 collection add - Pass ownership to the collection. No change to refcount but collection is responsible for bpf_obj_dropping it In order to be able to add a refcounted local kptr to multiple collections we need to be able to increment the refcount and acquire a new owning reference. This patch adds a kfunc, bpf_refcount_acquire, implementing such an operation. bpf_refcount_acquire takes a refcounted local kptr and returns a new owning reference to the same underlying memory as the input. The input can be either owning or non-owning. To reinforce why this is safe, consider the following code snippets: struct node *n = bpf_obj_new(typeof(*n)); // A struct node *m = bpf_refcount_acquire(n); // B In the above snippet, n will be alive with refcount=1 after (A), and since nothing changes that state before (B), it's obviously safe. If n is instead added to some rbtree, we can still safely refcount_acquire it: struct node *n = bpf_obj_new(typeof(*n)); struct node *m; bpf_spin_lock(&glock); bpf_rbtree_add(&groot, &n->node, less); // A m = bpf_refcount_acquire(n); // B bpf_spin_unlock(&glock); In the above snippet, after (A) n is a non-owning reference, and after (B) m is an owning reference pointing to the same memory as n. Although n has no ownership of that memory's lifetime, it's guaranteed to be alive until the end of the critical section, and n would be clobbered if we were past the end of the critical section, so it's safe to bump refcount. Implementation details: * From verifier's perspective, bpf_refcount_acquire handling is similar to bpf_obj_new and bpf_obj_drop. Like the former, it returns a new owning reference matching input type, although like the latter, type can be inferred from concrete kptr input. Verifier changes in {check,fixup}_kfunc_call and check_kfunc_args are largely copied from aforementioned functions' verifier changes. * An exception to the above is the new KF_ARG_PTR_TO_REFCOUNTED_KPTR arg, indicated by new "__refcounted_kptr" kfunc arg suffix. This is necessary in order to handle both owning and non-owning input without adding special-casing to "__alloc" arg handling. Also a convenient place to confirm that input type has bpf_refcount field. * The implemented kfunc is actually bpf_refcount_acquire_impl, with 'hidden' second arg that the verifier sets to the type's struct_meta in fixup_kfunc_call. 
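For reference, a local kptr type that can be passed to bpf_refcount_acquire() needs a struct bpf_refcount member next to its collection node(s); that field is what the verifier's new KF_ARG_PTR_TO_REFCOUNTED_KPTR check looks for. A minimal sketch of such a type (field names are illustrative, not taken from the patch):

	struct node_data {
		long key;
		struct bpf_rb_node node;	/* linked into a bpf_rb_root */
		struct bpf_refcount ref;	/* required by bpf_refcount_acquire() */
	};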
Signed-off-by: Dave Marchevsky Link: https://lore.kernel.org/r/20230415201811.343116-5-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/helpers.c | 15 ++++++ kernel/bpf/verifier.c | 74 ++++++++++++++++++++++---- tools/testing/selftests/bpf/bpf_experimental.h | 13 +++++ 3 files changed, 91 insertions(+), 11 deletions(-) (limited to 'tools/testing') diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index e2dbd9644e5c..57ff8a60222c 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1917,6 +1917,20 @@ __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) __bpf_obj_drop_impl(p, meta ? meta->record : NULL); } +__bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign) +{ + struct btf_struct_meta *meta = meta__ign; + struct bpf_refcount *ref; + + /* Could just cast directly to refcount_t *, but need some code using + * bpf_refcount type so that it is emitted in vmlinux BTF + */ + ref = (struct bpf_refcount *)p__refcounted_kptr + meta->record->refcount_off; + + refcount_inc((refcount_t *)ref); + return (void *)p__refcounted_kptr; +} + static void __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *head, bool tail) { struct list_head *n = (void *)node, *h = (void *)head; @@ -2276,6 +2290,7 @@ BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE) #endif BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE) BTF_ID_FLAGS(func, bpf_list_push_front) BTF_ID_FLAGS(func, bpf_list_push_back) BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 4aa6d715e655..29e106f7ccaa 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -273,6 +273,11 @@ struct bpf_call_arg_meta { struct btf_field *kptr_field; }; +struct btf_and_id { + struct btf *btf; + u32 btf_id; +}; + struct bpf_kfunc_call_arg_meta { /* In parameters */ struct btf *btf; @@ -291,10 +296,10 @@ struct bpf_kfunc_call_arg_meta { u64 value; bool found; } arg_constant; - struct { - struct btf *btf; - u32 btf_id; - } arg_obj_drop; + union { + struct btf_and_id arg_obj_drop; + struct btf_and_id arg_refcount_acquire; + }; struct { struct btf_field *field; } arg_list_head; @@ -9403,6 +9408,11 @@ static bool is_kfunc_arg_uninit(const struct btf *btf, const struct btf_param *a return __kfunc_param_match_suffix(btf, arg, "__uninit"); } +static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf_param *arg) +{ + return __kfunc_param_match_suffix(btf, arg, "__refcounted_kptr"); +} + static bool is_kfunc_arg_scalar_with_name(const struct btf *btf, const struct btf_param *arg, const char *name) @@ -9542,15 +9552,16 @@ static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = { enum kfunc_ptr_arg_type { KF_ARG_PTR_TO_CTX, - KF_ARG_PTR_TO_ALLOC_BTF_ID, /* Allocated object */ - KF_ARG_PTR_TO_KPTR, /* PTR_TO_KPTR but type specific */ + KF_ARG_PTR_TO_ALLOC_BTF_ID, /* Allocated object */ + KF_ARG_PTR_TO_REFCOUNTED_KPTR, /* Refcounted local kptr */ + KF_ARG_PTR_TO_KPTR, /* PTR_TO_KPTR but type specific */ KF_ARG_PTR_TO_DYNPTR, KF_ARG_PTR_TO_ITER, KF_ARG_PTR_TO_LIST_HEAD, KF_ARG_PTR_TO_LIST_NODE, - KF_ARG_PTR_TO_BTF_ID, /* Also covers reg2btf_ids conversions */ + KF_ARG_PTR_TO_BTF_ID, /* Also covers reg2btf_ids conversions */ KF_ARG_PTR_TO_MEM, - KF_ARG_PTR_TO_MEM_SIZE, /* Size derived from next argument, skip it */ + KF_ARG_PTR_TO_MEM_SIZE, /* Size derived from next 
argument, skip it */ KF_ARG_PTR_TO_CALLBACK, KF_ARG_PTR_TO_RB_ROOT, KF_ARG_PTR_TO_RB_NODE, @@ -9559,6 +9570,7 @@ enum kfunc_ptr_arg_type { enum special_kfunc_type { KF_bpf_obj_new_impl, KF_bpf_obj_drop_impl, + KF_bpf_refcount_acquire_impl, KF_bpf_list_push_front, KF_bpf_list_push_back, KF_bpf_list_pop_front, @@ -9579,6 +9591,7 @@ enum special_kfunc_type { BTF_SET_START(special_kfunc_set) BTF_ID(func, bpf_obj_new_impl) BTF_ID(func, bpf_obj_drop_impl) +BTF_ID(func, bpf_refcount_acquire_impl) BTF_ID(func, bpf_list_push_front) BTF_ID(func, bpf_list_push_back) BTF_ID(func, bpf_list_pop_front) @@ -9597,6 +9610,7 @@ BTF_SET_END(special_kfunc_set) BTF_ID_LIST(special_kfunc_list) BTF_ID(func, bpf_obj_new_impl) BTF_ID(func, bpf_obj_drop_impl) +BTF_ID(func, bpf_refcount_acquire_impl) BTF_ID(func, bpf_list_push_front) BTF_ID(func, bpf_list_push_back) BTF_ID(func, bpf_list_pop_front) @@ -9649,6 +9663,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno])) return KF_ARG_PTR_TO_ALLOC_BTF_ID; + if (is_kfunc_arg_refcounted_kptr(meta->btf, &args[argno])) + return KF_ARG_PTR_TO_REFCOUNTED_KPTR; + if (is_kfunc_arg_kptr_get(meta, argno)) { if (!btf_type_is_ptr(ref_t)) { verbose(env, "arg#0 BTF type must be a double pointer for kptr_get kfunc\n"); @@ -9952,7 +9969,8 @@ static bool is_bpf_rbtree_api_kfunc(u32 btf_id) static bool is_bpf_graph_api_kfunc(u32 btf_id) { - return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id); + return is_bpf_list_api_kfunc(btf_id) || is_bpf_rbtree_api_kfunc(btf_id) || + btf_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]; } static bool is_callback_calling_kfunc(u32 btf_id) @@ -10171,6 +10189,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ const char *func_name = meta->func_name, *ref_tname; const struct btf *btf = meta->btf; const struct btf_param *args; + struct btf_record *rec; u32 i, nargs; int ret; @@ -10306,6 +10325,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ case KF_ARG_PTR_TO_MEM: case KF_ARG_PTR_TO_MEM_SIZE: case KF_ARG_PTR_TO_CALLBACK: + case KF_ARG_PTR_TO_REFCOUNTED_KPTR: /* Trusted by default */ break; default: @@ -10523,6 +10543,26 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ case KF_ARG_PTR_TO_CALLBACK: meta->subprogno = reg->subprogno; break; + case KF_ARG_PTR_TO_REFCOUNTED_KPTR: + if (!type_is_ptr_alloc_obj(reg->type) && !type_is_non_owning_ref(reg->type)) { + verbose(env, "arg#%d is neither owning or non-owning ref\n", i); + return -EINVAL; + } + + rec = reg_btf_record(reg); + if (!rec) { + verbose(env, "verifier internal error: Couldn't find btf_record\n"); + return -EFAULT; + } + + if (rec->refcount_off < 0) { + verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i); + return -EINVAL; + } + + meta->arg_refcount_acquire.btf = reg->btf; + meta->arg_refcount_acquire.btf_id = reg->btf_id; + break; } } @@ -10699,7 +10739,9 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, if (is_kfunc_acquire(&meta) && !btf_type_is_struct_ptr(meta.btf, t)) { /* Only exception is bpf_obj_new_impl */ - if (meta.btf != btf_vmlinux || meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl]) { + if (meta.btf != btf_vmlinux || + (meta.func_id != special_kfunc_list[KF_bpf_obj_new_impl] && + meta.func_id != special_kfunc_list[KF_bpf_refcount_acquire_impl])) { verbose(env, "acquire kernel function does not return PTR_TO_BTF_ID\n"); return -EINVAL; 
} @@ -10747,6 +10789,15 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, insn_aux->obj_new_size = ret_t->size; insn_aux->kptr_struct_meta = btf_find_struct_meta(ret_btf, ret_btf_id); + } else if (meta.func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_BTF_ID | MEM_ALLOC; + regs[BPF_REG_0].btf = meta.arg_refcount_acquire.btf; + regs[BPF_REG_0].btf_id = meta.arg_refcount_acquire.btf_id; + + insn_aux->kptr_struct_meta = + btf_find_struct_meta(meta.arg_refcount_acquire.btf, + meta.arg_refcount_acquire.btf_id); } else if (meta.func_id == special_kfunc_list[KF_bpf_list_pop_front] || meta.func_id == special_kfunc_list[KF_bpf_list_pop_back]) { struct btf_field *field = meta.arg_list_head.field; @@ -17393,7 +17444,8 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, insn_buf[2] = addr[1]; insn_buf[3] = *insn; *cnt = 4; - } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl]) { + } else if (desc->func_id == special_kfunc_list[KF_bpf_obj_drop_impl] || + desc->func_id == special_kfunc_list[KF_bpf_refcount_acquire_impl]) { struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; struct bpf_insn addr[2] = { BPF_LD_IMM64(BPF_REG_2, (long)kptr_struct_meta) }; diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index dbd2c729781a..619afcab2ab0 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -37,6 +37,19 @@ extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym; /* Convenience macro to wrap over bpf_obj_drop_impl */ #define bpf_obj_drop(kptr) bpf_obj_drop_impl(kptr, NULL) +/* Description + * Increment the refcount on a refcounted local kptr, turning the + * non-owning reference input into an owning reference in the process. + * + * The 'meta' parameter is a hidden argument that is ignored. + * Returns + * An owning reference to the object pointed to by 'kptr' + */ +extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym; + +/* Convenience macro to wrap over bpf_refcount_acquire_impl */ +#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL) + /* Description * Add a new entry to the beginning of the BPF linked list. * Returns -- cgit v1.2.3-70-g09d2 From d2dcc67df910dd85253a701b6a5b747f955d28f5 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Sat, 15 Apr 2023 13:18:07 -0700 Subject: bpf: Migrate bpf_rbtree_add and bpf_list_push_{front,back} to possibly fail Consider this code snippet: struct node { long key; bpf_list_node l; bpf_rb_node r; bpf_refcount ref; } int some_bpf_prog(void *ctx) { struct node *n = bpf_obj_new(/*...*/), *m; bpf_spin_lock(&glock); bpf_rbtree_add(&some_tree, &n->r, /* ... */); m = bpf_refcount_acquire(n); bpf_rbtree_add(&other_tree, &m->r, /* ... */); bpf_spin_unlock(&glock); /* ... */ } After bpf_refcount_acquire, n and m point to the same underlying memory, and that node's bpf_rb_node field is being used by the some_tree insert, so overwriting it as a result of the second insert is an error. In order to properly support refcounted nodes, the rbtree and list insert functions must be allowed to fail. This patch adds such support. The kfuncs bpf_rbtree_add, bpf_list_push_{front,back} are modified to return an int indicating success/failure, with 0 -> success, nonzero -> failure. 
bpf_obj_drop on failure ======================= Currently the only reason an insert can fail is the example above: the bpf_{list,rb}_node is already in use. When such a failure occurs, the insert kfuncs will bpf_obj_drop the input node. This allows the insert operations to logically fail without changing their verifier owning ref behavior, namely the unconditional release_reference of the input owning ref. With insert that always succeeds, ownership of the node is always passed to the collection, since the node always ends up in the collection. With a possibly-failed insert w/ bpf_obj_drop, ownership of the node is always passed either to the collection (success), or to bpf_obj_drop (failure). Regardless, it's correct to continue unconditionally releasing the input owning ref, as something is always taking ownership from the calling program on insert. Keeping owning ref behavior unchanged results in a nice default UX for insert functions that can fail. If the program's reaction to a failed insert is "fine, just get rid of this owning ref for me and let me go on with my business", then there's no reason to check for failure since that's default behavior. e.g.: long important_failures = 0; int some_bpf_prog(void *ctx) { struct node *n, *m, *o; /* all bpf_obj_new'd */ bpf_spin_lock(&glock); bpf_rbtree_add(&some_tree, &n->node, /* ... */); bpf_rbtree_add(&some_tree, &m->node, /* ... */); if (bpf_rbtree_add(&some_tree, &o->node, /* ... */)) { important_failures++; } bpf_spin_unlock(&glock); } If we instead chose to pass ownership back to the program on failed insert - by returning NULL on success or an owning ref on failure - programs would always have to do something with the returned ref on failure. The most likely action is probably "I'll just get rid of this owning ref and go about my business", which ideally would look like: if (n = bpf_rbtree_add(&some_tree, &n->node, /* ... */)) bpf_obj_drop(n); But bpf_obj_drop isn't allowed in a critical section and inserts must occur within one, so in reality error handling would become a hard-to-parse mess. For refcounted nodes, we can replicate the "pass ownership back to program on failure" logic with this patch's semantics, albeit in an ugly way: struct node *n = bpf_obj_new(/* ... */), *m; bpf_spin_lock(&glock); m = bpf_refcount_acquire(n); if (bpf_rbtree_add(&some_tree, &n->node, /* ... */)) { /* Do something with m */ } bpf_spin_unlock(&glock); bpf_obj_drop(m); bpf_refcount_acquire is used to simulate "return owning ref on failure". This should be an uncommon occurrence, though. Addition of two verifier-fixup'd args to collection inserts =========================================================== The actual bpf_obj_drop kfunc is bpf_obj_drop_impl(void *, struct btf_struct_meta *), with bpf_obj_drop macro populating the second arg with 0 and the verifier later filling in the arg during insn fixup. Because bpf_rbtree_add and bpf_list_push_{front,back} now might do bpf_obj_drop, these kfuncs need a btf_struct_meta parameter that can be passed to bpf_obj_drop_impl. Similarly, because the 'node' param to those insert functions is the bpf_{list,rb}_node within the node type, and bpf_obj_drop expects a pointer to the beginning of the node, the insert functions need to be able to find the beginning of the node struct. A second verifier-populated param is necessary: the offset of {list,rb}_node within the node type. 
These two new params allow the insert kfuncs to correctly call __bpf_obj_drop_impl:

  beginning_of_node = bpf_rb_node_ptr - offset
  if (already_inserted)
          __bpf_obj_drop_impl(beginning_of_node, btf_struct_meta->record);

Similarly to other kfuncs with "hidden" verifier-populated params, the insert functions are renamed with an _impl suffix and a macro is provided for common usage. For example, the bpf_rbtree_add kfunc is now bpf_rbtree_add_impl and bpf_rbtree_add is now a macro which sets the "hidden" args to 0. Due to the two new args, BPF progs will need to be recompiled to work with the new _impl kfuncs.

This patch also rewrites the "hidden argument" explanation to more directly say why the BPF program writer doesn't need to populate the arguments with anything meaningful.

How does this new logic affect non-owning references?
=====================================================

Currently, non-owning refs are valid until the end of the critical section in which they're created. We can make this guarantee because, if a non-owning ref exists, the referent was added to some collection. The collection will drop() its nodes when it goes away, but it can't go away while our program is accessing it, so that's not a problem. If the referent is removed from the collection in the same CS that it was added in, it can't be bpf_obj_drop'd until after CS end. Those are the only two ways to free the referent's memory and neither can happen until after the non-owning ref's lifetime ends.

On first glance, having these collection insert functions potentially bpf_obj_drop their input seems like it breaks the "can't be bpf_obj_drop'd until after CS end" line of reasoning. But we care about the memory not being _freed_ until the end of the CS, and a previous patch in the series modified bpf_obj_drop such that it doesn't free refcounted nodes until refcount == 0. So the statement can be more accurately rewritten as "can't be free'd until after CS end".

We can prove that this rewritten statement holds for any non-owning reference produced by collection insert functions:

* If the input to the insert function is _not_ refcounted
  * We have an owning reference to the input, and can conclude it isn't in any collection
  * Inserting a node in a collection turns owning refs into non-owning, and since our input type isn't refcounted, there's no way to obtain additional owning refs to the same underlying memory
  * Because our node isn't in any collection, the insert operation cannot fail, so bpf_obj_drop will not execute
  * If bpf_obj_drop is guaranteed not to execute, there's no risk of memory being free'd

* Otherwise, the input to the insert function is refcounted
  * If the insert operation fails due to the node's bpf_list_node or bpf_rb_node already being in some collection, there was some previous successful insert which passed refcount to the collection
  * We have an owning reference to the input, it must have been acquired via bpf_refcount_acquire, which bumped the refcount
  * refcount must be >= 2 since there's a valid owning reference and the node is already in a collection
  * The insert triggering bpf_obj_drop will decrement the refcount to >= 1, never resulting in a free

So although we may do bpf_obj_drop during the critical section, this will never result in memory being free'd, and no changes to non-owning ref logic are needed in this patch.
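For illustration, here is a minimal sketch of the refcounted case above (identifiers such as glock, ghead and struct node are invented for this example and assume the bpf_experimental.h wrappers from this series). The failed second insert internally drops the refcount from 2 to 1 instead of freeing the node, so the earlier non-owning ref stays safe to read until the critical section ends:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

struct node {
	long key;
	struct bpf_list_node l;
	struct bpf_refcount ref;
};

#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_list_head ghead __contains(node, l);

SEC("tc")
long refcounted_insert_failure(void *ctx)
{
	struct node *n, *m;
	long key = 0;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;

	m = bpf_refcount_acquire(n);	/* refcount is now 2 */
	m->key = 42;			/* n and m alias the same memory */

	bpf_spin_lock(&glock);
	if (bpf_list_push_back(&ghead, &n->l)) {
		/* Cannot happen for a fresh node; n was dropped by the
		 * kfunc, so only m would still need a bpf_obj_drop.
		 */
		bpf_spin_unlock(&glock);
		bpf_obj_drop(m);
		return 2;
	}
	/* n is now a non-owning ref; the list holds one refcount */
	if (bpf_list_push_back(&ghead, &m->l)) {
		/* The node's bpf_list_node is already linked, so this insert
		 * fails and bpf_obj_drop's m internally, taking the refcount
		 * from 2 to 1. The memory is not freed, so reading through
		 * the non-owning ref n is still fine inside this CS.
		 */
		key = n->key;
	}
	bpf_spin_unlock(&glock);
	return key;	/* 42 when the second insert failed as expected */
}

char _license[] SEC("license") = "GPL";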
Signed-off-by: Dave Marchevsky Link: https://lore.kernel.org/r/20230415201811.343116-6-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 7 ++- kernel/bpf/helpers.c | 65 +++++++++++++++------ kernel/bpf/verifier.c | 78 ++++++++++++++++++-------- tools/testing/selftests/bpf/bpf_experimental.h | 49 ++++++++++++---- 4 files changed, 148 insertions(+), 51 deletions(-) (limited to 'tools/testing') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index f03852b89d28..3dd29a53b711 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -464,7 +464,12 @@ struct bpf_insn_aux_data { */ struct bpf_loop_inline_state loop_inline_state; }; - u64 obj_new_size; /* remember the size of type passed to bpf_obj_new to rewrite R1 */ + union { + /* remember the size of type passed to bpf_obj_new to rewrite R1 */ + u64 obj_new_size; + /* remember the offset of node field within type to rewrite */ + u64 insert_off; + }; struct btf_struct_meta *kptr_struct_meta; u64 map_key_state; /* constant (32 bit) key tracking for maps */ int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 57ff8a60222c..5067f8d46872 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1931,7 +1931,8 @@ __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta return (void *)p__refcounted_kptr; } -static void __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *head, bool tail) +static int __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *head, + bool tail, struct btf_record *rec, u64 off) { struct list_head *n = (void *)node, *h = (void *)head; @@ -1939,17 +1940,35 @@ static void __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *hea INIT_LIST_HEAD(h); if (unlikely(!n->next)) INIT_LIST_HEAD(n); + if (!list_empty(n)) { + /* Only called from BPF prog, no need to migrate_disable */ + __bpf_obj_drop_impl(n - off, rec); + return -EINVAL; + } + tail ? list_add_tail(n, h) : list_add(n, h); + + return 0; } -__bpf_kfunc void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node) +__bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head, + struct bpf_list_node *node, + void *meta__ign, u64 off) { - return __bpf_list_add(node, head, false); + struct btf_struct_meta *meta = meta__ign; + + return __bpf_list_add(node, head, false, + meta ? meta->record : NULL, off); } -__bpf_kfunc void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node) +__bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head, + struct bpf_list_node *node, + void *meta__ign, u64 off) { - return __bpf_list_add(node, head, true); + struct btf_struct_meta *meta = meta__ign; + + return __bpf_list_add(node, head, true, + meta ? 
meta->record : NULL, off); } static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail) @@ -1989,14 +2008,23 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF * program */ -static void __bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node, - void *less) +static int __bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node, + void *less, struct btf_record *rec, u64 off) { struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node; + struct rb_node *parent = NULL, *n = (struct rb_node *)node; bpf_callback_t cb = (bpf_callback_t)less; - struct rb_node *parent = NULL; bool leftmost = true; + if (!n->__rb_parent_color) + RB_CLEAR_NODE(n); + + if (!RB_EMPTY_NODE(n)) { + /* Only called from BPF prog, no need to migrate_disable */ + __bpf_obj_drop_impl(n - off, rec); + return -EINVAL; + } + while (*link) { parent = *link; if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) { @@ -2007,15 +2035,18 @@ static void __bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node, } } - rb_link_node((struct rb_node *)node, parent, link); - rb_insert_color_cached((struct rb_node *)node, - (struct rb_root_cached *)root, leftmost); + rb_link_node(n, parent, link); + rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost); + return 0; } -__bpf_kfunc void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node, - bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b)) +__bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, + bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), + void *meta__ign, u64 off) { - __bpf_rbtree_add(root, node, (void *)less); + struct btf_struct_meta *meta = meta__ign; + + return __bpf_rbtree_add(root, node, (void *)less, meta ? 
meta->record : NULL, off); } __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) @@ -2291,14 +2322,14 @@ BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE) BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE) BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE) -BTF_ID_FLAGS(func, bpf_list_push_front) -BTF_ID_FLAGS(func, bpf_list_push_back) +BTF_ID_FLAGS(func, bpf_list_push_front_impl) +BTF_ID_FLAGS(func, bpf_list_push_back_impl) BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE) -BTF_ID_FLAGS(func, bpf_rbtree_add) +BTF_ID_FLAGS(func, bpf_rbtree_add_impl) BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL) #ifdef CONFIG_CGROUPS diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 29e106f7ccaa..736cb7cec0bd 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -8500,10 +8500,10 @@ static int set_rbtree_add_callback_state(struct bpf_verifier_env *env, struct bpf_func_state *callee, int insn_idx) { - /* void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node, + /* void bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, * bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b)); * - * 'struct bpf_rb_node *node' arg to bpf_rbtree_add is the same PTR_TO_BTF_ID w/ offset + * 'struct bpf_rb_node *node' arg to bpf_rbtree_add_impl is the same PTR_TO_BTF_ID w/ offset * that 'less' callback args will be receiving. However, 'node' arg was release_reference'd * by this point, so look at 'root' */ @@ -9571,8 +9571,8 @@ enum special_kfunc_type { KF_bpf_obj_new_impl, KF_bpf_obj_drop_impl, KF_bpf_refcount_acquire_impl, - KF_bpf_list_push_front, - KF_bpf_list_push_back, + KF_bpf_list_push_front_impl, + KF_bpf_list_push_back_impl, KF_bpf_list_pop_front, KF_bpf_list_pop_back, KF_bpf_cast_to_kern_ctx, @@ -9580,7 +9580,7 @@ enum special_kfunc_type { KF_bpf_rcu_read_lock, KF_bpf_rcu_read_unlock, KF_bpf_rbtree_remove, - KF_bpf_rbtree_add, + KF_bpf_rbtree_add_impl, KF_bpf_rbtree_first, KF_bpf_dynptr_from_skb, KF_bpf_dynptr_from_xdp, @@ -9592,14 +9592,14 @@ BTF_SET_START(special_kfunc_set) BTF_ID(func, bpf_obj_new_impl) BTF_ID(func, bpf_obj_drop_impl) BTF_ID(func, bpf_refcount_acquire_impl) -BTF_ID(func, bpf_list_push_front) -BTF_ID(func, bpf_list_push_back) +BTF_ID(func, bpf_list_push_front_impl) +BTF_ID(func, bpf_list_push_back_impl) BTF_ID(func, bpf_list_pop_front) BTF_ID(func, bpf_list_pop_back) BTF_ID(func, bpf_cast_to_kern_ctx) BTF_ID(func, bpf_rdonly_cast) BTF_ID(func, bpf_rbtree_remove) -BTF_ID(func, bpf_rbtree_add) +BTF_ID(func, bpf_rbtree_add_impl) BTF_ID(func, bpf_rbtree_first) BTF_ID(func, bpf_dynptr_from_skb) BTF_ID(func, bpf_dynptr_from_xdp) @@ -9611,8 +9611,8 @@ BTF_ID_LIST(special_kfunc_list) BTF_ID(func, bpf_obj_new_impl) BTF_ID(func, bpf_obj_drop_impl) BTF_ID(func, bpf_refcount_acquire_impl) -BTF_ID(func, bpf_list_push_front) -BTF_ID(func, bpf_list_push_back) +BTF_ID(func, bpf_list_push_front_impl) +BTF_ID(func, bpf_list_push_back_impl) BTF_ID(func, bpf_list_pop_front) BTF_ID(func, bpf_list_pop_back) BTF_ID(func, bpf_cast_to_kern_ctx) @@ -9620,7 +9620,7 @@ BTF_ID(func, bpf_rdonly_cast) BTF_ID(func, bpf_rcu_read_lock) BTF_ID(func, bpf_rcu_read_unlock) BTF_ID(func, bpf_rbtree_remove) -BTF_ID(func, 
bpf_rbtree_add) +BTF_ID(func, bpf_rbtree_add_impl) BTF_ID(func, bpf_rbtree_first) BTF_ID(func, bpf_dynptr_from_skb) BTF_ID(func, bpf_dynptr_from_xdp) @@ -9954,15 +9954,15 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_ static bool is_bpf_list_api_kfunc(u32 btf_id) { - return btf_id == special_kfunc_list[KF_bpf_list_push_front] || - btf_id == special_kfunc_list[KF_bpf_list_push_back] || + return btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] || + btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] || btf_id == special_kfunc_list[KF_bpf_list_pop_front] || btf_id == special_kfunc_list[KF_bpf_list_pop_back]; } static bool is_bpf_rbtree_api_kfunc(u32 btf_id) { - return btf_id == special_kfunc_list[KF_bpf_rbtree_add] || + return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl] || btf_id == special_kfunc_list[KF_bpf_rbtree_remove] || btf_id == special_kfunc_list[KF_bpf_rbtree_first]; } @@ -9975,7 +9975,7 @@ static bool is_bpf_graph_api_kfunc(u32 btf_id) static bool is_callback_calling_kfunc(u32 btf_id) { - return btf_id == special_kfunc_list[KF_bpf_rbtree_add]; + return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]; } static bool is_rbtree_lock_required_kfunc(u32 btf_id) @@ -10016,12 +10016,12 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env, switch (node_field_type) { case BPF_LIST_NODE: - ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front] || - kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back]); + ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] || + kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl]); break; case BPF_RB_NODE: ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] || - kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add]); + kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]); break; default: verbose(env, "verifier internal error: unexpected graph node argument type %s\n", @@ -10702,10 +10702,11 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, } } - if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front] || - meta.func_id == special_kfunc_list[KF_bpf_list_push_back] || - meta.func_id == special_kfunc_list[KF_bpf_rbtree_add]) { + if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || + meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || + meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { release_ref_obj_id = regs[BPF_REG_2].ref_obj_id; + insn_aux->insert_off = regs[BPF_REG_2].off; err = ref_convert_owning_non_owning(env, release_ref_obj_id); if (err) { verbose(env, "kfunc %s#%d conversion of owning ref to non-owning failed\n", @@ -10721,7 +10722,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, } } - if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add]) { + if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { err = __check_func_call(env, insn, insn_idx_p, meta.subprogno, set_rbtree_add_callback_state); if (err) { @@ -14764,7 +14765,7 @@ static bool regs_exact(const struct bpf_reg_state *rold, const struct bpf_reg_state *rcur, struct bpf_id_pair *idmap) { - return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && + return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && check_ids(rold->id, rcur->id, idmap) && check_ids(rold->ref_obj_id, rcur->ref_obj_id, idmap); } @@ -17407,6 +17408,23 @@ static void specialize_kfunc(struct bpf_verifier_env *env, } } +static 
void __fixup_collection_insert_kfunc(struct bpf_insn_aux_data *insn_aux, + u16 struct_meta_reg, + u16 node_offset_reg, + struct bpf_insn *insn, + struct bpf_insn *insn_buf, + int *cnt) +{ + struct btf_struct_meta *kptr_struct_meta = insn_aux->kptr_struct_meta; + struct bpf_insn addr[2] = { BPF_LD_IMM64(struct_meta_reg, (long)kptr_struct_meta) }; + + insn_buf[0] = addr[0]; + insn_buf[1] = addr[1]; + insn_buf[2] = BPF_MOV64_IMM(node_offset_reg, insn_aux->insert_off); + insn_buf[3] = *insn; + *cnt = 4; +} + static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_insn *insn_buf, int insn_idx, int *cnt) { @@ -17453,6 +17471,20 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, insn_buf[1] = addr[1]; insn_buf[2] = *insn; *cnt = 3; + } else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] || + desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] || + desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { + int struct_meta_reg = BPF_REG_3; + int node_offset_reg = BPF_REG_4; + + /* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */ + if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) { + struct_meta_reg = BPF_REG_4; + node_offset_reg = BPF_REG_5; + } + + __fixup_collection_insert_kfunc(&env->insn_aux_data[insn_idx], struct_meta_reg, + node_offset_reg, insn, insn_buf, cnt); } else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] || desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1); diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index 619afcab2ab0..209811b1993a 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -14,7 +14,8 @@ * type ID of a struct in program BTF. * * The 'local_type_id' parameter must be a known constant. - * The 'meta' parameter is a hidden argument that is ignored. + * The 'meta' parameter is rewritten by the verifier, no need for BPF + * program to set it. * Returns * A pointer to an object of the type corresponding to the passed in * 'local_type_id', or NULL on failure. @@ -28,7 +29,8 @@ extern void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym; * Free an allocated object. All fields of the object that require * destruction will be destructed before the storage is freed. * - * The 'meta' parameter is a hidden argument that is ignored. + * The 'meta' parameter is rewritten by the verifier, no need for BPF + * program to set it. * Returns * Void. */ @@ -41,7 +43,8 @@ extern void bpf_obj_drop_impl(void *kptr, void *meta) __ksym; * Increment the refcount on a refcounted local kptr, turning the * non-owning reference input into an owning reference in the process. * - * The 'meta' parameter is a hidden argument that is ignored. + * The 'meta' parameter is rewritten by the verifier, no need for BPF + * program to set it. * Returns * An owning reference to the object pointed to by 'kptr' */ @@ -52,17 +55,35 @@ extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym; /* Description * Add a new entry to the beginning of the BPF linked list. + * + * The 'meta' and 'off' parameters are rewritten by the verifier, no need + * for BPF programs to set them * Returns - * Void. 
+ * 0 if the node was successfully added + * -EINVAL if the node wasn't added because it's already in a list */ -extern void bpf_list_push_front(struct bpf_list_head *head, struct bpf_list_node *node) __ksym; +extern int bpf_list_push_front_impl(struct bpf_list_head *head, + struct bpf_list_node *node, + void *meta, __u64 off) __ksym; + +/* Convenience macro to wrap over bpf_list_push_front_impl */ +#define bpf_list_push_front(head, node) bpf_list_push_front_impl(head, node, NULL, 0) /* Description * Add a new entry to the end of the BPF linked list. + * + * The 'meta' and 'off' parameters are rewritten by the verifier, no need + * for BPF programs to set them * Returns - * Void. + * 0 if the node was successfully added + * -EINVAL if the node wasn't added because it's already in a list */ -extern void bpf_list_push_back(struct bpf_list_head *head, struct bpf_list_node *node) __ksym; +extern int bpf_list_push_back_impl(struct bpf_list_head *head, + struct bpf_list_node *node, + void *meta, __u64 off) __ksym; + +/* Convenience macro to wrap over bpf_list_push_back_impl */ +#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0) /* Description * Remove the entry at the beginning of the BPF linked list. @@ -88,11 +109,19 @@ extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, /* Description * Add 'node' to rbtree with root 'root' using comparator 'less' + * + * The 'meta' and 'off' parameters are rewritten by the verifier, no need + * for BPF programs to set them * Returns - * Nothing + * 0 if the node was successfully added + * -EINVAL if the node wasn't added because it's already in a tree */ -extern void bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node *node, - bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b)) __ksym; +extern int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, + bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), + void *meta, __u64 off) __ksym; + +/* Convenience macro to wrap over bpf_rbtree_add_impl */ +#define bpf_rbtree_add(head, node, less) bpf_rbtree_add_impl(head, node, less, NULL, 0) /* Description * Return the first (leftmost) node in input tree -- cgit v1.2.3-70-g09d2 From de67ba3968fa1455e8020b21e5ccc2bb48b9a852 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Sat, 15 Apr 2023 13:18:08 -0700 Subject: selftests/bpf: Modify linked_list tests to work with macro-ified inserts The linked_list tests use macros and function pointers to reduce code duplication. Earlier in the series, bpf_list_push_{front,back} were modified to be macros, expanding to invoke actual kfuncs bpf_list_push_{front,back}_impl. Due to this change, a code snippet like: void (*p)(void *, void *) = (void *)&bpf_list_##op; p(hexpr, nexpr); meant to do bpf_list_push_{front,back}(hexpr, nexpr), will no longer work as it's no longer valid to do &bpf_list_push_{front,back} since they're no longer functions. This patch fixes issues of this type, along with two other minor changes - one improvement and one fix - both related to the node argument to list_push_{front,back}. * The fix: migration of list_push tests away from (void *, void *) func ptr uncovered that some tests were incorrectly passing pointer to node, not pointer to struct bpf_list_node within the node. This patch fixes such issues (CHECK(..., f) -> CHECK(..., &f->node)) * The improvement: In linked_list tests, the struct foo type has two list_node fields: node and node2, at byte offsets 0 and 40 within the struct, respectively. 
Currently node is used in ~all tests involving struct foo and lists. The verifier needs to do some work to account for the offset of bpf_list_node within the node type, so using node2 instead of node exercises that logic more in the tests. This patch migrates linked_list tests to use node2 instead of node. Signed-off-by: Dave Marchevsky Link: https://lore.kernel.org/r/20230415201811.343116-7-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov --- .../testing/selftests/bpf/prog_tests/linked_list.c | 6 +- tools/testing/selftests/bpf/progs/linked_list.c | 34 ++++---- tools/testing/selftests/bpf/progs/linked_list.h | 4 +- .../testing/selftests/bpf/progs/linked_list_fail.c | 96 ++++++++++++---------- 4 files changed, 73 insertions(+), 67 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c index 0ed8132ce1c3..872e4bd500fd 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_list.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c @@ -84,11 +84,11 @@ static struct { { "double_push_back", "arg#1 expected pointer to allocated object" }, { "no_node_value_type", "bpf_list_node not found at offset=0" }, { "incorrect_value_type", - "operation on bpf_list_head expects arg#1 bpf_list_node at offset=0 in struct foo, " + "operation on bpf_list_head expects arg#1 bpf_list_node at offset=40 in struct foo, " "but arg is at offset=0 in struct bar" }, { "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" }, - { "incorrect_node_off1", "bpf_list_node not found at offset=1" }, - { "incorrect_node_off2", "arg#1 offset=40, but expected bpf_list_node at offset=0 in struct foo" }, + { "incorrect_node_off1", "bpf_list_node not found at offset=41" }, + { "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=40 in struct foo" }, { "no_head_type", "bpf_list_head not found at offset=0" }, { "incorrect_head_var_off1", "R1 doesn't have constant offset" }, { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" }, diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c index 53ded51a3abb..57440a554304 100644 --- a/tools/testing/selftests/bpf/progs/linked_list.c +++ b/tools/testing/selftests/bpf/progs/linked_list.c @@ -25,7 +25,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l n = bpf_list_pop_front(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); bpf_obj_drop(f); return 3; } @@ -34,7 +34,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l n = bpf_list_pop_back(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); bpf_obj_drop(f); return 4; } @@ -42,7 +42,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l bpf_spin_lock(lock); f->data = 42; - bpf_list_push_front(head, &f->node); + bpf_list_push_front(head, &f->node2); bpf_spin_unlock(lock); if (leave_in_map) return 0; @@ -51,7 +51,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l bpf_spin_unlock(lock); if (!n) return 5; - f = container_of(n, struct foo, node); + f = container_of(n, struct foo, node2); if (f->data != 42) { bpf_obj_drop(f); return 6; @@ -59,14 +59,14 @@ int list_push_pop(struct 
bpf_spin_lock *lock, struct bpf_list_head *head, bool l bpf_spin_lock(lock); f->data = 13; - bpf_list_push_front(head, &f->node); + bpf_list_push_front(head, &f->node2); bpf_spin_unlock(lock); bpf_spin_lock(lock); n = bpf_list_pop_front(head); bpf_spin_unlock(lock); if (!n) return 7; - f = container_of(n, struct foo, node); + f = container_of(n, struct foo, node2); if (f->data != 13) { bpf_obj_drop(f); return 8; @@ -77,7 +77,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l n = bpf_list_pop_front(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); return 9; } @@ -85,7 +85,7 @@ int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool l n = bpf_list_pop_back(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); return 10; } return 0; @@ -119,8 +119,8 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea f[i + 1]->data = i + 1; bpf_spin_lock(lock); - bpf_list_push_front(head, &f[i]->node); - bpf_list_push_front(head, &f[i + 1]->node); + bpf_list_push_front(head, &f[i]->node2); + bpf_list_push_front(head, &f[i + 1]->node2); bpf_spin_unlock(lock); } @@ -130,13 +130,13 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea bpf_spin_unlock(lock); if (!n) return 3; - pf = container_of(n, struct foo, node); + pf = container_of(n, struct foo, node2); if (pf->data != (ARRAY_SIZE(f) - i - 1)) { bpf_obj_drop(pf); return 4; } bpf_spin_lock(lock); - bpf_list_push_back(head, &pf->node); + bpf_list_push_back(head, &pf->node2); bpf_spin_unlock(lock); } @@ -149,7 +149,7 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea bpf_spin_unlock(lock); if (!n) return 5; - pf = container_of(n, struct foo, node); + pf = container_of(n, struct foo, node2); if (pf->data != i) { bpf_obj_drop(pf); return 6; @@ -160,7 +160,7 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea n = bpf_list_pop_back(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); return 7; } @@ -168,7 +168,7 @@ int list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *hea n = bpf_list_pop_front(head); bpf_spin_unlock(lock); if (n) { - bpf_obj_drop(container_of(n, struct foo, node)); + bpf_obj_drop(container_of(n, struct foo, node2)); return 8; } return 0; @@ -199,7 +199,7 @@ int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool le bpf_spin_lock(lock); f->data = 42; - bpf_list_push_front(head, &f->node); + bpf_list_push_front(head, &f->node2); bpf_spin_unlock(lock); if (leave_in_map) @@ -210,7 +210,7 @@ int list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool le bpf_spin_unlock(lock); if (!n) return 4; - f = container_of(n, struct foo, node); + f = container_of(n, struct foo, node2); if (f->data != 42) { bpf_obj_drop(f); return 5; diff --git a/tools/testing/selftests/bpf/progs/linked_list.h b/tools/testing/selftests/bpf/progs/linked_list.h index 3fb2412552fc..c0f3609a7ffa 100644 --- a/tools/testing/selftests/bpf/progs/linked_list.h +++ b/tools/testing/selftests/bpf/progs/linked_list.h @@ -22,7 +22,7 @@ struct foo { struct map_value { struct bpf_spin_lock lock; int data; - struct bpf_list_head head __contains(foo, node); + struct bpf_list_head head 
__contains(foo, node2); }; struct array_map { @@ -50,7 +50,7 @@ struct { #define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8))) private(A) struct bpf_spin_lock glock; -private(A) struct bpf_list_head ghead __contains(foo, node); +private(A) struct bpf_list_head ghead __contains(foo, node2); private(B) struct bpf_spin_lock glock2; #endif diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c index 41978b46f58e..f4c63daba229 100644 --- a/tools/testing/selftests/bpf/progs/linked_list_fail.c +++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c @@ -73,22 +73,21 @@ CHECK(inner_map, pop_back, &iv->head); int test##_missing_lock_##op(void *ctx) \ { \ INIT; \ - void (*p)(void *, void *) = (void *)&bpf_list_##op; \ - p(hexpr, nexpr); \ + bpf_list_##op(hexpr, nexpr); \ return 0; \ } -CHECK(kptr, push_front, &f->head, b); -CHECK(kptr, push_back, &f->head, b); +CHECK(kptr, push_front, &f->head, &b->node); +CHECK(kptr, push_back, &f->head, &b->node); -CHECK(global, push_front, &ghead, f); -CHECK(global, push_back, &ghead, f); +CHECK(global, push_front, &ghead, &f->node2); +CHECK(global, push_back, &ghead, &f->node2); -CHECK(map, push_front, &v->head, f); -CHECK(map, push_back, &v->head, f); +CHECK(map, push_front, &v->head, &f->node2); +CHECK(map, push_back, &v->head, &f->node2); -CHECK(inner_map, push_front, &iv->head, f); -CHECK(inner_map, push_back, &iv->head, f); +CHECK(inner_map, push_front, &iv->head, &f->node2); +CHECK(inner_map, push_back, &iv->head, &f->node2); #undef CHECK @@ -135,32 +134,31 @@ CHECK_OP(pop_back); int test##_incorrect_lock_##op(void *ctx) \ { \ INIT; \ - void (*p)(void *, void*) = (void *)&bpf_list_##op; \ bpf_spin_lock(lexpr); \ - p(hexpr, nexpr); \ + bpf_list_##op(hexpr, nexpr); \ return 0; \ } #define CHECK_OP(op) \ - CHECK(kptr_kptr, op, &f1->lock, &f2->head, b); \ - CHECK(kptr_global, op, &f1->lock, &ghead, f); \ - CHECK(kptr_map, op, &f1->lock, &v->head, f); \ - CHECK(kptr_inner_map, op, &f1->lock, &iv->head, f); \ + CHECK(kptr_kptr, op, &f1->lock, &f2->head, &b->node); \ + CHECK(kptr_global, op, &f1->lock, &ghead, &f->node2); \ + CHECK(kptr_map, op, &f1->lock, &v->head, &f->node2); \ + CHECK(kptr_inner_map, op, &f1->lock, &iv->head, &f->node2); \ \ - CHECK(global_global, op, &glock2, &ghead, f); \ - CHECK(global_kptr, op, &glock, &f1->head, b); \ - CHECK(global_map, op, &glock, &v->head, f); \ - CHECK(global_inner_map, op, &glock, &iv->head, f); \ + CHECK(global_global, op, &glock2, &ghead, &f->node2); \ + CHECK(global_kptr, op, &glock, &f1->head, &b->node); \ + CHECK(global_map, op, &glock, &v->head, &f->node2); \ + CHECK(global_inner_map, op, &glock, &iv->head, &f->node2); \ \ - CHECK(map_map, op, &v->lock, &v2->head, f); \ - CHECK(map_kptr, op, &v->lock, &f2->head, b); \ - CHECK(map_global, op, &v->lock, &ghead, f); \ - CHECK(map_inner_map, op, &v->lock, &iv->head, f); \ + CHECK(map_map, op, &v->lock, &v2->head, &f->node2); \ + CHECK(map_kptr, op, &v->lock, &f2->head, &b->node); \ + CHECK(map_global, op, &v->lock, &ghead, &f->node2); \ + CHECK(map_inner_map, op, &v->lock, &iv->head, &f->node2); \ \ - CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head, f); \ - CHECK(inner_map_kptr, op, &iv->lock, &f2->head, b); \ - CHECK(inner_map_global, op, &iv->lock, &ghead, f); \ - CHECK(inner_map_map, op, &iv->lock, &v->head, f); + CHECK(inner_map_inner_map, op, &iv->lock, &iv2->head, &f->node2);\ + CHECK(inner_map_kptr, op, &iv->lock, &f2->head, &b->node); \ + CHECK(inner_map_global, 
op, &iv->lock, &ghead, &f->node2); \ + CHECK(inner_map_map, op, &iv->lock, &v->head, &f->node2); CHECK_OP(push_front); CHECK_OP(push_back); @@ -340,7 +338,7 @@ int direct_read_node(void *ctx) f = bpf_obj_new(typeof(*f)); if (!f) return 0; - return *(int *)&f->node; + return *(int *)&f->node2; } SEC("?tc") @@ -351,12 +349,12 @@ int direct_write_node(void *ctx) f = bpf_obj_new(typeof(*f)); if (!f) return 0; - *(int *)&f->node = 0; + *(int *)&f->node2 = 0; return 0; } static __always_inline -int use_after_unlock(void (*op)(void *head, void *node)) +int use_after_unlock(bool push_front) { struct foo *f; @@ -365,7 +363,10 @@ int use_after_unlock(void (*op)(void *head, void *node)) return 0; bpf_spin_lock(&glock); f->data = 42; - op(&ghead, &f->node); + if (push_front) + bpf_list_push_front(&ghead, &f->node2); + else + bpf_list_push_back(&ghead, &f->node2); bpf_spin_unlock(&glock); return f->data; @@ -374,17 +375,17 @@ int use_after_unlock(void (*op)(void *head, void *node)) SEC("?tc") int use_after_unlock_push_front(void *ctx) { - return use_after_unlock((void *)bpf_list_push_front); + return use_after_unlock(true); } SEC("?tc") int use_after_unlock_push_back(void *ctx) { - return use_after_unlock((void *)bpf_list_push_back); + return use_after_unlock(false); } static __always_inline -int list_double_add(void (*op)(void *head, void *node)) +int list_double_add(bool push_front) { struct foo *f; @@ -392,8 +393,13 @@ int list_double_add(void (*op)(void *head, void *node)) if (!f) return 0; bpf_spin_lock(&glock); - op(&ghead, &f->node); - op(&ghead, &f->node); + if (push_front) { + bpf_list_push_front(&ghead, &f->node2); + bpf_list_push_front(&ghead, &f->node2); + } else { + bpf_list_push_back(&ghead, &f->node2); + bpf_list_push_back(&ghead, &f->node2); + } bpf_spin_unlock(&glock); return 0; @@ -402,13 +408,13 @@ int list_double_add(void (*op)(void *head, void *node)) SEC("?tc") int double_push_front(void *ctx) { - return list_double_add((void *)bpf_list_push_front); + return list_double_add(true); } SEC("?tc") int double_push_back(void *ctx) { - return list_double_add((void *)bpf_list_push_back); + return list_double_add(false); } SEC("?tc") @@ -450,7 +456,7 @@ int incorrect_node_var_off(struct __sk_buff *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front(&ghead, (void *)&f->node + ctx->protocol); + bpf_list_push_front(&ghead, (void *)&f->node2 + ctx->protocol); bpf_spin_unlock(&glock); return 0; @@ -465,7 +471,7 @@ int incorrect_node_off1(void *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front(&ghead, (void *)&f->node + 1); + bpf_list_push_front(&ghead, (void *)&f->node2 + 1); bpf_spin_unlock(&glock); return 0; @@ -480,7 +486,7 @@ int incorrect_node_off2(void *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front(&ghead, &f->node2); + bpf_list_push_front(&ghead, &f->node); bpf_spin_unlock(&glock); return 0; @@ -510,7 +516,7 @@ int incorrect_head_var_off1(struct __sk_buff *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front((void *)&ghead + ctx->protocol, &f->node); + bpf_list_push_front((void *)&ghead + ctx->protocol, &f->node2); bpf_spin_unlock(&glock); return 0; @@ -525,7 +531,7 @@ int incorrect_head_var_off2(struct __sk_buff *ctx) if (!f) return 0; bpf_spin_lock(&glock); - bpf_list_push_front((void *)&f->head + ctx->protocol, &f->node); + bpf_list_push_front((void *)&f->head + ctx->protocol, &f->node2); bpf_spin_unlock(&glock); return 0; @@ -563,7 +569,7 @@ int incorrect_head_off2(void *ctx) return 0; bpf_spin_lock(&glock); - 
bpf_list_push_front((void *)&ghead + 1, &f->node); + bpf_list_push_front((void *)&ghead + 1, &f->node2); bpf_spin_unlock(&glock); return 0; -- cgit v1.2.3-70-g09d2 From 404ad75a36fb1a1008e9fe803aa7d0212df9e240 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Sat, 15 Apr 2023 13:18:09 -0700 Subject: bpf: Migrate bpf_rbtree_remove to possibly fail This patch modifies bpf_rbtree_remove to account for possible failure due to the input rb_node already not being in any collection. The function can now return NULL, and does when the aforementioned scenario occurs. As before, on successful removal an owning reference to the removed node is returned. Adding KF_RET_NULL to bpf_rbtree_remove's kfunc flags - now KF_RET_NULL | KF_ACQUIRE - provides the desired verifier semantics: * retval must be checked for NULL before use * if NULL, retval's ref_obj_id is released * retval is a "maybe acquired" owning ref, not a non-owning ref, so it will live past end of critical section (bpf_spin_unlock), and thus can be checked for NULL after the end of the CS BPF programs must add checks ============================ This does change bpf_rbtree_remove's verifier behavior. BPF program writers will need to add NULL checks to their programs, but the resulting UX looks natural: bpf_spin_lock(&glock); n = bpf_rbtree_first(&ghead); if (!n) { /* ... */} res = bpf_rbtree_remove(&ghead, &n->node); bpf_spin_unlock(&glock); if (!res) /* Newly-added check after this patch */ return 1; n = container_of(res, /* ... */); /* Do something else with n */ bpf_obj_drop(n); return 0; The "if (!res)" check above is the only addition necessary for the above program to pass verification after this patch. bpf_rbtree_remove no longer clobbers non-owning refs ==================================================== An issue arises when bpf_rbtree_remove fails, though. Consider this example: struct node_data { long key; struct bpf_list_node l; struct bpf_rb_node r; struct bpf_refcount ref; }; long failed_sum; void bpf_prog() { struct node_data *n = bpf_obj_new(/* ... */); struct bpf_rb_node *res; n->key = 10; bpf_spin_lock(&glock); bpf_list_push_back(&some_list, &n->l); /* n is now a non-owning ref */ res = bpf_rbtree_remove(&some_tree, &n->r, /* ... */); if (!res) failed_sum += n->key; /* not possible */ bpf_spin_unlock(&glock); /* if (res) { do something useful and drop } ... */ } The bpf_rbtree_remove in this example will always fail. Similarly to bpf_spin_unlock, bpf_rbtree_remove is a non-owning reference invalidation point. The verifier clobbers all non-owning refs after a bpf_rbtree_remove call, so the "failed_sum += n->key" line will fail verification, and in fact there's no good way to get information about the node which failed to add after the invalidation. This patch removes non-owning reference invalidation from bpf_rbtree_remove to allow the above usecase to pass verification. The logic for why this is now possible is as follows: Before this series, bpf_rbtree_add couldn't fail and thus assumed that its input, a non-owning reference, was in the tree. But it's easy to construct an example where two non-owning references pointing to the same underlying memory are acquired and passed to rbtree_remove one after another (see rbtree_api_release_aliasing in selftests/bpf/progs/rbtree_fail.c). So it was necessary to clobber non-owning refs to prevent this case and, more generally, to enforce "non-owning ref is definitely in some collection" invariant. 
This series removes that invariant and the failure / runtime checking added in this patch provide a clean way to deal with the aliasing issue - just fail to remove. Because the aliasing issue prevented by clobbering non-owning refs is no longer an issue, this patch removes the invalidate_non_owning_refs call from verifier handling of bpf_rbtree_remove. Note that bpf_spin_unlock - the other caller of invalidate_non_owning_refs - clobbers non-owning refs for a different reason, so its clobbering behavior remains unchanged. No BPF program changes are necessary for programs to remain valid as a result of this clobbering change. A valid program before this patch passed verification with its non-owning refs having shorter (or equal) lifetimes due to more aggressive clobbering. Also, update existing tests to check bpf_rbtree_remove retval for NULL where necessary, and move rbtree_api_release_aliasing from progs/rbtree_fail.c to progs/rbtree.c since it's now expected to pass verification. Signed-off-by: Dave Marchevsky Link: https://lore.kernel.org/r/20230415201811.343116-8-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/btf.c | 21 +---- kernel/bpf/helpers.c | 8 +- kernel/bpf/verifier.c | 3 - .../testing/selftests/bpf/prog_tests/linked_list.c | 90 ++++++++++++++-------- tools/testing/selftests/bpf/prog_tests/rbtree.c | 25 ++++++ tools/testing/selftests/bpf/progs/rbtree.c | 74 +++++++++++++++++- tools/testing/selftests/bpf/progs/rbtree_fail.c | 77 +++++++----------- 7 files changed, 191 insertions(+), 107 deletions(-) (limited to 'tools/testing') diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 14889fd5ba8e..027f9f8a3551 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3805,25 +3805,8 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type goto end; } - /* need collection identity for non-owning refs before allowing this - * - * Consider a node type w/ both list and rb_node fields: - * struct node { - * struct bpf_list_node l; - * struct bpf_rb_node r; - * } - * - * Used like so: - * struct node *n = bpf_obj_new(....); - * bpf_list_push_front(&list_head, &n->l); - * bpf_rbtree_remove(&rb_root, &n->r); - * - * It should not be possible to rbtree_remove the node since it hasn't - * been added to a tree. But push_front converts n to a non-owning - * reference, and rbtree_remove accepts the non-owning reference to - * a type w/ bpf_rb_node field. 
- */ - if (btf_record_has_field(rec, BPF_LIST_NODE) && + if (rec->refcount_off < 0 && + btf_record_has_field(rec, BPF_LIST_NODE) && btf_record_has_field(rec, BPF_RB_NODE)) { ret = -EINVAL; goto end; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 5067f8d46872..1835df333287 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2000,6 +2000,12 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, struct rb_root_cached *r = (struct rb_root_cached *)root; struct rb_node *n = (struct rb_node *)node; + if (!n->__rb_parent_color) + RB_CLEAR_NODE(n); + + if (RB_EMPTY_NODE(n)) + return NULL; + rb_erase_cached(n, r); RB_CLEAR_NODE(n); return (struct bpf_rb_node *)n; @@ -2328,7 +2334,7 @@ BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE) -BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE) +BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_rbtree_add_impl) BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 736cb7cec0bd..6a41b69a424e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -10922,9 +10922,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, ref_set_non_owning(env, ®s[BPF_REG_0]); } - if (meta.func_id == special_kfunc_list[KF_bpf_rbtree_remove]) - invalidate_non_owning_refs(env); - if (reg_may_point_to_spin_lock(®s[BPF_REG_0]) && !regs[BPF_REG_0].id) regs[BPF_REG_0].id = ++env->id_gen; } else if (btf_type_is_void(t)) { diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c index 872e4bd500fd..f63309fd0e28 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_list.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c @@ -266,6 +266,59 @@ end: return NULL; } +static void list_and_rb_node_same_struct(bool refcount_field) +{ + int bpf_rb_node_btf_id, bpf_refcount_btf_id, foo_btf_id; + struct btf *btf; + int id, err; + + btf = init_btf(); + if (!ASSERT_OK_PTR(btf, "init_btf")) + return; + + bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 24); + if (!ASSERT_GT(bpf_rb_node_btf_id, 0, "btf__add_struct bpf_rb_node")) + return; + + if (refcount_field) { + bpf_refcount_btf_id = btf__add_struct(btf, "bpf_refcount", 4); + if (!ASSERT_GT(bpf_refcount_btf_id, 0, "btf__add_struct bpf_refcount")) + return; + } + + id = btf__add_struct(btf, "bar", refcount_field ? 
44 : 40); + if (!ASSERT_GT(id, 0, "btf__add_struct bar")) + return; + err = btf__add_field(btf, "a", LIST_NODE, 0, 0); + if (!ASSERT_OK(err, "btf__add_field bar::a")) + return; + err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 128, 0); + if (!ASSERT_OK(err, "btf__add_field bar::c")) + return; + if (refcount_field) { + err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 320, 0); + if (!ASSERT_OK(err, "btf__add_field bar::ref")) + return; + } + + foo_btf_id = btf__add_struct(btf, "foo", 20); + if (!ASSERT_GT(foo_btf_id, 0, "btf__add_struct foo")) + return; + err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); + if (!ASSERT_OK(err, "btf__add_field foo::a")) + return; + err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); + if (!ASSERT_OK(err, "btf__add_field foo::b")) + return; + id = btf__add_decl_tag(btf, "contains:bar:a", foo_btf_id, 0); + if (!ASSERT_GT(id, 0, "btf__add_decl_tag contains:bar:a")) + return; + + err = btf__load_into_kernel(btf); + ASSERT_EQ(err, refcount_field ? 0 : -EINVAL, "check btf"); + btf__free(btf); +} + static void test_btf(void) { struct btf *btf = NULL; @@ -717,39 +770,12 @@ static void test_btf(void) } while (test__start_subtest("btf: list_node and rb_node in same struct")) { - btf = init_btf(); - if (!ASSERT_OK_PTR(btf, "init_btf")) - break; - - id = btf__add_struct(btf, "bpf_rb_node", 24); - if (!ASSERT_EQ(id, 5, "btf__add_struct bpf_rb_node")) - break; - id = btf__add_struct(btf, "bar", 40); - if (!ASSERT_EQ(id, 6, "btf__add_struct bar")) - break; - err = btf__add_field(btf, "a", LIST_NODE, 0, 0); - if (!ASSERT_OK(err, "btf__add_field bar::a")) - break; - err = btf__add_field(btf, "c", 5, 128, 0); - if (!ASSERT_OK(err, "btf__add_field bar::c")) - break; - - id = btf__add_struct(btf, "foo", 20); - if (!ASSERT_EQ(id, 7, "btf__add_struct foo")) - break; - err = btf__add_field(btf, "a", LIST_HEAD, 0, 0); - if (!ASSERT_OK(err, "btf__add_field foo::a")) - break; - err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0); - if (!ASSERT_OK(err, "btf__add_field foo::b")) - break; - id = btf__add_decl_tag(btf, "contains:bar:a", 7, 0); - if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:bar:a")) - break; + list_and_rb_node_same_struct(true); + break; + } - err = btf__load_into_kernel(btf); - ASSERT_EQ(err, -EINVAL, "check btf"); - btf__free(btf); + while (test__start_subtest("btf: list_node and rb_node in same struct, no bpf_refcount")) { + list_and_rb_node_same_struct(false); break; } } diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree.c b/tools/testing/selftests/bpf/prog_tests/rbtree.c index 156fa95c42f6..e9300c96607d 100644 --- a/tools/testing/selftests/bpf/prog_tests/rbtree.c +++ b/tools/testing/selftests/bpf/prog_tests/rbtree.c @@ -77,6 +77,29 @@ static void test_rbtree_first_and_remove(void) rbtree__destroy(skel); } +static void test_rbtree_api_release_aliasing(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct rbtree *skel; + int ret; + + skel = rbtree__open_and_load(); + if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_api_release_aliasing), &opts); + ASSERT_OK(ret, "rbtree_api_release_aliasing"); + ASSERT_OK(opts.retval, "rbtree_api_release_aliasing retval"); + ASSERT_EQ(skel->data->first_data[0], 42, "rbtree_api_release_aliasing first rbtree_remove()"); + ASSERT_EQ(skel->data->first_data[1], -1, "rbtree_api_release_aliasing second rbtree_remove()"); + + rbtree__destroy(skel); +} + void 
test_rbtree_success(void) { if (test__start_subtest("rbtree_add_nodes")) @@ -85,6 +108,8 @@ void test_rbtree_success(void) test_rbtree_add_and_remove(); if (test__start_subtest("rbtree_first_and_remove")) test_rbtree_first_and_remove(); + if (test__start_subtest("rbtree_api_release_aliasing")) + test_rbtree_api_release_aliasing(); } #define BTF_FAIL_TEST(suffix) \ diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c index 4c90aa6abddd..b09f4fffe57c 100644 --- a/tools/testing/selftests/bpf/progs/rbtree.c +++ b/tools/testing/selftests/bpf/progs/rbtree.c @@ -93,9 +93,11 @@ long rbtree_add_and_remove(void *ctx) res = bpf_rbtree_remove(&groot, &n->node); bpf_spin_unlock(&glock); + if (!res) + return 1; + n = container_of(res, struct node_data, node); removed_key = n->key; - bpf_obj_drop(n); return 0; @@ -148,9 +150,11 @@ long rbtree_first_and_remove(void *ctx) res = bpf_rbtree_remove(&groot, &o->node); bpf_spin_unlock(&glock); + if (!res) + return 5; + o = container_of(res, struct node_data, node); removed_key = o->key; - bpf_obj_drop(o); bpf_spin_lock(&glock); @@ -173,4 +177,70 @@ err_out: return 1; } +SEC("tc") +long rbtree_api_release_aliasing(void *ctx) +{ + struct node_data *n, *m, *o; + struct bpf_rb_node *res, *res2; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + n->key = 41; + n->data = 42; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + bpf_spin_unlock(&glock); + + bpf_spin_lock(&glock); + + /* m and o point to the same node, + * but verifier doesn't know this + */ + res = bpf_rbtree_first(&groot); + if (!res) + goto err_out; + o = container_of(res, struct node_data, node); + + res = bpf_rbtree_first(&groot); + if (!res) + goto err_out; + m = container_of(res, struct node_data, node); + + res = bpf_rbtree_remove(&groot, &m->node); + /* Retval of previous remove returns an owning reference to m, + * which is the same node non-owning ref o is pointing at. + * We can safely try to remove o as the second rbtree_remove will + * return NULL since the node isn't in a tree. + * + * Previously we relied on the verifier type system + rbtree_remove + * invalidating non-owning refs to ensure that rbtree_remove couldn't + * fail, but now rbtree_remove does runtime checking so we no longer + * invalidate non-owning refs after remove. 
+ */ + res2 = bpf_rbtree_remove(&groot, &o->node); + + bpf_spin_unlock(&glock); + + if (res) { + o = container_of(res, struct node_data, node); + first_data[0] = o->data; + bpf_obj_drop(o); + } + if (res2) { + /* The second remove fails, so res2 is null and this doesn't + * execute + */ + m = container_of(res2, struct node_data, node); + first_data[1] = m->data; + bpf_obj_drop(m); + } + return 0; + +err_out: + bpf_spin_unlock(&glock); + return 1; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c index 46d7d18a218f..3fecf1c6dfe5 100644 --- a/tools/testing/selftests/bpf/progs/rbtree_fail.c +++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c @@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx) } SEC("?tc") -__failure __msg("Unreleased reference id=2 alloc_insn=10") +__failure __msg("Unreleased reference id=3 alloc_insn=10") long rbtree_api_remove_no_drop(void *ctx) { struct bpf_rb_node *res; @@ -118,11 +118,13 @@ long rbtree_api_remove_no_drop(void *ctx) res = bpf_rbtree_remove(&groot, res); - n = container_of(res, struct node_data, node); - __sink(n); + if (res) { + n = container_of(res, struct node_data, node); + __sink(n); + } bpf_spin_unlock(&glock); - /* bpf_obj_drop(n) is missing here */ + /* if (res) { bpf_obj_drop(n); } is missing here */ return 0; unlock_err: @@ -150,35 +152,36 @@ long rbtree_api_add_to_multiple_trees(void *ctx) } SEC("?tc") -__failure __msg("rbtree_remove node input must be non-owning ref") -long rbtree_api_add_release_unlock_escape(void *ctx) +__failure __msg("dereference of modified ptr_or_null_ ptr R2 off=16 disallowed") +long rbtree_api_use_unchecked_remove_retval(void *ctx) { - struct node_data *n; - - n = bpf_obj_new(typeof(*n)); - if (!n) - return 1; + struct bpf_rb_node *res; bpf_spin_lock(&glock); - bpf_rbtree_add(&groot, &n->node, less); + + res = bpf_rbtree_first(&groot); + if (!res) + goto err_out; + res = bpf_rbtree_remove(&groot, res); + bpf_spin_unlock(&glock); bpf_spin_lock(&glock); - /* After add() in previous critical section, n should be - * release_on_unlock and released after previous spin_unlock, - * so should not be possible to use it here - */ - bpf_rbtree_remove(&groot, &n->node); + /* Must check res for NULL before using in rbtree_add below */ + bpf_rbtree_add(&groot, res, less); bpf_spin_unlock(&glock); return 0; + +err_out: + bpf_spin_unlock(&glock); + return 1; } SEC("?tc") __failure __msg("rbtree_remove node input must be non-owning ref") -long rbtree_api_release_aliasing(void *ctx) +long rbtree_api_add_release_unlock_escape(void *ctx) { - struct node_data *n, *m, *o; - struct bpf_rb_node *res; + struct node_data *n; n = bpf_obj_new(typeof(*n)); if (!n) @@ -189,37 +192,11 @@ long rbtree_api_release_aliasing(void *ctx) bpf_spin_unlock(&glock); bpf_spin_lock(&glock); - - /* m and o point to the same node, - * but verifier doesn't know this - */ - res = bpf_rbtree_first(&groot); - if (!res) - return 1; - o = container_of(res, struct node_data, node); - - res = bpf_rbtree_first(&groot); - if (!res) - return 1; - m = container_of(res, struct node_data, node); - - bpf_rbtree_remove(&groot, &m->node); - /* This second remove shouldn't be possible. 
Retval of previous - * remove returns owning reference to m, which is the same - * node o's non-owning ref is pointing at - * - * In order to preserve property - * * owning ref must not be in rbtree - * * non-owning ref must be in rbtree - * - * o's ref must be invalidated after previous remove. Otherwise - * we'd have non-owning ref to node that isn't in rbtree, and - * verifier wouldn't be able to use type system to prevent remove - * of ref that already isn't in any tree. Would have to do runtime - * checks in that case. + /* After add() in previous critical section, n should be + * release_on_unlock and released after previous spin_unlock, + * so should not be possible to use it here */ - bpf_rbtree_remove(&groot, &o->node); - + bpf_rbtree_remove(&groot, &n->node); bpf_spin_unlock(&glock); return 0; } -- cgit v1.2.3-70-g09d2 From 6147f15131e2df544a5449815f456da48c0c88e7 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Sat, 15 Apr 2023 13:18:11 -0700 Subject: selftests/bpf: Add refcounted_kptr tests Test refcounted local kptr functionality added in previous patches in the series. Usecases which pass verification: * Add refcounted local kptr to both tree and list. Then, read and - possibly, depending on test variant - delete from tree, then list. * Also test doing read-and-maybe-delete in opposite order * Stash a refcounted local kptr in a map_value, then add it to a rbtree. Read from both, possibly deleting after tree read. * Add refcounted local kptr to both tree and list. Then, try reading and deleting twice from one of the collections. * bpf_refcount_acquire of just-added non-owning ref should work, as should bpf_refcount_acquire of owning ref just out of bpf_obj_new Usecases which fail verification: * The simple successful bpf_refcount_acquire cases from above should both fail to verify if the newly-acquired owning ref is not dropped Signed-off-by: Dave Marchevsky Link: https://lore.kernel.org/r/20230415201811.343116-10-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/refcounted_kptr.c | 18 + .../testing/selftests/bpf/progs/refcounted_kptr.c | 406 +++++++++++++++++++++ .../selftests/bpf/progs/refcounted_kptr_fail.c | 72 ++++ 3 files changed, 496 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c create mode 100644 tools/testing/selftests/bpf/progs/refcounted_kptr.c create mode 100644 tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c new file mode 100644 index 000000000000..2ab23832062d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include +#include + +#include "refcounted_kptr.skel.h" +#include "refcounted_kptr_fail.skel.h" + +void test_refcounted_kptr(void) +{ + RUN_TESTS(refcounted_kptr); +} + +void test_refcounted_kptr_fail(void) +{ + RUN_TESTS(refcounted_kptr_fail); +} diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c new file mode 100644 index 000000000000..1d348a225140 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include +#include +#include +#include "bpf_misc.h" +#include "bpf_experimental.h" + +struct node_data { + long key; + long list_data; + struct bpf_rb_node r; + struct bpf_list_node l; + struct bpf_refcount ref; +}; + +struct map_value { + struct node_data __kptr *node; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 1); +} stashed_nodes SEC(".maps"); + +struct node_acquire { + long key; + long data; + struct bpf_rb_node node; + struct bpf_refcount refcount; +}; + +#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock lock; +private(A) struct bpf_rb_root root __contains(node_data, r); +private(A) struct bpf_list_head head __contains(node_data, l); + +private(B) struct bpf_spin_lock alock; +private(B) struct bpf_rb_root aroot __contains(node_acquire, node); + +static bool less(struct bpf_rb_node *node_a, const struct bpf_rb_node *node_b) +{ + struct node_data *a; + struct node_data *b; + + a = container_of(node_a, struct node_data, r); + b = container_of(node_b, struct node_data, r); + + return a->key < b->key; +} + +static bool less_a(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_acquire *node_a; + struct node_acquire *node_b; + + node_a = container_of(a, struct node_acquire, node); + node_b = container_of(b, struct node_acquire, node); + + return node_a->key < node_b->key; +} + +static long __insert_in_tree_and_list(struct bpf_list_head *head, + struct bpf_rb_root *root, + struct bpf_spin_lock *lock) +{ + struct node_data *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return -1; + + m = bpf_refcount_acquire(n); + m->key = 123; + m->list_data = 456; + + bpf_spin_lock(lock); + if (bpf_rbtree_add(root, &n->r, less)) { + /* Failure to insert - unexpected */ + bpf_spin_unlock(lock); + bpf_obj_drop(m); + return -2; + } + bpf_spin_unlock(lock); + + bpf_spin_lock(lock); + if (bpf_list_push_front(head, &m->l)) { + /* Failure to insert - unexpected */ + bpf_spin_unlock(lock); + return -3; + } + bpf_spin_unlock(lock); + return 0; +} + +static long __stash_map_insert_tree(int idx, int val, struct bpf_rb_root *root, + struct bpf_spin_lock *lock) +{ + struct map_value *mapval; + struct node_data *n, *m; + + mapval = bpf_map_lookup_elem(&stashed_nodes, &idx); + if (!mapval) + return -1; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return -2; + + n->key = val; + m = bpf_refcount_acquire(n); + + n = bpf_kptr_xchg(&mapval->node, n); + if (n) { + bpf_obj_drop(n); + bpf_obj_drop(m); + return -3; + } + + bpf_spin_lock(lock); + if (bpf_rbtree_add(root, &m->r, less)) { + /* Failure to insert - unexpected */ + bpf_spin_unlock(lock); + return -4; + } + bpf_spin_unlock(lock); + return 0; +} + +static long __read_from_tree(struct bpf_rb_root *root, + struct bpf_spin_lock *lock, + bool remove_from_tree) +{ + struct bpf_rb_node *rb; + struct node_data *n; + long res = -99; + + bpf_spin_lock(lock); + + rb = bpf_rbtree_first(root); + if (!rb) { + bpf_spin_unlock(lock); + return -1; + } + + n = container_of(rb, struct node_data, r); + res = n->key; + + if (!remove_from_tree) { + bpf_spin_unlock(lock); + return res; + } + + rb = bpf_rbtree_remove(root, rb); + bpf_spin_unlock(lock); + if (!rb) + return -2; + n = container_of(rb, struct node_data, r); + bpf_obj_drop(n); + return res; +} + +static long __read_from_list(struct bpf_list_head *head, + struct bpf_spin_lock *lock, + bool remove_from_list) +{ + struct bpf_list_node *l; + struct 
node_data *n; + long res = -99; + + bpf_spin_lock(lock); + + l = bpf_list_pop_front(head); + if (!l) { + bpf_spin_unlock(lock); + return -1; + } + + n = container_of(l, struct node_data, l); + res = n->list_data; + + if (!remove_from_list) { + if (bpf_list_push_back(head, &n->l)) { + bpf_spin_unlock(lock); + return -2; + } + } + + bpf_spin_unlock(lock); + + if (remove_from_list) + bpf_obj_drop(n); + return res; +} + +static long __read_from_unstash(int idx) +{ + struct node_data *n = NULL; + struct map_value *mapval; + long val = -99; + + mapval = bpf_map_lookup_elem(&stashed_nodes, &idx); + if (!mapval) + return -1; + + n = bpf_kptr_xchg(&mapval->node, n); + if (!n) + return -2; + + val = n->key; + bpf_obj_drop(n); + return val; +} + +#define INSERT_READ_BOTH(rem_tree, rem_list, desc) \ +SEC("tc") \ +__description(desc) \ +__success __retval(579) \ +long insert_and_remove_tree_##rem_tree##_list_##rem_list(void *ctx) \ +{ \ + long err, tree_data, list_data; \ + \ + err = __insert_in_tree_and_list(&head, &root, &lock); \ + if (err) \ + return err; \ + \ + err = __read_from_tree(&root, &lock, rem_tree); \ + if (err < 0) \ + return err; \ + else \ + tree_data = err; \ + \ + err = __read_from_list(&head, &lock, rem_list); \ + if (err < 0) \ + return err; \ + else \ + list_data = err; \ + \ + return tree_data + list_data; \ +} + +/* After successful insert of struct node_data into both collections: + * - it should have refcount = 2 + * - removing / not removing the node_data from a collection after + * reading should have no effect on ability to read / remove from + * the other collection + */ +INSERT_READ_BOTH(true, true, "insert_read_both: remove from tree + list"); +INSERT_READ_BOTH(false, false, "insert_read_both: remove from neither"); +INSERT_READ_BOTH(true, false, "insert_read_both: remove from tree"); +INSERT_READ_BOTH(false, true, "insert_read_both: remove from list"); + +#undef INSERT_READ_BOTH +#define INSERT_READ_BOTH(rem_tree, rem_list, desc) \ +SEC("tc") \ +__description(desc) \ +__success __retval(579) \ +long insert_and_remove_lf_tree_##rem_tree##_list_##rem_list(void *ctx) \ +{ \ + long err, tree_data, list_data; \ + \ + err = __insert_in_tree_and_list(&head, &root, &lock); \ + if (err) \ + return err; \ + \ + err = __read_from_list(&head, &lock, rem_list); \ + if (err < 0) \ + return err; \ + else \ + list_data = err; \ + \ + err = __read_from_tree(&root, &lock, rem_tree); \ + if (err < 0) \ + return err; \ + else \ + tree_data = err; \ + \ + return tree_data + list_data; \ +} + +/* Similar to insert_read_both, but list data is read and possibly removed + * first + * + * Results should be no different than reading and possibly removing rbtree + * node first + */ +INSERT_READ_BOTH(true, true, "insert_read_both_list_first: remove from tree + list"); +INSERT_READ_BOTH(false, false, "insert_read_both_list_first: remove from neither"); +INSERT_READ_BOTH(true, false, "insert_read_both_list_first: remove from tree"); +INSERT_READ_BOTH(false, true, "insert_read_both_list_first: remove from list"); + +#define INSERT_DOUBLE_READ_AND_DEL(read_fn, read_root, desc) \ +SEC("tc") \ +__description(desc) \ +__success __retval(-1) \ +long insert_double_##read_fn##_and_del_##read_root(void *ctx) \ +{ \ + long err, list_data; \ + \ + err = __insert_in_tree_and_list(&head, &root, &lock); \ + if (err) \ + return err; \ + \ + err = read_fn(&read_root, &lock, true); \ + if (err < 0) \ + return err; \ + else \ + list_data = err; \ + \ + err = read_fn(&read_root, &lock, true); \ + if (err < 0) \ + 
return err; \ + \ + return err + list_data; \ +} + +/* Insert into both tree and list, then try reading-and-removing from either twice + * + * The second read-and-remove should fail on read step since the node has + * already been removed + */ +INSERT_DOUBLE_READ_AND_DEL(__read_from_tree, root, "insert_double_del: 2x read-and-del from tree"); +INSERT_DOUBLE_READ_AND_DEL(__read_from_list, head, "insert_double_del: 2x read-and-del from list"); + +#define INSERT_STASH_READ(rem_tree, desc) \ +SEC("tc") \ +__description(desc) \ +__success __retval(84) \ +long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx) \ +{ \ + long err, tree_data, map_data; \ + \ + err = __stash_map_insert_tree(0, 42, &root, &lock); \ + if (err) \ + return err; \ + \ + err = __read_from_tree(&root, &lock, rem_tree); \ + if (err < 0) \ + return err; \ + else \ + tree_data = err; \ + \ + err = __read_from_unstash(0); \ + if (err < 0) \ + return err; \ + else \ + map_data = err; \ + \ + return tree_data + map_data; \ +} + +/* Stash a refcounted node in map_val, insert same node into tree, then try + * reading data from tree then unstashed map_val, possibly removing from tree + * + * Removing from tree should have no effect on map_val kptr validity + */ +INSERT_STASH_READ(true, "insert_stash_read: remove from tree"); +INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree"); + +SEC("tc") +__success +long rbtree_refcounted_node_ref_escapes(void *ctx) +{ + struct node_acquire *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + bpf_spin_lock(&alock); + bpf_rbtree_add(&aroot, &n->node, less_a); + m = bpf_refcount_acquire(n); + bpf_spin_unlock(&alock); + + m->key = 2; + bpf_obj_drop(m); + return 0; +} + +SEC("tc") +__success +long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx) +{ + struct node_acquire *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + m = bpf_refcount_acquire(n); + m->key = 2; + + bpf_spin_lock(&alock); + bpf_rbtree_add(&aroot, &n->node, less_a); + bpf_spin_unlock(&alock); + + bpf_obj_drop(m); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c new file mode 100644 index 000000000000..efcb308f80ad --- /dev/null +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include "bpf_experimental.h" +#include "bpf_misc.h" + +struct node_acquire { + long key; + long data; + struct bpf_rb_node node; + struct bpf_refcount refcount; +}; + +#define private(name) SEC(".data." 
#name) __hidden __attribute__((aligned(8))) +private(A) struct bpf_spin_lock glock; +private(A) struct bpf_rb_root groot __contains(node_acquire, node); + +static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) +{ + struct node_acquire *node_a; + struct node_acquire *node_b; + + node_a = container_of(a, struct node_acquire, node); + node_b = container_of(b, struct node_acquire, node); + + return node_a->key < node_b->key; +} + +SEC("?tc") +__failure __msg("Unreleased reference id=3 alloc_insn=21") +long rbtree_refcounted_node_ref_escapes(void *ctx) +{ + struct node_acquire *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + /* m becomes an owning ref but is never drop'd or added to a tree */ + m = bpf_refcount_acquire(n); + bpf_spin_unlock(&glock); + + m->key = 2; + return 0; +} + +SEC("?tc") +__failure __msg("Unreleased reference id=3 alloc_insn=9") +long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx) +{ + struct node_acquire *n, *m; + + n = bpf_obj_new(typeof(*n)); + if (!n) + return 1; + + /* m becomes an owning ref but is never drop'd or added to a tree */ + m = bpf_refcount_acquire(n); + m->key = 2; + + bpf_spin_lock(&glock); + bpf_rbtree_add(&groot, &n->node, less); + bpf_spin_unlock(&glock); + + return 0; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From 09b501d905217a38f03c0f07d5a66e0b5c8c1644 Mon Sep 17 00:00:00 2001 From: David Vernet Date: Sun, 16 Apr 2023 03:49:26 -0500 Subject: bpf: Remove bpf_kfunc_call_test_kptr_get() test kfunc We've managed to improve the UX for kptrs significantly over the last 9 months. All of the prior main use cases, struct bpf_cpumask *, struct task_struct *, and struct cgroup *, have all been updated to be synchronized mainly using RCU. In other words, their KF_ACQUIRE kfunc calls are all KF_RCU, and the pointers themselves are MEM_RCU and can be accessed in an RCU read region in BPF. In a follow-on change, we'll be removing the KF_KPTR_GET kfunc flag. This patch prepares for that by removing the bpf_kfunc_call_test_kptr_get() kfunc, and all associated selftests. 
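Editor's illustration (not part of the original patch): with the kptr_get kfunc
removed, a program that needs a usable reference to a kptr stored in a map
value can take ownership of it with bpf_kptr_xchg() and release it when done,
which is the pattern the updated test_map_kptr_ref_pre selftest below switches
to. The sketch reuses the selftest's map_value/ref_ptr and prog_test_ref_kfunc
names; the surrounding map and kfunc declarations are assumed to be in place.

	/* Editor's sketch: ownership-transfer access to a stored referenced kptr,
	 * replacing the removed bpf_kfunc_call_test_kptr_get() pattern.
	 */
	static int read_and_release_stored_kptr(struct map_value *v)
	{
		struct prog_test_ref_kfunc *p;

		p = bpf_kptr_xchg(&v->ref_ptr, NULL);	/* take the stored reference, if any */
		if (!p)
			return 0;			/* nothing was stashed */
		/* p is now an owned reference; use it, stash it back, or release it */
		bpf_kfunc_call_test_release(p);
		return 1;
	}
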
Signed-off-by: David Vernet Link: https://lore.kernel.org/r/20230416084928.326135-2-void@manifault.com Signed-off-by: Alexei Starovoitov --- net/bpf/test_run.c | 12 ---- tools/testing/selftests/bpf/progs/map_kptr.c | 40 ++---------- tools/testing/selftests/bpf/progs/map_kptr_fail.c | 78 ----------------------- tools/testing/selftests/bpf/verifier/map_kptr.c | 27 -------- 4 files changed, 5 insertions(+), 152 deletions(-) (limited to 'tools/testing') diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 0b9bd9b39990..f170e8a17974 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -679,17 +679,6 @@ __bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p) { } -__bpf_kfunc struct prog_test_ref_kfunc * -bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b) -{ - struct prog_test_ref_kfunc *p = READ_ONCE(*pp); - - if (!p) - return NULL; - refcount_inc(&p->cnt); - return p; -} - struct prog_test_pass1 { int x0; struct { @@ -804,7 +793,6 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL) BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL) BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE) -BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET) BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx) BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1) BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2) diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c index dae5dab1bbf7..d7150041e5d1 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr.c +++ b/tools/testing/selftests/bpf/progs/map_kptr.c @@ -115,8 +115,6 @@ DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_mallo DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps); extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern struct prog_test_ref_kfunc * -bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym; extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) __ksym; @@ -187,25 +185,10 @@ static void test_kptr_ref(struct map_value *v) bpf_kfunc_call_test_release(p); } -static void test_kptr_get(struct map_value *v) -{ - struct prog_test_ref_kfunc *p; - - p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0); - if (!p) - return; - if (p->a + p->b > 100) { - bpf_kfunc_call_test_release(p); - return; - } - bpf_kfunc_call_test_release(p); -} - static void test_kptr(struct map_value *v) { test_kptr_unref(v); test_kptr_ref(v); - test_kptr_get(v); } SEC("tc") @@ -338,38 +321,25 @@ int test_map_kptr_ref_pre(struct map_value *v) if (p_st->cnt.refs.counter != ref) return 4; - p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0); - if (!p) - return 5; - ref++; - if (p_st->cnt.refs.counter != ref) { - ret = 6; - goto end; - } - bpf_kfunc_call_test_release(p); - ref--; - if (p_st->cnt.refs.counter != ref) - return 7; - p = bpf_kptr_xchg(&v->ref_ptr, NULL); if (!p) - return 8; + return 5; bpf_kfunc_call_test_release(p); ref--; if (p_st->cnt.refs.counter != ref) - return 9; + return 6; p = bpf_kfunc_call_test_acquire(&arg); if (!p) - return 10; + return 7; ref++; p = bpf_kptr_xchg(&v->ref_ptr, p); if (p) { - ret = 11; + ret = 8; goto end; } if (p_st->cnt.refs.counter != ref) - return 12; + return 9; /* Leave in map */ return 0; diff 
--git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c index 15bf3127dba3..da8c724f839b 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c @@ -21,8 +21,6 @@ struct array_map { extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; -extern struct prog_test_ref_kfunc * -bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym; SEC("?tc") __failure __msg("kptr access size must be BPF_DW") @@ -220,67 +218,6 @@ int reject_kptr_xchg_on_unref(struct __sk_buff *ctx) return 0; } -SEC("?tc") -__failure __msg("arg#0 expected pointer to map value") -int reject_kptr_get_no_map_val(struct __sk_buff *ctx) -{ - bpf_kfunc_call_test_kptr_get((void *)&ctx, 0, 0); - return 0; -} - -SEC("?tc") -__failure __msg("arg#0 expected pointer to map value") -int reject_kptr_get_no_null_map_val(struct __sk_buff *ctx) -{ - bpf_kfunc_call_test_kptr_get(bpf_map_lookup_elem(&array_map, &(int){0}), 0, 0); - return 0; -} - -SEC("?tc") -__failure __msg("arg#0 no referenced kptr at map value offset=0") -int reject_kptr_get_no_kptr(struct __sk_buff *ctx) -{ - struct map_value *v; - int key = 0; - - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) - return 0; - - bpf_kfunc_call_test_kptr_get((void *)v, 0, 0); - return 0; -} - -SEC("?tc") -__failure __msg("arg#0 no referenced kptr at map value offset=8") -int reject_kptr_get_on_unref(struct __sk_buff *ctx) -{ - struct map_value *v; - int key = 0; - - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) - return 0; - - bpf_kfunc_call_test_kptr_get(&v->unref_ptr, 0, 0); - return 0; -} - -SEC("?tc") -__failure __msg("kernel function bpf_kfunc_call_test_kptr_get args#0") -int reject_kptr_get_bad_type_match(struct __sk_buff *ctx) -{ - struct map_value *v; - int key = 0; - - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) - return 0; - - bpf_kfunc_call_test_kptr_get((void *)&v->ref_memb_ptr, 0, 0); - return 0; -} - SEC("?tc") __failure __msg("R1 type=rcu_ptr_or_null_ expected=percpu_ptr_") int mark_ref_as_untrusted_or_null(struct __sk_buff *ctx) @@ -428,21 +365,6 @@ int kptr_xchg_ref_state(struct __sk_buff *ctx) return 0; } -SEC("?tc") -__failure __msg("Unreleased reference id=3 alloc_insn=") -int kptr_get_ref_state(struct __sk_buff *ctx) -{ - struct map_value *v; - int key = 0; - - v = bpf_map_lookup_elem(&array_map, &key); - if (!v) - return 0; - - bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0); - return 0; -} - SEC("?tc") __failure __msg("Possibly NULL pointer passed to helper arg2") int kptr_xchg_possibly_null(struct __sk_buff *ctx) diff --git a/tools/testing/selftests/bpf/verifier/map_kptr.c b/tools/testing/selftests/bpf/verifier/map_kptr.c index d775ccb01989..a0cfc06d75bc 100644 --- a/tools/testing/selftests/bpf/verifier/map_kptr.c +++ b/tools/testing/selftests/bpf/verifier/map_kptr.c @@ -288,33 +288,6 @@ .result = REJECT, .errstr = "off=0 kptr isn't referenced kptr", }, -{ - "map_kptr: unref: bpf_kfunc_call_test_kptr_get rejected", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_LD_MAP_FD(BPF_REG_6, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - 
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_kptr = { 1 }, - .result = REJECT, - .errstr = "arg#0 no referenced kptr at map value offset=0", - .fixup_kfunc_btf_id = { - { "bpf_kfunc_call_test_kptr_get", 13 }, - } -}, /* Tests for referenced PTR_TO_BTF_ID */ { "map_kptr: ref: loaded pointer marked as untrusted", -- cgit v1.2.3-70-g09d2 From 74cc26f416b9ed88af300393a6f06e0765ebde8b Mon Sep 17 00:00:00 2001 From: Aaron Conole Date: Fri, 14 Apr 2023 09:17:48 -0400 Subject: selftests: openvswitch: add interface support Includes an associated test to generate netns and connect interfaces, with the option to include packet tracing. This will be used in the future when flow support is added for additional test cases. Signed-off-by: Aaron Conole Signed-off-by: David S. Miller --- .../selftests/net/openvswitch/openvswitch.sh | 55 ++++++++++ .../testing/selftests/net/openvswitch/ovs-dpctl.py | 118 +++++++++++++++++++-- 2 files changed, 163 insertions(+), 10 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh index 7ce46700a3ae..18383b0b7b9c 100755 --- a/tools/testing/selftests/net/openvswitch/openvswitch.sh +++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh @@ -70,6 +70,49 @@ ovs_add_dp () { on_exit "ovs_sbx $sbxname python3 $ovs_base/ovs-dpctl.py del-dp $1;" } +ovs_add_if () { + info "Adding IF to DP: br:$2 if:$3" + ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py add-if "$2" "$3" || return 1 +} + +ovs_del_if () { + info "Deleting IF from DP: br:$2 if:$3" + ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py del-if "$2" "$3" || return 1 +} + +ovs_netns_spawn_daemon() { + sbx=$1 + shift + netns=$1 + shift + info "spawning cmd: $*" + ip netns exec $netns $* >> $ovs_dir/stdout 2>> $ovs_dir/stderr & + pid=$! + ovs_sbx "$sbx" on_exit "kill -TERM $pid 2>/dev/null" +} + +ovs_add_netns_and_veths () { + info "Adding netns attached: sbx:$1 dp:$2 {$3, $4, $5}" + ovs_sbx "$1" ip netns add "$3" || return 1 + on_exit "ovs_sbx $1 ip netns del $3" + ovs_sbx "$1" ip link add "$4" type veth peer name "$5" || return 1 + on_exit "ovs_sbx $1 ip link del $4 >/dev/null 2>&1" + ovs_sbx "$1" ip link set "$4" up || return 1 + ovs_sbx "$1" ip link set "$5" netns "$3" || return 1 + ovs_sbx "$1" ip netns exec "$3" ip link set "$5" up || return 1 + + if [ "$6" != "" ]; then + ovs_sbx "$1" ip netns exec "$3" ip addr add "$6" dev "$5" \ + || return 1 + fi + + ovs_add_if "$1" "$2" "$4" || return 1 + [ $TRACING -eq 1 ] && ovs_netns_spawn_daemon "$1" "$ns" \ + tcpdump -i any -s 65535 + + return 0 +} + usage() { echo echo "$0 [OPTIONS] [TEST]..." 
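# Editor's sketch (not part of the patch): how the helpers added in this hunk
# (ovs_add_if, ovs_del_if, ovs_add_netns_and_veths) are meant to be combined in
# a test, mirroring the test_netlink_checks changes in the next hunk; the
# sandbox, datapath and interface names here are illustrative only.
#	ovs_add_dp "sbx0" dp0 || return 1
#	ovs_add_netns_and_veths "sbx0" dp0 ns0 v0 v0c 172.31.110.2/24 || return 1
#	ovs_del_if "sbx0" dp0 v0 || return 1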
@@ -101,6 +144,18 @@ test_netlink_checks () { return 1 fi + ovs_add_netns_and_veths "test_netlink_checks" nv0 left left0 l0 || \ + return 1 + ovs_add_netns_and_veths "test_netlink_checks" nv0 right right0 r0 || \ + return 1 + [ $(python3 $ovs_base/ovs-dpctl.py show nv0 | grep port | \ + wc -l) == 3 ] || \ + return 1 + ovs_del_if "test_netlink_checks" nv0 right0 || return 1 + [ $(python3 $ovs_base/ovs-dpctl.py show nv0 | grep port | \ + wc -l) == 2 ] || \ + return 1 + return 0 } diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py index 5d467d1993cb..626013dfd020 100644 --- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py +++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py @@ -50,7 +50,6 @@ class ovs_dp_msg(genlmsg): class OvsDatapath(GenericNetlinkSocket): - OVS_DP_F_VPORT_PIDS = 1 << 1 OVS_DP_F_DISPATCH_UPCALL_PER_CPU = 1 << 3 @@ -170,6 +169,12 @@ class OvsDatapath(GenericNetlinkSocket): class OvsVport(GenericNetlinkSocket): + OVS_VPORT_TYPE_NETDEV = 1 + OVS_VPORT_TYPE_INTERNAL = 2 + OVS_VPORT_TYPE_GRE = 3 + OVS_VPORT_TYPE_VXLAN = 4 + OVS_VPORT_TYPE_GENEVE = 5 + class ovs_vport_msg(ovs_dp_msg): nla_map = ( ("OVS_VPORT_ATTR_UNSPEC", "none"), @@ -197,17 +202,30 @@ class OvsVport(GenericNetlinkSocket): ) def type_to_str(vport_type): - if vport_type == 1: + if vport_type == OvsVport.OVS_VPORT_TYPE_NETDEV: return "netdev" - elif vport_type == 2: + elif vport_type == OvsVport.OVS_VPORT_TYPE_INTERNAL: return "internal" - elif vport_type == 3: + elif vport_type == OvsVport.OVS_VPORT_TYPE_GRE: return "gre" - elif vport_type == 4: + elif vport_type == OvsVport.OVS_VPORT_TYPE_VXLAN: return "vxlan" - elif vport_type == 5: + elif vport_type == OvsVport.OVS_VPORT_TYPE_GENEVE: return "geneve" - return "unknown:%d" % vport_type + raise ValueError("Unknown vport type:%d" % vport_type) + + def str_to_type(vport_type): + if vport_type == "netdev": + return OvsVport.OVS_VPORT_TYPE_NETDEV + elif vport_type == "internal": + return OvsVport.OVS_VPORT_TYPE_INTERNAL + elif vport_type == "gre": + return OvsVport.OVS_VPORT_TYPE_INTERNAL + elif vport_type == "vxlan": + return OvsVport.OVS_VPORT_TYPE_VXLAN + elif vport_type == "geneve": + return OvsVport.OVS_VPORT_TYPE_GENEVE + raise ValueError("Unknown vport type: '%s'" % vport_type) def __init__(self): GenericNetlinkSocket.__init__(self) @@ -238,8 +256,51 @@ class OvsVport(GenericNetlinkSocket): raise ne return reply + def attach(self, dpindex, vport_ifname, ptype): + msg = OvsVport.ovs_vport_msg() + + msg["cmd"] = OVS_VPORT_CMD_NEW + msg["version"] = OVS_DATAPATH_VERSION + msg["reserved"] = 0 + msg["dpifindex"] = dpindex + port_type = OvsVport.str_to_type(ptype) + + msg["attrs"].append(["OVS_VPORT_ATTR_TYPE", port_type]) + msg["attrs"].append(["OVS_VPORT_ATTR_NAME", vport_ifname]) + msg["attrs"].append(["OVS_VPORT_ATTR_UPCALL_PID", [self.pid]]) + + try: + reply = self.nlm_request( + msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK + ) + reply = reply[0] + except NetlinkError as ne: + raise ne + return reply + + def detach(self, dpindex, vport_ifname): + msg = OvsVport.ovs_vport_msg() + + msg["cmd"] = OVS_VPORT_CMD_DEL + msg["version"] = OVS_DATAPATH_VERSION + msg["reserved"] = 0 + msg["dpifindex"] = dpindex + msg["attrs"].append(["OVS_VPORT_ATTR_NAME", vport_ifname]) -def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB()): + try: + reply = self.nlm_request( + msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK + ) + reply = reply[0] + except NetlinkError as ne: + 
if ne.code == errno.ENODEV: + reply = None + else: + raise ne + return reply + + +def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB(), vpl=OvsVport()): dp_name = dp_lookup_rep.get_attr("OVS_DP_ATTR_NAME") base_stats = dp_lookup_rep.get_attr("OVS_DP_ATTR_STATS") megaflow_stats = dp_lookup_rep.get_attr("OVS_DP_ATTR_MEGAFLOW_STATS") @@ -265,7 +326,6 @@ def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB()): print(" features: 0x%X" % user_features) # port print out - vpl = OvsVport() for iface in ndb.interfaces: rep = vpl.info(iface.ifname, ifindex) if rep is not None: @@ -312,9 +372,25 @@ def main(argv): deldpcmd = subparsers.add_parser("del-dp") deldpcmd.add_argument("deldp", help="Datapath Name") + addifcmd = subparsers.add_parser("add-if") + addifcmd.add_argument("dpname", help="Datapath Name") + addifcmd.add_argument("addif", help="Interface name for adding") + addifcmd.add_argument( + "-t", + "--ptype", + type=str, + default="netdev", + choices=["netdev", "internal"], + help="Interface type (default netdev)", + ) + delifcmd = subparsers.add_parser("del-if") + delifcmd.add_argument("dpname", help="Datapath Name") + delifcmd.add_argument("delif", help="Interface name for adding") + args = parser.parse_args() ovsdp = OvsDatapath() + ovsvp = OvsVport() ndb = NDB() if hasattr(args, "showdp"): @@ -328,7 +404,7 @@ def main(argv): if rep is not None: found = True - print_ovsdp_full(rep, iface.index, ndb) + print_ovsdp_full(rep, iface.index, ndb, ovsvp) if not found: msg = "No DP found" @@ -343,6 +419,28 @@ def main(argv): print("DP '%s' added" % args.adddp) elif hasattr(args, "deldp"): ovsdp.destroy(args.deldp) + elif hasattr(args, "addif"): + rep = ovsdp.info(args.dpname, 0) + if rep is None: + print("DP '%s' not found." % args.dpname) + return 1 + rep = ovsvp.attach(rep["dpifindex"], args.addif, args.ptype) + msg = "vport '%s'" % args.addif + if rep and rep["header"]["error"] is None: + msg += " added." + else: + msg += " failed to add." + elif hasattr(args, "delif"): + rep = ovsdp.info(args.dpname, 0) + if rep is None: + print("DP '%s' not found." % args.dpname) + return 1 + rep = ovsvp.detach(rep["dpifindex"], args.delif) + msg = "vport '%s'" % args.delif + if rep and rep["header"]["error"] is None: + msg += " removed." + else: + msg += " failed to remove." return 0 -- cgit v1.2.3-70-g09d2 From e52b07aa1a54fcb66461149c5185a815c1c60340 Mon Sep 17 00:00:00 2001 From: Aaron Conole Date: Fri, 14 Apr 2023 09:17:49 -0400 Subject: selftests: openvswitch: add flow dump support Add a basic set of fields to print in a 'dpflow' format. This will be used by future commits to check for flow fields after parsing, as well as verifying the flow fields pushed into the kernel from userspace. Signed-off-by: Aaron Conole Signed-off-by: David S. 
Miller --- .../testing/selftests/net/openvswitch/ovs-dpctl.py | 1026 ++++++++++++++++++++ 1 file changed, 1026 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py index 626013dfd020..21b1b8deda7d 100644 --- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py +++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py @@ -6,15 +6,21 @@ import argparse import errno +import ipaddress +import logging import sys +import time try: from pyroute2 import NDB + from pyroute2.netlink import NLA_F_NESTED from pyroute2.netlink import NLM_F_ACK + from pyroute2.netlink import NLM_F_DUMP from pyroute2.netlink import NLM_F_REQUEST from pyroute2.netlink import genlmsg from pyroute2.netlink import nla + from pyroute2.netlink import nlmsg_atoms from pyroute2.netlink.exceptions import NetlinkError from pyroute2.netlink.generic import GenericNetlinkSocket except ModuleNotFoundError: @@ -40,6 +46,36 @@ OVS_VPORT_CMD_DEL = 2 OVS_VPORT_CMD_GET = 3 OVS_VPORT_CMD_SET = 4 +OVS_FLOW_CMD_NEW = 1 +OVS_FLOW_CMD_DEL = 2 +OVS_FLOW_CMD_GET = 3 +OVS_FLOW_CMD_SET = 4 + + +def macstr(mac): + outstr = ":".join(["%02X" % i for i in mac]) + return outstr + + +def convert_mac(mac_str, mask=False): + if mac_str is None or mac_str == "": + mac_str = "00:00:00:00:00:00" + if mask is True and mac_str != "00:00:00:00:00:00": + mac_str = "FF:FF:FF:FF:FF:FF" + mac_split = mac_str.split(":") + ret = bytearray([int(i, 16) for i in mac_split]) + return bytes(ret) + + +def convert_ipv4(ip, mask=False): + if ip is None: + ip = 0 + if mask is True: + if ip != 0: + ip = int(ipaddress.IPv4Address(ip)) & 0xFFFFFFFF + + return int(ipaddress.IPv4Address(ip)) + class ovs_dp_msg(genlmsg): # include the OVS version @@ -49,6 +85,847 @@ class ovs_dp_msg(genlmsg): fields = genlmsg.fields + (("dpifindex", "I"),) +class ovsactions(nla): + nla_flags = NLA_F_NESTED + + nla_map = ( + ("OVS_ACTION_ATTR_UNSPEC", "none"), + ("OVS_ACTION_ATTR_OUTPUT", "uint32"), + ("OVS_ACTION_ATTR_USERSPACE", "userspace"), + ("OVS_ACTION_ATTR_SET", "none"), + ("OVS_ACTION_ATTR_PUSH_VLAN", "none"), + ("OVS_ACTION_ATTR_POP_VLAN", "flag"), + ("OVS_ACTION_ATTR_SAMPLE", "none"), + ("OVS_ACTION_ATTR_RECIRC", "uint32"), + ("OVS_ACTION_ATTR_HASH", "none"), + ("OVS_ACTION_ATTR_PUSH_MPLS", "none"), + ("OVS_ACTION_ATTR_POP_MPLS", "flag"), + ("OVS_ACTION_ATTR_SET_MASKED", "none"), + ("OVS_ACTION_ATTR_CT", "ctact"), + ("OVS_ACTION_ATTR_TRUNC", "uint32"), + ("OVS_ACTION_ATTR_PUSH_ETH", "none"), + ("OVS_ACTION_ATTR_POP_ETH", "flag"), + ("OVS_ACTION_ATTR_CT_CLEAR", "flag"), + ("OVS_ACTION_ATTR_PUSH_NSH", "none"), + ("OVS_ACTION_ATTR_POP_NSH", "flag"), + ("OVS_ACTION_ATTR_METER", "none"), + ("OVS_ACTION_ATTR_CLONE", "none"), + ("OVS_ACTION_ATTR_CHECK_PKT_LEN", "none"), + ("OVS_ACTION_ATTR_ADD_MPLS", "none"), + ("OVS_ACTION_ATTR_DEC_TTL", "none"), + ) + + class ctact(nla): + nla_flags = NLA_F_NESTED + + nla_map = ( + ("OVS_CT_ATTR_NONE", "none"), + ("OVS_CT_ATTR_COMMIT", "flag"), + ("OVS_CT_ATTR_ZONE", "uint16"), + ("OVS_CT_ATTR_MARK", "none"), + ("OVS_CT_ATTR_LABELS", "none"), + ("OVS_CT_ATTR_HELPER", "asciiz"), + ("OVS_CT_ATTR_NAT", "natattr"), + ("OVS_CT_ATTR_FORCE_COMMIT", "flag"), + ("OVS_CT_ATTR_EVENTMASK", "uint32"), + ("OVS_CT_ATTR_TIMEOUT", "asciiz"), + ) + + class natattr(nla): + nla_flags = NLA_F_NESTED + + nla_map = ( + ("OVS_NAT_ATTR_NONE", "none"), + ("OVS_NAT_ATTR_SRC", "flag"), + ("OVS_NAT_ATTR_DST", "flag"), + ("OVS_NAT_ATTR_IP_MIN", "ipaddr"), + 
("OVS_NAT_ATTR_IP_MAX", "ipaddr"), + ("OVS_NAT_ATTR_PROTO_MIN", "uint16"), + ("OVS_NAT_ATTR_PROTO_MAX", "uint16"), + ("OVS_NAT_ATTR_PERSISTENT", "flag"), + ("OVS_NAT_ATTR_PROTO_HASH", "flag"), + ("OVS_NAT_ATTR_PROTO_RANDOM", "flag"), + ) + + def dpstr(self, more=False): + print_str = "nat(" + + if self.get_attr("OVS_NAT_ATTR_SRC"): + print_str += "src" + elif self.get_attr("OVS_NAT_ATTR_DST"): + print_str += "dst" + else: + print_str += "XXX-unknown-nat" + + if self.get_attr("OVS_NAT_ATTR_IP_MIN") or self.get_attr( + "OVS_NAT_ATTR_IP_MAX" + ): + if self.get_attr("OVS_NAT_ATTR_IP_MIN"): + print_str += "=%s," % str( + self.get_attr("OVS_NAT_ATTR_IP_MIN") + ) + + if self.get_attr("OVS_NAT_ATTR_IP_MAX"): + print_str += "-%s," % str( + self.get_attr("OVS_NAT_ATTR_IP_MAX") + ) + else: + print_str += "," + + if self.get_attr("OVS_NAT_ATTR_PROTO_MIN"): + print_str += "proto_min=%d," % self.get_attr( + "OVS_NAT_ATTR_PROTO_MIN" + ) + + if self.get_attr("OVS_NAT_ATTR_PROTO_MAX"): + print_str += "proto_max=%d," % self.get_attr( + "OVS_NAT_ATTR_PROTO_MAX" + ) + + if self.get_attr("OVS_NAT_ATTR_PERSISTENT"): + print_str += "persistent," + if self.get_attr("OVS_NAT_ATTR_HASH"): + print_str += "hash," + if self.get_attr("OVS_NAT_ATTR_RANDOM"): + print_str += "random" + print_str += ")" + return print_str + + def dpstr(self, more=False): + print_str = "ct(" + + if self.get_attr("OVS_CT_ATTR_COMMIT") is not None: + print_str += "commit," + if self.get_attr("OVS_CT_ATTR_ZONE") is not None: + print_str += "zone=%d," % self.get_attr("OVS_CT_ATTR_ZONE") + if self.get_attr("OVS_CT_ATTR_HELPER") is not None: + print_str += "helper=%s," % self.get_attr("OVS_CT_ATTR_HELPER") + if self.get_attr("OVS_CT_ATTR_NAT") is not None: + print_str += self.get_attr("OVS_CT_ATTR_NAT").dpstr(more) + print_str += "," + if self.get_attr("OVS_CT_ATTR_FORCE_COMMIT") is not None: + print_str += "force," + if self.get_attr("OVS_CT_ATTR_EVENTMASK") is not None: + print_str += "emask=0x%X," % self.get_attr( + "OVS_CT_ATTR_EVENTMASK" + ) + if self.get_attr("OVS_CT_ATTR_TIMEOUT") is not None: + print_str += "timeout=%s" % self.get_attr( + "OVS_CT_ATTR_TIMEOUT" + ) + print_str += ")" + return print_str + + class userspace(nla): + nla_flags = NLA_F_NESTED + + nla_map = ( + ("OVS_USERSPACE_ATTR_UNUSED", "none"), + ("OVS_USERSPACE_ATTR_PID", "uint32"), + ("OVS_USERSPACE_ATTR_USERDATA", "array(uint8)"), + ("OVS_USERSPACE_ATTR_EGRESS_TUN_PORT", "uint32"), + ) + + def dpstr(self, more=False): + print_str = "userspace(" + if self.get_attr("OVS_USERSPACE_ATTR_PID") is not None: + print_str += "pid=%d," % self.get_attr( + "OVS_USERSPACE_ATTR_PID" + ) + if self.get_attr("OVS_USERSPACE_ATTR_USERDATA") is not None: + print_str += "userdata=" + for f in self.get_attr("OVS_USERSPACE_ATTR_USERDATA"): + print_str += "%x." 
% f + if self.get_attr("OVS_USERSPACE_ATTR_TUN_PORT") is not None: + print_str += "egress_tun_port=%d" % self.get_attr( + "OVS_USERSPACE_ATTR_TUN_PORT" + ) + print_str += ")" + return print_str + + def dpstr(self, more=False): + print_str = "" + + for field in self.nla_map: + if field[1] == "none" or self.get_attr(field[0]) is None: + continue + if print_str != "": + print_str += "," + + if field[1] == "uint32": + if field[0] == "OVS_ACTION_ATTR_OUTPUT": + print_str += "%d" % int(self.get_attr(field[0])) + elif field[0] == "OVS_ACTION_ATTR_RECIRC": + print_str += "recirc(0x%x)" % int(self.get_attr(field[0])) + elif field[0] == "OVS_ACTION_ATTR_TRUNC": + print_str += "trunc(%d)" % int(self.get_attr(field[0])) + elif field[1] == "flag": + if field[0] == "OVS_ACTION_ATTR_CT_CLEAR": + print_str += "ct_clear" + elif field[0] == "OVS_ACTION_ATTR_POP_VLAN": + print_str += "pop_vlan" + elif field[0] == "OVS_ACTION_ATTR_POP_ETH": + print_str += "pop_eth" + elif field[0] == "OVS_ACTION_ATTR_POP_NSH": + print_str += "pop_nsh" + elif field[0] == "OVS_ACTION_ATTR_POP_MPLS": + print_str += "pop_mpls" + else: + datum = self.get_attr(field[0]) + print_str += datum.dpstr(more) + + return print_str + + +class ovskey(nla): + nla_flags = NLA_F_NESTED + nla_map = ( + ("OVS_KEY_ATTR_UNSPEC", "none"), + ("OVS_KEY_ATTR_ENCAP", "none"), + ("OVS_KEY_ATTR_PRIORITY", "uint32"), + ("OVS_KEY_ATTR_IN_PORT", "uint32"), + ("OVS_KEY_ATTR_ETHERNET", "ethaddr"), + ("OVS_KEY_ATTR_VLAN", "uint16"), + ("OVS_KEY_ATTR_ETHERTYPE", "be16"), + ("OVS_KEY_ATTR_IPV4", "ovs_key_ipv4"), + ("OVS_KEY_ATTR_IPV6", "ovs_key_ipv6"), + ("OVS_KEY_ATTR_TCP", "ovs_key_tcp"), + ("OVS_KEY_ATTR_UDP", "ovs_key_udp"), + ("OVS_KEY_ATTR_ICMP", "ovs_key_icmp"), + ("OVS_KEY_ATTR_ICMPV6", "ovs_key_icmpv6"), + ("OVS_KEY_ATTR_ARP", "ovs_key_arp"), + ("OVS_KEY_ATTR_ND", "ovs_key_nd"), + ("OVS_KEY_ATTR_SKB_MARK", "uint32"), + ("OVS_KEY_ATTR_TUNNEL", "none"), + ("OVS_KEY_ATTR_SCTP", "ovs_key_sctp"), + ("OVS_KEY_ATTR_TCP_FLAGS", "be16"), + ("OVS_KEY_ATTR_DP_HASH", "uint32"), + ("OVS_KEY_ATTR_RECIRC_ID", "uint32"), + ("OVS_KEY_ATTR_MPLS", "array(ovs_key_mpls)"), + ("OVS_KEY_ATTR_CT_STATE", "uint32"), + ("OVS_KEY_ATTR_CT_ZONE", "uint16"), + ("OVS_KEY_ATTR_CT_MARK", "uint32"), + ("OVS_KEY_ATTR_CT_LABELS", "none"), + ("OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4", "ovs_key_ct_tuple_ipv4"), + ("OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6", "ovs_key_ct_tuple_ipv6"), + ("OVS_KEY_ATTR_NSH", "none"), + ("OVS_KEY_ATTR_PACKET_TYPE", "none"), + ("OVS_KEY_ATTR_ND_EXTENSIONS", "none"), + ("OVS_KEY_ATTR_TUNNEL_INFO", "none"), + ("OVS_KEY_ATTR_IPV6_EXTENSIONS", "none"), + ) + + class ovs_key_proto(nla): + fields = ( + ("src", "!H"), + ("dst", "!H"), + ) + + fields_map = ( + ("src", "src", "%d", lambda x: int(x) if x is not None else 0), + ("dst", "dst", "%d", lambda x: int(x) if x is not None else 0), + ) + + def __init__( + self, + protostr, + data=None, + offset=None, + parent=None, + length=None, + init=None, + ): + self.proto_str = protostr + nla.__init__( + self, + data=data, + offset=offset, + parent=parent, + length=length, + init=init, + ) + + def dpstr(self, masked=None, more=False): + outstr = self.proto_str + "(" + first = False + for f in self.fields_map: + if first: + outstr += "," + if masked is None: + outstr += "%s=" % f[0] + if isinstance(f[2], str): + outstr += f[2] % self[f[1]] + else: + outstr += f[2](self[f[1]]) + first = True + elif more or f[3](masked[f[1]]) != 0: + outstr += "%s=" % f[0] + if isinstance(f[2], str): + outstr += f[2] % self[f[1]] + else: + outstr += 
f[2](self[f[1]]) + outstr += "/" + if isinstance(f[2], str): + outstr += f[2] % masked[f[1]] + else: + outstr += f[2](masked[f[1]]) + first = True + outstr += ")" + return outstr + + class ethaddr(ovs_key_proto): + fields = ( + ("src", "!6s"), + ("dst", "!6s"), + ) + + fields_map = ( + ( + "src", + "src", + macstr, + lambda x: int.from_bytes(x, "big"), + convert_mac, + ), + ( + "dst", + "dst", + macstr, + lambda x: int.from_bytes(x, "big"), + convert_mac, + ), + ) + + def __init__( + self, + data=None, + offset=None, + parent=None, + length=None, + init=None, + ): + ovskey.ovs_key_proto.__init__( + self, + "eth", + data=data, + offset=offset, + parent=parent, + length=length, + init=init, + ) + + class ovs_key_ipv4(ovs_key_proto): + fields = ( + ("src", "!I"), + ("dst", "!I"), + ("proto", "B"), + ("tos", "B"), + ("ttl", "B"), + ("frag", "B"), + ) + + fields_map = ( + ( + "src", + "src", + lambda x: str(ipaddress.IPv4Address(x)), + int, + convert_ipv4, + ), + ( + "dst", + "dst", + lambda x: str(ipaddress.IPv4Address(x)), + int, + convert_ipv4, + ), + ("proto", "proto", "%d", lambda x: int(x) if x is not None else 0), + ("tos", "tos", "%d", lambda x: int(x) if x is not None else 0), + ("ttl", "ttl", "%d", lambda x: int(x) if x is not None else 0), + ("frag", "frag", "%d", lambda x: int(x) if x is not None else 0), + ) + + def __init__( + self, + data=None, + offset=None, + parent=None, + length=None, + init=None, + ): + ovskey.ovs_key_proto.__init__( + self, + "ipv4", + data=data, + offset=offset, + parent=parent, + length=length, + init=init, + ) + + class ovs_key_ipv6(ovs_key_proto): + fields = ( + ("src", "!16s"), + ("dst", "!16s"), + ("label", "!I"), + ("proto", "B"), + ("tclass", "B"), + ("hlimit", "B"), + ("frag", "B"), + ) + + fields_map = ( + ( + "src", + "src", + lambda x: str(ipaddress.IPv6Address(x)), + lambda x: int.from_bytes(x, "big"), + lambda x: ipaddress.IPv6Address(x), + ), + ( + "dst", + "dst", + lambda x: str(ipaddress.IPv6Address(x)), + lambda x: int.from_bytes(x, "big"), + lambda x: ipaddress.IPv6Address(x), + ), + ("label", "label", "%d", int), + ("proto", "proto", "%d", int), + ("tclass", "tclass", "%d", int), + ("hlimit", "hlimit", "%d", int), + ("frag", "frag", "%d", int), + ) + + def __init__( + self, + data=None, + offset=None, + parent=None, + length=None, + init=None, + ): + ovskey.ovs_key_proto.__init__( + self, + "ipv6", + data=data, + offset=offset, + parent=parent, + length=length, + init=init, + ) + + class ovs_key_tcp(ovs_key_proto): + def __init__( + self, + data=None, + offset=None, + parent=None, + length=None, + init=None, + ): + ovskey.ovs_key_proto.__init__( + self, + "tcp", + data=data, + offset=offset, + parent=parent, + length=length, + init=init, + ) + + class ovs_key_udp(ovs_key_proto): + def __init__( + self, + data=None, + offset=None, + parent=None, + length=None, + init=None, + ): + ovskey.ovs_key_proto.__init__( + self, + "udp", + data=data, + offset=offset, + parent=parent, + length=length, + init=init, + ) + + class ovs_key_sctp(ovs_key_proto): + def __init__( + self, + data=None, + offset=None, + parent=None, + length=None, + init=None, + ): + ovskey.ovs_key_proto.__init__( + self, + "sctp", + data=data, + offset=offset, + parent=parent, + length=length, + init=init, + ) + + class ovs_key_icmp(ovs_key_proto): + fields = ( + ("type", "B"), + ("code", "B"), + ) + + fields_map = ( + ("type", "type", "%d", int), + ("code", "code", "%d", int), + ) + + def __init__( + self, + data=None, + offset=None, + parent=None, + length=None, + init=None, 
+ ): + ovskey.ovs_key_proto.__init__( + self, + "icmp", + data=data, + offset=offset, + parent=parent, + length=length, + init=init, + ) + + class ovs_key_icmpv6(ovs_key_icmp): + def __init__( + self, + data=None, + offset=None, + parent=None, + length=None, + init=None, + ): + ovskey.ovs_key_proto.__init__( + self, + "icmpv6", + data=data, + offset=offset, + parent=parent, + length=length, + init=init, + ) + + class ovs_key_arp(ovs_key_proto): + fields = ( + ("sip", "!I"), + ("tip", "!I"), + ("op", "!H"), + ("sha", "!6s"), + ("tha", "!6s"), + ("pad", "xx"), + ) + + fields_map = ( + ( + "sip", + "sip", + lambda x: str(ipaddress.IPv4Address(x)), + int, + convert_ipv4, + ), + ( + "tip", + "tip", + lambda x: str(ipaddress.IPv4Address(x)), + int, + convert_ipv4, + ), + ("op", "op", "%d", lambda x: int(x) if x is not None else 0), + ( + "sha", + "sha", + macstr, + lambda x: int.from_bytes(x, "big"), + convert_mac, + ), + ( + "tha", + "tha", + macstr, + lambda x: int.from_bytes(x, "big"), + convert_mac, + ), + ) + + def __init__( + self, + data=None, + offset=None, + parent=None, + length=None, + init=None, + ): + ovskey.ovs_key_proto.__init__( + self, + "arp", + data=data, + offset=offset, + parent=parent, + length=length, + init=init, + ) + + class ovs_key_nd(ovs_key_proto): + fields = ( + ("target", "!16s"), + ("sll", "!6s"), + ("tll", "!6s"), + ) + + fields_map = ( + ( + "target", + "target", + lambda x: str(ipaddress.IPv6Address(x)), + lambda x: int.from_bytes(x, "big"), + ), + ("sll", "sll", macstr, lambda x: int.from_bytes(x, "big")), + ("tll", "tll", macstr, lambda x: int.from_bytes(x, "big")), + ) + + def __init__( + self, + data=None, + offset=None, + parent=None, + length=None, + init=None, + ): + ovskey.ovs_key_proto.__init__( + self, + "nd", + data=data, + offset=offset, + parent=parent, + length=length, + init=init, + ) + + class ovs_key_ct_tuple_ipv4(ovs_key_proto): + fields = ( + ("src", "!I"), + ("dst", "!I"), + ("tp_src", "!H"), + ("tp_dst", "!H"), + ("proto", "B"), + ) + + fields_map = ( + ( + "src", + "src", + lambda x: str(ipaddress.IPv4Address(x)), + int, + ), + ( + "dst", + "dst", + lambda x: str(ipaddress.IPv6Address(x)), + int, + ), + ("tp_src", "tp_src", "%d", int), + ("tp_dst", "tp_dst", "%d", int), + ("proto", "proto", "%d", int), + ) + + def __init__( + self, + data=None, + offset=None, + parent=None, + length=None, + init=None, + ): + ovskey.ovs_key_proto.__init__( + self, + "ct_tuple4", + data=data, + offset=offset, + parent=parent, + length=length, + init=init, + ) + + class ovs_key_ct_tuple_ipv6(nla): + fields = ( + ("src", "!16s"), + ("dst", "!16s"), + ("tp_src", "!H"), + ("tp_dst", "!H"), + ("proto", "B"), + ) + + fields_map = ( + ( + "src", + "src", + lambda x: str(ipaddress.IPv6Address(x)), + lambda x: int.from_bytes(x, "big", convertmac), + ), + ( + "dst", + "dst", + lambda x: str(ipaddress.IPv6Address(x)), + lambda x: int.from_bytes(x, "big"), + ), + ("tp_src", "tp_src", "%d", int), + ("tp_dst", "tp_dst", "%d", int), + ("proto", "proto", "%d", int), + ) + + def __init__( + self, + data=None, + offset=None, + parent=None, + length=None, + init=None, + ): + ovskey.ovs_key_proto.__init__( + self, + "ct_tuple6", + data=data, + offset=offset, + parent=parent, + length=length, + init=init, + ) + + class ovs_key_mpls(nla): + fields = (("lse", ">I"),) + + def dpstr(self, mask=None, more=False): + print_str = "" + + for field in ( + ( + "OVS_KEY_ATTR_PRIORITY", + "skb_priority", + "%d", + lambda x: False, + True, + ), + ( + "OVS_KEY_ATTR_SKB_MARK", + "skb_mark", + 
"%d", + lambda x: False, + True, + ), + ( + "OVS_KEY_ATTR_RECIRC_ID", + "recirc_id", + "0x%08X", + lambda x: False, + True, + ), + ( + "OVS_KEY_ATTR_DP_HASH", + "dp_hash", + "0x%08X", + lambda x: False, + True, + ), + ( + "OVS_KEY_ATTR_CT_STATE", + "ct_state", + "0x%04x", + lambda x: False, + True, + ), + ( + "OVS_KEY_ATTR_CT_ZONE", + "ct_zone", + "0x%04x", + lambda x: False, + True, + ), + ( + "OVS_KEY_ATTR_CT_MARK", + "ct_mark", + "0x%08x", + lambda x: False, + True, + ), + ( + "OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4", + None, + None, + False, + False, + ), + ( + "OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6", + None, + None, + False, + False, + ), + ( + "OVS_KEY_ATTR_IN_PORT", + "in_port", + "%d", + lambda x: True, + True, + ), + ("OVS_KEY_ATTR_ETHERNET", None, None, False, False), + ( + "OVS_KEY_ATTR_ETHERTYPE", + "eth_type", + "0x%04x", + lambda x: int(x) == 0xFFFF, + True, + ), + ("OVS_KEY_ATTR_IPV4", None, None, False, False), + ("OVS_KEY_ATTR_IPV6", None, None, False, False), + ("OVS_KEY_ATTR_ARP", None, None, False, False), + ("OVS_KEY_ATTR_TCP", None, None, False, False), + ( + "OVS_KEY_ATTR_TCP_FLAGS", + "tcp_flags", + "0x%04x", + lambda x: False, + True, + ), + ("OVS_KEY_ATTR_UDP", None, None, False, False), + ("OVS_KEY_ATTR_SCTP", None, None, False, False), + ("OVS_KEY_ATTR_ICMP", None, None, False, False), + ("OVS_KEY_ATTR_ICMPV6", None, None, False, False), + ("OVS_KEY_ATTR_ND", None, None, False, False), + ): + v = self.get_attr(field[0]) + if v is not None: + m = None if mask is None else mask.get_attr(field[0]) + if field[4] is False: + print_str += v.dpstr(m, more) + print_str += "," + else: + if m is None or field[3](m): + print_str += field[1] + "(" + print_str += field[2] % v + print_str += ")," + elif more or m != 0: + print_str += field[1] + "(" + print_str += (field[2] % v) + "/" + (field[2] % m) + print_str += ")," + + return print_str + + class OvsDatapath(GenericNetlinkSocket): OVS_DP_F_VPORT_PIDS = 1 << 1 OVS_DP_F_DISPATCH_UPCALL_PER_CPU = 1 << 3 @@ -300,6 +1177,135 @@ class OvsVport(GenericNetlinkSocket): return reply +class OvsFlow(GenericNetlinkSocket): + class ovs_flow_msg(ovs_dp_msg): + nla_map = ( + ("OVS_FLOW_ATTR_UNSPEC", "none"), + ("OVS_FLOW_ATTR_KEY", "ovskey"), + ("OVS_FLOW_ATTR_ACTIONS", "ovsactions"), + ("OVS_FLOW_ATTR_STATS", "flowstats"), + ("OVS_FLOW_ATTR_TCP_FLAGS", "uint8"), + ("OVS_FLOW_ATTR_USED", "uint64"), + ("OVS_FLOW_ATTR_CLEAR", "none"), + ("OVS_FLOW_ATTR_MASK", "ovskey"), + ("OVS_FLOW_ATTR_PROBE", "none"), + ("OVS_FLOW_ATTR_UFID", "array(uint32)"), + ("OVS_FLOW_ATTR_UFID_FLAGS", "uint32"), + ) + + class flowstats(nla): + fields = ( + ("packets", "=Q"), + ("bytes", "=Q"), + ) + + def dpstr(self, more=False): + ufid = self.get_attr("OVS_FLOW_ATTR_UFID") + ufid_str = "" + if ufid is not None: + ufid_str = ( + "ufid:{:08x}-{:04x}-{:04x}-{:04x}-{:04x}{:08x}".format( + ufid[0], + ufid[1] >> 16, + ufid[1] & 0xFFFF, + ufid[2] >> 16, + ufid[2] & 0, + ufid[3], + ) + ) + + key_field = self.get_attr("OVS_FLOW_ATTR_KEY") + keymsg = None + if key_field is not None: + keymsg = key_field + + mask_field = self.get_attr("OVS_FLOW_ATTR_MASK") + maskmsg = None + if mask_field is not None: + maskmsg = mask_field + + acts_field = self.get_attr("OVS_FLOW_ATTR_ACTIONS") + actsmsg = None + if acts_field is not None: + actsmsg = acts_field + + print_str = "" + + if more: + print_str += ufid_str + "," + + if keymsg is not None: + print_str += keymsg.dpstr(maskmsg, more) + + stats = self.get_attr("OVS_FLOW_ATTR_STATS") + if stats is None: + print_str += " packets:0, bytes:0," + 
else: + print_str += " packets:%d, bytes:%d," % ( + stats["packets"], + stats["bytes"], + ) + + used = self.get_attr("OVS_FLOW_ATTR_USED") + print_str += " used:" + if used is None: + print_str += "never," + else: + used_time = int(used) + cur_time_sec = time.clock_gettime(time.CLOCK_MONOTONIC) + used_time = (cur_time_sec * 1000) - used_time + print_str += "{}s,".format(used_time / 1000) + + print_str += " actions:" + if ( + actsmsg is None + or "attrs" not in actsmsg + or len(actsmsg["attrs"]) == 0 + ): + print_str += "drop" + else: + print_str += actsmsg.dpstr(more) + + return print_str + + def __init__(self): + GenericNetlinkSocket.__init__(self) + + self.bind(OVS_FLOW_FAMILY, OvsFlow.ovs_flow_msg) + + def dump(self, dpifindex, flowspec=None): + """ + Returns a list of messages containing flows. + + dpifindex should be a valid datapath obtained by calling + into the OvsDatapath lookup + + flowpsec is a string which represents a flow in the dpctl + format. + """ + msg = OvsFlow.ovs_flow_msg() + + msg["cmd"] = OVS_FLOW_CMD_GET + msg["version"] = OVS_DATAPATH_VERSION + msg["reserved"] = 0 + msg["dpifindex"] = dpifindex + + msg_flags = NLM_F_REQUEST | NLM_F_ACK + if flowspec is None: + msg_flags |= NLM_F_DUMP + rep = None + + try: + rep = self.nlm_request( + msg, + msg_type=self.prid, + msg_flags=msg_flags, + ) + except NetlinkError as ne: + raise ne + return rep + + def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB(), vpl=OvsVport()): dp_name = dp_lookup_rep.get_attr("OVS_DP_ATTR_NAME") base_stats = dp_lookup_rep.get_attr("OVS_DP_ATTR_STATS") @@ -340,12 +1346,16 @@ def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB(), vpl=OvsVport()): def main(argv): + nlmsg_atoms.ovskey = ovskey + nlmsg_atoms.ovsactions = ovsactions + parser = argparse.ArgumentParser() parser.add_argument( "-v", "--verbose", action="count", help="Increment 'verbose' output counter.", + default=0, ) subparsers = parser.add_subparsers() @@ -387,10 +1397,18 @@ def main(argv): delifcmd.add_argument("dpname", help="Datapath Name") delifcmd.add_argument("delif", help="Interface name for adding") + dumpflcmd = subparsers.add_parser("dump-flows") + dumpflcmd.add_argument("dumpdp", help="Datapath Name") + args = parser.parse_args() + if args.verbose > 0: + if args.verbose > 1: + logging.basicConfig(level=logging.DEBUG) + ovsdp = OvsDatapath() ovsvp = OvsVport() + ovsflow = OvsFlow() ndb = NDB() if hasattr(args, "showdp"): @@ -441,6 +1459,14 @@ def main(argv): msg += " removed." else: msg += " failed to remove." + elif hasattr(args, "dumpdp"): + rep = ovsdp.info(args.dumpdp, 0) + if rep is None: + print("DP '%s' not found." % args.dumpdp) + return 1 + rep = ovsflow.dump(rep["dpifindex"]) + for flow in rep: + print(flow.dpstr(True if args.verbose > 0 else False)) return 0 -- cgit v1.2.3-70-g09d2 From 9feac87b673c63e2de9aaf21bbf46cd9c4158a97 Mon Sep 17 00:00:00 2001 From: Aaron Conole Date: Fri, 14 Apr 2023 09:17:50 -0400 Subject: selftests: openvswitch: add support for upcall testing The upcall socket interface can be exercised now to make sure that future feature adjustments to the field can maintain backwards compatibility. Signed-off-by: Aaron Conole Signed-off-by: David S. 
Miller --- .../selftests/net/openvswitch/openvswitch.sh | 38 +++++- .../testing/selftests/net/openvswitch/ovs-dpctl.py | 138 +++++++++++++++++++-- 2 files changed, 165 insertions(+), 11 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh index 18383b0b7b9c..3117a4be0cd0 100755 --- a/tools/testing/selftests/net/openvswitch/openvswitch.sh +++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh @@ -11,7 +11,8 @@ VERBOSE=0 TRACING=0 tests=" - netlink_checks ovsnl: validate netlink attrs and settings" + netlink_checks ovsnl: validate netlink attrs and settings + upcall_interfaces ovs: test the upcall interfaces" info() { [ $VERBOSE = 0 ] || echo $* @@ -72,7 +73,15 @@ ovs_add_dp () { ovs_add_if () { info "Adding IF to DP: br:$2 if:$3" - ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py add-if "$2" "$3" || return 1 + if [ "$4" != "-u" ]; then + ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py add-if "$2" "$3" \ + || return 1 + else + python3 $ovs_base/ovs-dpctl.py add-if \ + -u "$2" "$3" >$ovs_dir/$3.out 2>$ovs_dir/$3.err & + pid=$! + on_exit "ovs_sbx $1 kill -TERM $pid 2>/dev/null" + fi } ovs_del_if () { @@ -106,7 +115,12 @@ ovs_add_netns_and_veths () { || return 1 fi - ovs_add_if "$1" "$2" "$4" || return 1 + if [ "$7" != "-u" ]; then + ovs_add_if "$1" "$2" "$4" || return 1 + else + ovs_add_if "$1" "$2" "$4" -u || return 1 + fi + [ $TRACING -eq 1 ] && ovs_netns_spawn_daemon "$1" "$ns" \ tcpdump -i any -s 65535 @@ -159,6 +173,24 @@ test_netlink_checks () { return 0 } +test_upcall_interfaces() { + sbx_add "test_upcall_interfaces" || return 1 + + info "setting up new DP" + ovs_add_dp "test_upcall_interfaces" ui0 -V 2:1 || return 1 + + ovs_add_netns_and_veths "test_upcall_interfaces" ui0 upc left0 l0 \ + 172.31.110.1/24 -u || return 1 + + sleep 1 + info "sending arping" + ip netns exec upc arping -I l0 172.31.110.20 -c 1 \ + >$ovs_dir/arping.stdout 2>$ovs_dir/arping.stderr + + grep -E "MISS upcall\[0/yes\]: .*arp\(sip=172.31.110.1,tip=172.31.110.20,op=1,sha=" $ovs_dir/left0.out >/dev/null 2>&1 || return 1 + return 0 +} + run_test() { ( tname="$1" diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py index 21b1b8deda7d..1c8b36bc15d4 100644 --- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py +++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py @@ -8,6 +8,8 @@ import argparse import errno import ipaddress import logging +import multiprocessing +import struct import sys import time @@ -926,6 +928,51 @@ class ovskey(nla): return print_str +class OvsPacket(GenericNetlinkSocket): + OVS_PACKET_CMD_MISS = 1 # Flow table miss + OVS_PACKET_CMD_ACTION = 2 # USERSPACE action + OVS_PACKET_CMD_EXECUTE = 3 # Apply actions to packet + + class ovs_packet_msg(ovs_dp_msg): + nla_map = ( + ("OVS_PACKET_ATTR_UNSPEC", "none"), + ("OVS_PACKET_ATTR_PACKET", "array(uint8)"), + ("OVS_PACKET_ATTR_KEY", "ovskey"), + ("OVS_PACKET_ATTR_ACTIONS", "ovsactions"), + ("OVS_PACKET_ATTR_USERDATA", "none"), + ("OVS_PACKET_ATTR_EGRESS_TUN_KEY", "none"), + ("OVS_PACKET_ATTR_UNUSED1", "none"), + ("OVS_PACKET_ATTR_UNUSED2", "none"), + ("OVS_PACKET_ATTR_PROBE", "none"), + ("OVS_PACKET_ATTR_MRU", "uint16"), + ("OVS_PACKET_ATTR_LEN", "uint32"), + ("OVS_PACKET_ATTR_HASH", "uint64"), + ) + + def __init__(self): + GenericNetlinkSocket.__init__(self) + self.bind(OVS_PACKET_FAMILY, OvsPacket.ovs_packet_msg) + + def upcall_handler(self, up=None): + print("listening 
on upcall packet handler:", self.epid) + while True: + try: + msgs = self.get() + for msg in msgs: + if not up: + continue + if msg["cmd"] == OvsPacket.OVS_PACKET_CMD_MISS: + up.miss(msg) + elif msg["cmd"] == OvsPacket.OVS_PACKET_CMD_ACTION: + up.action(msg) + elif msg["cmd"] == OvsPacket.OVS_PACKET_CMD_EXECUTE: + up.execute(msg) + else: + print("Unkonwn cmd: %d" % msg["cmd"]) + except NetlinkError as ne: + raise ne + + class OvsDatapath(GenericNetlinkSocket): OVS_DP_F_VPORT_PIDS = 1 << 1 OVS_DP_F_DISPATCH_UPCALL_PER_CPU = 1 << 3 @@ -989,7 +1036,9 @@ class OvsDatapath(GenericNetlinkSocket): return reply - def create(self, dpname, shouldUpcall=False, versionStr=None): + def create( + self, dpname, shouldUpcall=False, versionStr=None, p=OvsPacket() + ): msg = OvsDatapath.dp_cmd_msg() msg["cmd"] = OVS_DP_CMD_NEW if versionStr is None: @@ -1004,11 +1053,18 @@ class OvsDatapath(GenericNetlinkSocket): if versionStr is not None and versionStr.find(":") != -1: dpfeatures = int(versionStr.split(":")[1], 0) else: - dpfeatures = OvsDatapath.OVS_DP_F_VPORT_PIDS - + if versionStr is None or versionStr.find(":") == -1: + dpfeatures |= OvsDatapath.OVS_DP_F_DISPATCH_UPCALL_PER_CPU + dpfeatures &= ~OvsDatapath.OVS_DP_F_VPORT_PIDS + + nproc = multiprocessing.cpu_count() + procarray = [] + for i in range(1, nproc): + procarray += [int(p.epid)] + msg["attrs"].append(["OVS_DP_ATTR_UPCALL_PID", procarray]) msg["attrs"].append(["OVS_DP_ATTR_USER_FEATURES", dpfeatures]) if not shouldUpcall: - msg["attrs"].append(["OVS_DP_ATTR_UPCALL_PID", 0]) + msg["attrs"].append(["OVS_DP_ATTR_UPCALL_PID", [0]]) try: reply = self.nlm_request( @@ -1104,9 +1160,10 @@ class OvsVport(GenericNetlinkSocket): return OvsVport.OVS_VPORT_TYPE_GENEVE raise ValueError("Unknown vport type: '%s'" % vport_type) - def __init__(self): + def __init__(self, packet=OvsPacket()): GenericNetlinkSocket.__init__(self) self.bind(OVS_VPORT_FAMILY, OvsVport.ovs_vport_msg) + self.upcall_packet = packet def info(self, vport_name, dpifindex=0, portno=None): msg = OvsVport.ovs_vport_msg() @@ -1144,7 +1201,37 @@ class OvsVport(GenericNetlinkSocket): msg["attrs"].append(["OVS_VPORT_ATTR_TYPE", port_type]) msg["attrs"].append(["OVS_VPORT_ATTR_NAME", vport_ifname]) - msg["attrs"].append(["OVS_VPORT_ATTR_UPCALL_PID", [self.pid]]) + msg["attrs"].append( + ["OVS_VPORT_ATTR_UPCALL_PID", [self.upcall_packet.epid]] + ) + + try: + reply = self.nlm_request( + msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK + ) + reply = reply[0] + except NetlinkError as ne: + if ne.code == errno.EEXIST: + reply = None + else: + raise ne + return reply + + def reset_upcall(self, dpindex, vport_ifname, p=None): + msg = OvsVport.ovs_vport_msg() + + msg["cmd"] = OVS_VPORT_CMD_SET + msg["version"] = OVS_DATAPATH_VERSION + msg["reserved"] = 0 + msg["dpifindex"] = dpindex + msg["attrs"].append(["OVS_VPORT_ATTR_NAME", vport_ifname]) + + if p == None: + p = self.upcall_packet + else: + self.upcall_packet = p + + msg["attrs"].append(["OVS_VPORT_ATTR_UPCALL_PID", [p.epid]]) try: reply = self.nlm_request( @@ -1176,6 +1263,9 @@ class OvsVport(GenericNetlinkSocket): raise ne return reply + def upcall_handler(self, handler=None): + self.upcall_packet.upcall_handler(handler) + class OvsFlow(GenericNetlinkSocket): class ovs_flow_msg(ovs_dp_msg): @@ -1305,6 +1395,24 @@ class OvsFlow(GenericNetlinkSocket): raise ne return rep + def miss(self, packetmsg): + seq = packetmsg["header"]["sequence_number"] + keystr = "(none)" + key_field = packetmsg.get_attr("OVS_PACKET_ATTR_KEY") + if key_field is 
not None: + keystr = key_field.dpstr(None, True) + + pktdata = packetmsg.get_attr("OVS_PACKET_ATTR_PACKET") + pktpres = "yes" if pktdata is not None else "no" + + print("MISS upcall[%d/%s]: %s" % (seq, pktpres, keystr), flush=True) + + def execute(self, packetmsg): + print("userspace execute command") + + def action(self, packetmsg): + print("userspace action command") + def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB(), vpl=OvsVport()): dp_name = dp_lookup_rep.get_attr("OVS_DP_ATTR_NAME") @@ -1385,6 +1493,12 @@ def main(argv): addifcmd = subparsers.add_parser("add-if") addifcmd.add_argument("dpname", help="Datapath Name") addifcmd.add_argument("addif", help="Interface name for adding") + addifcmd.add_argument( + "-u", + "--upcall", + action="store_true", + help="Leave open a reader for upcalls", + ) addifcmd.add_argument( "-t", "--ptype", @@ -1406,8 +1520,9 @@ def main(argv): if args.verbose > 1: logging.basicConfig(level=logging.DEBUG) + ovspk = OvsPacket() ovsdp = OvsDatapath() - ovsvp = OvsVport() + ovsvp = OvsVport(ovspk) ovsflow = OvsFlow() ndb = NDB() @@ -1430,11 +1545,13 @@ def main(argv): msg += ":'%s'" % args.showdp print(msg) elif hasattr(args, "adddp"): - rep = ovsdp.create(args.adddp, args.upcall, args.versioning) + rep = ovsdp.create(args.adddp, args.upcall, args.versioning, ovspk) if rep is None: print("DP '%s' already exists" % args.adddp) else: print("DP '%s' added" % args.adddp) + if args.upcall: + ovspk.upcall_handler(ovsflow) elif hasattr(args, "deldp"): ovsdp.destroy(args.deldp) elif hasattr(args, "addif"): @@ -1442,12 +1559,17 @@ def main(argv): if rep is None: print("DP '%s' not found." % args.dpname) return 1 + dpindex = rep["dpifindex"] rep = ovsvp.attach(rep["dpifindex"], args.addif, args.ptype) msg = "vport '%s'" % args.addif if rep and rep["header"]["error"] is None: msg += " added." else: msg += " failed to add." + if args.upcall: + if rep is None: + rep = ovsvp.reset_upcall(dpindex, args.addif, ovspk) + ovsvp.upcall_handler(ovsflow) elif hasattr(args, "delif"): rep = ovsdp.info(args.dpname, 0) if rep is None: -- cgit v1.2.3-70-g09d2 From 0a85264e48b642d360720589fdb837a3643fb9b0 Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Fri, 14 Apr 2023 17:47:09 +0200 Subject: selftests: mptcp: remove duplicated entries in usage mptcp_connect tool was printing some duplicated entries when showing how to use it: -j -l -r While at it, I also: - moved the very few entries that were not sorted, - added -R that was missing since commit 8a4b910d005d ("mptcp: selftests: add rcvbuf set option"), - removed the -u parameter that has been removed in commit f730b65c9d85 ("selftests: mptcp: try to set mptcp ulp mode in different sk states"). No need to backport this, it is just an internal tool used by our selftests. The help menu is mainly useful for MPTCP kernel devs. Acked-by: Paolo Abeni Signed-off-by: Matthieu Baerts Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/mptcp/mptcp_connect.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c index b25a31445ded..c7f9ebeebc2c 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.c +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c @@ -106,8 +106,8 @@ static struct cfg_sockopt_types cfg_sockopt_types; static void die_usage(void) { fprintf(stderr, "Usage: mptcp_connect [-6] [-c cmsg] [-f offset] [-i file] [-I num] [-j] [-l] " - "[-m mode] [-M mark] [-o option] [-p port] [-P mode] [-j] [-l] [-r num] " - "[-s MPTCP|TCP] [-S num] [-r num] [-t num] [-T num] [-u] [-w sec] connect_address\n"); + "[-m mode] [-M mark] [-o option] [-p port] [-P mode] [-r num] [-R num] " + "[-s MPTCP|TCP] [-S num] [-t num] [-T num] [-w sec] connect_address\n"); fprintf(stderr, "\t-6 use ipv6\n"); fprintf(stderr, "\t-c cmsg -- test cmsg type \n"); fprintf(stderr, "\t-f offset -- stop the I/O after receiving and sending the specified amount " @@ -126,13 +126,13 @@ static void die_usage(void) fprintf(stderr, "\t-p num -- use port num\n"); fprintf(stderr, "\t-P [saveWithPeek|saveAfterPeek] -- save data with/after MSG_PEEK form tcp socket\n"); - fprintf(stderr, "\t-t num -- set poll timeout to num\n"); - fprintf(stderr, "\t-T num -- set expected runtime to num ms\n"); fprintf(stderr, "\t-r num -- enable slow mode, limiting each write to num bytes " "-- for remove addr tests\n"); fprintf(stderr, "\t-R num -- set SO_RCVBUF to num\n"); fprintf(stderr, "\t-s [MPTCP|TCP] -- use mptcp(default) or tcp sockets\n"); fprintf(stderr, "\t-S num -- set SO_SNDBUF to num\n"); + fprintf(stderr, "\t-t num -- set poll timeout to num\n"); + fprintf(stderr, "\t-T num -- set expected runtime to num ms\n"); fprintf(stderr, "\t-w num -- wait num sec before closing the socket\n"); exit(1); } -- cgit v1.2.3-70-g09d2 From 0fcd72df8847d3a62eb34a084862157ce0564a94 Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Fri, 14 Apr 2023 17:47:10 +0200 Subject: selftests: mptcp: join: fix ShellCheck warnings Most of the code had an issue according to ShellCheck. That's mainly due to the fact it incorrectly believes most of the code was unreachable because it's invoked by variable name, see how the "tests" array is used. Once SC2317 has been ignored, three small warnings were still visible: - SC2155: Declare and assign separately to avoid masking return values. - SC2046: Quote this to prevent word splitting: can be ignored because "ip netns pids" can display more than one pid. - SC2166: Prefer [ p ] || [ q ] as [ p -o q ] is not well defined. This probably didn't fix any actual issues but it might help spotting new interesting warnings reported by ShellCheck as just before, ShellCheck was reporting issues for most lines making it a bit useless. Signed-off-by: Matthieu Baerts Signed-off-by: David S. Miller --- tools/testing/selftests/net/mptcp/mptcp_join.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index fafd19ec7e1f..26310c17b4c6 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -6,6 +6,10 @@ # address all other issues detected by shellcheck. 
#shellcheck disable=SC2086 +# ShellCheck incorrectly believes that most of the code here is unreachable +# because it's invoked by variable name, see how the "tests" array is used +#shellcheck disable=SC2317 + ret=0 sin="" sinfail="" @@ -371,8 +375,9 @@ check_transfer() local line if [ -n "$bytes" ]; then + local out_size # when truncating we must check the size explicitly - local out_size=$(wc -c $out | awk '{print $1}') + out_size=$(wc -c $out | awk '{print $1}') if [ $out_size -ne $bytes ]; then echo "[ FAIL ] $what output file has wrong size ($out_size, $bytes)" fail_test @@ -500,6 +505,7 @@ kill_events_pids() kill_tests_wait() { + #shellcheck disable=SC2046 kill -SIGUSR1 $(ip netns pids $ns2) $(ip netns pids $ns1) wait } @@ -1703,7 +1709,7 @@ chk_subflow_nr() cnt1=$(ss -N $ns1 -tOni | grep -c token) cnt2=$(ss -N $ns2 -tOni | grep -c token) - if [ "$cnt1" != "$subflow_nr" -o "$cnt2" != "$subflow_nr" ]; then + if [ "$cnt1" != "$subflow_nr" ] || [ "$cnt2" != "$subflow_nr" ]; then echo "[fail] got $cnt1:$cnt2 subflows expected $subflow_nr" fail_test dump_stats=1 -- cgit v1.2.3-70-g09d2 From 49859de997c3115b85544bce6b6ceab60a7fabc4 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Mon, 17 Apr 2023 15:21:39 -0700 Subject: selftests/bpf: Add a selftest for checking subreg equality Add a selftest to ensure subreg equality if source register upper 32bit is 0. Without previous patch, the test will fail verification. Acked-by: Eduard Zingerman Signed-off-by: Yonghong Song Link: https://lore.kernel.org/r/20230417222139.360607-1-yhs@fb.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_reg_equal.c | 58 ++++++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/verifier_reg_equal.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 73dff693d411..25bc8958dbfe 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -31,6 +31,7 @@ #include "verifier_meta_access.skel.h" #include "verifier_raw_stack.skel.h" #include "verifier_raw_tp_writable.skel.h" +#include "verifier_reg_equal.skel.h" #include "verifier_ringbuf.skel.h" #include "verifier_spill_fill.skel.h" #include "verifier_stack_ptr.skel.h" @@ -95,6 +96,7 @@ void test_verifier_masking(void) { RUN(verifier_masking); } void test_verifier_meta_access(void) { RUN(verifier_meta_access); } void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); } void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); } +void test_verifier_reg_equal(void) { RUN(verifier_reg_equal); } void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } diff --git a/tools/testing/selftests/bpf/progs/verifier_reg_equal.c b/tools/testing/selftests/bpf/progs/verifier_reg_equal.c new file mode 100644 index 000000000000..dc1d8c30fb0e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_reg_equal.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "bpf_misc.h" + +SEC("socket") +__description("check w reg equal if r reg upper32 bits 0") +__success +__naked void subreg_equality_1(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + *(u64 *)(r10 - 8) = r0; \ + r2 = *(u32 *)(r10 - 8); \ + /* At 
this point upper 4-bytes of r2 are 0, \ + * thus insn w3 = w2 should propagate reg id, \ + * and w2 < 9 comparison would also propagate \ + * the range for r3. \ + */ \ + w3 = w2; \ + if w2 < 9 goto l0_%=; \ + exit; \ +l0_%=: if r3 < 9 goto l1_%=; \ + /* r1 read is illegal at this point */ \ + r0 -= r1; \ +l1_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("socket") +__description("check w reg not equal if r reg upper32 bits not 0") +__failure __msg("R1 !read_ok") +__naked void subreg_equality_2(void) +{ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + r2 = r0; \ + /* Upper 4-bytes of r2 may not be 0, thus insn \ + * w3 = w2 should not propagate reg id, and \ + * w2 < 9 comparison should not propagate \ + * the range for r3 either. \ + */ \ + w3 = w2; \ + if w2 < 9 goto l0_%=; \ + exit; \ +l0_%=: if r3 < 9 goto l1_%=; \ + /* r1 read is illegal at this point */ \ + r0 -= r1; \ +l1_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From 30bbfe3236b01b81ed5f4a91cd9d87a236b182e3 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 17 Apr 2023 17:21:46 -0700 Subject: selftests/bpf: add missing __weak kfunc log fixup test Add test validating that libbpf correctly poisons and reports __weak unresolved kfuncs in post-processed verifier log. Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230418002148.3255690-5-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/log_fixup.c | 31 ++++++++++++++++++++++ tools/testing/selftests/bpf/progs/test_log_fixup.c | 10 +++++++ 2 files changed, 41 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c index bc27170bdeb0..dba71d98a227 100644 --- a/tools/testing/selftests/bpf/prog_tests/log_fixup.c +++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c @@ -135,6 +135,35 @@ cleanup: test_log_fixup__destroy(skel); } +static void missing_kfunc(void) +{ + char log_buf[8 * 1024]; + struct test_log_fixup* skel; + int err; + + skel = test_log_fixup__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bpf_program__set_autoload(skel->progs.use_missing_kfunc, true); + bpf_program__set_log_buf(skel->progs.use_missing_kfunc, log_buf, sizeof(log_buf)); + + err = test_log_fixup__load(skel); + if (!ASSERT_ERR(err, "load_fail")) + goto cleanup; + + ASSERT_HAS_SUBSTR(log_buf, + "0: \n" + "kfunc 'bpf_nonexistent_kfunc' is referenced but wasn't resolved\n", + "log_buf"); + + if (env.verbosity > VERBOSE_NONE) + printf("LOG: \n=================\n%s=================\n", log_buf); + +cleanup: + test_log_fixup__destroy(skel); +} + void test_log_fixup(void) { if (test__start_subtest("bad_core_relo_trunc_none")) @@ -147,4 +176,6 @@ void test_log_fixup(void) bad_core_relo_subprog(); if (test__start_subtest("missing_map")) missing_map(); + if (test__start_subtest("missing_kfunc")) + missing_kfunc(); } diff --git a/tools/testing/selftests/bpf/progs/test_log_fixup.c b/tools/testing/selftests/bpf/progs/test_log_fixup.c index 60450cb0e72e..1bd48feaaa42 100644 --- a/tools/testing/selftests/bpf/progs/test_log_fixup.c +++ b/tools/testing/selftests/bpf/progs/test_log_fixup.c @@ -61,4 +61,14 @@ int use_missing_map(const void *ctx) return value != NULL; } +extern int bpf_nonexistent_kfunc(void) __ksym __weak; + +SEC("?raw_tp/sys_enter") +int use_missing_kfunc(const void *ctx) +{ + bpf_nonexistent_kfunc(); + + 
return 0; +} + char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From c5e64741670883a5b35d42f3125d611f5a4aa14f Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 17 Apr 2023 17:21:47 -0700 Subject: libbpf: move bpf_for(), bpf_for_each(), and bpf_repeat() into bpf_helpers.h To make it easier for bleeding-edge BPF applications, such as sched_ext, to utilize open-coded iterators, move bpf_for(), bpf_for_each(), and bpf_repeat() macros from selftests/bpf-internal bpf_misc.h helper, to libbpf-provided bpf_helpers.h header. Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230418002148.3255690-6-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/bpf_helpers.h | 103 +++++++++++++++++++++++++++ tools/testing/selftests/bpf/progs/bpf_misc.h | 103 --------------------------- 2 files changed, 103 insertions(+), 103 deletions(-) (limited to 'tools/testing') diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index e7e1a8acc299..525dec66c129 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -291,4 +291,107 @@ enum libbpf_tristate { /* Helper macro to print out debug messages */ #define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args) +struct bpf_iter_num; + +extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym; +extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym; +extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym; + +#ifndef bpf_for_each +/* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for + * using BPF open-coded iterators without having to write mundane explicit + * low-level loop logic. Instead, it provides for()-like generic construct + * that can be used pretty naturally. E.g., for some hypothetical cgroup + * iterator, you'd write: + * + * struct cgroup *cg, *parent_cg = <...>; + * + * bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) { + * bpf_printk("Child cgroup id = %d", cg->cgroup_id); + * if (cg->cgroup_id == 123) + * break; + * } + * + * I.e., it looks almost like high-level for each loop in other languages, + * supports continue/break, and is verifiable by BPF verifier. + * + * For iterating integers, the difference betwen bpf_for_each(num, i, N, M) + * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to + * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int + * *`, not just `int`. So for integers bpf_for() is more convenient. + * + * Note: this macro relies on C99 feature of allowing to declare variables + * inside for() loop, bound to for() loop lifetime. It also utilizes GCC + * extension: __attribute__((cleanup())), supported by both GCC and + * Clang. + */ +#define bpf_for_each(type, cur, args...) 
for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \ + cleanup(bpf_iter_##type##_destroy))), \ + /* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \ + *___p __attribute__((unused)) = ( \ + bpf_iter_##type##_new(&___it, ##args), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_##type##_destroy, (void *)0); \ + /* iteration and termination check */ \ + (((cur) = bpf_iter_##type##_next(&___it))); \ +) +#endif /* bpf_for_each */ + +#ifndef bpf_for +/* bpf_for(i, start, end) implements a for()-like looping construct that sets + * provided integer variable *i* to values starting from *start* through, + * but not including, *end*. It also proves to BPF verifier that *i* belongs + * to range [start, end), so this can be used for accessing arrays without + * extra checks. + * + * Note: *start* and *end* are assumed to be expressions with no side effects + * and whose values do not change throughout bpf_for() loop execution. They do + * not have to be statically known or constant, though. + * + * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() + * loop bound variables and cleanup attribute, supported by GCC and Clang. + */ +#define bpf_for(i, start, end) for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ + cleanup(bpf_iter_num_destroy))), \ + /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ + *___p __attribute__((unused)) = ( \ + bpf_iter_num_new(&___it, (start), (end)), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_num_destroy, (void *)0); \ + ({ \ + /* iteration step */ \ + int *___t = bpf_iter_num_next(&___it); \ + /* termination and bounds check */ \ + (___t && ((i) = *___t, (i) >= (start) && (i) < (end))); \ + }); \ +) +#endif /* bpf_for */ + +#ifndef bpf_repeat +/* bpf_repeat(N) performs N iterations without exposing iteration number + * + * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() + * loop bound variables and cleanup attribute, supported by GCC and Clang. 
+ */ +#define bpf_repeat(N) for ( \ + /* initialize and define destructor */ \ + struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ + cleanup(bpf_iter_num_destroy))), \ + /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ + *___p __attribute__((unused)) = ( \ + bpf_iter_num_new(&___it, 0, (N)), \ + /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ + /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ + (void)bpf_iter_num_destroy, (void *)0); \ + bpf_iter_num_next(&___it); \ + /* nothing here */ \ +) +#endif /* bpf_repeat */ + #endif diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 6e3b4903c541..3b307de8dab9 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -121,107 +121,4 @@ /* make it look to compiler like value is read and written */ #define __sink(expr) asm volatile("" : "+g"(expr)) -struct bpf_iter_num; - -extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym; -extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym; -extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym; - -#ifndef bpf_for_each -/* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for - * using BPF open-coded iterators without having to write mundane explicit - * low-level loop logic. Instead, it provides for()-like generic construct - * that can be used pretty naturally. E.g., for some hypothetical cgroup - * iterator, you'd write: - * - * struct cgroup *cg, *parent_cg = <...>; - * - * bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) { - * bpf_printk("Child cgroup id = %d", cg->cgroup_id); - * if (cg->cgroup_id == 123) - * break; - * } - * - * I.e., it looks almost like high-level for each loop in other languages, - * supports continue/break, and is verifiable by BPF verifier. - * - * For iterating integers, the difference betwen bpf_for_each(num, i, N, M) - * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to - * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int - * *`, not just `int`. So for integers bpf_for() is more convenient. - * - * Note: this macro relies on C99 feature of allowing to declare variables - * inside for() loop, bound to for() loop lifetime. It also utilizes GCC - * extension: __attribute__((cleanup())), supported by both GCC and - * Clang. - */ -#define bpf_for_each(type, cur, args...) for ( \ - /* initialize and define destructor */ \ - struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \ - cleanup(bpf_iter_##type##_destroy))), \ - /* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \ - *___p __attribute__((unused)) = ( \ - bpf_iter_##type##_new(&___it, ##args), \ - /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ - /* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \ - (void)bpf_iter_##type##_destroy, (void *)0); \ - /* iteration and termination check */ \ - (((cur) = bpf_iter_##type##_next(&___it))); \ -) -#endif /* bpf_for_each */ - -#ifndef bpf_for -/* bpf_for(i, start, end) implements a for()-like looping construct that sets - * provided integer variable *i* to values starting from *start* through, - * but not including, *end*. 
It also proves to BPF verifier that *i* belongs - * to range [start, end), so this can be used for accessing arrays without - * extra checks. - * - * Note: *start* and *end* are assumed to be expressions with no side effects - * and whose values do not change throughout bpf_for() loop execution. They do - * not have to be statically known or constant, though. - * - * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() - * loop bound variables and cleanup attribute, supported by GCC and Clang. - */ -#define bpf_for(i, start, end) for ( \ - /* initialize and define destructor */ \ - struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ - cleanup(bpf_iter_num_destroy))), \ - /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ - *___p __attribute__((unused)) = ( \ - bpf_iter_num_new(&___it, (start), (end)), \ - /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ - /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ - (void)bpf_iter_num_destroy, (void *)0); \ - ({ \ - /* iteration step */ \ - int *___t = bpf_iter_num_next(&___it); \ - /* termination and bounds check */ \ - (___t && ((i) = *___t, (i) >= (start) && (i) < (end))); \ - }); \ -) -#endif /* bpf_for */ - -#ifndef bpf_repeat -/* bpf_repeat(N) performs N iterations without exposing iteration number - * - * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for() - * loop bound variables and cleanup attribute, supported by GCC and Clang. - */ -#define bpf_repeat(N) for ( \ - /* initialize and define destructor */ \ - struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \ - cleanup(bpf_iter_num_destroy))), \ - /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \ - *___p __attribute__((unused)) = ( \ - bpf_iter_num_new(&___it, 0, (N)), \ - /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \ - /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \ - (void)bpf_iter_num_destroy, (void *)0); \ - bpf_iter_num_next(&___it); \ - /* nothing here */ \ -) -#endif /* bpf_repeat */ - #endif -- cgit v1.2.3-70-g09d2 From 2ddade322925641ee2a75f13665c51f2e74d7791 Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 18 Apr 2023 16:36:17 +0200 Subject: selftests/xsk: Fix munmap for hugepage allocated umem Fix the unmapping of hugepage allocated umems so that they are properly unmapped. The new test referred to in the fixes label, introduced a test that allocated a umem that is not a multiple of a 2M hugepage size. This is fine for mmap() that rounds the size up the nearest multiple of 2M. But munmap() requires the size to be a multiple of the hugepage size in order for it to unmap the region. The current behaviour of not properly unmapping the umem, was discovered when further additions of tests that require hugepages (unaligned mode tests only) started failing as the system was running out of hugepages. 
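In other words, the length passed to munmap() has to be rounded up to a whole number of 2M hugepages, which is what the patch does below via ceil_u64() and HUGEPAGE_SIZE. A standalone sketch of that rounding, illustrative only and assuming the usual 2M hugepage size:

#include <stdint.h>
#include <sys/mman.h>

#define HUGEPAGE_SIZE (2UL * 1024 * 1024)

/* munmap() does not release a MAP_HUGETLB region unless the length is a
 * multiple of the hugepage size, so round the umem size up before unmapping.
 */
static uint64_t hugepage_round_up(uint64_t len)
{
	return ((len + HUGEPAGE_SIZE - 1) / HUGEPAGE_SIZE) * HUGEPAGE_SIZE;
}

/* usage:
 *	bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE,
 *		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *	...
 *	munmap(bufs, hugepage_round_up(umem_sz));
 */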
Fixes: c0801598e543 ("selftests: xsk: Add test UNALIGNED_INV_DESC_4K1_FRAME_SIZE") Signed-off-by: Magnus Karlsson Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230418143617.27762-1-magnus.karlsson@gmail.com --- tools/testing/selftests/bpf/xskxceiver.c | 19 +++++++++++++++---- tools/testing/selftests/bpf/xskxceiver.h | 1 + 2 files changed, 16 insertions(+), 4 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 5a9691e942de..a59d04118842 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -77,6 +77,7 @@ #include #include #include +#include #include #include #include @@ -1286,16 +1287,19 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size; int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; LIBBPF_OPTS(bpf_xdp_query_opts, opts); + off_t mmap_offset = 0; void *bufs; int ret; - if (ifobject->umem->unaligned_mode) + if (ifobject->umem->unaligned_mode) { mmap_flags |= MAP_HUGETLB; + mmap_offset = MAP_HUGE_2MB; + } if (ifobject->shared_umem) umem_sz *= 2; - bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); + bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, mmap_offset); if (bufs == MAP_FAILED) exit_with_error(errno); @@ -1379,6 +1383,11 @@ static void *worker_testapp_validate_rx(void *arg) pthread_exit(NULL); } +static u64 ceil_u64(u64 a, u64 b) +{ + return (a + b - 1) / b; +} + static void testapp_clean_xsk_umem(struct ifobject *ifobj) { u64 umem_sz = ifobj->umem->num_frames * ifobj->umem->frame_size; @@ -1386,6 +1395,7 @@ static void testapp_clean_xsk_umem(struct ifobject *ifobj) if (ifobj->shared_umem) umem_sz *= 2; + umem_sz = ceil_u64(umem_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE; xsk_umem__delete(ifobj->umem->umem); munmap(ifobj->umem->buffer, umem_sz); } @@ -1619,14 +1629,15 @@ static void testapp_stats_fill_empty(struct test_spec *test) /* Simple test */ static bool hugepages_present(struct ifobject *ifobject) { - const size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size; + size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size; void *bufs; bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0); + MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, MAP_HUGE_2MB); if (bufs == MAP_FAILED) return false; + mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE; munmap(bufs, mmap_sz); return true; } diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index 919327807a4e..c535aeab2ca3 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -56,6 +56,7 @@ #define RX_FULL_RXQSIZE 32 #define UMEM_HEADROOM_TEST_SIZE 128 #define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1) +#define HUGEPAGE_SIZE (2 * 1024 * 1024) #define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0) -- cgit v1.2.3-70-g09d2 From 5ff54dedf35bfee7a16eb9f8fb7ecadf7d5564cb Mon Sep 17 00:00:00 2001 From: Feng Zhou Date: Thu, 20 Apr 2023 11:27:35 +0800 Subject: selftests/bpf: Add test to access integer type of variable array Add prog test for accessing integer type of variable array in tracing program. 
In addition, hook load_balance function to access sd->span[0], only to confirm whether the load is successful. Because there is no direct way to trigger load_balance call. Co-developed-by: Chengming Zhou Signed-off-by: Chengming Zhou Signed-off-by: Feng Zhou Link: https://lore.kernel.org/r/20230420032735.27760-3-zhoufeng.zf@bytedance.com Signed-off-by: Alexei Starovoitov --- .../testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 20 ++++++++++++++++++++ .../selftests/bpf/prog_tests/access_variable_array.c | 16 ++++++++++++++++ .../selftests/bpf/prog_tests/tracing_struct.c | 2 ++ .../selftests/bpf/progs/test_access_variable_array.c | 19 +++++++++++++++++++ tools/testing/selftests/bpf/progs/tracing_struct.c | 13 +++++++++++++ 5 files changed, 70 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/access_variable_array.c create mode 100644 tools/testing/selftests/bpf/progs/test_access_variable_array.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index fe847ebfb731..52785ba671e6 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -28,6 +28,11 @@ struct bpf_testmod_struct_arg_2 { long b; }; +struct bpf_testmod_struct_arg_3 { + int a; + int b[]; +}; + __diag_push(); __diag_ignore_all("-Wmissing-prototypes", "Global functions as their definitions will be in bpf_testmod.ko BTF"); @@ -63,6 +68,12 @@ bpf_testmod_test_struct_arg_5(void) { return bpf_testmod_test_struct_arg_result; } +noinline int +bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) { + bpf_testmod_test_struct_arg_result = a->b[0]; + return bpf_testmod_test_struct_arg_result; +} + __bpf_kfunc void bpf_testmod_test_mod_kfunc(int i) { @@ -195,6 +206,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, }; struct bpf_testmod_struct_arg_1 struct_arg1 = {10}; struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3}; + struct bpf_testmod_struct_arg_3 *struct_arg3; int i = 1; while (bpf_testmod_return_ptr(i)) @@ -206,6 +218,14 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, (void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2); (void)bpf_testmod_test_struct_arg_5(); + struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) + + sizeof(int)), GFP_KERNEL); + if (struct_arg3 != NULL) { + struct_arg3->b[0] = 1; + (void)bpf_testmod_test_struct_arg_6(struct_arg3); + kfree(struct_arg3); + } + /* This is always true. Use the check to make sure the compiler * doesn't remove bpf_testmod_loop_test. 
*/ diff --git a/tools/testing/selftests/bpf/prog_tests/access_variable_array.c b/tools/testing/selftests/bpf/prog_tests/access_variable_array.c new file mode 100644 index 000000000000..08131782437c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/access_variable_array.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Bytedance */ + +#include +#include "test_access_variable_array.skel.h" + +void test_access_variable_array(void) +{ + struct test_access_variable_array *skel; + + skel = test_access_variable_array__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_access_variable_array__open_and_load")) + return; + + test_access_variable_array__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c index 48dc9472e160..1c75a32186d6 100644 --- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c +++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c @@ -53,6 +53,8 @@ static void test_fentry(void) ASSERT_EQ(skel->bss->t5_ret, 1, "t5 ret"); + ASSERT_EQ(skel->bss->t6, 1, "t6 ret"); + tracing_struct__detach(skel); destroy_skel: tracing_struct__destroy(skel); diff --git a/tools/testing/selftests/bpf/progs/test_access_variable_array.c b/tools/testing/selftests/bpf/progs/test_access_variable_array.c new file mode 100644 index 000000000000..808c49b79889 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_access_variable_array.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Bytedance */ + +#include "vmlinux.h" +#include +#include + +unsigned long span = 0; + +SEC("fentry/load_balance") +int BPF_PROG(fentry_fentry, int this_cpu, struct rq *this_rq, + struct sched_domain *sd) +{ + span = sd->span[0]; + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tracing_struct.c b/tools/testing/selftests/bpf/progs/tracing_struct.c index e718f0ebee7d..c435a3a8328a 100644 --- a/tools/testing/selftests/bpf/progs/tracing_struct.c +++ b/tools/testing/selftests/bpf/progs/tracing_struct.c @@ -13,12 +13,18 @@ struct bpf_testmod_struct_arg_2 { long b; }; +struct bpf_testmod_struct_arg_3 { + int a; + int b[]; +}; + long t1_a_a, t1_a_b, t1_b, t1_c, t1_ret, t1_nregs; __u64 t1_reg0, t1_reg1, t1_reg2, t1_reg3; long t2_a, t2_b_a, t2_b_b, t2_c, t2_ret; long t3_a, t3_b, t3_c_a, t3_c_b, t3_ret; long t4_a_a, t4_b, t4_c, t4_d, t4_e_a, t4_e_b, t4_ret; long t5_ret; +int t6; SEC("fentry/bpf_testmod_test_struct_arg_1") int BPF_PROG2(test_struct_arg_1, struct bpf_testmod_struct_arg_2, a, int, b, int, c) @@ -117,4 +123,11 @@ int BPF_PROG2(test_struct_arg_10, int, ret) return 0; } +SEC("fentry/bpf_testmod_test_struct_arg_6") +int BPF_PROG2(test_struct_arg_11, struct bpf_testmod_struct_arg_3 *, a) +{ + t6 = a->b[0]; + return 0; +} + char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From 7c4b96c00043f3b2cad50653f085c178fda92e62 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 02:23:14 +0300 Subject: selftests/bpf: disable program test run for progs/refcounted_kptr.c Florian Westphal found a bug in test_loader.c processing of __retval tag. Because of this bug the function test_loader.c:do_prog_test_run() never executed and all __retval test tags were ignored. This hid an issue with progs/refcounted_kptr.c tests. When __retval tag bug is fixed and refcounted_kptr.c tests are run kernel reports various issues and eventually hangs. 
Shortest reproducer is the following command run a few times: $ for i in $(seq 1 4); do (./test_progs --allow=refcounted_kptr &); done Commenting out __retval tags for these tests until this issue is resolved. Reported-by: Florian Westphal Link: https://lore.kernel.org/bpf/f4c4aee644425842ee6aa8edf1da68f0a8260e7c.camel@gmail.com/T/ Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230420232317.2181776-2-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/refcounted_kptr.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c index 1d348a225140..b6b2d4f97b19 100644 --- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c @@ -219,7 +219,7 @@ static long __read_from_unstash(int idx) #define INSERT_READ_BOTH(rem_tree, rem_list, desc) \ SEC("tc") \ __description(desc) \ -__success __retval(579) \ +__success /* __retval(579) temporarily disabled */ \ long insert_and_remove_tree_##rem_tree##_list_##rem_list(void *ctx) \ { \ long err, tree_data, list_data; \ @@ -258,7 +258,7 @@ INSERT_READ_BOTH(false, true, "insert_read_both: remove from list"); #define INSERT_READ_BOTH(rem_tree, rem_list, desc) \ SEC("tc") \ __description(desc) \ -__success __retval(579) \ +__success /* __retval(579) temporarily disabled */ \ long insert_and_remove_lf_tree_##rem_tree##_list_##rem_list(void *ctx) \ { \ long err, tree_data, list_data; \ @@ -296,7 +296,7 @@ INSERT_READ_BOTH(false, true, "insert_read_both_list_first: remove from list"); #define INSERT_DOUBLE_READ_AND_DEL(read_fn, read_root, desc) \ SEC("tc") \ __description(desc) \ -__success __retval(-1) \ +__success /* temporarily __retval(-1) disabled */ \ long insert_double_##read_fn##_and_del_##read_root(void *ctx) \ { \ long err, list_data; \ @@ -329,7 +329,7 @@ INSERT_DOUBLE_READ_AND_DEL(__read_from_list, head, "insert_double_del: 2x read-a #define INSERT_STASH_READ(rem_tree, desc) \ SEC("tc") \ __description(desc) \ -__success __retval(84) \ +__success /* __retval(84) temporarily disabled */ \ long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx) \ { \ long err, tree_data, map_data; \ -- cgit v1.2.3-70-g09d2 From 7cdddb99e4a69c26881d279472f1487cd67740c4 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 02:23:15 +0300 Subject: selftests/bpf: fix __retval() being always ignored Florian Westphal found a bug in and suggested a fix for test_loader.c processing of __retval tag. Because of this bug the function test_loader.c:do_prog_test_run() never executed and all __retval test tags were ignored. If this bug is fixed a number of test cases from progs/verifier_array_access.c fail with retval not matching the expected value. This test was recently converted to use test_loader.c and inline assembly in [1]. When doing the conversion I missed the important detail of test_verifier.c operation: when it creates fixup_map_array_ro, fixup_map_array_wo and fixup_map_array_small it populates these maps with a dummy record. Disabling the __retval checks for the affected verifier_array_access in this commit to avoid false-postivies in any potential bisects. The issue is addressed in the next patch. I verified that the __retval tags are now respected by changing expected return values for all tests annotated with __retval, and checking that these tests started to fail. 
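For reference, __retval is one of the bpf_misc.h test tags consumed by test_loader.c. A minimal illustrative program using it (not one of the affected tests, just a sketch of the mechanism this fix re-enables) would be:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("socket")
__description("example: trivial program with a checked return value")
__success __retval(42)
__naked void returns_forty_two(void)
{
	asm volatile ("r0 = 42; exit;" ::: __clobber_all);
}

char _license[] SEC("license") = "GPL";

With the fix, test_loader.c actually runs such a program through bpf_prog_test_run_opts() and compares the returned value against 42; before the fix that comparison was silently skipped.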
[1] https://lore.kernel.org/bpf/20230325025524.144043-1-eddyz87@gmail.com/ Fixes: 19a8e06f5f91 ("selftests/bpf: Tests execution support for test_loader.c") Reported-by: Florian Westphal Link: https://lore.kernel.org/bpf/f4c4aee644425842ee6aa8edf1da68f0a8260e7c.camel@gmail.com/T/ Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230420232317.2181776-3-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/verifier_array_access.c | 4 ++-- tools/testing/selftests/bpf/test_loader.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/verifier_array_access.c b/tools/testing/selftests/bpf/progs/verifier_array_access.c index 95d7ecc12963..fceeeef78721 100644 --- a/tools/testing/selftests/bpf/progs/verifier_array_access.c +++ b/tools/testing/selftests/bpf/progs/verifier_array_access.c @@ -330,7 +330,7 @@ l0_%=: exit; \ SEC("socket") __description("valid read map access into a read-only array 1") -__success __success_unpriv __retval(28) +__success __success_unpriv /* __retval(28) temporarily disable */ __naked void a_read_only_array_1_1(void) { asm volatile (" \ @@ -351,7 +351,7 @@ l0_%=: exit; \ SEC("tc") __description("valid read map access into a read-only array 2") -__success __retval(65507) +__success /* __retval(65507) temporarily disable */ __naked void a_read_only_array_2_1(void) { asm volatile (" \ diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index 47e9e076bc8f..e2a1bdc5a570 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -587,7 +587,7 @@ void run_subtest(struct test_loader *tester, /* For some reason test_verifier executes programs * with all capabilities restored. Do the same here. */ - if (!restore_capabilities(&caps)) + if (restore_capabilities(&caps)) goto tobj_cleanup; do_prog_test_run(bpf_program__fd(tprog), &retval); -- cgit v1.2.3-70-g09d2 From 5b22f4d1436b2693c7176a76be0ba6d30503bf7c Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 02:23:16 +0300 Subject: selftests/bpf: add pre bpf_prog_test_run_opts() callback for test_loader When a test case is annotated with __retval tag the test_loader engine would use libbpf's bpf_prog_test_run_opts() to do a test run of the program and compare retvals. This commit allows to perform arbitrary actions on bpf object right before test loader invokes bpf_prog_test_run_opts(). This could be used to setup some state for program execution, e.g. fill some maps. 
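As a usage sketch (illustrative only; the map name here is made up, and the next patch in this series wires up the same mechanism for the real map_array_ro map):

#include <errno.h>
#include <bpf/libbpf.h>

/* Pre-execution callback: runs after the object is loaded and right before
 * bpf_prog_test_run_opts(), so it can seed maps the program will read.
 */
static int fill_my_map(struct bpf_object *obj)
{
	struct bpf_map *map;
	int key = 0, value = 42;

	map = bpf_object__find_map_by_name(obj, "my_map");
	if (!map)
		return -EINVAL;

	return bpf_map_update_elem(bpf_map__fd(map), &key, &value, 0);
}

/* registered from the test entry point:
 *	test_loader__set_pre_execution_cb(&tester, fill_my_map);
 */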
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230420232317.2181776-4-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/test_loader.c | 8 ++++++++ tools/testing/selftests/bpf/test_progs.h | 9 +++++++++ 2 files changed, 17 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index e2a1bdc5a570..40c9b7d532c4 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -590,6 +590,14 @@ void run_subtest(struct test_loader *tester, if (restore_capabilities(&caps)) goto tobj_cleanup; + if (tester->pre_execution_cb) { + err = tester->pre_execution_cb(tobj); + if (err) { + PRINT_FAIL("pre_execution_cb failed: %d\n", err); + goto tobj_cleanup; + } + } + do_prog_test_run(bpf_program__fd(tprog), &retval); if (retval != subspec->retval && subspec->retval != POINTER_VALUE) { PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval); diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 10ba43250668..0ed3134333d4 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -424,14 +424,23 @@ int get_bpf_max_tramp_links(void); #define BPF_TESTMOD_TEST_FILE "/sys/kernel/bpf_testmod" +typedef int (*pre_execution_cb)(struct bpf_object *obj); + struct test_loader { char *log_buf; size_t log_buf_sz; size_t next_match_pos; + pre_execution_cb pre_execution_cb; struct bpf_object *obj; }; +static inline void test_loader__set_pre_execution_cb(struct test_loader *tester, + pre_execution_cb cb) +{ + tester->pre_execution_cb = cb; +} + typedef const void *(*skel_elf_bytes_fn)(size_t *sz); extern void test_loader__run_subtests(struct test_loader *tester, -- cgit v1.2.3-70-g09d2 From cbb110bc6672f785cab2cb308e9cfefee07af861 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 02:23:17 +0300 Subject: selftests/bpf: populate map_array_ro map for verifier_array_access test Two test cases: - "valid read map access into a read-only array 1" and - "valid read map access into a read-only array 2" Expect that map_array_ro map is filled with mock data. This logic was not taken into acount during initial test conversion. This commit modifies prog_tests/verifier.c entry point for this test to fill the map. 
Fixes: a3c830ae0209 ("selftests/bpf: verifier/array_access.c converted to inline assembly") Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230420232317.2181776-5-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 42 ++++++++++++++++++++-- .../selftests/bpf/progs/verifier_array_access.c | 4 +-- 2 files changed, 41 insertions(+), 5 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 25bc8958dbfe..7c68d78da9ea 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -44,8 +44,17 @@ #include "verifier_xdp.skel.h" #include "verifier_xdp_direct_packet_access.skel.h" +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + __maybe_unused -static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_factory) +static void run_tests_aux(const char *skel_name, + skel_elf_bytes_fn elf_bytes_factory, + pre_execution_cb pre_execution_cb) { struct test_loader tester = {}; __u64 old_caps; @@ -58,6 +67,7 @@ static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_fac return; } + test_loader__set_pre_execution_cb(&tester, pre_execution_cb); test_loader__run_subtests(&tester, skel_name, elf_bytes_factory); test_loader_fini(&tester); @@ -66,10 +76,9 @@ static void run_tests_aux(const char *skel_name, skel_elf_bytes_fn elf_bytes_fac PRINT_FAIL("failed to restore CAP_SYS_ADMIN: %i, %s\n", err, strerror(err)); } -#define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes) +#define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes, NULL) void test_verifier_and(void) { RUN(verifier_and); } -void test_verifier_array_access(void) { RUN(verifier_array_access); } void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); } void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); } void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); } @@ -108,3 +117,30 @@ void test_verifier_var_off(void) { RUN(verifier_var_off); } void test_verifier_xadd(void) { RUN(verifier_xadd); } void test_verifier_xdp(void) { RUN(verifier_xdp); } void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); } + +static int init_array_access_maps(struct bpf_object *obj) +{ + struct bpf_map *array_ro; + struct test_val value = { + .index = (6 + 1) * sizeof(int), + .foo[6] = 0xabcdef12, + }; + int err, key = 0; + + array_ro = bpf_object__find_map_by_name(obj, "map_array_ro"); + if (!ASSERT_OK_PTR(array_ro, "lookup map_array_ro")) + return -EINVAL; + + err = bpf_map_update_elem(bpf_map__fd(array_ro), &key, &value, 0); + if (!ASSERT_OK(err, "map_array_ro update")) + return err; + + return 0; +} + +void test_verifier_array_access(void) +{ + run_tests_aux("verifier_array_access", + verifier_array_access__elf_bytes, + init_array_access_maps); +} diff --git a/tools/testing/selftests/bpf/progs/verifier_array_access.c b/tools/testing/selftests/bpf/progs/verifier_array_access.c index fceeeef78721..95d7ecc12963 100644 --- a/tools/testing/selftests/bpf/progs/verifier_array_access.c +++ b/tools/testing/selftests/bpf/progs/verifier_array_access.c @@ -330,7 +330,7 @@ l0_%=: exit; \ SEC("socket") __description("valid read map access into a read-only array 1") -__success __success_unpriv /* __retval(28) temporarily disable */ +__success 
__success_unpriv __retval(28) __naked void a_read_only_array_1_1(void) { asm volatile (" \ @@ -351,7 +351,7 @@ l0_%=: exit; \ SEC("tc") __description("valid read map access into a read-only array 2") -__success /* __retval(65507) temporarily disable */ +__success __retval(65507) __naked void a_read_only_array_2_1(void) { asm volatile (" \ -- cgit v1.2.3-70-g09d2 From 54e906f1639e195bcb52725aab6814e2af922540 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 18 Apr 2023 14:14:56 +0300 Subject: selftests: forwarding: sch_tbf_*: Add a pre-run hook The driver-specific wrappers of these selftests invoke bail_on_lldpad to make sure that LLDPAD doesn't trample the configuration. The function bail_on_lldpad is going to move to lib.sh in the next patch. With that, it won't be visible for the wrappers before sourcing the framework script. And after sourcing it, it is too late: the selftest will have run by then. One option might be to source NUM_NETIFS=0 lib.sh from the wrapper, but even if that worked (it might, it might not), that seems cumbersome. lib.sh is doing fair amount of stuff, and even if it works today, it does not look particularly solid as a solution. Instead, introduce a hook, sch_tbf_pre_hook(), that when available, gets invoked. Move the bail to the hook. Signed-off-by: Petr Machata Reviewed-by: Danielle Ratson Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- tools/testing/selftests/drivers/net/mlxsw/sch_tbf_ets.sh | 6 +++++- tools/testing/selftests/drivers/net/mlxsw/sch_tbf_prio.sh | 6 +++++- tools/testing/selftests/drivers/net/mlxsw/sch_tbf_root.sh | 6 +++++- tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh | 4 ++++ tools/testing/selftests/net/forwarding/sch_tbf_root.sh | 4 ++++ 5 files changed, 23 insertions(+), 3 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_ets.sh index c6ce0b448bf3..b9b4cdf14ceb 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_ets.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_ets.sh @@ -2,7 +2,11 @@ # SPDX-License-Identifier: GPL-2.0 source qos_lib.sh -bail_on_lldpad + +sch_tbf_pre_hook() +{ + bail_on_lldpad +} lib_dir=$(dirname $0)/../../../net/forwarding TCFLAGS=skip_sw diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_prio.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_prio.sh index 8d245f331619..dff9810ee04f 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_prio.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_prio.sh @@ -2,7 +2,11 @@ # SPDX-License-Identifier: GPL-2.0 source qos_lib.sh -bail_on_lldpad + +sch_tbf_pre_hook() +{ + bail_on_lldpad +} lib_dir=$(dirname $0)/../../../net/forwarding TCFLAGS=skip_sw diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_root.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_root.sh index 013886061f15..75406bd7036e 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_root.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_root.sh @@ -2,7 +2,11 @@ # SPDX-License-Identifier: GPL-2.0 source qos_lib.sh -bail_on_lldpad + +sch_tbf_pre_hook() +{ + bail_on_lldpad +} lib_dir=$(dirname $0)/../../../net/forwarding TCFLAGS=skip_sw diff --git a/tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh b/tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh index 75a37c189ef3..df9bcd6a811a 100644 --- a/tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh +++ 
b/tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh @@ -57,6 +57,10 @@ tbf_root_test() tc qdisc del dev $swp2 root } +if type -t sch_tbf_pre_hook >/dev/null; then + sch_tbf_pre_hook +fi + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/net/forwarding/sch_tbf_root.sh b/tools/testing/selftests/net/forwarding/sch_tbf_root.sh index 72aa21ba88c7..96c997be0d03 100755 --- a/tools/testing/selftests/net/forwarding/sch_tbf_root.sh +++ b/tools/testing/selftests/net/forwarding/sch_tbf_root.sh @@ -23,6 +23,10 @@ tbf_test() tc qdisc del dev $swp2 root } +if type -t sch_tbf_pre_hook >/dev/null; then + sch_tbf_pre_hook +fi + trap cleanup EXIT setup_prepare -- cgit v1.2.3-70-g09d2 From 8fcac79270cae2e07e5475b682bc425b71e80595 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 18 Apr 2023 14:14:57 +0300 Subject: selftests: forwarding: generalize bail_on_lldpad from mlxsw mlxsw selftests often invoke a bail_on_lldpad() helper to make sure LLDPAD is not running, to prevent conflicts between the QoS configuration applied through TC or DCB command line tool, and the DCB configuration that LLDPAD might apply. This helper might be useful to others. Move the function to lib.sh, and parameterize to make reusable in other contexts. Signed-off-by: Petr Machata Reviewed-by: Danielle Ratson Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- .../selftests/drivers/net/mlxsw/qos_headroom.sh | 3 +-- .../testing/selftests/drivers/net/mlxsw/qos_lib.sh | 28 ------------------- .../testing/selftests/drivers/net/mlxsw/qos_pfc.sh | 3 +-- .../testing/selftests/drivers/net/mlxsw/sch_ets.sh | 3 +-- .../selftests/drivers/net/mlxsw/sch_red_core.sh | 1 - .../selftests/drivers/net/mlxsw/sch_red_ets.sh | 2 +- .../selftests/drivers/net/mlxsw/sch_red_root.sh | 2 +- .../selftests/drivers/net/mlxsw/sch_tbf_ets.sh | 4 +-- .../selftests/drivers/net/mlxsw/sch_tbf_prio.sh | 4 +-- .../selftests/drivers/net/mlxsw/sch_tbf_root.sh | 4 +-- tools/testing/selftests/net/forwarding/lib.sh | 31 ++++++++++++++++++++++ 11 files changed, 39 insertions(+), 46 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh index 3569ff45f7d5..88162b4027c0 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh @@ -18,7 +18,6 @@ lib_dir=$(dirname $0)/../../../net/forwarding NUM_NETIFS=0 source $lib_dir/lib.sh source $lib_dir/devlink_lib.sh -source qos_lib.sh swp=$NETIF_NO_CABLE @@ -371,7 +370,7 @@ test_tc_int_buf() tc qdisc delete dev $swp root } -bail_on_lldpad +bail_on_lldpad "configure DCB" "configure Qdiscs" trap cleanup EXIT setup_wait diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh index faa51012cdac..5ad092b9bf10 100644 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh @@ -54,31 +54,3 @@ measure_rate() echo $ir $er return $ret } - -bail_on_lldpad() -{ - if systemctl is-active --quiet lldpad; then - - cat >/dev/stderr <<-EOF - WARNING: lldpad is running - - lldpad will likely configure DCB, and this test will - configure Qdiscs. mlxsw does not support both at the - same time, one of them is arbitrarily going to overwrite - the other. That will cause spurious failures (or, - unlikely, passes) of this test. 
- EOF - - if [[ -z $ALLOW_LLDPAD ]]; then - cat >/dev/stderr <<-EOF - - If you want to run the test anyway, please set - an environment variable ALLOW_LLDPAD to a - non-empty string. - EOF - exit 1 - else - return - fi - fi -} diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh index f9858e221996..42ce602d8d49 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh @@ -79,7 +79,6 @@ lib_dir=$(dirname $0)/../../../net/forwarding NUM_NETIFS=6 source $lib_dir/lib.sh source $lib_dir/devlink_lib.sh -source qos_lib.sh _1KB=1000 _100KB=$((100 * _1KB)) @@ -393,7 +392,7 @@ test_qos_pfc() log_test "PFC" } -bail_on_lldpad +bail_on_lldpad "configure DCB" "configure Qdiscs" trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh index ceaa76b17a43..139175fd03e7 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh @@ -5,7 +5,6 @@ lib_dir=$(dirname $0)/../../../net/forwarding source $lib_dir/sch_ets_core.sh source $lib_dir/devlink_lib.sh -source qos_lib.sh ALL_TESTS=" ping_ipv4 @@ -78,5 +77,5 @@ collect_stats() done } -bail_on_lldpad +bail_on_lldpad "configure DCB" "configure Qdiscs" ets_run diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh index 45b41b8f3232..299e06a5808c 100644 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh @@ -74,7 +74,6 @@ lib_dir=$(dirname $0)/../../../net/forwarding source $lib_dir/lib.sh source $lib_dir/devlink_lib.sh source mlxsw_lib.sh -source qos_lib.sh ipaddr() { diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh index 0d01c7cd82a1..8ecddafa79b3 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh @@ -166,7 +166,7 @@ ecn_mirror_test() uninstall_qdisc } -bail_on_lldpad +bail_on_lldpad "configure DCB" "configure Qdiscs" trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh index 860205338e6f..159108d02895 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh @@ -73,7 +73,7 @@ red_mirror_test() uninstall_qdisc } -bail_on_lldpad +bail_on_lldpad "configure DCB" "configure Qdiscs" trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_ets.sh index b9b4cdf14ceb..ecc3664376b3 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_ets.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_ets.sh @@ -1,11 +1,9 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -source qos_lib.sh - sch_tbf_pre_hook() { - bail_on_lldpad + bail_on_lldpad "configure DCB" "configure Qdiscs" } lib_dir=$(dirname $0)/../../../net/forwarding diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_prio.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_prio.sh index dff9810ee04f..2e0a4efb1703 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_prio.sh +++ 
b/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_prio.sh @@ -1,11 +1,9 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -source qos_lib.sh - sch_tbf_pre_hook() { - bail_on_lldpad + bail_on_lldpad "configure DCB" "configure Qdiscs" } lib_dir=$(dirname $0)/../../../net/forwarding diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_root.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_root.sh index 75406bd7036e..6679a338dfc4 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_root.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_tbf_root.sh @@ -1,11 +1,9 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -source qos_lib.sh - sch_tbf_pre_hook() { - bail_on_lldpad + bail_on_lldpad "configure DCB" "configure Qdiscs" } lib_dir=$(dirname $0)/../../../net/forwarding diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index d47499ba81c7..efd48e1cadd2 100755 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -1887,3 +1887,34 @@ mldv1_done_get() payload_template_expand_checksum "$hbh$icmpv6" $checksum } + +bail_on_lldpad() +{ + local reason1="$1"; shift + local reason2="$1"; shift + + if systemctl is-active --quiet lldpad; then + + cat >/dev/stderr <<-EOF + WARNING: lldpad is running + + lldpad will likely $reason1, and this test will + $reason2. Both are not supported at the same time, + one of them is arbitrarily going to overwrite the + other. That will cause spurious failures (or, unlikely, + passes) of this test. + EOF + + if [[ -z $ALLOW_LLDPAD ]]; then + cat >/dev/stderr <<-EOF + + If you want to run the test anyway, please set + an environment variable ALLOW_LLDPAD to a + non-empty string. + EOF + exit 1 + else + return + fi + fi +} -- cgit v1.2.3-70-g09d2 From b5bf7126a6a0e706ef0e6c6cd22b2ebc23bf54a2 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 18 Apr 2023 14:14:58 +0300 Subject: selftests: forwarding: introduce helper for standard ethtool counters Counters for the MAC Merge layer and preemptible MAC have standardized so far on using structured ethtool stats as opposed to the driver specific names and meanings. Benefit from that rare opportunity and introduce a helper to lib.sh for querying standardized counters, in the hope that these will take off for other uses as well. Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- tools/testing/selftests/net/forwarding/lib.sh | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index efd48e1cadd2..36e47c9d7cca 100755 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -787,6 +787,17 @@ ethtool_stats_get() ethtool -S $dev | grep "^ *$stat:" | head -n 1 | cut -d: -f2 } +ethtool_std_stats_get() +{ + local dev=$1; shift + local grp=$1; shift + local name=$1; shift + local src=$1; shift + + ethtool --json -S $dev --groups $grp -- --src $src | \ + jq '.[]."'"$grp"'"."'$name'"' +} + qdisc_stats_get() { local dev=$1; shift -- cgit v1.2.3-70-g09d2 From e6991384ace5c9cbbe453ad1d8998b0cdaf222f5 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 18 Apr 2023 14:14:59 +0300 Subject: selftests: forwarding: add a test for MAC Merge layer The MAC Merge layer (IEEE 802.3-2018 clause 99) does all the heavy lifting for Frame Preemption (IEEE 802.1Q-2018 clause 6.7.2), a TSN feature for minimizing latency. 
Preemptible traffic is different on the wire from normal traffic in incompatible ways. If we send a preemptible packet and the link partner doesn't support preemption, it will drop it as an error frame and we will never know. The MAC Merge layer has a control plane of its own, which can be manipulated (using ethtool) in order to negotiate this capability with the link partner (through LLDP). Actually the TLV format for LLDP solves this problem only partly, because both partners only advertise: - if they support preemption (RX and TX) - if they have enabled preemption (TX) so we cannot tell the link partner what to do - we cannot force it to enable reception of our preemptible packets. That is fully solved by the verification feature, where the local device generates some small probe frames which look like preemptible frames with no useful content, and the link partner is obliged to respond to them if it supports the standard. If the verification times out, we know that preemption isn't active in our TX direction on the link. Having clarified the definition, this selftest exercises the manual (ethtool) configuration path of 2 link partners (with and without verification), and the LLDP code path, using the openlldp project. The test also verifies the TX activity of the MAC Merge layer by sending traffic through a traffic class configured as preemptible (using mqprio). There isn't a good way to make this really portable (user space cannot find out how many traffic classes there are for a device), but I chose num_tc 4 here, that should work reasonably well. I also know that some devices (stmmac) only permit TXQ0 to be preemptible, so this is why PREEMPTIBLE_PRIO was strategically chosen as 0. Even if other hardware is more configurable, this test should cover the baseline. This is not really a "forwarding" selftest, but I put it near the other "ethtool" selftests. 
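For orientation, the manual configuration path that the test automates boils down to an mqprio instance marking one traffic class as preemptible plus the ethtool MAC Merge knobs. A rough sketch, distilled from the test script below ($ifname is only a placeholder here, not something the patch introduces):

  # Map priorities to 4 TCs; TC 0 is preemptible (fp P), the rest express (fp E).
  tc qdisc add dev $ifname root mqprio num_tc 4 map 0 1 2 3 \
          queues 1@0 1@1 1@2 1@3 fp P E E E hw 1

  # Enable the pMAC and start verification towards the link partner.
  ethtool --set-mm $ifname pmac-enabled on tx-enabled on verify-enabled on

  # verify-status and tx-active show whether preemption became active.
  ethtool --show-mm $ifname

Example run: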
$ ./ethtool_mm.sh eno0 swp0 TEST: Manual configuration with verification: eno0 to swp0 [ OK ] TEST: Manual configuration with verification: swp0 to eno0 [ OK ] TEST: Manual configuration without verification: eno0 to swp0 [ OK ] TEST: Manual configuration without verification: swp0 to eno0 [ OK ] TEST: Manual configuration with failed verification: eno0 to swp0 [ OK ] TEST: Manual configuration with failed verification: swp0 to eno0 [ OK ] TEST: LLDP [ OK ] Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- tools/testing/selftests/net/forwarding/Makefile | 1 + .../testing/selftests/net/forwarding/ethtool_mm.sh | 288 +++++++++++++++++++++ tools/testing/selftests/net/forwarding/lib.sh | 18 ++ 3 files changed, 307 insertions(+) create mode 100755 tools/testing/selftests/net/forwarding/ethtool_mm.sh (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile index 236f6b796a52..a474c60fe348 100644 --- a/tools/testing/selftests/net/forwarding/Makefile +++ b/tools/testing/selftests/net/forwarding/Makefile @@ -15,6 +15,7 @@ TEST_PROGS = bridge_igmp.sh \ custom_multipath_hash.sh \ dual_vxlan_bridge.sh \ ethtool_extended_state.sh \ + ethtool_mm.sh \ ethtool.sh \ gre_custom_multipath_hash.sh \ gre_inner_v4_multipath.sh \ diff --git a/tools/testing/selftests/net/forwarding/ethtool_mm.sh b/tools/testing/selftests/net/forwarding/ethtool_mm.sh new file mode 100755 index 000000000000..c580ad623848 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/ethtool_mm.sh @@ -0,0 +1,288 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS=" + manual_with_verification_h1_to_h2 + manual_with_verification_h2_to_h1 + manual_without_verification_h1_to_h2 + manual_without_verification_h2_to_h1 + manual_failed_verification_h1_to_h2 + manual_failed_verification_h2_to_h1 + lldp +" + +NUM_NETIFS=2 +REQUIRE_MZ=no +PREEMPTIBLE_PRIO=0 +source lib.sh + +traffic_test() +{ + local if=$1; shift + local src=$1; shift + local num_pkts=10000 + local before= + local after= + local delta= + + before=$(ethtool_std_stats_get $if "eth-mac" "FramesTransmittedOK" $src) + + $MZ $if -q -c $num_pkts -p 64 -b bcast -t ip -R $PREEMPTIBLE_PRIO + + after=$(ethtool_std_stats_get $if "eth-mac" "FramesTransmittedOK" $src) + + delta=$((after - before)) + + # Allow an extra 1% tolerance for random packets sent by the stack + [ $delta -ge $num_pkts ] && [ $delta -le $((num_pkts + 100)) ] +} + +manual_with_verification() +{ + local tx=$1; shift + local rx=$1; shift + + RET=0 + + # It isn't completely clear from IEEE 802.3-2018 Figure 99-5: Transmit + # Processing state diagram whether the "send_r" variable (send response + # to verification frame) should be taken into consideration while the + # MAC Merge TX direction is disabled. That being said, at least the + # NXP ENETC does not, and requires tx-enabled on in order to respond to + # the link partner's verification frames. + ethtool --set-mm $rx tx-enabled on + ethtool --set-mm $tx verify-enabled on tx-enabled on + + # Wait for verification to finish + sleep 1 + + ethtool --json --show-mm $tx | jq -r '.[]."verify-status"' | \ + grep -q 'SUCCEEDED' + check_err "$?" "Verification did not succeed" + + ethtool --json --show-mm $tx | jq -r '.[]."tx-active"' | grep -q 'true' + check_err "$?" "pMAC TX is not active" + + traffic_test $tx "pmac" + check_err "$?" 
"Traffic did not get sent through $tx's pMAC" + + ethtool --set-mm $tx verify-enabled off tx-enabled off + ethtool --set-mm $rx tx-enabled off + + log_test "Manual configuration with verification: $tx to $rx" +} + +manual_with_verification_h1_to_h2() +{ + manual_with_verification $h1 $h2 +} + +manual_with_verification_h2_to_h1() +{ + manual_with_verification $h2 $h1 +} + +manual_without_verification() +{ + local tx=$1; shift + local rx=$1; shift + + RET=0 + + ethtool --set-mm $tx verify-enabled off tx-enabled on + + ethtool --json --show-mm $tx | jq -r '.[]."verify-status"' | \ + grep -q 'DISABLED' + check_err "$?" "Verification is not disabled" + + ethtool --json --show-mm $tx | jq -r '.[]."tx-active"' | grep -q 'true' + check_err "$?" "pMAC TX is not active" + + traffic_test $tx "pmac" + check_err "$?" "Traffic did not get sent through $tx's pMAC" + + ethtool --set-mm $tx verify-enabled off tx-enabled off + + log_test "Manual configuration without verification: $tx to $rx" +} + +manual_without_verification_h1_to_h2() +{ + manual_without_verification $h1 $h2 +} + +manual_without_verification_h2_to_h1() +{ + manual_without_verification $h2 $h1 +} + +manual_failed_verification() +{ + local tx=$1; shift + local rx=$1; shift + + RET=0 + + ethtool --set-mm $rx pmac-enabled off + ethtool --set-mm $tx verify-enabled on tx-enabled on + + # Wait for verification to time out + sleep 1 + + ethtool --json --show-mm $tx | jq -r '.[]."verify-status"' | \ + grep -q 'SUCCEEDED' + check_fail "$?" "Verification succeeded when it shouldn't have" + + ethtool --json --show-mm $tx | jq -r '.[]."tx-active"' | grep -q 'true' + check_fail "$?" "pMAC TX is active when it shouldn't have" + + traffic_test $tx "emac" + check_err "$?" "Traffic did not get sent through $tx's eMAC" + + ethtool --set-mm $tx verify-enabled off tx-enabled off + ethtool --set-mm $rx pmac-enabled on + + log_test "Manual configuration with failed verification: $tx to $rx" +} + +manual_failed_verification_h1_to_h2() +{ + manual_failed_verification $h1 $h2 +} + +manual_failed_verification_h2_to_h1() +{ + manual_failed_verification $h2 $h1 +} + +lldp_change_add_frag_size() +{ + local add_frag_size=$1 + + lldptool -T -i $h1 -V addEthCaps addFragSize=$add_frag_size >/dev/null + # Wait for TLVs to be received + sleep 2 + lldptool -i $h2 -t -n -V addEthCaps | \ + grep -q "Additional fragment size: $add_frag_size" +} + +lldp() +{ + RET=0 + + systemctl start lldpad + + # Configure the interfaces to receive and transmit LLDPDUs + lldptool -L -i $h1 adminStatus=rxtx >/dev/null + lldptool -L -i $h2 adminStatus=rxtx >/dev/null + + # Enable the transmission of Additional Ethernet Capabilities TLV + lldptool -T -i $h1 -V addEthCaps enableTx=yes >/dev/null + lldptool -T -i $h2 -V addEthCaps enableTx=yes >/dev/null + + # Wait for TLVs to be received + sleep 2 + + lldptool -i $h1 -t -n -V addEthCaps | \ + grep -q "Preemption capability active" + check_err "$?" "$h1 pMAC TX is not active" + + lldptool -i $h2 -t -n -V addEthCaps | \ + grep -q "Preemption capability active" + check_err "$?" "$h2 pMAC TX is not active" + + lldp_change_add_frag_size 3 + check_err "$?" "addFragSize 3" + + lldp_change_add_frag_size 2 + check_err "$?" "addFragSize 2" + + lldp_change_add_frag_size 1 + check_err "$?" "addFragSize 1" + + lldp_change_add_frag_size 0 + check_err "$?" "addFragSize 0" + + traffic_test $h1 "pmac" + check_err "$?" "Traffic did not get sent through $h1's pMAC" + + traffic_test $h2 "pmac" + check_err "$?" 
"Traffic did not get sent through $h2's pMAC" + + systemctl stop lldpad + + log_test "LLDP" +} + +h1_create() +{ + ip link set dev $h1 up + + tc qdisc add dev $h1 root mqprio num_tc 4 map 0 1 2 3 \ + queues 1@0 1@1 1@2 1@3 \ + fp P E E E \ + hw 1 + + ethtool --set-mm $h1 pmac-enabled on tx-enabled off verify-enabled off +} + +h2_create() +{ + ip link set dev $h2 up + + ethtool --set-mm $h2 pmac-enabled on tx-enabled off verify-enabled off + + tc qdisc add dev $h2 root mqprio num_tc 4 map 0 1 2 3 \ + queues 1@0 1@1 1@2 1@3 \ + fp P E E E \ + hw 1 +} + +h1_destroy() +{ + ethtool --set-mm $h1 pmac-enabled off tx-enabled off verify-enabled off + + tc qdisc del dev $h1 root + + ip link set dev $h1 down +} + +h2_destroy() +{ + tc qdisc del dev $h2 root + + ethtool --set-mm $h2 pmac-enabled off tx-enabled off verify-enabled off + + ip link set dev $h2 down +} + +setup_prepare() +{ + check_ethtool_mm_support + check_tc_fp_support + require_command lldptool + bail_on_lldpad "autoconfigure the MAC Merge layer" "configure it manually" + + h1=${NETIFS[p1]} + h2=${NETIFS[p2]} + + h1_create + h2_create +} + +cleanup() +{ + pre_cleanup + + h2_destroy + h1_destroy +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 36e47c9d7cca..057c3d0ad620 100755 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -120,6 +120,15 @@ check_tc_action_hw_stats_support() fi } +check_tc_fp_support() +{ + tc qdisc add dev lo mqprio help 2>&1 | grep -q "fp " + if [[ $? -ne 0 ]]; then + echo "SKIP: iproute2 too old; tc is missing frame preemption support" + exit $ksft_skip + fi +} + check_ethtool_lanes_support() { ethtool --help 2>&1| grep lanes &> /dev/null @@ -129,6 +138,15 @@ check_ethtool_lanes_support() fi } +check_ethtool_mm_support() +{ + ethtool --help 2>&1| grep -- '--show-mm' &> /dev/null + if [[ $? -ne 0 ]]; then + echo "SKIP: ethtool too old; it is missing MAC Merge layer support" + exit $ksft_skip + fi +} + check_locked_port_support() { if ! bridge -d link show | grep -q " locked"; then -- cgit v1.2.3-70-g09d2 From 7648ac72dcd7e22ac1fa5e573e536592773831dc Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 19 Apr 2023 18:35:00 +0300 Subject: selftests: net: Add bridge neighbor suppression test Add test cases for bridge neighbor suppression, testing both per-port and per-{Port, VLAN} neighbor suppression with both ARP and NS packets. Example truncated output: # ./test_bridge_neigh_suppress.sh [...] Tests passed: 148 Tests failed: 0 Signed-off-by: Ido Schimmel Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/Makefile | 1 + .../selftests/net/test_bridge_neigh_suppress.sh | 862 +++++++++++++++++++++ 2 files changed, 863 insertions(+) create mode 100755 tools/testing/selftests/net/test_bridge_neigh_suppress.sh (limited to 'tools/testing') diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 1de34ec99290..c12df57d5539 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -83,6 +83,7 @@ TEST_GEN_FILES += nat6to4.o TEST_GEN_FILES += ip_local_port_range TEST_GEN_FILES += bind_wildcard TEST_PROGS += test_vxlan_mdb.sh +TEST_PROGS += test_bridge_neigh_suppress.sh TEST_FILES := settings diff --git a/tools/testing/selftests/net/test_bridge_neigh_suppress.sh b/tools/testing/selftests/net/test_bridge_neigh_suppress.sh new file mode 100755 index 000000000000..d80f2cd87614 --- /dev/null +++ b/tools/testing/selftests/net/test_bridge_neigh_suppress.sh @@ -0,0 +1,862 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# This test is for checking bridge neighbor suppression functionality. The +# topology consists of two bridges (VTEPs) connected using VXLAN. A single +# host is connected to each bridge over multiple VLANs. The test checks that +# ARP/NS messages from the first host are suppressed on the VXLAN port when +# should. +# +# +-----------------------+ +------------------------+ +# | h1 | | h2 | +# | | | | +# | + eth0.10 | | + eth0.10 | +# | | 192.0.2.1/28 | | | 192.0.2.2/28 | +# | | 2001:db8:1::1/64 | | | 2001:db8:1::2/64 | +# | | | | | | +# | | + eth0.20 | | | + eth0.20 | +# | \ | 192.0.2.17/28 | | \ | 192.0.2.18/28 | +# | \ | 2001:db8:2::1/64 | | \ | 2001:db8:2::2/64 | +# | \| | | \| | +# | + eth0 | | + eth0 | +# +----|------------------+ +----|-------------------+ +# | | +# | | +# +----|-------------------------------+ +----|-------------------------------+ +# | + swp1 + vx0 | | + swp1 + vx0 | +# | | | | | | | | +# | | br0 | | | | | | +# | +------------+-----------+ | | +------------+-----------+ | +# | | | | | | +# | | | | | | +# | +---+---+ | | +---+---+ | +# | | | | | | | | +# | | | | | | | | +# | + + | | + + | +# | br0.10 br0.20 | | br0.10 br0.20 | +# | | | | +# | 192.0.2.33 | | 192.0.2.34 | +# | + lo | | + lo | +# | | | | +# | | | | +# | 192.0.2.49/28 | | 192.0.2.50/28 | +# | veth0 +-------+ veth0 | +# | | | | +# | sw1 | | sw2 | +# +------------------------------------+ +------------------------------------+ + +ret=0 +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 + +# All tests in this script. Can be overridden with -t option. 
+TESTS=" + neigh_suppress_arp + neigh_suppress_ns + neigh_vlan_suppress_arp + neigh_vlan_suppress_ns +" +VERBOSE=0 +PAUSE_ON_FAIL=no +PAUSE=no + +################################################################################ +# Utilities + +log_test() +{ + local rc=$1 + local expected=$2 + local msg="$3" + + if [ ${rc} -eq ${expected} ]; then + printf "TEST: %-60s [ OK ]\n" "${msg}" + nsuccess=$((nsuccess+1)) + else + ret=1 + nfail=$((nfail+1)) + printf "TEST: %-60s [FAIL]\n" "${msg}" + if [ "$VERBOSE" = "1" ]; then + echo " rc=$rc, expected $expected" + fi + + if [ "${PAUSE_ON_FAIL}" = "yes" ]; then + echo + echo "hit enter to continue, 'q' to quit" + read a + [ "$a" = "q" ] && exit 1 + fi + fi + + if [ "${PAUSE}" = "yes" ]; then + echo + echo "hit enter to continue, 'q' to quit" + read a + [ "$a" = "q" ] && exit 1 + fi + + [ "$VERBOSE" = "1" ] && echo +} + +run_cmd() +{ + local cmd="$1" + local out + local stderr="2>/dev/null" + + if [ "$VERBOSE" = "1" ]; then + printf "COMMAND: $cmd\n" + stderr= + fi + + out=$(eval $cmd $stderr) + rc=$? + if [ "$VERBOSE" = "1" -a -n "$out" ]; then + echo " $out" + fi + + return $rc +} + +tc_check_packets() +{ + local ns=$1; shift + local id=$1; shift + local handle=$1; shift + local count=$1; shift + local pkts + + sleep 0.1 + pkts=$(tc -n $ns -j -s filter show $id \ + | jq ".[] | select(.options.handle == $handle) | \ + .options.actions[0].stats.packets") + [[ $pkts == $count ]] +} + +################################################################################ +# Setup + +setup_topo_ns() +{ + local ns=$1; shift + + ip netns add $ns + ip -n $ns link set dev lo up + + ip netns exec $ns sysctl -qw net.ipv6.conf.all.keep_addr_on_down=1 + ip netns exec $ns sysctl -qw net.ipv6.conf.default.ignore_routes_with_linkdown=1 + ip netns exec $ns sysctl -qw net.ipv6.conf.all.accept_dad=0 + ip netns exec $ns sysctl -qw net.ipv6.conf.default.accept_dad=0 +} + +setup_topo() +{ + local ns + + for ns in h1 h2 sw1 sw2; do + setup_topo_ns $ns + done + + ip link add name veth0 type veth peer name veth1 + ip link set dev veth0 netns h1 name eth0 + ip link set dev veth1 netns sw1 name swp1 + + ip link add name veth0 type veth peer name veth1 + ip link set dev veth0 netns sw1 name veth0 + ip link set dev veth1 netns sw2 name veth0 + + ip link add name veth0 type veth peer name veth1 + ip link set dev veth0 netns h2 name eth0 + ip link set dev veth1 netns sw2 name swp1 +} + +setup_host_common() +{ + local ns=$1; shift + local v4addr1=$1; shift + local v4addr2=$1; shift + local v6addr1=$1; shift + local v6addr2=$1; shift + + ip -n $ns link set dev eth0 up + ip -n $ns link add link eth0 name eth0.10 up type vlan id 10 + ip -n $ns link add link eth0 name eth0.20 up type vlan id 20 + + ip -n $ns address add $v4addr1 dev eth0.10 + ip -n $ns address add $v4addr2 dev eth0.20 + ip -n $ns address add $v6addr1 dev eth0.10 + ip -n $ns address add $v6addr2 dev eth0.20 +} + +setup_h1() +{ + local ns=h1 + local v4addr1=192.0.2.1/28 + local v4addr2=192.0.2.17/28 + local v6addr1=2001:db8:1::1/64 + local v6addr2=2001:db8:2::1/64 + + setup_host_common $ns $v4addr1 $v4addr2 $v6addr1 $v6addr2 +} + +setup_h2() +{ + local ns=h2 + local v4addr1=192.0.2.2/28 + local v4addr2=192.0.2.18/28 + local v6addr1=2001:db8:1::2/64 + local v6addr2=2001:db8:2::2/64 + + setup_host_common $ns $v4addr1 $v4addr2 $v6addr1 $v6addr2 +} + +setup_sw_common() +{ + local ns=$1; shift + local local_addr=$1; shift + local remote_addr=$1; shift + local veth_addr=$1; shift + local gw_addr=$1; shift + + ip -n $ns address 
add $local_addr/32 dev lo + + ip -n $ns link set dev veth0 up + ip -n $ns address add $veth_addr/28 dev veth0 + ip -n $ns route add default via $gw_addr + + ip -n $ns link add name br0 up type bridge vlan_filtering 1 \ + vlan_default_pvid 0 mcast_snooping 0 + + ip -n $ns link add link br0 name br0.10 up type vlan id 10 + bridge -n $ns vlan add vid 10 dev br0 self + + ip -n $ns link add link br0 name br0.20 up type vlan id 20 + bridge -n $ns vlan add vid 20 dev br0 self + + ip -n $ns link set dev swp1 up master br0 + bridge -n $ns vlan add vid 10 dev swp1 + bridge -n $ns vlan add vid 20 dev swp1 + + ip -n $ns link add name vx0 up master br0 type vxlan \ + local $local_addr dstport 4789 nolearning external + bridge -n $ns fdb add 00:00:00:00:00:00 dev vx0 self static \ + dst $remote_addr src_vni 10010 + bridge -n $ns fdb add 00:00:00:00:00:00 dev vx0 self static \ + dst $remote_addr src_vni 10020 + bridge -n $ns link set dev vx0 vlan_tunnel on learning off + + bridge -n $ns vlan add vid 10 dev vx0 + bridge -n $ns vlan add vid 10 dev vx0 tunnel_info id 10010 + + bridge -n $ns vlan add vid 20 dev vx0 + bridge -n $ns vlan add vid 20 dev vx0 tunnel_info id 10020 +} + +setup_sw1() +{ + local ns=sw1 + local local_addr=192.0.2.33 + local remote_addr=192.0.2.34 + local veth_addr=192.0.2.49 + local gw_addr=192.0.2.50 + + setup_sw_common $ns $local_addr $remote_addr $veth_addr $gw_addr +} + +setup_sw2() +{ + local ns=sw2 + local local_addr=192.0.2.34 + local remote_addr=192.0.2.33 + local veth_addr=192.0.2.50 + local gw_addr=192.0.2.49 + + setup_sw_common $ns $local_addr $remote_addr $veth_addr $gw_addr +} + +setup() +{ + set -e + + setup_topo + setup_h1 + setup_h2 + setup_sw1 + setup_sw2 + + sleep 5 + + set +e +} + +cleanup() +{ + local ns + + for ns in h1 h2 sw1 sw2; do + ip netns del $ns &> /dev/null + done +} + +################################################################################ +# Tests + +neigh_suppress_arp_common() +{ + local vid=$1; shift + local sip=$1; shift + local tip=$1; shift + local h2_mac + + echo + echo "Per-port ARP suppression - VLAN $vid" + echo "----------------------------------" + + run_cmd "tc -n sw1 qdisc replace dev vx0 clsact" + run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 101 proto 0x0806 flower indev swp1 arp_tip $tip arp_sip $sip arp_op request action pass" + + # Initial state - check that ARP requests are not suppressed and that + # ARP replies are received. + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip" + log_test $? 0 "arping" + tc_check_packets sw1 "dev vx0 egress" 101 1 + log_test $? 0 "ARP suppression" + + # Enable neighbor suppression and check that nothing changes compared + # to the initial state. + run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress on" + run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress on\"" + log_test $? 0 "\"neigh_suppress\" is on" + + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip" + log_test $? 0 "arping" + tc_check_packets sw1 "dev vx0 egress" 101 2 + log_test $? 0 "ARP suppression" + + # Install an FDB entry for the remote host and check that nothing + # changes compared to the initial state. + h2_mac=$(ip -n h2 -j -p link show eth0.$vid | jq -r '.[]["address"]') + run_cmd "bridge -n sw1 fdb replace $h2_mac dev vx0 master static vlan $vid" + log_test $? 0 "FDB entry installation" + + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip" + log_test $? 
0 "arping" + tc_check_packets sw1 "dev vx0 egress" 101 3 + log_test $? 0 "ARP suppression" + + # Install a neighbor on the matching SVI interface and check that ARP + # requests are suppressed. + run_cmd "ip -n sw1 neigh replace $tip lladdr $h2_mac nud permanent dev br0.$vid" + log_test $? 0 "Neighbor entry installation" + + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip" + log_test $? 0 "arping" + tc_check_packets sw1 "dev vx0 egress" 101 3 + log_test $? 0 "ARP suppression" + + # Take the second host down and check that ARP requests are suppressed + # and that ARP replies are received. + run_cmd "ip -n h2 link set dev eth0.$vid down" + log_test $? 0 "H2 down" + + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip" + log_test $? 0 "arping" + tc_check_packets sw1 "dev vx0 egress" 101 3 + log_test $? 0 "ARP suppression" + + run_cmd "ip -n h2 link set dev eth0.$vid up" + log_test $? 0 "H2 up" + + # Disable neighbor suppression and check that ARP requests are no + # longer suppressed. + run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress off" + run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress off\"" + log_test $? 0 "\"neigh_suppress\" is off" + + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip" + log_test $? 0 "arping" + tc_check_packets sw1 "dev vx0 egress" 101 4 + log_test $? 0 "ARP suppression" + + # Take the second host down and check that ARP requests are not + # suppressed and that ARP replies are not received. + run_cmd "ip -n h2 link set dev eth0.$vid down" + log_test $? 0 "H2 down" + + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip -I eth0.$vid $tip" + log_test $? 1 "arping" + tc_check_packets sw1 "dev vx0 egress" 101 5 + log_test $? 0 "ARP suppression" +} + +neigh_suppress_arp() +{ + local vid=10 + local sip=192.0.2.1 + local tip=192.0.2.2 + + neigh_suppress_arp_common $vid $sip $tip + + vid=20 + sip=192.0.2.17 + tip=192.0.2.18 + neigh_suppress_arp_common $vid $sip $tip +} + +neigh_suppress_ns_common() +{ + local vid=$1; shift + local saddr=$1; shift + local daddr=$1; shift + local maddr=$1; shift + local h2_mac + + echo + echo "Per-port NS suppression - VLAN $vid" + echo "---------------------------------" + + run_cmd "tc -n sw1 qdisc replace dev vx0 clsact" + run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 101 proto ipv6 flower indev swp1 ip_proto icmpv6 dst_ip $maddr src_ip $saddr type 135 code 0 action pass" + + # Initial state - check that NS messages are not suppressed and that ND + # messages are received. + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid" + log_test $? 0 "ndisc6" + tc_check_packets sw1 "dev vx0 egress" 101 1 + log_test $? 0 "NS suppression" + + # Enable neighbor suppression and check that nothing changes compared + # to the initial state. + run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress on" + run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress on\"" + log_test $? 0 "\"neigh_suppress\" is on" + + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid" + log_test $? 0 "ndisc6" + tc_check_packets sw1 "dev vx0 egress" 101 2 + log_test $? 0 "NS suppression" + + # Install an FDB entry for the remote host and check that nothing + # changes compared to the initial state. + h2_mac=$(ip -n h2 -j -p link show eth0.$vid | jq -r '.[]["address"]') + run_cmd "bridge -n sw1 fdb replace $h2_mac dev vx0 master static vlan $vid" + log_test $? 
0 "FDB entry installation" + + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid" + log_test $? 0 "ndisc6" + tc_check_packets sw1 "dev vx0 egress" 101 3 + log_test $? 0 "NS suppression" + + # Install a neighbor on the matching SVI interface and check that NS + # messages are suppressed. + run_cmd "ip -n sw1 neigh replace $daddr lladdr $h2_mac nud permanent dev br0.$vid" + log_test $? 0 "Neighbor entry installation" + + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid" + log_test $? 0 "ndisc6" + tc_check_packets sw1 "dev vx0 egress" 101 3 + log_test $? 0 "NS suppression" + + # Take the second host down and check that NS messages are suppressed + # and that ND messages are received. + run_cmd "ip -n h2 link set dev eth0.$vid down" + log_test $? 0 "H2 down" + + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid" + log_test $? 0 "ndisc6" + tc_check_packets sw1 "dev vx0 egress" 101 3 + log_test $? 0 "NS suppression" + + run_cmd "ip -n h2 link set dev eth0.$vid up" + log_test $? 0 "H2 up" + + # Disable neighbor suppression and check that NS messages are no longer + # suppressed. + run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress off" + run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress off\"" + log_test $? 0 "\"neigh_suppress\" is off" + + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid" + log_test $? 0 "ndisc6" + tc_check_packets sw1 "dev vx0 egress" 101 4 + log_test $? 0 "NS suppression" + + # Take the second host down and check that NS messages are not + # suppressed and that ND messages are not received. + run_cmd "ip -n h2 link set dev eth0.$vid down" + log_test $? 0 "H2 down" + + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr -w 5000 $daddr eth0.$vid" + log_test $? 2 "ndisc6" + tc_check_packets sw1 "dev vx0 egress" 101 5 + log_test $? 0 "NS suppression" +} + +neigh_suppress_ns() +{ + local vid=10 + local saddr=2001:db8:1::1 + local daddr=2001:db8:1::2 + local maddr=ff02::1:ff00:2 + + neigh_suppress_ns_common $vid $saddr $daddr $maddr + + vid=20 + saddr=2001:db8:2::1 + daddr=2001:db8:2::2 + maddr=ff02::1:ff00:2 + + neigh_suppress_ns_common $vid $saddr $daddr $maddr +} + +neigh_vlan_suppress_arp() +{ + local vid1=10 + local vid2=20 + local sip1=192.0.2.1 + local sip2=192.0.2.17 + local tip1=192.0.2.2 + local tip2=192.0.2.18 + local h2_mac1 + local h2_mac2 + + echo + echo "Per-{Port, VLAN} ARP suppression" + echo "--------------------------------" + + run_cmd "tc -n sw1 qdisc replace dev vx0 clsact" + run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 101 proto 0x0806 flower indev swp1 arp_tip $tip1 arp_sip $sip1 arp_op request action pass" + run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 102 proto 0x0806 flower indev swp1 arp_tip $tip2 arp_sip $sip2 arp_op request action pass" + + h2_mac1=$(ip -n h2 -j -p link show eth0.$vid1 | jq -r '.[]["address"]') + h2_mac2=$(ip -n h2 -j -p link show eth0.$vid2 | jq -r '.[]["address"]') + run_cmd "bridge -n sw1 fdb replace $h2_mac1 dev vx0 master static vlan $vid1" + run_cmd "bridge -n sw1 fdb replace $h2_mac2 dev vx0 master static vlan $vid2" + run_cmd "ip -n sw1 neigh replace $tip1 lladdr $h2_mac1 nud permanent dev br0.$vid1" + run_cmd "ip -n sw1 neigh replace $tip2 lladdr $h2_mac2 nud permanent dev br0.$vid2" + + # Enable per-{Port, VLAN} neighbor suppression and check that ARP + # requests are not suppressed and that ARP replies are received. 
+ run_cmd "bridge -n sw1 link set dev vx0 neigh_vlan_suppress on" + run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_vlan_suppress on\"" + log_test $? 0 "\"neigh_vlan_suppress\" is on" + + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1" + log_test $? 0 "arping (VLAN $vid1)" + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2" + log_test $? 0 "arping (VLAN $vid2)" + + tc_check_packets sw1 "dev vx0 egress" 101 1 + log_test $? 0 "ARP suppression (VLAN $vid1)" + tc_check_packets sw1 "dev vx0 egress" 102 1 + log_test $? 0 "ARP suppression (VLAN $vid2)" + + # Enable neighbor suppression on VLAN 10 and check that only on this + # VLAN ARP requests are suppressed. + run_cmd "bridge -n sw1 vlan set vid $vid1 dev vx0 neigh_suppress on" + run_cmd "bridge -n sw1 -d vlan show dev vx0 vid $vid1 | grep \"neigh_suppress on\"" + log_test $? 0 "\"neigh_suppress\" is on (VLAN $vid1)" + run_cmd "bridge -n sw1 -d vlan show dev vx0 vid $vid2 | grep \"neigh_suppress off\"" + log_test $? 0 "\"neigh_suppress\" is off (VLAN $vid2)" + + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1" + log_test $? 0 "arping (VLAN $vid1)" + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2" + log_test $? 0 "arping (VLAN $vid2)" + + tc_check_packets sw1 "dev vx0 egress" 101 1 + log_test $? 0 "ARP suppression (VLAN $vid1)" + tc_check_packets sw1 "dev vx0 egress" 102 2 + log_test $? 0 "ARP suppression (VLAN $vid2)" + + # Enable neighbor suppression on the port and check that it has no + # effect compared to previous state. + run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress on" + run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress on\"" + log_test $? 0 "\"neigh_suppress\" is on" + + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1" + log_test $? 0 "arping (VLAN $vid1)" + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2" + log_test $? 0 "arping (VLAN $vid2)" + + tc_check_packets sw1 "dev vx0 egress" 101 1 + log_test $? 0 "ARP suppression (VLAN $vid1)" + tc_check_packets sw1 "dev vx0 egress" 102 3 + log_test $? 0 "ARP suppression (VLAN $vid2)" + + # Disable neighbor suppression on the port and check that it has no + # effect compared to previous state. + run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress off" + run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress off\"" + log_test $? 0 "\"neigh_suppress\" is off" + + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1" + log_test $? 0 "arping (VLAN $vid1)" + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2" + log_test $? 0 "arping (VLAN $vid2)" + + tc_check_packets sw1 "dev vx0 egress" 101 1 + log_test $? 0 "ARP suppression (VLAN $vid1)" + tc_check_packets sw1 "dev vx0 egress" 102 4 + log_test $? 0 "ARP suppression (VLAN $vid2)" + + # Disable neighbor suppression on VLAN 10 and check that ARP requests + # are no longer suppressed on this VLAN. + run_cmd "bridge -n sw1 vlan set vid $vid1 dev vx0 neigh_suppress off" + run_cmd "bridge -n sw1 -d vlan show dev vx0 vid $vid1 | grep \"neigh_suppress off\"" + log_test $? 0 "\"neigh_suppress\" is off (VLAN $vid1)" + + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1" + log_test $? 0 "arping (VLAN $vid1)" + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2" + log_test $? 
0 "arping (VLAN $vid2)" + + tc_check_packets sw1 "dev vx0 egress" 101 2 + log_test $? 0 "ARP suppression (VLAN $vid1)" + tc_check_packets sw1 "dev vx0 egress" 102 5 + log_test $? 0 "ARP suppression (VLAN $vid2)" + + # Disable per-{Port, VLAN} neighbor suppression, enable neighbor + # suppression on the port and check that on both VLANs ARP requests are + # suppressed. + run_cmd "bridge -n sw1 link set dev vx0 neigh_vlan_suppress off" + run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_vlan_suppress off\"" + log_test $? 0 "\"neigh_vlan_suppress\" is off" + + run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress on" + run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress on\"" + log_test $? 0 "\"neigh_suppress\" is on" + + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip1 -I eth0.$vid1 $tip1" + log_test $? 0 "arping (VLAN $vid1)" + run_cmd "ip netns exec h1 arping -q -b -c 1 -w 5 -s $sip2 -I eth0.$vid2 $tip2" + log_test $? 0 "arping (VLAN $vid2)" + + tc_check_packets sw1 "dev vx0 egress" 101 2 + log_test $? 0 "ARP suppression (VLAN $vid1)" + tc_check_packets sw1 "dev vx0 egress" 102 5 + log_test $? 0 "ARP suppression (VLAN $vid2)" +} + +neigh_vlan_suppress_ns() +{ + local vid1=10 + local vid2=20 + local saddr1=2001:db8:1::1 + local saddr2=2001:db8:2::1 + local daddr1=2001:db8:1::2 + local daddr2=2001:db8:2::2 + local maddr=ff02::1:ff00:2 + local h2_mac1 + local h2_mac2 + + echo + echo "Per-{Port, VLAN} NS suppression" + echo "-------------------------------" + + run_cmd "tc -n sw1 qdisc replace dev vx0 clsact" + run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 101 proto ipv6 flower indev swp1 ip_proto icmpv6 dst_ip $maddr src_ip $saddr1 type 135 code 0 action pass" + run_cmd "tc -n sw1 filter replace dev vx0 egress pref 1 handle 102 proto ipv6 flower indev swp1 ip_proto icmpv6 dst_ip $maddr src_ip $saddr2 type 135 code 0 action pass" + + h2_mac1=$(ip -n h2 -j -p link show eth0.$vid1 | jq -r '.[]["address"]') + h2_mac2=$(ip -n h2 -j -p link show eth0.$vid2 | jq -r '.[]["address"]') + run_cmd "bridge -n sw1 fdb replace $h2_mac1 dev vx0 master static vlan $vid1" + run_cmd "bridge -n sw1 fdb replace $h2_mac2 dev vx0 master static vlan $vid2" + run_cmd "ip -n sw1 neigh replace $daddr1 lladdr $h2_mac1 nud permanent dev br0.$vid1" + run_cmd "ip -n sw1 neigh replace $daddr2 lladdr $h2_mac2 nud permanent dev br0.$vid2" + + # Enable per-{Port, VLAN} neighbor suppression and check that NS + # messages are not suppressed and that ND messages are received. + run_cmd "bridge -n sw1 link set dev vx0 neigh_vlan_suppress on" + run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_vlan_suppress on\"" + log_test $? 0 "\"neigh_vlan_suppress\" is on" + + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1" + log_test $? 0 "ndisc6 (VLAN $vid1)" + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2" + log_test $? 0 "ndisc6 (VLAN $vid2)" + + tc_check_packets sw1 "dev vx0 egress" 101 1 + log_test $? 0 "NS suppression (VLAN $vid1)" + tc_check_packets sw1 "dev vx0 egress" 102 1 + log_test $? 0 "NS suppression (VLAN $vid2)" + + # Enable neighbor suppression on VLAN 10 and check that only on this + # VLAN NS messages are suppressed. + run_cmd "bridge -n sw1 vlan set vid $vid1 dev vx0 neigh_suppress on" + run_cmd "bridge -n sw1 -d vlan show dev vx0 vid $vid1 | grep \"neigh_suppress on\"" + log_test $? 
0 "\"neigh_suppress\" is on (VLAN $vid1)" + run_cmd "bridge -n sw1 -d vlan show dev vx0 vid $vid2 | grep \"neigh_suppress off\"" + log_test $? 0 "\"neigh_suppress\" is off (VLAN $vid2)" + + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1" + log_test $? 0 "ndisc6 (VLAN $vid1)" + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2" + log_test $? 0 "ndisc6 (VLAN $vid2)" + + tc_check_packets sw1 "dev vx0 egress" 101 1 + log_test $? 0 "NS suppression (VLAN $vid1)" + tc_check_packets sw1 "dev vx0 egress" 102 2 + log_test $? 0 "NS suppression (VLAN $vid2)" + + # Enable neighbor suppression on the port and check that it has no + # effect compared to previous state. + run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress on" + run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress on\"" + log_test $? 0 "\"neigh_suppress\" is on" + + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1" + log_test $? 0 "ndisc6 (VLAN $vid1)" + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2" + log_test $? 0 "ndisc6 (VLAN $vid2)" + + tc_check_packets sw1 "dev vx0 egress" 101 1 + log_test $? 0 "NS suppression (VLAN $vid1)" + tc_check_packets sw1 "dev vx0 egress" 102 3 + log_test $? 0 "NS suppression (VLAN $vid2)" + + # Disable neighbor suppression on the port and check that it has no + # effect compared to previous state. + run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress off" + run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress off\"" + log_test $? 0 "\"neigh_suppress\" is off" + + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1" + log_test $? 0 "ndisc6 (VLAN $vid1)" + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2" + log_test $? 0 "ndisc6 (VLAN $vid2)" + + tc_check_packets sw1 "dev vx0 egress" 101 1 + log_test $? 0 "NS suppression (VLAN $vid1)" + tc_check_packets sw1 "dev vx0 egress" 102 4 + log_test $? 0 "NS suppression (VLAN $vid2)" + + # Disable neighbor suppression on VLAN 10 and check that NS messages + # are no longer suppressed on this VLAN. + run_cmd "bridge -n sw1 vlan set vid $vid1 dev vx0 neigh_suppress off" + run_cmd "bridge -n sw1 -d vlan show dev vx0 vid $vid1 | grep \"neigh_suppress off\"" + log_test $? 0 "\"neigh_suppress\" is off (VLAN $vid1)" + + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1" + log_test $? 0 "ndisc6 (VLAN $vid1)" + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2" + log_test $? 0 "ndisc6 (VLAN $vid2)" + + tc_check_packets sw1 "dev vx0 egress" 101 2 + log_test $? 0 "NS suppression (VLAN $vid1)" + tc_check_packets sw1 "dev vx0 egress" 102 5 + log_test $? 0 "NS suppression (VLAN $vid2)" + + # Disable per-{Port, VLAN} neighbor suppression, enable neighbor + # suppression on the port and check that on both VLANs NS messages are + # suppressed. + run_cmd "bridge -n sw1 link set dev vx0 neigh_vlan_suppress off" + run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_vlan_suppress off\"" + log_test $? 0 "\"neigh_vlan_suppress\" is off" + + run_cmd "bridge -n sw1 link set dev vx0 neigh_suppress on" + run_cmd "bridge -n sw1 -d link show dev vx0 | grep \"neigh_suppress on\"" + log_test $? 0 "\"neigh_suppress\" is on" + + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr1 -w 5000 $daddr1 eth0.$vid1" + log_test $? 
0 "ndisc6 (VLAN $vid1)" + run_cmd "ip netns exec h1 ndisc6 -q -r 1 -s $saddr2 -w 5000 $daddr2 eth0.$vid2" + log_test $? 0 "ndisc6 (VLAN $vid2)" + + tc_check_packets sw1 "dev vx0 egress" 101 2 + log_test $? 0 "NS suppression (VLAN $vid1)" + tc_check_packets sw1 "dev vx0 egress" 102 5 + log_test $? 0 "NS suppression (VLAN $vid2)" +} + +################################################################################ +# Usage + +usage() +{ + cat < Test(s) to run (default: all) + (options: $TESTS) + -p Pause on fail + -P Pause after each test before cleanup + -v Verbose mode (show commands and output) +EOF +} + +################################################################################ +# Main + +trap cleanup EXIT + +while getopts ":t:pPvh" opt; do + case $opt in + t) TESTS=$OPTARG;; + p) PAUSE_ON_FAIL=yes;; + P) PAUSE=yes;; + v) VERBOSE=$(($VERBOSE + 1));; + h) usage; exit 0;; + *) usage; exit 1;; + esac +done + +# Make sure we don't pause twice. +[ "${PAUSE}" = "yes" ] && PAUSE_ON_FAIL=no + +if [ "$(id -u)" -ne 0 ];then + echo "SKIP: Need root privileges" + exit $ksft_skip; +fi + +if [ ! -x "$(command -v ip)" ]; then + echo "SKIP: Could not run test without ip tool" + exit $ksft_skip +fi + +if [ ! -x "$(command -v bridge)" ]; then + echo "SKIP: Could not run test without bridge tool" + exit $ksft_skip +fi + +if [ ! -x "$(command -v tc)" ]; then + echo "SKIP: Could not run test without tc tool" + exit $ksft_skip +fi + +if [ ! -x "$(command -v arping)" ]; then + echo "SKIP: Could not run test without arping tool" + exit $ksft_skip +fi + +if [ ! -x "$(command -v ndisc6)" ]; then + echo "SKIP: Could not run test without ndisc6 tool" + exit $ksft_skip +fi + +if [ ! -x "$(command -v jq)" ]; then + echo "SKIP: Could not run test without jq tool" + exit $ksft_skip +fi + +bridge link help 2>&1 | grep -q "neigh_vlan_suppress" +if [ $? -ne 0 ]; then + echo "SKIP: iproute2 bridge too old, missing per-VLAN neighbor suppression support" + exit $ksft_skip +fi + +# Start clean. +cleanup + +for t in $TESTS +do + setup; $t; cleanup; +done + +if [ "$TESTS" != "none" ]; then + printf "\nTests passed: %3d\n" ${nsuccess} + printf "Tests failed: %3d\n" ${nfail} +fi + +exit $ret -- cgit v1.2.3-70-g09d2 From 4ab07209d5cc8cb6d2a5324c07b3efc3b2fde494 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Fri, 21 Apr 2023 00:44:31 -0700 Subject: bpf: Fix bpf_refcount_acquire's refcount_t address calculation When calculating the address of the refcount_t struct within a local kptr, bpf_refcount_acquire_impl should add refcount_off bytes to the address of the local kptr. Due to some missing parens, the function is incorrectly adding sizeof(refcount_t) * refcount_off bytes. This patch fixes the calculation. Due to the incorrect calculation, bpf_refcount_acquire_impl was trying to refcount_inc some memory well past the end of local kptrs, resulting in kasan and refcount complaints, as reported in [0]. In that thread, Florian and Eduard discovered that bpf selftests written in the new style - with __success and an expected __retval, specifically - were not actually being run. As a result, selftests added in bpf_refcount series weren't really exercising this behavior, and thus didn't unearth the bug. With this fixed behavior it's safe to revert commit 7c4b96c00043 ("selftests/bpf: disable program test run for progs/refcounted_kptr.c"), this patch does so. 
[0] https://lore.kernel.org/bpf/ZEEp+j22imoN6rn9@strlen.de/ Fixes: 7c50b1cb76ac ("bpf: Add bpf_refcount_acquire kfunc") Reported-by: Florian Westphal Reported-by: Eduard Zingerman Signed-off-by: Dave Marchevsky Signed-off-by: Daniel Borkmann Tested-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20230421074431.3548349-1-davemarchevsky@fb.com --- kernel/bpf/helpers.c | 2 +- tools/testing/selftests/bpf/progs/refcounted_kptr.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'tools/testing') diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 00e5fb0682ac..8d368fa353f9 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1925,7 +1925,7 @@ __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta /* Could just cast directly to refcount_t *, but need some code using * bpf_refcount type so that it is emitted in vmlinux BTF */ - ref = (struct bpf_refcount *)p__refcounted_kptr + meta->record->refcount_off; + ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off); refcount_inc((refcount_t *)ref); return (void *)p__refcounted_kptr; diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c index b6b2d4f97b19..1d348a225140 100644 --- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c @@ -219,7 +219,7 @@ static long __read_from_unstash(int idx) #define INSERT_READ_BOTH(rem_tree, rem_list, desc) \ SEC("tc") \ __description(desc) \ -__success /* __retval(579) temporarily disabled */ \ +__success __retval(579) \ long insert_and_remove_tree_##rem_tree##_list_##rem_list(void *ctx) \ { \ long err, tree_data, list_data; \ @@ -258,7 +258,7 @@ INSERT_READ_BOTH(false, true, "insert_read_both: remove from list"); #define INSERT_READ_BOTH(rem_tree, rem_list, desc) \ SEC("tc") \ __description(desc) \ -__success /* __retval(579) temporarily disabled */ \ +__success __retval(579) \ long insert_and_remove_lf_tree_##rem_tree##_list_##rem_list(void *ctx) \ { \ long err, tree_data, list_data; \ @@ -296,7 +296,7 @@ INSERT_READ_BOTH(false, true, "insert_read_both_list_first: remove from list"); #define INSERT_DOUBLE_READ_AND_DEL(read_fn, read_root, desc) \ SEC("tc") \ __description(desc) \ -__success /* temporarily __retval(-1) disabled */ \ +__success __retval(-1) \ long insert_double_##read_fn##_and_del_##read_root(void *ctx) \ { \ long err, list_data; \ @@ -329,7 +329,7 @@ INSERT_DOUBLE_READ_AND_DEL(__read_from_list, head, "insert_double_del: 2x read-a #define INSERT_STASH_READ(rem_tree, desc) \ SEC("tc") \ __description(desc) \ -__success /* __retval(84) temporarily disabled */ \ +__success __retval(84) \ long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx) \ { \ long err, tree_data, map_data; \ -- cgit v1.2.3-70-g09d2 From 02e93e0475df21a1091a9833c52c0e241586573a Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Fri, 21 Apr 2023 08:22:08 +0200 Subject: selftests/xsk: Put MAP_HUGE_2MB in correct argument Put the flag MAP_HUGE_2MB in the correct flags argument instead of the wrong offset argument. 
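Schematically (a simplified sketch of the fixed call; umem_sz stands in for the size the test computes):

  #include <sys/mman.h>
  #include <linux/mman.h>	/* MAP_HUGE_2MB */

  static void *map_umem(size_t umem_sz)
  {
          /* MAP_HUGE_2MB selects the huge page size and is part of the flags
           * word; passing it as the offset does not act as a flag at all. */
          return mmap(NULL, umem_sz, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB,
                      -1, 0);
  }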
Fixes: 2ddade322925 ("selftests/xsk: Fix munmap for hugepage allocated umem") Reported-by: Kal Cutter Conley Signed-off-by: Magnus Karlsson Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230421062208.3772-1-magnus.karlsson@gmail.com --- tools/testing/selftests/bpf/xskxceiver.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index a59d04118842..f144d0604ddf 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -1287,19 +1287,16 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size; int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; LIBBPF_OPTS(bpf_xdp_query_opts, opts); - off_t mmap_offset = 0; void *bufs; int ret; - if (ifobject->umem->unaligned_mode) { - mmap_flags |= MAP_HUGETLB; - mmap_offset = MAP_HUGE_2MB; - } + if (ifobject->umem->unaligned_mode) + mmap_flags |= MAP_HUGETLB | MAP_HUGE_2MB; if (ifobject->shared_umem) umem_sz *= 2; - bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, mmap_offset); + bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); if (bufs == MAP_FAILED) exit_with_error(errno); @@ -1633,7 +1630,7 @@ static bool hugepages_present(struct ifobject *ifobject) void *bufs; bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, MAP_HUGE_2MB); + MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0); if (bufs == MAP_FAILED) return false; -- cgit v1.2.3-70-g09d2 From 833d67ecdc5f35f1ebf59d0fccc1ce771434be9c Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 18 Apr 2023 15:53:39 -0700 Subject: selftests/bpf: Verify optval=NULL case Make sure we get optlen exported instead of getting EFAULT. 
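The user-space shape of the new check, roughly (error handling trimmed; fd is assumed to be an AF_NETLINK socket, as in the test):

  #include <sys/socket.h>
  #include <linux/netlink.h>

  static int memberships_optlen(int fd)
  {
          socklen_t optlen = 0;

          /* optval == NULL: the sockopt hook must still export the length
           * instead of failing with EFAULT. */
          if (getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &optlen))
                  return -1;

          return optlen;
  }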
Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20230418225343.553806-3-sdf@google.com --- .../testing/selftests/bpf/prog_tests/sockopt_sk.c | 28 ++++++++++++++++++++++ tools/testing/selftests/bpf/progs/sockopt_sk.c | 12 ++++++++++ 2 files changed, 40 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c index 60d952719d27..4512dd808c33 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c @@ -3,6 +3,7 @@ #include "cgroup_helpers.h" #include +#include #include "sockopt_sk.skel.h" #ifndef SOL_TCP @@ -183,6 +184,33 @@ static int getsetsockopt(void) goto err; } + /* optval=NULL case is handled correctly */ + + close(fd); + fd = socket(AF_NETLINK, SOCK_RAW, 0); + if (fd < 0) { + log_err("Failed to create AF_NETLINK socket"); + return -1; + } + + buf.u32 = 1; + optlen = sizeof(__u32); + err = setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &buf, optlen); + if (err) { + log_err("Unexpected getsockopt(NETLINK_ADD_MEMBERSHIP) err=%d errno=%d", + err, errno); + goto err; + } + + optlen = 0; + err = getsockopt(fd, SOL_NETLINK, NETLINK_LIST_MEMBERSHIPS, NULL, &optlen); + if (err) { + log_err("Unexpected getsockopt(NETLINK_LIST_MEMBERSHIPS) err=%d errno=%d", + err, errno); + goto err; + } + ASSERT_EQ(optlen, 4, "Unexpected NETLINK_LIST_MEMBERSHIPS value"); + free(big_buf); close(fd); return 0; diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c index c8d810010a94..fe1df4cd206e 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_sk.c +++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c @@ -32,6 +32,12 @@ int _getsockopt(struct bpf_sockopt *ctx) __u8 *optval_end = ctx->optval_end; __u8 *optval = ctx->optval; struct sockopt_sk *storage; + struct bpf_sock *sk; + + /* Bypass AF_NETLINK. */ + sk = ctx->sk; + if (sk && sk->family == AF_NETLINK) + return 1; /* Make sure bpf_get_netns_cookie is callable. */ @@ -131,6 +137,12 @@ int _setsockopt(struct bpf_sockopt *ctx) __u8 *optval_end = ctx->optval_end; __u8 *optval = ctx->optval; struct sockopt_sk *storage; + struct bpf_sock *sk; + + /* Bypass AF_NETLINK. */ + sk = ctx->sk; + if (sk && sk->family == AF_NETLINK) + return 1; /* Make sure bpf_get_netns_cookie is callable. */ -- cgit v1.2.3-70-g09d2 From 006c0e44ed924140d44bc756e6ea36301fcea68d Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 21 Apr 2023 19:03:00 +0200 Subject: selftests/bpf: add missing netfilter return value and ctx access tests Extend prog_tests with two test cases: # ./test_progs --allow=verifier_netfilter_retcode #278/1 verifier_netfilter_retcode/bpf_exit with invalid return code. test1:OK #278/2 verifier_netfilter_retcode/bpf_exit with valid return code. test2:OK #278/3 verifier_netfilter_retcode/bpf_exit with valid return code. test3:OK #278/4 verifier_netfilter_retcode/bpf_exit with invalid return code. test4:OK #278 verifier_netfilter_retcode:OK This checks that only accept and drop (0,1) are permitted. NF_QUEUE could be implemented later if we can guarantee that attachment of such programs can be rejected if they get attached to a pf/hook that doesn't support async reinjection. NF_STOLEN could be implemented via trusted helpers that can guarantee that the skb will eventually be free'd. v4: test case for bpf_nf_ctx access checks, requested by Alexei Starovoitov. 
v5: also check ctx->{state,skb} can be dereferenced (Alexei). # ./test_progs --allow=verifier_netfilter_ctx #281/1 verifier_netfilter_ctx/netfilter invalid context access, size too short:OK #281/2 verifier_netfilter_ctx/netfilter invalid context access, size too short:OK #281/3 verifier_netfilter_ctx/netfilter invalid context access, past end of ctx:OK #281/4 verifier_netfilter_ctx/netfilter invalid context, write:OK #281/5 verifier_netfilter_ctx/netfilter valid context read and invalid write:OK #281/6 verifier_netfilter_ctx/netfilter test prog with skb and state read access:OK #281/7 verifier_netfilter_ctx/netfilter test prog with skb and state read access @unpriv:OK #281 verifier_netfilter_ctx:OK Summary: 1/7 PASSED, 0 SKIPPED, 0 FAILED This checks: 1/2: partial reads of ctx->{skb,state} are rejected 3. read access past sizeof(ctx) is rejected 4. write to ctx content, e.g. 'ctx->skb = NULL;' is rejected 5. ctx->state content cannot be altered 6. ctx->state and ctx->skb can be dereferenced 7. ... same program fails for unpriv (CAP_NET_ADMIN needed). Link: https://lore.kernel.org/bpf/20230419021152.sjq4gttphzzy6b5f@dhcp-172-26-102-232.dhcp.thefacebook.com/ Link: https://lore.kernel.org/bpf/20230420201655.77kkgi3dh7fesoll@MacBook-Pro-6.local/ Signed-off-by: Florian Westphal Link: https://lore.kernel.org/r/20230421170300.24115-8-fw@strlen.de Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 4 + .../selftests/bpf/progs/verifier_netfilter_ctx.c | 121 +++++++++++++++++++++ .../bpf/progs/verifier_netfilter_retcode.c | 49 +++++++++ 3 files changed, 174 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c create mode 100644 tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 7c68d78da9ea..7534f5499d11 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -29,6 +29,8 @@ #include "verifier_map_ret_val.skel.h" #include "verifier_masking.skel.h" #include "verifier_meta_access.skel.h" +#include "verifier_netfilter_ctx.skel.h" +#include "verifier_netfilter_retcode.skel.h" #include "verifier_raw_stack.skel.h" #include "verifier_raw_tp_writable.skel.h" #include "verifier_reg_equal.skel.h" @@ -103,6 +105,8 @@ void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } void test_verifier_masking(void) { RUN(verifier_masking); } void test_verifier_meta_access(void) { RUN(verifier_meta_access); } +void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); } +void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); } void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); } void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); } void test_verifier_reg_equal(void) { RUN(verifier_reg_equal); } diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c new file mode 100644 index 000000000000..65bba330e7e5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" + +#include "bpf_misc.h" + +#include +#include +#include + +SEC("netfilter") +__description("netfilter invalid context access, size too short") +__failure 
__msg("invalid bpf_context access") +__naked void with_invalid_ctx_access_test1(void) +{ + asm volatile (" \ + r2 = *(u8*)(r1 + %[__bpf_nf_ctx_state]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__bpf_nf_ctx_state, offsetof(struct bpf_nf_ctx, state)) + : __clobber_all); +} + +SEC("netfilter") +__description("netfilter invalid context access, size too short") +__failure __msg("invalid bpf_context access") +__naked void with_invalid_ctx_access_test2(void) +{ + asm volatile (" \ + r2 = *(u16*)(r1 + %[__bpf_nf_ctx_skb]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__bpf_nf_ctx_skb, offsetof(struct bpf_nf_ctx, skb)) + : __clobber_all); +} + +SEC("netfilter") +__description("netfilter invalid context access, past end of ctx") +__failure __msg("invalid bpf_context access") +__naked void with_invalid_ctx_access_test3(void) +{ + asm volatile (" \ + r2 = *(u64*)(r1 + %[__bpf_nf_ctx_size]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__bpf_nf_ctx_size, sizeof(struct bpf_nf_ctx)) + : __clobber_all); +} + +SEC("netfilter") +__description("netfilter invalid context, write") +__failure __msg("invalid bpf_context access") +__naked void with_invalid_ctx_access_test4(void) +{ + asm volatile (" \ + r2 = r1; \ + *(u64*)(r2 + 0) = r1; \ + r0 = 1; \ + exit; \ +" : + : __imm_const(__bpf_nf_ctx_skb, offsetof(struct bpf_nf_ctx, skb)) + : __clobber_all); +} + +#define NF_DROP 0 +#define NF_ACCEPT 1 + +SEC("netfilter") +__description("netfilter valid context read and invalid write") +__failure __msg("only read is supported") +int with_invalid_ctx_access_test5(struct bpf_nf_ctx *ctx) +{ + struct nf_hook_state *state = (void *)ctx->state; + + state->sk = NULL; + return NF_ACCEPT; +} + +extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags, + struct bpf_dynptr *ptr__uninit) __ksym; +extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset, + void *buffer, uint32_t buffer__sz) __ksym; + +SEC("netfilter") +__description("netfilter test prog with skb and state read access") +__success __failure_unpriv +__retval(0) +int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx) +{ + const struct nf_hook_state *state = ctx->state; + struct sk_buff *skb = ctx->skb; + const struct iphdr *iph; + const struct tcphdr *th; + u8 buffer_iph[20] = {}; + u8 buffer_th[40] = {}; + struct bpf_dynptr ptr; + uint8_t ihl; + + if (skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr)) + return NF_ACCEPT; + + iph = bpf_dynptr_slice(&ptr, 0, buffer_iph, sizeof(buffer_iph)); + if (!iph) + return NF_ACCEPT; + + if (state->pf != 2) + return NF_ACCEPT; + + ihl = iph->ihl << 2; + + th = bpf_dynptr_slice(&ptr, ihl, buffer_th, sizeof(buffer_th)); + if (!th) + return NF_ACCEPT; + + return th->dest == bpf_htons(22) ? NF_ACCEPT : NF_DROP; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c new file mode 100644 index 000000000000..353ae6da00e1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "bpf_misc.h" + +SEC("netfilter") +__description("bpf_exit with invalid return code. test1") +__failure __msg("R0 is not a known value") +__naked void with_invalid_return_code_test1(void) +{ + asm volatile (" \ + r0 = *(u64*)(r1 + 0); \ + exit; \ +" ::: __clobber_all); +} + +SEC("netfilter") +__description("bpf_exit with valid return code. 
test2") +__success +__naked void with_valid_return_code_test2(void) +{ + asm volatile (" \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("netfilter") +__description("bpf_exit with valid return code. test3") +__success +__naked void with_valid_return_code_test3(void) +{ + asm volatile (" \ + r0 = 1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("netfilter") +__description("bpf_exit with invalid return code. test4") +__failure __msg("R0 has value (0x2; 0x0)") +__naked void with_invalid_return_code_test4(void) +{ + asm volatile (" \ + r0 = 2; \ + exit; \ +" ::: __clobber_all); +} -- cgit v1.2.3-70-g09d2 From 63bb645b9da373c29ba4b5bea14e80c49e676694 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:11 +0300 Subject: selftests/bpf: Add notion of auxiliary programs for test_loader In order to express test cases that use bpf_tail_call() intrinsic it is necessary to have several programs to be loaded at a time. This commit adds __auxiliary annotation to the set of annotations supported by test_loader.c. Programs marked as auxiliary are always loaded but are not treated as a separate test. For example: void dummy_prog1(void); struct { __uint(type, BPF_MAP_TYPE_PROG_ARRAY); __uint(max_entries, 4); __uint(key_size, sizeof(int)); __array(values, void (void)); } prog_map SEC(".maps") = { .values = { [0] = (void *) &dummy_prog1, }, }; SEC("tc") __auxiliary __naked void dummy_prog1(void) { asm volatile ("r0 = 42; exit;"); } SEC("tc") __description("reference tracking: check reference or tail call") __success __retval(0) __naked void check_reference_or_tail_call(void) { asm volatile ( "r2 = %[prog_map] ll;" "r3 = 0;" "call %[bpf_tail_call];" "r0 = 0;" "exit;" :: __imm(bpf_tail_call), : __clobber_all); } Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-2-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/bpf_misc.h | 6 ++ tools/testing/selftests/bpf/test_loader.c | 89 +++++++++++++++++++++------- 2 files changed, 73 insertions(+), 22 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 3b307de8dab9..d3c1217ba79a 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -53,6 +53,10 @@ * - A numeric value. * Multiple __flag attributes could be specified, the final flags * value is derived by applying binary "or" to all specified values. + * + * __auxiliary Annotated program is not a separate test, but used as auxiliary + * for some other test cases and should always be loaded. + * __auxiliary_unpriv Same, but load program in unprivileged mode. 
*/ #define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg))) #define __failure __attribute__((btf_decl_tag("comment:test_expect_failure"))) @@ -65,6 +69,8 @@ #define __flag(flag) __attribute__((btf_decl_tag("comment:test_prog_flags="#flag))) #define __retval(val) __attribute__((btf_decl_tag("comment:test_retval="#val))) #define __retval_unpriv(val) __attribute__((btf_decl_tag("comment:test_retval_unpriv="#val))) +#define __auxiliary __attribute__((btf_decl_tag("comment:test_auxiliary"))) +#define __auxiliary_unpriv __attribute__((btf_decl_tag("comment:test_auxiliary_unpriv"))) /* Convenience macro for use with 'asm volatile' blocks */ #define __naked __attribute__((naked)) diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index 40c9b7d532c4..b4edd8454934 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -25,6 +25,8 @@ #define TEST_TAG_DESCRIPTION_PFX "comment:test_description=" #define TEST_TAG_RETVAL_PFX "comment:test_retval=" #define TEST_TAG_RETVAL_PFX_UNPRIV "comment:test_retval_unpriv=" +#define TEST_TAG_AUXILIARY "comment:test_auxiliary" +#define TEST_TAG_AUXILIARY_UNPRIV "comment:test_auxiliary_unpriv" /* Warning: duplicated in bpf_misc.h */ #define POINTER_VALUE 0xcafe4all @@ -59,6 +61,8 @@ struct test_spec { int log_level; int prog_flags; int mode_mask; + bool auxiliary; + bool valid; }; static int tester_init(struct test_loader *tester) @@ -87,6 +91,11 @@ static void free_test_spec(struct test_spec *spec) free(spec->unpriv.name); free(spec->priv.expect_msgs); free(spec->unpriv.expect_msgs); + + spec->priv.name = NULL; + spec->unpriv.name = NULL; + spec->priv.expect_msgs = NULL; + spec->unpriv.expect_msgs = NULL; } static int push_msg(const char *msg, struct test_subspec *subspec) @@ -204,6 +213,12 @@ static int parse_test_spec(struct test_loader *tester, spec->unpriv.expect_failure = false; spec->mode_mask |= UNPRIV; has_unpriv_result = true; + } else if (strcmp(s, TEST_TAG_AUXILIARY) == 0) { + spec->auxiliary = true; + spec->mode_mask |= PRIV; + } else if (strcmp(s, TEST_TAG_AUXILIARY_UNPRIV) == 0) { + spec->auxiliary = true; + spec->mode_mask |= UNPRIV; } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) { msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1; err = push_msg(msg, &spec->priv); @@ -314,6 +329,8 @@ static int parse_test_spec(struct test_loader *tester, } } + spec->valid = true; + return 0; cleanup: @@ -516,16 +533,18 @@ void run_subtest(struct test_loader *tester, struct bpf_object_open_opts *open_opts, const void *obj_bytes, size_t obj_byte_cnt, + struct test_spec *specs, struct test_spec *spec, bool unpriv) { struct test_subspec *subspec = unpriv ? 
&spec->unpriv : &spec->priv; + struct bpf_program *tprog, *tprog_iter; + struct test_spec *spec_iter; struct cap_state caps = {}; - struct bpf_program *tprog; struct bpf_object *tobj; struct bpf_map *map; - int retval; - int err; + int retval, err, i; + bool should_load; if (!test__start_subtest(subspec->name)) return; @@ -546,15 +565,23 @@ void run_subtest(struct test_loader *tester, if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */ goto subtest_cleanup; - bpf_object__for_each_program(tprog, tobj) - bpf_program__set_autoload(tprog, false); + i = 0; + bpf_object__for_each_program(tprog_iter, tobj) { + spec_iter = &specs[i++]; + should_load = false; + + if (spec_iter->valid) { + if (strcmp(bpf_program__name(tprog_iter), spec->prog_name) == 0) { + tprog = tprog_iter; + should_load = true; + } - bpf_object__for_each_program(tprog, tobj) { - /* only load specified program */ - if (strcmp(bpf_program__name(tprog), spec->prog_name) == 0) { - bpf_program__set_autoload(tprog, true); - break; + if (spec_iter->auxiliary && + spec_iter->mode_mask & (unpriv ? UNPRIV : PRIV)) + should_load = true; } + + bpf_program__set_autoload(tprog_iter, should_load); } prepare_case(tester, spec, tobj, tprog); @@ -617,11 +644,12 @@ static void process_subtest(struct test_loader *tester, skel_elf_bytes_fn elf_bytes_factory) { LIBBPF_OPTS(bpf_object_open_opts, open_opts, .object_name = skel_name); + struct test_spec *specs = NULL; struct bpf_object *obj = NULL; struct bpf_program *prog; const void *obj_bytes; + int err, i, nr_progs; size_t obj_byte_cnt; - int err; if (tester_init(tester) < 0) return; /* failed to initialize tester */ @@ -631,25 +659,42 @@ static void process_subtest(struct test_loader *tester, if (!ASSERT_OK_PTR(obj, "obj_open_mem")) return; - bpf_object__for_each_program(prog, obj) { - struct test_spec spec; + nr_progs = 0; + bpf_object__for_each_program(prog, obj) + ++nr_progs; + + specs = calloc(nr_progs, sizeof(struct test_spec)); + if (!ASSERT_OK_PTR(specs, "Can't alloc specs array")) + return; - /* if we can't derive test specification, go to the next test */ - err = parse_test_spec(tester, obj, prog, &spec); - if (err) { + i = 0; + bpf_object__for_each_program(prog, obj) { + /* ignore tests for which we can't derive test specification */ + err = parse_test_spec(tester, obj, prog, &specs[i++]); + if (err) PRINT_FAIL("Can't parse test spec for program '%s'\n", bpf_program__name(prog)); + } + + i = 0; + bpf_object__for_each_program(prog, obj) { + struct test_spec *spec = &specs[i++]; + + if (!spec->valid || spec->auxiliary) continue; - } - if (spec.mode_mask & PRIV) - run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, &spec, false); - if (spec.mode_mask & UNPRIV) - run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, &spec, true); + if (spec->mode_mask & PRIV) + run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, + specs, spec, false); + if (spec->mode_mask & UNPRIV) + run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt, + specs, spec, true); - free_test_spec(&spec); } + for (i = 0; i < nr_progs; ++i) + free_test_spec(&specs[i]); + free(specs); bpf_object__close(obj); } -- cgit v1.2.3-70-g09d2 From c92336559ac0127250cb3ffa9e6f69f9d7e81779 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:12 +0300 Subject: selftests/bpf: verifier/bounds converted to inline assembly Test verifier/bounds automatically converted to use inline assembly. 
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-3-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../testing/selftests/bpf/progs/verifier_bounds.c | 1076 ++++++++++++++++++++ tools/testing/selftests/bpf/verifier/bounds.c | 884 ---------------- 3 files changed, 1078 insertions(+), 884 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_bounds.c delete mode 100644 tools/testing/selftests/bpf/verifier/bounds.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 7534f5499d11..d71cbd63d94d 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -6,6 +6,7 @@ #include "verifier_and.skel.h" #include "verifier_array_access.skel.h" #include "verifier_basic_stack.skel.h" +#include "verifier_bounds.skel.h" #include "verifier_bounds_deduction.skel.h" #include "verifier_bounds_deduction_non_const.skel.h" #include "verifier_bounds_mix_sign_unsign.skel.h" @@ -82,6 +83,7 @@ static void run_tests_aux(const char *skel_name, void test_verifier_and(void) { RUN(verifier_and); } void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); } +void test_verifier_bounds(void) { RUN(verifier_bounds); } void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); } void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); } void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); } diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c new file mode 100644 index 000000000000..c5588a14fe2e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c @@ -0,0 +1,1076 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/bounds.c */ + +#include +#include +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("subtraction bounds (map value) variant 1") +__failure __msg("R0 max value is outside of the allowed memory range") +__failure_unpriv +__naked void bounds_map_value_variant_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + if r1 > 0xff goto l0_%=; \ + r3 = *(u8*)(r0 + 1); \ + if r3 > 0xff goto l0_%=; \ + r1 -= r3; \ + r1 >>= 56; \ + r0 += r1; \ + r0 = *(u8*)(r0 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("subtraction bounds (map value) variant 2") +__failure +__msg("R0 min value is negative, either use unsigned index or do a if (index >=0) check.") +__msg_unpriv("R1 has unknown scalar with mixed signed bounds") +__naked void bounds_map_value_variant_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + if r1 > 0xff goto l0_%=; \ + r3 = *(u8*)(r0 + 1); \ + if r3 > 0xff goto l0_%=; \ + r1 -= r3; \ + r0 += r1; \ + r0 = *(u8*)(r0 + 0); \ + exit; \ 
+l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("check subtraction on pointers for unpriv") +__success __failure_unpriv __msg_unpriv("R9 pointer -= pointer prohibited") +__retval(0) +__naked void subtraction_on_pointers_for_unpriv(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = %[map_hash_8b] ll; \ + r2 = r10; \ + r2 += -8; \ + r6 = 9; \ + *(u64*)(r2 + 0) = r6; \ + call %[bpf_map_lookup_elem]; \ + r9 = r10; \ + r9 -= r0; \ + r1 = %[map_hash_8b] ll; \ + r2 = r10; \ + r2 += -8; \ + r6 = 0; \ + *(u64*)(r2 + 0) = r6; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: *(u64*)(r0 + 0) = r9; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check based on zero-extended MOV") +__success __success_unpriv __retval(0) +__naked void based_on_zero_extended_mov(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r2 = 0x0000'0000'ffff'ffff */ \ + w2 = 0xffffffff; \ + /* r2 = 0 */ \ + r2 >>= 32; \ + /* no-op */ \ + r0 += r2; \ + /* access at offset 0 */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check based on sign-extended MOV. test1") +__failure __msg("map_value pointer and 4294967295") +__failure_unpriv +__naked void on_sign_extended_mov_test1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r2 = 0xffff'ffff'ffff'ffff */ \ + r2 = 0xffffffff; \ + /* r2 = 0xffff'ffff */ \ + r2 >>= 32; \ + /* r0 = */ \ + r0 += r2; \ + /* access to OOB pointer */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check based on sign-extended MOV. test2") +__failure __msg("R0 min value is outside of the allowed memory range") +__failure_unpriv +__naked void on_sign_extended_mov_test2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r2 = 0xffff'ffff'ffff'ffff */ \ + r2 = 0xffffffff; \ + /* r2 = 0xfff'ffff */ \ + r2 >>= 36; \ + /* r0 = */ \ + r0 += r2; \ + /* access to OOB pointer */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tc") +__description("bounds check based on reg_off + var_off + insn_off. 
test1") +__failure __msg("value_size=8 off=1073741825") +__naked void var_off_insn_off_test1(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r6 &= 1; \ + r6 += %[__imm_0]; \ + r0 += r6; \ + r0 += %[__imm_0]; \ +l0_%=: r0 = *(u8*)(r0 + 3); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__imm_0, (1 << 29) - 1), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("bounds check based on reg_off + var_off + insn_off. test2") +__failure __msg("value 1073741823") +__naked void var_off_insn_off_test2(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r6 &= 1; \ + r6 += %[__imm_0]; \ + r0 += r6; \ + r0 += %[__imm_1]; \ +l0_%=: r0 = *(u8*)(r0 + 3); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__imm_0, (1 << 30) - 1), + __imm_const(__imm_1, (1 << 29) - 1), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after truncation of non-boundary-crossing range") +__success __success_unpriv __retval(0) +__naked void of_non_boundary_crossing_range(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = [0x00, 0xff] */ \ + r1 = *(u8*)(r0 + 0); \ + r2 = 1; \ + /* r2 = 0x10'0000'0000 */ \ + r2 <<= 36; \ + /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */ \ + r1 += r2; \ + /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */ \ + r1 += 0x7fffffff; \ + /* r1 = [0x00, 0xff] */ \ + w1 -= 0x7fffffff; \ + /* r1 = 0 */ \ + r1 >>= 8; \ + /* no-op */ \ + r0 += r1; \ + /* access at offset 0 */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after truncation of boundary-crossing range (1)") +__failure +/* not actually fully unbounded, but the bound is very high */ +__msg("value -4294967168 makes map_value pointer be out of bounds") +__failure_unpriv +__naked void of_boundary_crossing_range_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = [0x00, 0xff] */ \ + r1 = *(u8*)(r0 + 0); \ + r1 += %[__imm_0]; \ + /* r1 = [0xffff'ff80, 0x1'0000'007f] */ \ + r1 += %[__imm_0]; \ + /* r1 = [0xffff'ff80, 0xffff'ffff] or \ + * [0x0000'0000, 0x0000'007f] \ + */ \ + w1 += 0; \ + r1 -= %[__imm_0]; \ + /* r1 = [0x00, 0xff] or \ + * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\ + */ \ + r1 -= %[__imm_0]; \ + /* error on OOB pointer computation */ \ + r0 += r1; \ + /* exit */ \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__imm_0, 0xffffff80 >> 1) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after truncation of boundary-crossing range (2)") +__failure __msg("value -4294967168 makes map_value pointer be out of bounds") +__failure_unpriv 
+__naked void of_boundary_crossing_range_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = [0x00, 0xff] */ \ + r1 = *(u8*)(r0 + 0); \ + r1 += %[__imm_0]; \ + /* r1 = [0xffff'ff80, 0x1'0000'007f] */ \ + r1 += %[__imm_0]; \ + /* r1 = [0xffff'ff80, 0xffff'ffff] or \ + * [0x0000'0000, 0x0000'007f] \ + * difference to previous test: truncation via MOV32\ + * instead of ALU32. \ + */ \ + w1 = w1; \ + r1 -= %[__imm_0]; \ + /* r1 = [0x00, 0xff] or \ + * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]\ + */ \ + r1 -= %[__imm_0]; \ + /* error on OOB pointer computation */ \ + r0 += r1; \ + /* exit */ \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b), + __imm_const(__imm_0, 0xffffff80 >> 1) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after wrapping 32-bit addition") +__success __success_unpriv __retval(0) +__naked void after_wrapping_32_bit_addition(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = 0x7fff'ffff */ \ + r1 = 0x7fffffff; \ + /* r1 = 0xffff'fffe */ \ + r1 += 0x7fffffff; \ + /* r1 = 0 */ \ + w1 += 2; \ + /* no-op */ \ + r0 += r1; \ + /* access at offset 0 */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after shift with oversized count operand") +__failure __msg("R0 max value is outside of the allowed memory range") +__failure_unpriv +__naked void shift_with_oversized_count_operand(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r2 = 32; \ + r1 = 1; \ + /* r1 = (u32)1 << (u32)32 = ? 
*/ \ + w1 <<= w2; \ + /* r1 = [0x0000, 0xffff] */ \ + r1 &= 0xffff; \ + /* computes unknown pointer, potentially OOB */ \ + r0 += r1; \ + /* potentially OOB access */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after right shift of maybe-negative number") +__failure __msg("R0 unbounded memory access") +__failure_unpriv +__naked void shift_of_maybe_negative_number(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + /* r1 = [0x00, 0xff] */ \ + r1 = *(u8*)(r0 + 0); \ + /* r1 = [-0x01, 0xfe] */ \ + r1 -= 1; \ + /* r1 = 0 or 0xff'ffff'ffff'ffff */ \ + r1 >>= 8; \ + /* r1 = 0 or 0xffff'ffff'ffff */ \ + r1 >>= 8; \ + /* computes unknown pointer, potentially OOB */ \ + r0 += r1; \ + /* potentially OOB access */ \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: /* exit */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check after 32-bit right shift with 64-bit input") +__failure __msg("math between map_value pointer and 4294967294 is not allowed") +__failure_unpriv +__naked void shift_with_64_bit_input(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 2; \ + /* r1 = 1<<32 */ \ + r1 <<= 31; \ + /* r1 = 0 (NOT 2!) */ \ + w1 >>= 31; \ + /* r1 = 0xffff'fffe (NOT 0!) */ \ + w1 -= 2; \ + /* error on computing OOB pointer */ \ + r0 += r1; \ + /* exit */ \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check map access with off+size signed 32bit overflow. test1") +__failure __msg("map_value pointer and 2147483646") +__failure_unpriv +__naked void size_signed_32bit_overflow_test1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r0 += 0x7ffffffe; \ + r0 = *(u64*)(r0 + 0); \ + goto l1_%=; \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check map access with off+size signed 32bit overflow. test2") +__failure __msg("pointer offset 1073741822") +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__naked void size_signed_32bit_overflow_test2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r0 += 0x1fffffff; \ + r0 += 0x1fffffff; \ + r0 += 0x1fffffff; \ + r0 = *(u64*)(r0 + 0); \ + goto l1_%=; \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check map access with off+size signed 32bit overflow. 
test3") +__failure __msg("pointer offset -1073741822") +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__naked void size_signed_32bit_overflow_test3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r0 -= 0x1fffffff; \ + r0 -= 0x1fffffff; \ + r0 = *(u64*)(r0 + 2); \ + goto l1_%=; \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check map access with off+size signed 32bit overflow. test4") +__failure __msg("map_value pointer and 1000000000000") +__failure_unpriv +__naked void size_signed_32bit_overflow_test4(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = 1000000; \ + r1 *= 1000000; \ + r0 += r1; \ + r0 = *(u64*)(r0 + 2); \ + goto l1_%=; \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check mixed 32bit and 64bit arithmetic. test1") +__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__retval(0) +__naked void _32bit_and_64bit_arithmetic_test1(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = -1; \ + r1 <<= 32; \ + r1 += 1; \ + /* r1 = 0xffffFFFF00000001 */ \ + if w1 > 1 goto l0_%=; \ + /* check ALU64 op keeps 32bit bounds */ \ + r1 += 1; \ + if w1 > 2 goto l0_%=; \ + goto l1_%=; \ +l0_%=: /* invalid ldx if bounds are lost above */ \ + r0 = *(u64*)(r0 - 1); \ +l1_%=: exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("bounds check mixed 32bit and 64bit arithmetic. 
test2") +__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__retval(0) +__naked void _32bit_and_64bit_arithmetic_test2(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = -1; \ + r1 <<= 32; \ + r1 += 1; \ + /* r1 = 0xffffFFFF00000001 */ \ + r2 = 3; \ + /* r1 = 0x2 */ \ + w1 += 1; \ + /* check ALU32 op zero extends 64bit bounds */ \ + if r1 > r2 goto l0_%=; \ + goto l1_%=; \ +l0_%=: /* invalid ldx if bounds are lost above */ \ + r0 = *(u64*)(r0 - 1); \ +l1_%=: exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("assigning 32bit bounds to 64bit for wA = 0, wB = wA") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void for_wa_0_wb_wa(void) +{ + asm volatile (" \ + r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data]); \ + w9 = 0; \ + w2 = w9; \ + r6 = r7; \ + r6 += r2; \ + r3 = r6; \ + r3 += 8; \ + if r3 > r8 goto l0_%=; \ + r5 = *(u32*)(r6 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg = 0, reg xor 1") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg_0_reg_xor_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = 0; \ + r1 ^= 1; \ + if r1 != 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg32 = 0, reg32 xor 1") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg32_0_reg32_xor_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: w1 = 0; \ + w1 ^= 1; \ + if w1 != 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg = 2, reg xor 3") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg_2_reg_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = 2; \ + r1 ^= 3; \ + if r1 > 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg = any, reg xor 3") +__failure __msg("invalid access to map value") +__msg_unpriv("invalid access to map value") +__naked void reg_any_reg_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u64*)(r0 + 0); \ + r1 ^= 3; \ + if r1 != 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + 
+SEC("socket") +__description("bounds check for reg32 = any, reg32 xor 3") +__failure __msg("invalid access to map value") +__msg_unpriv("invalid access to map value") +__naked void reg32_any_reg32_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u64*)(r0 + 0); \ + w1 ^= 3; \ + if w1 != 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg > 0, reg xor 3") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg_0_reg_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u64*)(r0 + 0); \ + if r1 <= 0 goto l1_%=; \ + r1 ^= 3; \ + if r1 >= 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds check for reg32 > 0, reg32 xor 3") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(0) +__naked void reg32_0_reg32_xor_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u64*)(r0 + 0); \ + if w1 <= 0 goto l1_%=; \ + w1 ^= 3; \ + if w1 >= 0 goto l1_%=; \ + r0 = *(u64*)(r0 + 8); \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks after 32-bit truncation. test 1") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void _32_bit_truncation_test_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + /* This used to reduce the max bound to 0x7fffffff */\ + if r1 == 0 goto l1_%=; \ + if r1 > 0x7fffffff goto l0_%=; \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("bounds checks after 32-bit truncation. 
test 2") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void _32_bit_truncation_test_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + if r1 s< 1 goto l1_%=; \ + if w1 s< 0 goto l0_%=; \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("xdp") +__description("bound check with JMP_JLT for crossing 64-bit signed boundary") +__success __retval(0) +__naked void crossing_64_bit_signed_boundary_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + r0 = 0x7fffffffffffff10 ll; \ + r1 += r0; \ + r0 = 0x8000000000000000 ll; \ +l1_%=: r0 += 1; \ + /* r1 unsigned range is [0x7fffffffffffff10, 0x800000000000000f] */\ + if r0 < r1 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("bound check with JMP_JSLT for crossing 64-bit signed boundary") +__success __retval(0) +__naked void crossing_64_bit_signed_boundary_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + r0 = 0x7fffffffffffff10 ll; \ + r1 += r0; \ + r2 = 0x8000000000000fff ll; \ + r0 = 0x8000000000000000 ll; \ +l1_%=: r0 += 1; \ + if r0 s> r2 goto l0_%=; \ + /* r1 signed range is [S64_MIN, S64_MAX] */ \ + if r0 s< r1 goto l1_%=; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("bound check for loop upper bound greater than U32_MAX") +__success __retval(0) +__naked void bound_greater_than_u32_max(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + r0 = 0x100000000 ll; \ + r1 += r0; \ + r0 = 0x100000000 ll; \ +l1_%=: r0 += 1; \ + if r0 < r1 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("bound check with JMP32_JLT for crossing 32-bit signed boundary") +__success __retval(0) +__naked void crossing_32_bit_signed_boundary_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + w0 = 0x7fffff10; \ + w1 += w0; \ + w0 = 0x80000000; \ +l1_%=: w0 += 1; \ + /* r1 unsigned range is [0, 0x8000000f] */ \ + if w0 < w1 goto l1_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +SEC("xdp") +__description("bound check with JMP32_JSLT for crossing 32-bit signed boundary") +__success __retval(0) +__naked void crossing_32_bit_signed_boundary_2(void) +{ + asm volatile (" \ + r2 = 
*(u32*)(r1 + %[xdp_md_data]); \ + r3 = *(u32*)(r1 + %[xdp_md_data_end]); \ + r1 = r2; \ + r1 += 1; \ + if r1 > r3 goto l0_%=; \ + r1 = *(u8*)(r2 + 0); \ + w0 = 0x7fffff10; \ + w1 += w0; \ + w2 = 0x80000fff; \ + w0 = 0x80000000; \ +l1_%=: w0 += 1; \ + if w0 s> w2 goto l0_%=; \ + /* r1 signed range is [S32_MIN, S32_MAX] */ \ + if w0 s< w1 goto l1_%=; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)), + __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c deleted file mode 100644 index 43942ce8cf15..000000000000 --- a/tools/testing/selftests/bpf/verifier/bounds.c +++ /dev/null @@ -1,884 +0,0 @@ -{ - "subtraction bounds (map value) variant 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 7), - BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 5), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 56), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 max value is outside of the allowed memory range", - .result = REJECT, -}, -{ - "subtraction bounds (map value) variant 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6), - BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", - .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds", - .result = REJECT, -}, -{ - "check subtraction on pointers for unpriv", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 9), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_FP), - BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_ARG2, 0, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1, 9 }, - .result = ACCEPT, - 
.result_unpriv = REJECT, - .errstr_unpriv = "R9 pointer -= pointer prohibited", -}, -{ - "bounds check based on zero-extended MOV", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - /* r2 = 0x0000'0000'ffff'ffff */ - BPF_MOV32_IMM(BPF_REG_2, 0xffffffff), - /* r2 = 0 */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32), - /* no-op */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - /* access at offset 0 */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT -}, -{ - "bounds check based on sign-extended MOV. test1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - /* r2 = 0xffff'ffff'ffff'ffff */ - BPF_MOV64_IMM(BPF_REG_2, 0xffffffff), - /* r2 = 0xffff'ffff */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32), - /* r0 = */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - /* access to OOB pointer */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "map_value pointer and 4294967295", - .result = REJECT -}, -{ - "bounds check based on sign-extended MOV. test2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - /* r2 = 0xffff'ffff'ffff'ffff */ - BPF_MOV64_IMM(BPF_REG_2, 0xffffffff), - /* r2 = 0xfff'ffff */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36), - /* r0 = */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - /* access to OOB pointer */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 min value is outside of the allowed memory range", - .result = REJECT -}, -{ - "bounds check based on reg_off + var_off + insn_off. test1", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr = "value_size=8 off=1073741825", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "bounds check based on reg_off + var_off + insn_off. 
test2", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 4 }, - .errstr = "value 1073741823", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "bounds check after truncation of non-boundary-crossing range", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - /* r1 = [0x00, 0xff] */ - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_2, 1), - /* r2 = 0x10'0000'0000 */ - BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36), - /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), - /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), - /* r1 = [0x00, 0xff] */ - BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff), - /* r1 = 0 */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), - /* no-op */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* access at offset 0 */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT -}, -{ - "bounds check after truncation of boundary-crossing range (1)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - /* r1 = [0x00, 0xff] */ - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0xffff'ff80, 0x1'0000'007f] */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0xffff'ff80, 0xffff'ffff] or - * [0x0000'0000, 0x0000'007f] - */ - BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0x00, 0xff] or - * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] - */ - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), - /* error on OOB pointer computation */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - /* not actually fully unbounded, but the bound is very high */ - .errstr = "value -4294967168 makes map_value pointer be out of bounds", - .result = REJECT, -}, -{ - "bounds check after truncation of boundary-crossing range (2)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - /* r1 = [0x00, 0xff] */ - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, 
BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0xffff'ff80, 0x1'0000'007f] */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0xffff'ff80, 0xffff'ffff] or - * [0x0000'0000, 0x0000'007f] - * difference to previous test: truncation via MOV32 - * instead of ALU32. - */ - BPF_MOV32_REG(BPF_REG_1, BPF_REG_1), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), - /* r1 = [0x00, 0xff] or - * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] - */ - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), - /* error on OOB pointer computation */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "value -4294967168 makes map_value pointer be out of bounds", - .result = REJECT, -}, -{ - "bounds check after wrapping 32-bit addition", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - /* r1 = 0x7fff'ffff */ - BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff), - /* r1 = 0xffff'fffe */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff), - /* r1 = 0 */ - BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2), - /* no-op */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* access at offset 0 */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT -}, -{ - "bounds check after shift with oversized count operand", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_IMM(BPF_REG_2, 32), - BPF_MOV64_IMM(BPF_REG_1, 1), - /* r1 = (u32)1 << (u32)32 = ? 
*/ - BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2), - /* r1 = [0x0000, 0xffff] */ - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff), - /* computes unknown pointer, potentially OOB */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* potentially OOB access */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 max value is outside of the allowed memory range", - .result = REJECT -}, -{ - "bounds check after right shift of maybe-negative number", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - /* r1 = [0x00, 0xff] */ - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - /* r1 = [-0x01, 0xfe] */ - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1), - /* r1 = 0 or 0xff'ffff'ffff'ffff */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), - /* r1 = 0 or 0xffff'ffff'ffff */ - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), - /* computes unknown pointer, potentially OOB */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* potentially OOB access */ - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R0 unbounded memory access", - .result = REJECT -}, -{ - "bounds check after 32-bit right shift with 64-bit input", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - /* r1 = 2 */ - BPF_MOV64_IMM(BPF_REG_1, 2), - /* r1 = 1<<32 */ - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31), - /* r1 = 0 (NOT 2!) */ - BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 31), - /* r1 = 0xffff'fffe (NOT 0!) */ - BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 2), - /* error on computing OOB pointer */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - /* exit */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "math between map_value pointer and 4294967294 is not allowed", - .result = REJECT, -}, -{ - "bounds check map access with off+size signed 32bit overflow. test1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "map_value pointer and 2147483646", - .result = REJECT -}, -{ - "bounds check map access with off+size signed 32bit overflow. 
test2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "pointer offset 1073741822", - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .result = REJECT -}, -{ - "bounds check map access with off+size signed 32bit overflow. test3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "pointer offset -1073741822", - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .result = REJECT -}, -{ - "bounds check map access with off+size signed 32bit overflow. test4", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_1, 1000000), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "map_value pointer and 1000000000000", - .result = REJECT -}, -{ - "bounds check mixed 32bit and 64bit arithmetic. test1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - /* r1 = 0xffffFFFF00000001 */ - BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 1, 3), - /* check ALU64 op keeps 32bit bounds */ - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_JMP32_IMM(BPF_JGT, BPF_REG_1, 2, 1), - BPF_JMP_A(1), - /* invalid ldx if bounds are lost above */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, - .result = ACCEPT -}, -{ - "bounds check mixed 32bit and 64bit arithmetic. 
test2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - /* r1 = 0xffffFFFF00000001 */ - BPF_MOV64_IMM(BPF_REG_2, 3), - /* r1 = 0x2 */ - BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1), - /* check ALU32 op zero extends 64bit bounds */ - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 1), - BPF_JMP_A(1), - /* invalid ldx if bounds are lost above */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .result_unpriv = REJECT, - .result = ACCEPT -}, -{ - "assigning 32bit bounds to 64bit for wA = 0, wB = wA", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_MOV32_IMM(BPF_REG_9, 0), - BPF_MOV32_REG(BPF_REG_2, BPF_REG_9), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_8, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "bounds check for reg = 0, reg xor 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 min value is outside of the allowed memory range", - .result_unpriv = REJECT, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, -}, -{ - "bounds check for reg32 = 0, reg32 xor 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 1), - BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 min value is outside of the allowed memory range", - .result_unpriv = REJECT, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, -}, -{ - "bounds check for reg = 2, reg xor 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_1, 2), - BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3), - BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 min value is outside of the allowed memory range", - .result_unpriv = REJECT, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, -}, -{ - "bounds check for reg = any, reg xor 3", - .insns = { - 
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = REJECT, - .errstr = "invalid access to map value", - .errstr_unpriv = "invalid access to map value", -}, -{ - "bounds check for reg32 = any, reg32 xor 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 3), - BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = REJECT, - .errstr = "invalid access to map value", - .errstr_unpriv = "invalid access to map value", -}, -{ - "bounds check for reg > 0, reg xor 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JLE, BPF_REG_1, 0, 3), - BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3), - BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 min value is outside of the allowed memory range", - .result_unpriv = REJECT, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, -}, -{ - "bounds check for reg32 > 0, reg32 xor 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP32_IMM(BPF_JLE, BPF_REG_1, 0, 3), - BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 3), - BPF_JMP32_IMM(BPF_JGE, BPF_REG_1, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R0 min value is outside of the allowed memory range", - .result_unpriv = REJECT, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, -}, -{ - "bounds checks after 32-bit truncation. 
test 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - /* This used to reduce the max bound to 0x7fffffff */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0x7fffffff, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "bounds checks after 32-bit truncation. test 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_1, 1, 1), - BPF_JMP32_IMM(BPF_JSLT, BPF_REG_1, 0, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "bound check with JMP_JLT for crossing 64-bit signed boundary", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 8), - - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), - BPF_LD_IMM64(BPF_REG_0, 0x7fffffffffffff10), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - - BPF_LD_IMM64(BPF_REG_0, 0x8000000000000000), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - /* r1 unsigned range is [0x7fffffffffffff10, 0x800000000000000f] */ - BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_1, -2), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "bound check with JMP_JSLT for crossing 64-bit signed boundary", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 13), - - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), - BPF_LD_IMM64(BPF_REG_0, 0x7fffffffffffff10), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - - BPF_LD_IMM64(BPF_REG_2, 0x8000000000000fff), - BPF_LD_IMM64(BPF_REG_0, 0x8000000000000000), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_2, 3), - /* r1 signed range is [S64_MIN, S64_MAX] */ - BPF_JMP_REG(BPF_JSLT, BPF_REG_0, BPF_REG_1, -3), - - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "bound check for loop upper bound greater than U32_MAX", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 8), - - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), - BPF_LD_IMM64(BPF_REG_0, 0x100000000), - BPF_ALU64_REG(BPF_ADD, 
BPF_REG_1, BPF_REG_0), - - BPF_LD_IMM64(BPF_REG_0, 0x100000000), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_1, -2), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "bound check with JMP32_JLT for crossing 32-bit signed boundary", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 6), - - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), - BPF_MOV32_IMM(BPF_REG_0, 0x7fffff10), - BPF_ALU32_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - - BPF_MOV32_IMM(BPF_REG_0, 0x80000000), - BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 1), - /* r1 unsigned range is [0, 0x8000000f] */ - BPF_JMP32_REG(BPF_JLT, BPF_REG_0, BPF_REG_1, -2), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "bound check with JMP32_JSLT for crossing 32-bit signed boundary", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 10), - - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), - BPF_MOV32_IMM(BPF_REG_0, 0x7fffff10), - BPF_ALU32_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - - BPF_MOV32_IMM(BPF_REG_2, 0x80000fff), - BPF_MOV32_IMM(BPF_REG_0, 0x80000000), - BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP32_REG(BPF_JSGT, BPF_REG_0, BPF_REG_2, 3), - /* r1 signed range is [S32_MIN, S32_MAX] */ - BPF_JMP32_REG(BPF_JSLT, BPF_REG_0, BPF_REG_1, -3), - - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, -}, -- cgit v1.2.3-70-g09d2 From 965a3f913e723e2da9c0ec21b9825b583b0777e1 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:13 +0300 Subject: selftests/bpf: verifier/bpf_get_stack converted to inline assembly Test verifier/bpf_get_stack automatically converted to use inline assembly. 
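The conversion pattern is the same across this series: a struct entry with a BPF_*_INSN() opcode array and .result/.errstr fields becomes a standalone __naked C function whose body is BPF inline assembly, annotated with SEC(), __description() and __success/__failure/__msg()/__retval() from bpf_misc.h, and built into a skeleton. A minimal sketch of that shape, using a made-up test name rather than anything taken from this patch:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

/* Hypothetical example of the converted form: the old .descr string becomes
 * __description(), .result = ACCEPT becomes __success, and the expected
 * return value moves into __retval(). The function name is a placeholder.
 */
SEC("tc")
__description("example: return zero is accepted")
__success __retval(0)
__naked void example_return_zero(void)
{
	asm volatile ("					\
	r0 = 0;						\
	exit;						\
" ::: __clobber_all);
}

char _license[] SEC("license") = "GPL";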
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-4-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_bpf_get_stack.c | 124 +++++++++++++++++++++ .../testing/selftests/bpf/verifier/bpf_get_stack.c | 87 --------------- 3 files changed, 126 insertions(+), 87 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c delete mode 100644 tools/testing/selftests/bpf/verifier/bpf_get_stack.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index d71cbd63d94d..0319b35241a0 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -10,6 +10,7 @@ #include "verifier_bounds_deduction.skel.h" #include "verifier_bounds_deduction_non_const.skel.h" #include "verifier_bounds_mix_sign_unsign.skel.h" +#include "verifier_bpf_get_stack.skel.h" #include "verifier_cfg.skel.h" #include "verifier_cgroup_inv_retcode.skel.h" #include "verifier_cgroup_skb.skel.h" @@ -87,6 +88,7 @@ void test_verifier_bounds(void) { RUN(verifier_bounds); } void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); } void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); } void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); } +void test_verifier_bpf_get_stack(void) { RUN(verifier_bpf_get_stack); } void test_verifier_cfg(void) { RUN(verifier_cfg); } void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); } void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); } diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c b/tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c new file mode 100644 index 000000000000..325a2bab4a71 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/bpf_get_stack.c */ + +#include +#include +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("tracepoint") +__description("bpf_get_stack return R0 within range") +__success +__naked void stack_return_r0_within_range(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + r9 = %[__imm_0]; \ + r1 = r6; \ + r2 = r7; \ + r3 = %[__imm_0]; \ + r4 = 256; \ + call %[bpf_get_stack]; \ + r1 = 0; \ + r8 = r0; \ + r8 <<= 32; \ + r8 s>>= 32; \ + if r1 s> r8 goto l0_%=; \ + r9 -= r8; \ + r2 = r7; \ + r2 += r8; \ + r1 = r9; \ + r1 <<= 32; \ + r1 s>>= 32; \ + r3 = r2; \ + r3 += r1; \ + r1 = r7; \ + r5 = %[__imm_0]; \ + r1 += r5; \ + if r3 >= r1 goto l0_%=; \ + r1 = r6; \ + r3 = r9; \ + r4 = 0; \ + call %[bpf_get_stack]; \ +l0_%=: exit; \ +" : + : __imm(bpf_get_stack), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + 
__imm_const(__imm_0, sizeof(struct test_val) / 2) + : __clobber_all); +} + +SEC("iter/task") +__description("bpf_get_task_stack return R0 range is refined") +__success +__naked void return_r0_range_is_refined(void) +{ + asm volatile (" \ + r6 = *(u64*)(r1 + 0); \ + r6 = *(u64*)(r6 + 0); /* ctx->meta->seq */\ + r7 = *(u64*)(r1 + 8); /* ctx->task */\ + r1 = %[map_array_48b] ll; /* fixup_map_array_48b */\ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + r2 = r10; \ + r2 += -8; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: if r7 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r1 = r7; \ + r2 = r0; \ + r9 = r0; /* keep buf for seq_write */\ + r3 = 48; \ + r4 = 0; \ + call %[bpf_get_task_stack]; \ + if r0 s> 0 goto l2_%=; \ + r0 = 0; \ + exit; \ +l2_%=: r1 = r6; \ + r2 = r9; \ + r3 = r0; \ + call %[bpf_seq_write]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_task_stack), + __imm(bpf_map_lookup_elem), + __imm(bpf_seq_write), + __imm_addr(map_array_48b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c b/tools/testing/selftests/bpf/verifier/bpf_get_stack.c deleted file mode 100644 index 3e024c891178..000000000000 --- a/tools/testing/selftests/bpf/verifier/bpf_get_stack.c +++ /dev/null @@ -1,87 +0,0 @@ -{ - "bpf_get_stack return R0 within range", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2), - BPF_MOV64_IMM(BPF_REG_4, 256), - BPF_EMIT_CALL(BPF_FUNC_get_stack), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32), - BPF_JMP_REG(BPF_JSGT, BPF_REG_1, BPF_REG_8, 16), - BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32), - BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_9), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_EMIT_CALL(BPF_FUNC_get_stack), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "bpf_get_task_stack return R0 range is refined", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_6, 0), // ctx->meta->seq - BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, 8), // ctx->task - BPF_LD_MAP_FD(BPF_REG_1, 0), // fixup_map_array_48b - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - 
BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), // keep buf for seq_write - BPF_MOV64_IMM(BPF_REG_3, 48), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_EMIT_CALL(BPF_FUNC_get_task_stack), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_9), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_seq_write), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACING, - .expected_attach_type = BPF_TRACE_ITER, - .kfunc = "task", - .runs = -1, // Don't run, just load - .fixup_map_array_48b = { 3 }, -}, -- cgit v1.2.3-70-g09d2 From 37467c79e16a7b569fe5c1197e2ed507f72d2ab7 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:14 +0300 Subject: selftests/bpf: verifier/btf_ctx_access converted to inline assembly Test verifier/btf_ctx_access automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-5-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 ++ .../selftests/bpf/progs/verifier_btf_ctx_access.c | 32 ++++++++++++++++++++++ .../selftests/bpf/verifier/btf_ctx_access.c | 25 ----------------- 3 files changed, 34 insertions(+), 25 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c delete mode 100644 tools/testing/selftests/bpf/verifier/btf_ctx_access.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 0319b35241a0..3fb22d2b309a 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -11,6 +11,7 @@ #include "verifier_bounds_deduction_non_const.skel.h" #include "verifier_bounds_mix_sign_unsign.skel.h" #include "verifier_bpf_get_stack.skel.h" +#include "verifier_btf_ctx_access.skel.h" #include "verifier_cfg.skel.h" #include "verifier_cgroup_inv_retcode.skel.h" #include "verifier_cgroup_skb.skel.h" @@ -89,6 +90,7 @@ void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); } void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); } void test_verifier_bpf_get_stack(void) { RUN(verifier_bpf_get_stack); } +void test_verifier_btf_ctx_access(void) { RUN(verifier_btf_ctx_access); } void test_verifier_cfg(void) { RUN(verifier_cfg); } void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); } void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); } diff --git a/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c new file mode 100644 index 000000000000..a570e48b917a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/btf_ctx_access.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("fentry/bpf_modify_return_test") +__description("btf_ctx_access accept") +__success __retval(0) +__naked void 
btf_ctx_access_accept(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + 8); /* load 2nd argument value (int pointer) */\ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("fentry/bpf_fentry_test9") +__description("btf_ctx_access u32 pointer accept") +__success __retval(0) +__naked void ctx_access_u32_pointer_accept(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + 0); /* load 1nd argument value (u32 pointer) */\ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/btf_ctx_access.c b/tools/testing/selftests/bpf/verifier/btf_ctx_access.c deleted file mode 100644 index 0484d3de040d..000000000000 --- a/tools/testing/selftests/bpf/verifier/btf_ctx_access.c +++ /dev/null @@ -1,25 +0,0 @@ -{ - "btf_ctx_access accept", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 8), /* load 2nd argument value (int pointer) */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACING, - .expected_attach_type = BPF_TRACE_FENTRY, - .kfunc = "bpf_modify_return_test", -}, - -{ - "btf_ctx_access u32 pointer accept", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), /* load 1nd argument value (u32 pointer) */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACING, - .expected_attach_type = BPF_TRACE_FENTRY, - .kfunc = "bpf_fentry_test9", -}, -- cgit v1.2.3-70-g09d2 From fcd36964f22bbe571a07b3cdb3040ec31642ba44 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:15 +0300 Subject: selftests/bpf: verifier/ctx converted to inline assembly Test verifier/ctx automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-6-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + tools/testing/selftests/bpf/progs/verifier_ctx.c | 221 ++++++++++++++++++++++ tools/testing/selftests/bpf/verifier/ctx.c | 186 ------------------ 3 files changed, 223 insertions(+), 186 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_ctx.c delete mode 100644 tools/testing/selftests/bpf/verifier/ctx.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 3fb22d2b309a..1a1fd3bca4b8 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -17,6 +17,7 @@ #include "verifier_cgroup_skb.skel.h" #include "verifier_cgroup_storage.skel.h" #include "verifier_const_or.skel.h" +#include "verifier_ctx.skel.h" #include "verifier_ctx_sk_msg.skel.h" #include "verifier_direct_stack_access_wraparound.skel.h" #include "verifier_div0.skel.h" @@ -96,6 +97,7 @@ void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode) void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); } void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); } void test_verifier_const_or(void) { RUN(verifier_const_or); } +void test_verifier_ctx(void) { RUN(verifier_ctx); } void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); } void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); } void test_verifier_div0(void) { RUN(verifier_div0); } diff --git a/tools/testing/selftests/bpf/progs/verifier_ctx.c b/tools/testing/selftests/bpf/progs/verifier_ctx.c 
new file mode 100644 index 000000000000..a83809a1dbbf --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_ctx.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/ctx.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("tc") +__description("context stores via BPF_ATOMIC") +__failure __msg("BPF_ATOMIC stores into R1 ctx is not allowed") +__naked void context_stores_via_bpf_atomic(void) +{ + asm volatile (" \ + r0 = 0; \ + lock *(u32 *)(r1 + %[__sk_buff_mark]) += w0; \ + exit; \ +" : + : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("arithmetic ops make PTR_TO_CTX unusable") +__failure __msg("dereference of modified ctx ptr") +__naked void make_ptr_to_ctx_unusable(void) +{ + asm volatile (" \ + r1 += %[__imm_0]; \ + r0 = *(u32*)(r1 + %[__sk_buff_mark]); \ + exit; \ +" : + : __imm_const(__imm_0, + offsetof(struct __sk_buff, data) - offsetof(struct __sk_buff, mark)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("pass unmodified ctx pointer to helper") +__success __retval(0) +__naked void unmodified_ctx_pointer_to_helper(void) +{ + asm volatile (" \ + r2 = 0; \ + call %[bpf_csum_update]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_update) + : __clobber_all); +} + +SEC("tc") +__description("pass modified ctx pointer to helper, 1") +__failure __msg("negative offset ctx ptr R1 off=-612 disallowed") +__naked void ctx_pointer_to_helper_1(void) +{ + asm volatile (" \ + r1 += -612; \ + r2 = 0; \ + call %[bpf_csum_update]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_update) + : __clobber_all); +} + +SEC("socket") +__description("pass modified ctx pointer to helper, 2") +__failure __msg("negative offset ctx ptr R1 off=-612 disallowed") +__failure_unpriv __msg_unpriv("negative offset ctx ptr R1 off=-612 disallowed") +__naked void ctx_pointer_to_helper_2(void) +{ + asm volatile (" \ + r1 += -612; \ + call %[bpf_get_socket_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_socket_cookie) + : __clobber_all); +} + +SEC("tc") +__description("pass modified ctx pointer to helper, 3") +__failure __msg("variable ctx access var_off=(0x0; 0x4)") +__naked void ctx_pointer_to_helper_3(void) +{ + asm volatile (" \ + r3 = *(u32*)(r1 + 0); \ + r3 &= 4; \ + r1 += r3; \ + r2 = 0; \ + call %[bpf_csum_update]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_csum_update) + : __clobber_all); +} + +SEC("cgroup/sendmsg6") +__description("pass ctx or null check, 1: ctx") +__success +__naked void or_null_check_1_ctx(void) +{ + asm volatile (" \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/sendmsg6") +__description("pass ctx or null check, 2: null") +__success +__naked void or_null_check_2_null(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/sendmsg6") +__description("pass ctx or null check, 3: 1") +__failure __msg("R1 type=scalar expected=ctx") +__naked void or_null_check_3_1(void) +{ + asm volatile (" \ + r1 = 1; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/sendmsg6") +__description("pass ctx or null check, 4: ctx - const") +__failure __msg("negative offset ctx ptr R1 off=-612 disallowed") +__naked void 
null_check_4_ctx_const(void) +{ + asm volatile (" \ + r1 += -612; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/connect4") +__description("pass ctx or null check, 5: null (connect)") +__success +__naked void null_check_5_null_connect(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/post_bind4") +__description("pass ctx or null check, 6: null (bind)") +__success +__naked void null_check_6_null_bind(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_get_netns_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_netns_cookie) + : __clobber_all); +} + +SEC("cgroup/post_bind4") +__description("pass ctx or null check, 7: ctx (bind)") +__success +__naked void null_check_7_ctx_bind(void) +{ + asm volatile (" \ + call %[bpf_get_socket_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_socket_cookie) + : __clobber_all); +} + +SEC("cgroup/post_bind4") +__description("pass ctx or null check, 8: null (bind)") +__failure __msg("R1 type=scalar expected=ctx") +__naked void null_check_8_null_bind(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_get_socket_cookie]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_socket_cookie) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c deleted file mode 100644 index 2fd31612c0b8..000000000000 --- a/tools/testing/selftests/bpf/verifier/ctx.c +++ /dev/null @@ -1,186 +0,0 @@ -{ - "context stores via BPF_ATOMIC", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_1, BPF_REG_0, offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .errstr = "BPF_ATOMIC stores into R1 ctx is not allowed", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "arithmetic ops make PTR_TO_CTX unusable", - .insns = { - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, - offsetof(struct __sk_buff, data) - - offsetof(struct __sk_buff, mark)), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .errstr = "dereference of modified ctx ptr", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "pass unmodified ctx pointer to helper", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_update), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "pass modified ctx pointer to helper, 1", - .insns = { - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_update), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "negative offset ctx ptr R1 off=-612 disallowed", -}, -{ - "pass modified ctx pointer to helper, 2", - .insns = { - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_socket_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result_unpriv = REJECT, - .result = REJECT, - .errstr_unpriv = "negative offset ctx ptr R1 off=-612 disallowed", - .errstr = "negative offset ctx ptr R1 off=-612 disallowed", -}, -{ - "pass modified ctx pointer to helper, 3", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0), - 
BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_csum_update), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "variable ctx access var_off=(0x0; 0x4)", -}, -{ - "pass ctx or null check, 1: ctx", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_netns_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - .expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, - .result = ACCEPT, -}, -{ - "pass ctx or null check, 2: null", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_netns_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - .expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, - .result = ACCEPT, -}, -{ - "pass ctx or null check, 3: 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_netns_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - .expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, - .result = REJECT, - .errstr = "R1 type=scalar expected=ctx", -}, -{ - "pass ctx or null check, 4: ctx - const", - .insns = { - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_netns_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - .expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, - .result = REJECT, - .errstr = "negative offset ctx ptr R1 off=-612 disallowed", -}, -{ - "pass ctx or null check, 5: null (connect)", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_netns_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, - .expected_attach_type = BPF_CGROUP_INET4_CONNECT, - .result = ACCEPT, -}, -{ - "pass ctx or null check, 6: null (bind)", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_netns_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - .expected_attach_type = BPF_CGROUP_INET4_POST_BIND, - .result = ACCEPT, -}, -{ - "pass ctx or null check, 7: ctx (bind)", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_socket_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - .expected_attach_type = BPF_CGROUP_INET4_POST_BIND, - .result = ACCEPT, -}, -{ - "pass ctx or null check, 8: null (bind)", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_get_socket_cookie), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, - .expected_attach_type = BPF_CGROUP_INET4_POST_BIND, - .result = REJECT, - .errstr = "R1 type=scalar expected=ctx", -}, -- cgit v1.2.3-70-g09d2 From 6080280243846f3c5b3715af4334260522bf11f0 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:16 +0300 Subject: selftests/bpf: verifier/d_path converted to inline assembly Test verifier/d_path automatically converted to use inline assembly. 
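Alongside each new progs/verifier_*.c file, the runner in prog_tests/verifier.c gains two lines, as the hunks in these patches show: an include of the generated skeleton header and a one-line wrapper handing the skeleton to RUN(). Roughly, for a hypothetical verifier_example conversion (a fragment, not a complete file):

/* Runner-side boilerplate sketch; RUN() is the helper already defined in
 * prog_tests/verifier.c, and "verifier_example" is a placeholder name,
 * not part of this patch.
 */
#include "verifier_example.skel.h"

void test_verifier_example(void) { RUN(verifier_example); }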
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-7-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../testing/selftests/bpf/progs/verifier_d_path.c | 48 ++++++++++++++++++++++ tools/testing/selftests/bpf/verifier/d_path.c | 37 ----------------- 3 files changed, 50 insertions(+), 37 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_d_path.c delete mode 100644 tools/testing/selftests/bpf/verifier/d_path.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 1a1fd3bca4b8..ec4c20150e41 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -19,6 +19,7 @@ #include "verifier_const_or.skel.h" #include "verifier_ctx.skel.h" #include "verifier_ctx_sk_msg.skel.h" +#include "verifier_d_path.skel.h" #include "verifier_direct_stack_access_wraparound.skel.h" #include "verifier_div0.skel.h" #include "verifier_div_overflow.skel.h" @@ -99,6 +100,7 @@ void test_verifier_cgroup_storage(void) { RUN(verifier_cgroup_storage); } void test_verifier_const_or(void) { RUN(verifier_const_or); } void test_verifier_ctx(void) { RUN(verifier_ctx); } void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); } +void test_verifier_d_path(void) { RUN(verifier_d_path); } void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); } void test_verifier_div0(void) { RUN(verifier_div0); } void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); } diff --git a/tools/testing/selftests/bpf/progs/verifier_d_path.c b/tools/testing/selftests/bpf/progs/verifier_d_path.c new file mode 100644 index 000000000000..ec79cbcfde91 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_d_path.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/d_path.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("fentry/dentry_open") +__description("d_path accept") +__success __retval(0) +__naked void d_path_accept(void) +{ + asm volatile (" \ + r1 = *(u32*)(r1 + 0); \ + r2 = r10; \ + r2 += -8; \ + r6 = 0; \ + *(u64*)(r2 + 0) = r6; \ + r3 = 8 ll; \ + call %[bpf_d_path]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_d_path) + : __clobber_all); +} + +SEC("fentry/d_path") +__description("d_path reject") +__failure __msg("helper call is not allowed in probe") +__naked void d_path_reject(void) +{ + asm volatile (" \ + r1 = *(u32*)(r1 + 0); \ + r2 = r10; \ + r2 += -8; \ + r6 = 0; \ + *(u64*)(r2 + 0) = r6; \ + r3 = 8 ll; \ + call %[bpf_d_path]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_d_path) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/d_path.c b/tools/testing/selftests/bpf/verifier/d_path.c deleted file mode 100644 index b988396379a7..000000000000 --- a/tools/testing/selftests/bpf/verifier/d_path.c +++ /dev/null @@ -1,37 +0,0 @@ -{ - "d_path accept", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_MOV64_IMM(BPF_REG_6, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0), - BPF_LD_IMM64(BPF_REG_3, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_d_path), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACING, - 
.expected_attach_type = BPF_TRACE_FENTRY, - .kfunc = "dentry_open", -}, -{ - "d_path reject", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_MOV64_IMM(BPF_REG_6, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0), - BPF_LD_IMM64(BPF_REG_3, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_d_path), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "helper call is not allowed in probe", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACING, - .expected_attach_type = BPF_TRACE_FENTRY, - .kfunc = "d_path", -}, -- cgit v1.2.3-70-g09d2 From 0a372c9c0812ec5de4a740781fbf183ee523bf4a Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:17 +0300 Subject: selftests/bpf: verifier/direct_packet_access converted to inline assembly Test verifier/direct_packet_access automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-8-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../bpf/progs/verifier_direct_packet_access.c | 803 +++++++++++++++++++++ .../selftests/bpf/verifier/direct_packet_access.c | 710 ------------------ 3 files changed, 805 insertions(+), 710 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c delete mode 100644 tools/testing/selftests/bpf/verifier/direct_packet_access.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index ec4c20150e41..ce2436b4b09f 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -20,6 +20,7 @@ #include "verifier_ctx.skel.h" #include "verifier_ctx_sk_msg.skel.h" #include "verifier_d_path.skel.h" +#include "verifier_direct_packet_access.skel.h" #include "verifier_direct_stack_access_wraparound.skel.h" #include "verifier_div0.skel.h" #include "verifier_div_overflow.skel.h" @@ -101,6 +102,7 @@ void test_verifier_const_or(void) { RUN(verifier_const_or); } void test_verifier_ctx(void) { RUN(verifier_ctx); } void test_verifier_ctx_sk_msg(void) { RUN(verifier_ctx_sk_msg); } void test_verifier_d_path(void) { RUN(verifier_d_path); } +void test_verifier_direct_packet_access(void) { RUN(verifier_direct_packet_access); } void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_stack_access_wraparound); } void test_verifier_div0(void) { RUN(verifier_div0); } void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); } diff --git a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c new file mode 100644 index 000000000000..99a23dea8233 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c @@ -0,0 +1,803 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/direct_packet_access.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("tc") +__description("pkt_end - pkt_start is allowed") +__success __retval(TEST_DATA_LEN) +__naked void end_pkt_start_is_allowed(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r0 -= r2; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, 
offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test1") +__success __retval(0) +__naked void direct_packet_access_test1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test2") +__success __retval(0) +__naked void direct_packet_access_test2(void) +{ + asm volatile (" \ + r0 = 1; \ + r4 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data]); \ + r5 = r3; \ + r5 += 14; \ + if r5 > r4 goto l0_%=; \ + r0 = *(u8*)(r3 + 7); \ + r4 = *(u8*)(r3 + 12); \ + r4 *= 14; \ + r3 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 += r4; \ + r2 = *(u32*)(r1 + %[__sk_buff_len]); \ + r2 <<= 49; \ + r2 >>= 49; \ + r3 += r2; \ + r2 = r3; \ + r2 += 8; \ + r1 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + if r2 > r1 goto l1_%=; \ + r1 = *(u8*)(r3 + 4); \ +l1_%=: r0 = 0; \ +l0_%=: exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("direct packet access: test3") +__failure __msg("invalid bpf_context access off=76") +__failure_unpriv +__naked void direct_packet_access_test3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test4 (write)") +__success __retval(0) +__naked void direct_packet_access_test4_write(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test5 (pkt_end >= reg, good access)") +__success __retval(0) +__naked void pkt_end_reg_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test6 (pkt_end >= reg, bad access)") +__failure __msg("invalid access to packet") +__naked void pkt_end_reg_bad_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet 
access: test7 (pkt_end >= reg, both accesses)") +__failure __msg("invalid access to packet") +__naked void pkt_end_reg_both_accesses(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ + r0 = 1; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test8 (double test, variant 1)") +__success __retval(0) +__naked void test8_double_test_variant_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + if r0 > r3 goto l1_%=; \ + r0 = *(u8*)(r2 + 0); \ +l1_%=: r0 = 1; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test9 (double test, variant 2)") +__success __retval(0) +__naked void test9_double_test_variant_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 >= r0 goto l0_%=; \ + r0 = 1; \ + exit; \ +l0_%=: if r0 > r3 goto l1_%=; \ + r0 = *(u8*)(r2 + 0); \ +l1_%=: r0 = *(u8*)(r2 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test10 (write invalid)") +__failure __msg("invalid access to packet") +__naked void packet_access_test10_write_invalid(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: *(u8*)(r2 + 0) = r2; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test11 (shift, good access)") +__success __retval(1) +__naked void access_test11_shift_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 22; \ + if r0 > r3 goto l0_%=; \ + r3 = 144; \ + r5 = r3; \ + r5 += 23; \ + r5 >>= 3; \ + r6 = r2; \ + r6 += r5; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test12 (and, good access)") +__success __retval(1) +__naked void access_test12_and_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 22; \ + if r0 > r3 goto l0_%=; \ + r3 = 144; \ + r5 = r3; \ + r5 += 23; \ + r5 &= 15; \ + r6 = r2; \ + r6 += r5; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + 
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test13 (branches, good access)") +__success __retval(1) +__naked void access_test13_branches_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 22; \ + if r0 > r3 goto l0_%=; \ + r3 = *(u32*)(r1 + %[__sk_buff_mark]); \ + r4 = 1; \ + if r3 > r4 goto l1_%=; \ + r3 = 14; \ + goto l2_%=; \ +l1_%=: r3 = 24; \ +l2_%=: r5 = r3; \ + r5 += 23; \ + r5 &= 15; \ + r6 = r2; \ + r6 += r5; \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)") +__success __retval(1) +__naked void _0_const_imm_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 22; \ + if r0 > r3 goto l0_%=; \ + r5 = 12; \ + r5 >>= 4; \ + r6 = r2; \ + r6 += r5; \ + r0 = *(u8*)(r6 + 0); \ + r0 = 1; \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test15 (spill with xadd)") +__failure __msg("R2 invalid mem access 'scalar'") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void access_test15_spill_with_xadd(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r5 = 4096; \ + r4 = r10; \ + r4 += -8; \ + *(u64*)(r4 + 0) = r2; \ + lock *(u64 *)(r4 + 0) += r5; \ + r2 = *(u64*)(r4 + 0); \ + *(u32*)(r2 + 0) = r5; \ + r0 = 0; \ +l0_%=: exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test16 (arith on data_end)") +__failure __msg("R3 pointer arithmetic on pkt_end") +__naked void test16_arith_on_data_end(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + r3 += 16; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test17 (pruning, alignment)") +__failure __msg("misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4") +__flag(BPF_F_STRICT_ALIGNMENT) +__naked void packet_access_test17_pruning_alignment(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r7 = *(u32*)(r1 + %[__sk_buff_mark]); \ + r0 = r2; \ + r0 += 14; \ + if r7 > 1 goto l0_%=; \ +l2_%=: if r0 > r3 goto l1_%=; \ + *(u32*)(r0 - 4) = r0; \ +l1_%=: r0 = 0; \ + exit; \ +l0_%=: r0 += 1; \ + goto l2_%=; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + 
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test18 (imm += pkt_ptr, 1)") +__success __retval(0) +__naked void test18_imm_pkt_ptr_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = 8; \ + r0 += r2; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test19 (imm += pkt_ptr, 2)") +__success __retval(0) +__naked void test19_imm_pkt_ptr_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r4 = 4; \ + r4 += r2; \ + *(u8*)(r4 + 0) = r4; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test20 (x += pkt_ptr, 1)") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void test20_x_pkt_ptr_1(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = 0xffffffff; \ + *(u64*)(r10 - 8) = r0; \ + r0 = *(u64*)(r10 - 8); \ + r0 &= 0x7fff; \ + r4 = r0; \ + r4 += r2; \ + r5 = r4; \ + r4 += %[__imm_0]; \ + if r4 > r3 goto l0_%=; \ + *(u64*)(r5 + 0) = r4; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__imm_0, 0x7fff - 1), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test21 (x += pkt_ptr, 2)") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void test21_x_pkt_ptr_2(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r4 = 0xffffffff; \ + *(u64*)(r10 - 8) = r4; \ + r4 = *(u64*)(r10 - 8); \ + r4 &= 0x7fff; \ + r4 += r2; \ + r5 = r4; \ + r4 += %[__imm_0]; \ + if r4 > r3 goto l0_%=; \ + *(u64*)(r5 + 0) = r4; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__imm_0, 0x7fff - 1), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test22 (x += pkt_ptr, 3)") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void test22_x_pkt_ptr_3(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + *(u64*)(r10 - 8) = r2; \ + *(u64*)(r10 - 16) = r3; \ + r3 = *(u64*)(r10 - 16); \ + if r0 > r3 goto l0_%=; \ + r2 = *(u64*)(r10 - 8); \ + r4 = 0xffffffff; \ + lock *(u64 *)(r10 - 8) += r4; \ + r4 = *(u64*)(r10 - 8); \ + r4 >>= 49; \ + r4 += r2; \ + r0 = r4; \ + r0 += 2; \ + if r0 > r3 goto l0_%=; \ + r2 = 1; \ + *(u16*)(r4 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test23 (x += pkt_ptr, 
4)") +__failure __msg("invalid access to packet, off=0 size=8, R5(id=2,off=0,r=0)") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void test23_x_pkt_ptr_4(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = *(u32*)(r1 + %[__sk_buff_mark]); \ + *(u64*)(r10 - 8) = r0; \ + r0 = *(u64*)(r10 - 8); \ + r0 &= 0xffff; \ + r4 = r0; \ + r0 = 31; \ + r0 += r4; \ + r0 += r2; \ + r5 = r0; \ + r0 += %[__imm_0]; \ + if r0 > r3 goto l0_%=; \ + *(u64*)(r5 + 0) = r0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__imm_0, 0xffff - 1), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test24 (x += pkt_ptr, 5)") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void test24_x_pkt_ptr_5(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = 0xffffffff; \ + *(u64*)(r10 - 8) = r0; \ + r0 = *(u64*)(r10 - 8); \ + r0 &= 0xff; \ + r4 = r0; \ + r0 = 64; \ + r0 += r4; \ + r0 += r2; \ + r5 = r0; \ + r0 += %[__imm_0]; \ + if r0 > r3 goto l0_%=; \ + *(u64*)(r5 + 0) = r0; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__imm_0, 0x7fff - 1), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test25 (marking on <, good access)") +__success __retval(0) +__naked void test25_marking_on_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 < r3 goto l0_%=; \ +l1_%=: r0 = 0; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + goto l1_%=; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test26 (marking on <, bad access)") +__failure __msg("invalid access to packet") +__naked void test26_marking_on_bad_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 < r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l1_%=: r0 = 0; \ + exit; \ +l0_%=: goto l1_%=; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test27 (marking on <=, good access)") +__success __retval(1) +__naked void test27_marking_on_good_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r3 <= r0 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test28 (marking on <=, bad access)") +__failure __msg("invalid access to packet") +__naked void test28_marking_on_bad_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + 
r0 = r2; \ + r0 += 8; \ + if r3 <= r0 goto l0_%=; \ +l1_%=: r0 = 1; \ + exit; \ +l0_%=: r0 = *(u8*)(r2 + 0); \ + goto l1_%=; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("direct packet access: test29 (reg > pkt_end in subprog)") +__success __retval(0) +__naked void reg_pkt_end_in_subprog(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_data]); \ + r2 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r3 = r6; \ + r3 += 8; \ + call reg_pkt_end_in_subprog__1; \ + if r0 == 0 goto l0_%=; \ + r0 = *(u8*)(r6 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void reg_pkt_end_in_subprog__1(void) +{ + asm volatile (" \ + r0 = 0; \ + if r3 > r2 goto l0_%=; \ + r0 = 1; \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("direct packet access: test30 (check_id() in regsafe(), bad access)") +__failure __msg("invalid access to packet, off=0 size=1, R2") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void id_in_regsafe_bad_access(void) +{ + asm volatile (" \ + /* r9 = ctx */ \ + r9 = r1; \ + /* r7 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r7 = r0; \ + /* r6 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r6 = r0; \ + /* r2 = ctx->data \ + * r3 = ctx->data \ + * r4 = ctx->data_end \ + */ \ + r2 = *(u32*)(r9 + %[__sk_buff_data]); \ + r3 = *(u32*)(r9 + %[__sk_buff_data]); \ + r4 = *(u32*)(r9 + %[__sk_buff_data_end]); \ + /* if r6 > 100 goto exit \ + * if r7 > 100 goto exit \ + */ \ + if r6 > 100 goto l0_%=; \ + if r7 > 100 goto l0_%=; \ + /* r2 += r6 ; this forces assignment of ID to r2\ + * r2 += 1 ; get some fixed off for r2\ + * r3 += r7 ; this forces assignment of ID to r3\ + * r3 += 1 ; get some fixed off for r3\ + */ \ + r2 += r6; \ + r2 += 1; \ + r3 += r7; \ + r3 += 1; \ + /* if r6 > r7 goto +1 ; no new information about the state is derived from\ + * ; this check, thus produced verifier states differ\ + * ; only in 'insn_idx' \ + * r2 = r3 ; optionally share ID between r2 and r3\ + */ \ + if r6 != r7 goto l1_%=; \ + r2 = r3; \ +l1_%=: /* if r3 > ctx->data_end goto exit */ \ + if r3 > r4 goto l0_%=; \ + /* r5 = *(u8 *) (r2 - 1) ; access packet memory using r2,\ + * ; this is not always safe\ + */ \ + r5 = *(u8*)(r2 - 1); \ +l0_%=: /* exit(0) */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c deleted file mode 100644 index dce2e28aeb43..000000000000 --- a/tools/testing/selftests/bpf/verifier/direct_packet_access.c +++ /dev/null @@ -1,710 +0,0 @@ -{ - "pkt_end - pkt_start is allowed", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = TEST_DATA_LEN, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test1", - .insns = { - 
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), - BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test3", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid bpf_context access off=76", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, -}, -{ - "direct packet access: test4 (write)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test5 (pkt_end >= reg, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test6 (pkt_end >= reg, bad access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - 
BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid access to packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test7 (pkt_end >= reg, both accesses)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid access to packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test8 (double test, variant 1)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 4), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test9 (double test, variant 2)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test10 (write invalid)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid access to packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test11 (shift, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), - BPF_MOV64_IMM(BPF_REG_3, 144), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = 
BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, -}, -{ - "direct packet access: test12 (and, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), - BPF_MOV64_IMM(BPF_REG_3, 144), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), - BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, -}, -{ - "direct packet access: test13 (branches, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_MOV64_IMM(BPF_REG_4, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2), - BPF_MOV64_IMM(BPF_REG_3, 14), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_IMM(BPF_REG_3, 24), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_3), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23), - BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, -}, -{ - "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7), - BPF_MOV64_IMM(BPF_REG_5, 12), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, -}, -{ - "direct packet access: test15 (spill with xadd)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8), - BPF_MOV64_IMM(BPF_REG_5, 4096), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_4, BPF_REG_5, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), - BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R2 invalid mem access 'scalar'", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "direct packet access: test16 (arith on 
data_end)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 16), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R3 pointer arithmetic on pkt_end", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test17 (pruning, alignment)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 14), - BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 1, 4), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, -4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_A(-6), - }, - .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .flags = F_LOAD_WITH_STRICT_ALIGNMENT, -}, -{ - "direct packet access: test18 (imm += pkt_ptr, 1)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_IMM(BPF_REG_0, 8), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test19 (imm += pkt_ptr, 2)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), - BPF_MOV64_IMM(BPF_REG_4, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), - BPF_STX_MEM(BPF_B, BPF_REG_4, BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test20 (x += pkt_ptr, 1)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "direct packet access: test21 (x += pkt_ptr, 2)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, 
BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9), - BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "direct packet access: test22 (x += pkt_ptr, 3)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -16), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -16), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 11), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8), - BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_10, BPF_REG_4, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49), - BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_2, 1), - BPF_STX_MEM(BPF_H, BPF_REG_4, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "direct packet access: test23 (x += pkt_ptr, 4)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, 31), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid access to packet, off=0 size=8, R5(id=2,off=0,r=0)", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "direct packet access: test24 (x += pkt_ptr, 5)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, 64), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - 
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "direct packet access: test25 (marking on <, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, -4), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test26 (marking on <, bad access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JA, 0, 0, -3), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test27 (marking on <=, good access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = 1, -}, -{ - "direct packet access: test28 (marking on <=, bad access)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, -4), - }, - .result = REJECT, - .errstr = "invalid access to packet", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test29 (reg > pkt_end in subprog)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "direct packet access: test30 (check_id() in regsafe(), bad access)", - .insns = { - /* r9 = ctx */ - 
BPF_MOV64_REG(BPF_REG_9, BPF_REG_1), - /* r7 = ktime_get_ns() */ - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* r6 = ktime_get_ns() */ - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* r2 = ctx->data - * r3 = ctx->data - * r4 = ctx->data_end - */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_9, offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_9, offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_9, offsetof(struct __sk_buff, data_end)), - /* if r6 > 100 goto exit - * if r7 > 100 goto exit - */ - BPF_JMP_IMM(BPF_JGT, BPF_REG_6, 100, 9), - BPF_JMP_IMM(BPF_JGT, BPF_REG_7, 100, 8), - /* r2 += r6 ; this forces assignment of ID to r2 - * r2 += 1 ; get some fixed off for r2 - * r3 += r7 ; this forces assignment of ID to r3 - * r3 += 1 ; get some fixed off for r3 - */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 1), - /* if r6 > r7 goto +1 ; no new information about the state is derived from - * ; this check, thus produced verifier states differ - * ; only in 'insn_idx' - * r2 = r3 ; optionally share ID between r2 and r3 - */ - BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_7, 1), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_3), - /* if r3 > ctx->data_end goto exit */ - BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 1), - /* r5 = *(u8 *) (r2 - 1) ; access packet memory using r2, - * ; this is not always safe - */ - BPF_LDX_MEM(BPF_B, BPF_REG_5, BPF_REG_2, -1), - /* exit(0) */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .flags = BPF_F_TEST_STATE_FREQ, - .result = REJECT, - .errstr = "invalid access to packet, off=0 size=1, R2", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -- cgit v1.2.3-70-g09d2 From a5828e3154d17d9fec53910a1aa161d028339ada Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:18 +0300 Subject: selftests/bpf: verifier/jeq_infer_not_null converted to inline assembly Test verifier/jeq_infer_not_null automatically converted to use inline assembly. 
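As with the other tests converted in this series, each macro-encoded
entry of the old verifier/*.c table becomes a standalone __naked
function whose body is BPF inline assembly, with the expected verdict
carried by the __description/__success/__failure/__msg/__retval tags
that test_loader consumes. A minimal sketch of that shape (the section,
name and body below are illustrative only and assume the usual selftest
includes; they are not part of the converted file):

	SEC("socket")
	__description("illustrative example")
	__success __retval(0)
	__naked void illustrative_example(void)
	{
		asm volatile ("				\
		r0 = 1;					\
		if r0 != 1 goto l0_%=;			\
		r0 = 0;					\
	l0_%=:	exit;					\
	"	::: __clobber_all);
	}
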
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-9-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../bpf/progs/verifier_jeq_infer_not_null.c | 213 +++++++++++++++++++++ .../selftests/bpf/verifier/jeq_infer_not_null.c | 174 ----------------- 3 files changed, 215 insertions(+), 174 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c delete mode 100644 tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index ce2436b4b09f..6585432e088f 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -29,6 +29,7 @@ #include "verifier_helper_restricted.skel.h" #include "verifier_helper_value_access.skel.h" #include "verifier_int_ptr.skel.h" +#include "verifier_jeq_infer_not_null.skel.h" #include "verifier_ld_ind.skel.h" #include "verifier_leak_ptr.skel.h" #include "verifier_map_ptr.skel.h" @@ -111,6 +112,7 @@ void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_acces void test_verifier_helper_restricted(void) { RUN(verifier_helper_restricted); } void test_verifier_helper_value_access(void) { RUN(verifier_helper_value_access); } void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); } +void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); } void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } diff --git a/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c b/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c new file mode 100644 index 000000000000..bf16b00502f2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c */ + +#include +#include +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_XSKMAP); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} map_xskmap SEC(".maps"); + +/* This is equivalent to the following program: + * + * r6 = skb->sk; + * r7 = sk_fullsock(r6); + * r0 = sk_fullsock(r6); + * if (r0 == 0) return 0; (a) + * if (r0 != r7) return 0; (b) + * *r7->type; (c) + * return 0; + * + * It is safe to dereference r7 at point (c), because of (a) and (b). + * The test verifies that relation r0 == r7 is propagated from (b) to (c). + */ +SEC("cgroup/skb") +__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JNE false branch") +__success __failure_unpriv __msg_unpriv("R7 pointer comparison") +__retval(0) +__naked void socket_for_jne_false_branch(void) +{ + asm volatile (" \ + /* r6 = skb->sk; */ \ + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ + /* if (r6 == 0) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* r7 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + /* r0 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + /* if (r0 == null) return 0; */ \ + if r0 == 0 goto l0_%=; \ + /* if (r0 == r7) r0 = *(r7->type); */ \ + if r0 != r7 goto l0_%=; /* Use ! JNE ! 
*/\ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ +l0_%=: /* return 0 */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +/* Same as above, but verify that another branch of JNE still + * prohibits access to PTR_MAYBE_NULL. + */ +SEC("cgroup/skb") +__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JNE true branch") +__failure __msg("R7 invalid mem access 'sock_or_null'") +__failure_unpriv __msg_unpriv("R7 pointer comparison") +__naked void unchanged_for_jne_true_branch(void) +{ + asm volatile (" \ + /* r6 = skb->sk */ \ + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ + /* if (r6 == 0) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* r7 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + /* r0 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + /* if (r0 == null) return 0; */ \ + if r0 != 0 goto l0_%=; \ + /* if (r0 == r7) return 0; */ \ + if r0 != r7 goto l1_%=; /* Use ! JNE ! */\ + goto l0_%=; \ +l1_%=: /* r0 = *(r7->type); */ \ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ +l0_%=: /* return 0 */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +/* Same as a first test, but not null should be inferred for JEQ branch */ +SEC("cgroup/skb") +__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JEQ true branch") +__success __failure_unpriv __msg_unpriv("R7 pointer comparison") +__retval(0) +__naked void socket_for_jeq_true_branch(void) +{ + asm volatile (" \ + /* r6 = skb->sk; */ \ + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ + /* if (r6 == null) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* r7 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + /* r0 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + /* if (r0 == null) return 0; */ \ + if r0 == 0 goto l0_%=; \ + /* if (r0 != r7) return 0; */ \ + if r0 == r7 goto l1_%=; /* Use ! JEQ ! */\ + goto l0_%=; \ +l1_%=: /* r0 = *(r7->type); */ \ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ +l0_%=: /* return 0; */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +/* Same as above, but verify that another branch of JNE still + * prohibits access to PTR_MAYBE_NULL. + */ +SEC("cgroup/skb") +__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JEQ false branch") +__failure __msg("R7 invalid mem access 'sock_or_null'") +__failure_unpriv __msg_unpriv("R7 pointer comparison") +__naked void unchanged_for_jeq_false_branch(void) +{ + asm volatile (" \ + /* r6 = skb->sk; */ \ + r6 = *(u64*)(r1 + %[__sk_buff_sk]); \ + /* if (r6 == null) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* r7 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + /* r0 = sk_fullsock(skb); */ \ + r1 = r6; \ + call %[bpf_sk_fullsock]; \ + /* if (r0 == null) return 0; */ \ + if r0 == 0 goto l0_%=; \ + /* if (r0 != r7) r0 = *(r7->type); */ \ + if r0 == r7 goto l0_%=; /* Use ! JEQ ! 
*/\ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ +l0_%=: /* return 0; */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +/* Maps are treated in a different branch of `mark_ptr_not_null_reg`, + * so separate test for maps case. + */ +SEC("xdp") +__description("jne/jeq infer not null, PTR_TO_MAP_VALUE_OR_NULL -> PTR_TO_MAP_VALUE") +__success __retval(0) +__naked void null_ptr_to_map_value(void) +{ + asm volatile (" \ + /* r9 = &some stack to use as key */ \ + r1 = 0; \ + *(u32*)(r10 - 8) = r1; \ + r9 = r10; \ + r9 += -8; \ + /* r8 = process local map */ \ + r8 = %[map_xskmap] ll; \ + /* r6 = map_lookup_elem(r8, r9); */ \ + r1 = r8; \ + r2 = r9; \ + call %[bpf_map_lookup_elem]; \ + r6 = r0; \ + /* r7 = map_lookup_elem(r8, r9); */ \ + r1 = r8; \ + r2 = r9; \ + call %[bpf_map_lookup_elem]; \ + r7 = r0; \ + /* if (r6 == 0) return 0; */ \ + if r6 == 0 goto l0_%=; \ + /* if (r6 != r7) return 0; */ \ + if r6 != r7 goto l0_%=; \ + /* read *r7; */ \ + r0 = *(u32*)(r7 + %[bpf_xdp_sock_queue_id]); \ +l0_%=: /* return 0; */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_xskmap), + __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c b/tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c deleted file mode 100644 index 67a1c07ead34..000000000000 --- a/tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c +++ /dev/null @@ -1,174 +0,0 @@ -{ - /* This is equivalent to the following program: - * - * r6 = skb->sk; - * r7 = sk_fullsock(r6); - * r0 = sk_fullsock(r6); - * if (r0 == 0) return 0; (a) - * if (r0 != r7) return 0; (b) - * *r7->type; (c) - * return 0; - * - * It is safe to dereference r7 at point (c), because of (a) and (b). - * The test verifies that relation r0 == r7 is propagated from (b) to (c). - */ - "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JNE false branch", - .insns = { - /* r6 = skb->sk; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)), - /* if (r6 == 0) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 8), - /* r7 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* r0 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - /* if (r0 == null) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - /* if (r0 == r7) r0 = *(r7->type); */ - BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_7, 1), /* Use ! JNE ! */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), - /* return 0 */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R7 pointer comparison", -}, -{ - /* Same as above, but verify that another branch of JNE still - * prohibits access to PTR_MAYBE_NULL. 
- */ - "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JNE true branch", - .insns = { - /* r6 = skb->sk */ - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)), - /* if (r6 == 0) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 9), - /* r7 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* r0 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - /* if (r0 == null) return 0; */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - /* if (r0 == r7) return 0; */ - BPF_JMP_REG(BPF_JNE, BPF_REG_0, BPF_REG_7, 1), /* Use ! JNE ! */ - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - /* r0 = *(r7->type); */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), - /* return 0 */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "R7 invalid mem access 'sock_or_null'", - .result_unpriv = REJECT, - .errstr_unpriv = "R7 pointer comparison", -}, -{ - /* Same as a first test, but not null should be inferred for JEQ branch */ - "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JEQ true branch", - .insns = { - /* r6 = skb->sk; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)), - /* if (r6 == null) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 9), - /* r7 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* r0 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - /* if (r0 == null) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - /* if (r0 != r7) return 0; */ - BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_7, 1), /* Use ! JEQ ! */ - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - /* r0 = *(r7->type); */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), - /* return 0; */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R7 pointer comparison", -}, -{ - /* Same as above, but verify that another branch of JNE still - * prohibits access to PTR_MAYBE_NULL. - */ - "jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JEQ false branch", - .insns = { - /* r6 = skb->sk; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, sk)), - /* if (r6 == null) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 8), - /* r7 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* r0 = sk_fullsock(skb); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - /* if (r0 == null) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - /* if (r0 != r7) r0 = *(r7->type); */ - BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_7, 1), /* Use ! JEQ ! */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), - /* return 0; */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "R7 invalid mem access 'sock_or_null'", - .result_unpriv = REJECT, - .errstr_unpriv = "R7 pointer comparison", -}, -{ - /* Maps are treated in a different branch of `mark_ptr_not_null_reg`, - * so separate test for maps case. 
- */ - "jne/jeq infer not null, PTR_TO_MAP_VALUE_OR_NULL -> PTR_TO_MAP_VALUE", - .insns = { - /* r9 = &some stack to use as key */ - BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_9, -8), - /* r8 = process local map */ - BPF_LD_MAP_FD(BPF_REG_8, 0), - /* r6 = map_lookup_elem(r8, r9); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_9), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* r7 = map_lookup_elem(r8, r9); */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_9), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* if (r6 == 0) return 0; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 2), - /* if (r6 != r7) return 0; */ - BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_7, 1), - /* read *r7; */ - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_xdp_sock, queue_id)), - /* return 0; */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_xskmap = { 3 }, - .prog_type = BPF_PROG_TYPE_XDP, - .result = ACCEPT, -}, -- cgit v1.2.3-70-g09d2 From a6fc14dc5e8d43871eebe5455719de6d7578a3bc Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:19 +0300 Subject: selftests/bpf: verifier/loops1 converted to inline assembly Test verifier/loops1 automatically converted to use inline assembly. There are a few modifications for the converted tests. "tracepoint" programs do not support test execution, change program type to "xdp" (which supports test execution) for the following tests that have __retval tags: - bounded loop, count to 4 - bonded loop containing forward jump Also, remove the __retval tag for test: - bounded loop, count from positive unknown to 4 As it's return value is a random number. 
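For context, a __retval tag makes test_loader actually execute the
loaded program and compare its return code, roughly along these lines
(simplified sketch, not the exact test_loader code; "prog" and the
64-byte dummy packet are placeholders):

	char data[64] = {};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = data,
		.data_size_in = sizeof(data),
		.repeat = 1,
	);

	/* run the program once and check the verdict, e.g. 4 for
	 * "bounded loop, count to 4"
	 */
	err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts);
	if (ASSERT_OK(err, "test_run"))
		ASSERT_EQ(topts.retval, 4, "retval");

BPF_PROG_TYPE_TRACEPOINT programs cannot be executed through this
interface, hence the switch to "xdp" for the __retval-tagged loop tests
listed above.
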
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-10-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../testing/selftests/bpf/progs/verifier_loops1.c | 259 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/loops1.c | 206 ---------------- 3 files changed, 261 insertions(+), 206 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_loops1.c delete mode 100644 tools/testing/selftests/bpf/verifier/loops1.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 6585432e088f..632546912627 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -32,6 +32,7 @@ #include "verifier_jeq_infer_not_null.skel.h" #include "verifier_ld_ind.skel.h" #include "verifier_leak_ptr.skel.h" +#include "verifier_loops1.skel.h" #include "verifier_map_ptr.skel.h" #include "verifier_map_ret_val.skel.h" #include "verifier_masking.skel.h" @@ -115,6 +116,7 @@ void test_verifier_int_ptr(void) { RUN(verifier_int_ptr); } void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null); } void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } +void test_verifier_loops1(void) { RUN(verifier_loops1); } void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } void test_verifier_masking(void) { RUN(verifier_masking); } diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c new file mode 100644 index 000000000000..5bc86af80a9a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/loops1.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("xdp") +__description("bounded loop, count to 4") +__success __retval(4) +__naked void bounded_loop_count_to_4(void) +{ + asm volatile (" \ + r0 = 0; \ +l0_%=: r0 += 1; \ + if r0 < 4 goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, count to 20") +__success +__naked void bounded_loop_count_to_20(void) +{ + asm volatile (" \ + r0 = 0; \ +l0_%=: r0 += 3; \ + if r0 < 20 goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, count from positive unknown to 4") +__success +__naked void from_positive_unknown_to_4(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + if r0 s< 0 goto l0_%=; \ +l1_%=: r0 += 1; \ + if r0 < 4 goto l1_%=; \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, count from totally unknown to 4") +__success +__naked void from_totally_unknown_to_4(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ +l0_%=: r0 += 1; \ + if r0 < 4 goto l0_%=; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, count to 4 with equality") +__success +__naked void count_to_4_with_equality(void) +{ + asm volatile (" \ + r0 = 0; \ +l0_%=: r0 += 1; \ + if r0 != 4 goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop, start in the middle") +__failure __msg("back-edge") +__naked 
void loop_start_in_the_middle(void) +{ + asm volatile (" \ + r0 = 0; \ + goto l0_%=; \ +l1_%=: r0 += 1; \ +l0_%=: if r0 < 4 goto l1_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("xdp") +__description("bounded loop containing a forward jump") +__success __retval(4) +__naked void loop_containing_a_forward_jump(void) +{ + asm volatile (" \ + r0 = 0; \ +l1_%=: r0 += 1; \ + if r0 == r0 goto l0_%=; \ +l0_%=: if r0 < 4 goto l1_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded loop that jumps out rather than in") +__success +__naked void jumps_out_rather_than_in(void) +{ + asm volatile (" \ + r6 = 0; \ +l1_%=: r6 += 1; \ + if r6 > 10000 goto l0_%=; \ + call %[bpf_get_prandom_u32]; \ + goto l1_%=; \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("tracepoint") +__description("infinite loop after a conditional jump") +__failure __msg("program is too large") +__naked void loop_after_a_conditional_jump(void) +{ + asm volatile (" \ + r0 = 5; \ + if r0 < 4 goto l0_%=; \ +l1_%=: r0 += 1; \ + goto l1_%=; \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("bounded recursion") +__failure __msg("back-edge") +__naked void bounded_recursion(void) +{ + asm volatile (" \ + r1 = 0; \ + call bounded_recursion__1; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void bounded_recursion__1(void) +{ + asm volatile (" \ + r1 += 1; \ + r0 = r1; \ + if r1 < 4 goto l0_%=; \ + exit; \ +l0_%=: call bounded_recursion__1; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("infinite loop in two jumps") +__failure __msg("loop detected") +__naked void infinite_loop_in_two_jumps(void) +{ + asm volatile (" \ + r0 = 0; \ +l1_%=: goto l0_%=; \ +l0_%=: if r0 < 4 goto l1_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("infinite loop: three-jump trick") +__failure __msg("loop detected") +__naked void infinite_loop_three_jump_trick(void) +{ + asm volatile (" \ + r0 = 0; \ +l2_%=: r0 += 1; \ + r0 &= 1; \ + if r0 < 2 goto l0_%=; \ + exit; \ +l0_%=: r0 += 1; \ + r0 &= 1; \ + if r0 < 2 goto l1_%=; \ + exit; \ +l1_%=: r0 += 1; \ + r0 &= 1; \ + if r0 < 2 goto l2_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("xdp") +__description("not-taken loop with back jump to 1st insn") +__success __retval(123) +__naked void back_jump_to_1st_insn_1(void) +{ + asm volatile (" \ +l0_%=: r0 = 123; \ + if r0 == 4 goto l0_%=; \ + exit; \ +" ::: __clobber_all); +} + +SEC("xdp") +__description("taken loop with back jump to 1st insn") +__success __retval(55) +__naked void back_jump_to_1st_insn_2(void) +{ + asm volatile (" \ + r1 = 10; \ + r2 = 0; \ + call back_jump_to_1st_insn_2__1; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void back_jump_to_1st_insn_2__1(void) +{ + asm volatile (" \ +l0_%=: r2 += r1; \ + r1 -= 1; \ + if r1 != 0 goto l0_%=; \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +SEC("xdp") +__description("taken loop with back jump to 1st insn, 2") +__success __retval(55) +__naked void jump_to_1st_insn_2(void) +{ + asm volatile (" \ + r1 = 10; \ + r2 = 0; \ + call jump_to_1st_insn_2__1; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void jump_to_1st_insn_2__1(void) +{ + asm volatile (" \ +l0_%=: r2 += r1; \ + r1 -= 1; \ + if w1 != 0 goto l0_%=; \ + r0 = r2; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git 
a/tools/testing/selftests/bpf/verifier/loops1.c b/tools/testing/selftests/bpf/verifier/loops1.c deleted file mode 100644 index 1af37187dc12..000000000000 --- a/tools/testing/selftests/bpf/verifier/loops1.c +++ /dev/null @@ -1,206 +0,0 @@ -{ - "bounded loop, count to 4", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .retval = 4, -}, -{ - "bounded loop, count to 20", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 20, -2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "bounded loop, count from positive unknown to 4", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_0, 0, 2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .retval = 4, -}, -{ - "bounded loop, count from totally unknown to 4", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "bounded loop, count to 4 with equality", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 4, -2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "bounded loop, start in the middle", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_A(1), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "back-edge", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .retval = 4, -}, -{ - "bounded loop containing a forward jump", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -3), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .retval = 4, -}, -{ - "bounded loop that jumps out rather than in", - .insns = { - BPF_MOV64_IMM(BPF_REG_6, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), - BPF_JMP_IMM(BPF_JGT, BPF_REG_6, 10000, 2), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_JMP_A(-4), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "infinite loop after a conditional jump", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 5), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, 2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_JMP_A(-2), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "program is too large", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "bounded recursion", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 4, 1), - BPF_EXIT_INSN(), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "back-edge", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "infinite loop in two jumps", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_A(0), - 
BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "loop detected", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "infinite loop: three-jump trick", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, 1), - BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1), - BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, -11), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "loop detected", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "not-taken loop with back jump to 1st insn", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 123), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, -2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .retval = 123, -}, -{ - "taken loop with back jump to 1st insn", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, -3), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .retval = 55, -}, -{ - "taken loop with back jump to 1st insn, 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1), - BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, -3), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_XDP, - .retval = 55, -}, -- cgit v1.2.3-70-g09d2 From b427ca576f83d6853fc323639c5f4608e876527f Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:20 +0300 Subject: selftests/bpf: verifier/lwt converted to inline assembly Test verifier/lwt automatically converted to use inline assembly. 
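For readers new to this inline assembly style: the %[name] tokens in
the programs below are named asm operands, bound through the __imm*()
helpers from bpf_misc.h, which expand to immediate input constraints
roughly like this (illustrative paraphrase, not a verbatim copy of the
header):

	#define __imm(name)             [name]"i"(name)
	#define __imm_const(name, expr) [name]"i"(expr)

So %[__sk_buff_data] resolves to the compile-time constant
offsetof(struct __sk_buff, data), and call %[bpf_skb_change_head]
assembles to an ordinary call of the bpf_skb_change_head() helper.
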
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-11-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + tools/testing/selftests/bpf/progs/verifier_lwt.c | 234 ++++++++++++++++++++++ tools/testing/selftests/bpf/verifier/lwt.c | 189 ----------------- 3 files changed, 236 insertions(+), 189 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_lwt.c delete mode 100644 tools/testing/selftests/bpf/verifier/lwt.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 632546912627..32e55c8e3e3b 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -33,6 +33,7 @@ #include "verifier_ld_ind.skel.h" #include "verifier_leak_ptr.skel.h" #include "verifier_loops1.skel.h" +#include "verifier_lwt.skel.h" #include "verifier_map_ptr.skel.h" #include "verifier_map_ret_val.skel.h" #include "verifier_masking.skel.h" @@ -117,6 +118,7 @@ void test_verifier_jeq_infer_not_null(void) { RUN(verifier_jeq_infer_not_null) void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } void test_verifier_loops1(void) { RUN(verifier_loops1); } +void test_verifier_lwt(void) { RUN(verifier_lwt); } void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } void test_verifier_masking(void) { RUN(verifier_masking); } diff --git a/tools/testing/selftests/bpf/progs/verifier_lwt.c b/tools/testing/selftests/bpf/progs/verifier_lwt.c new file mode 100644 index 000000000000..5ab746307309 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_lwt.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/lwt.c */ + +#include +#include +#include "bpf_misc.h" + +SEC("lwt_in") +__description("invalid direct packet write for LWT_IN") +__failure __msg("cannot write into packet") +__naked void packet_write_for_lwt_in(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_out") +__description("invalid direct packet write for LWT_OUT") +__failure __msg("cannot write into packet") +__naked void packet_write_for_lwt_out(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_xmit") +__description("direct packet write for LWT_XMIT") +__success __retval(0) +__naked void packet_write_for_lwt_xmit(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + *(u8*)(r2 + 0) = r2; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + 
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_in") +__description("direct packet read for LWT_IN") +__success __retval(0) +__naked void packet_read_for_lwt_in(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_out") +__description("direct packet read for LWT_OUT") +__success __retval(0) +__naked void packet_read_for_lwt_out(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_xmit") +__description("direct packet read for LWT_XMIT") +__success __retval(0) +__naked void packet_read_for_lwt_xmit(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r0 = *(u8*)(r2 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_xmit") +__description("overlapping checks for direct packet access") +__success __retval(0) +__naked void checks_for_direct_packet_access(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 8; \ + if r0 > r3 goto l0_%=; \ + r1 = r2; \ + r1 += 6; \ + if r1 > r3 goto l0_%=; \ + r0 = *(u16*)(r2 + 6); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("lwt_xmit") +__description("make headroom for LWT_XMIT") +__success __retval(0) +__naked void make_headroom_for_lwt_xmit(void) +{ + asm volatile (" \ + r6 = r1; \ + r2 = 34; \ + r3 = 0; \ + call %[bpf_skb_change_head]; \ + /* split for s390 to succeed */ \ + r1 = r6; \ + r2 = 42; \ + r3 = 0; \ + call %[bpf_skb_change_head]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_skb_change_head) + : __clobber_all); +} + +SEC("socket") +__description("invalid access of tc_classid for LWT_IN") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void tc_classid_for_lwt_in(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \ + exit; \ +" : + : __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid)) + : __clobber_all); +} + +SEC("socket") +__description("invalid access of tc_classid for LWT_OUT") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void tc_classid_for_lwt_out(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \ + exit; \ +" : + : __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid)) + : __clobber_all); +} + +SEC("socket") +__description("invalid access of tc_classid for LWT_XMIT") +__failure __msg("invalid bpf_context access") +__failure_unpriv +__naked void 
tc_classid_for_lwt_xmit(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]); \ + exit; \ +" : + : __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid)) + : __clobber_all); +} + +SEC("lwt_in") +__description("check skb->tc_classid half load not permitted for lwt prog") +__failure __msg("invalid bpf_context access") +__naked void not_permitted_for_lwt_prog(void) +{ + asm volatile ( + "r0 = 0;" +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r0 = *(u16*)(r1 + %[__sk_buff_tc_classid]);" +#else + "r0 = *(u16*)(r1 + %[__imm_0]);" +#endif + "exit;" + : + : __imm_const(__imm_0, offsetof(struct __sk_buff, tc_classid) + 2), + __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/lwt.c b/tools/testing/selftests/bpf/verifier/lwt.c deleted file mode 100644 index 5c8944d0b091..000000000000 --- a/tools/testing/selftests/bpf/verifier/lwt.c +++ /dev/null @@ -1,189 +0,0 @@ -{ - "invalid direct packet write for LWT_IN", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "cannot write into packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_IN, -}, -{ - "invalid direct packet write for LWT_OUT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "cannot write into packet", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_OUT, -}, -{ - "direct packet write for LWT_XMIT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_XMIT, -}, -{ - "direct packet read for LWT_IN", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_IN, -}, -{ - "direct packet read for LWT_OUT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, 
BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_OUT, -}, -{ - "direct packet read for LWT_XMIT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_XMIT, -}, -{ - "overlapping checks for direct packet access", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_XMIT, -}, -{ - "make headroom for LWT_XMIT", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_2, 34), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_skb_change_head), - /* split for s390 to succeed */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_IMM(BPF_REG_2, 42), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_skb_change_head), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_LWT_XMIT, -}, -{ - "invalid access of tc_classid for LWT_IN", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", -}, -{ - "invalid access of tc_classid for LWT_OUT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", -}, -{ - "invalid access of tc_classid for LWT_XMIT", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", -}, -{ - "check skb->tc_classid half load not permitted for lwt prog", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid)), -#else - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, tc_classid) + 2), -#endif - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "invalid bpf_context access", - .prog_type = BPF_PROG_TYPE_LWT_IN, -}, -- cgit v1.2.3-70-g09d2 From 4a400ef9ba416494944db52f95d0f16ea04adfdf Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:21 +0300 Subject: selftests/bpf: verifier/map_in_map converted to inline assembly Test verifier/map_in_map automatically converted to use inline assembly. 
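The main mechanical difference is how the map is referenced: the old runner patched a map fd into a BPF_LD_MAP_FD() slot at load time via .fixup_map_in_map, while the converted tests declare the map-in-map as a BTF-defined map and load its address directly from the assembly (sketch of the correspondence, abridged):

Old form, verifier/map_in_map.c:

	BPF_LD_MAP_FD(BPF_REG_1, 0),
	...
	.fixup_map_in_map = { 3 },

New form, progs/verifier_map_in_map.c:

	} map_in_map SEC(".maps");	/* BPF_MAP_TYPE_ARRAY_OF_MAPS declared in C */
	...
	r1 = %[map_in_map] ll;		/* inside the asm volatile() body */
	...
	: __imm_addr(map_in_map)	/* binds %[map_in_map] to the map above */

One non-mechanical delta: the "map in map state pruning" case now expects "processed 26 insns" rather than 25.
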
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-12-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_map_in_map.c | 142 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/map_in_map.c | 96 -------------- 3 files changed, 144 insertions(+), 96 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_map_in_map.c delete mode 100644 tools/testing/selftests/bpf/verifier/map_in_map.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 32e55c8e3e3b..a5ba0e198246 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -34,6 +34,7 @@ #include "verifier_leak_ptr.skel.h" #include "verifier_loops1.skel.h" #include "verifier_lwt.skel.h" +#include "verifier_map_in_map.skel.h" #include "verifier_map_ptr.skel.h" #include "verifier_map_ret_val.skel.h" #include "verifier_masking.skel.h" @@ -119,6 +120,7 @@ void test_verifier_ld_ind(void) { RUN(verifier_ld_ind); } void test_verifier_leak_ptr(void) { RUN(verifier_leak_ptr); } void test_verifier_loops1(void) { RUN(verifier_loops1); } void test_verifier_lwt(void) { RUN(verifier_lwt); } +void test_verifier_map_in_map(void) { RUN(verifier_map_in_map); } void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } void test_verifier_masking(void) { RUN(verifier_masking); } diff --git a/tools/testing/selftests/bpf/progs/verifier_map_in_map.c b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c new file mode 100644 index 000000000000..4eaab1468eb7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_map_in_map.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/map_in_map.c */ + +#include +#include +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + __array(values, struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + }); +} map_in_map SEC(".maps"); + +SEC("socket") +__description("map in map access") +__success __success_unpriv __retval(0) +__naked void map_in_map_access(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = r0; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("xdp") +__description("map in map state pruning") +__success __msg("processed 26 insns") +__log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ) +__naked void map_in_map_state_pruning(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r6 = r10; \ + r6 += -4; \ + r2 = r6; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r2 = r6; \ + r1 = r0; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l1_%=; \ + r2 = r6; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l2_%=; \ + exit; \ +l2_%=: r2 = r6; \ + r1 = r0; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l1_%=; \ 
+ exit; \ +l1_%=: r0 = *(u32*)(r0 + 0); \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("socket") +__description("invalid inner map pointer") +__failure __msg("R1 pointer arithmetic on map_ptr prohibited") +__failure_unpriv +__naked void invalid_inner_map_pointer(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = r0; \ + r1 += 8; \ + call %[bpf_map_lookup_elem]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("socket") +__description("forgot null checking on the inner map pointer") +__failure __msg("R1 type=map_value_or_null expected=map_ptr") +__failure_unpriv +__naked void on_the_inner_map_pointer(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_in_map] ll; \ + call %[bpf_map_lookup_elem]; \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = r0; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_in_map) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/map_in_map.c b/tools/testing/selftests/bpf/verifier/map_in_map.c deleted file mode 100644 index 128a348b762d..000000000000 --- a/tools/testing/selftests/bpf/verifier/map_in_map.c +++ /dev/null @@ -1,96 +0,0 @@ -{ - "map in map access", - .insns = { - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_in_map = { 3 }, - .result = ACCEPT, -}, -{ - "map in map state pruning", - .insns = { - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 11), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_in_map = { 4, 14 }, - .flags = BPF_F_TEST_STATE_FREQ, - .result = VERBOSE_ACCEPT, - .errstr = "processed 25 insns", - .prog_type = BPF_PROG_TYPE_XDP, -}, -{ - "invalid inner map pointer", - .insns = { - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - 
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_in_map = { 3 }, - .errstr = "R1 pointer arithmetic on map_ptr prohibited", - .result = REJECT, -}, -{ - "forgot null checking on the inner map pointer", - .insns = { - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_ST_MEM(0, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_in_map = { 3 }, - .errstr = "R1 type=map_value_or_null expected=map_ptr", - .result = REJECT, -}, -- cgit v1.2.3-70-g09d2 From aee1779f0deca501e904fbf007ccb7232972e518 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:22 +0300 Subject: selftests/bpf: verifier/map_ptr_mixing converted to inline assembly Test verifier/map_ptr_mixing automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-13-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_map_ptr_mixing.c | 265 +++++++++++++++++++++ .../selftests/bpf/verifier/map_ptr_mixing.c | 100 -------- 3 files changed, 267 insertions(+), 100 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c delete mode 100644 tools/testing/selftests/bpf/verifier/map_ptr_mixing.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index a5ba0e198246..58202c3fca07 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -36,6 +36,7 @@ #include "verifier_lwt.skel.h" #include "verifier_map_in_map.skel.h" #include "verifier_map_ptr.skel.h" +#include "verifier_map_ptr_mixing.skel.h" #include "verifier_map_ret_val.skel.h" #include "verifier_masking.skel.h" #include "verifier_meta_access.skel.h" @@ -122,6 +123,7 @@ void test_verifier_loops1(void) { RUN(verifier_loops1); } void test_verifier_lwt(void) { RUN(verifier_lwt); } void test_verifier_map_in_map(void) { RUN(verifier_map_in_map); } void test_verifier_map_ptr(void) { RUN(verifier_map_ptr); } +void test_verifier_map_ptr_mixing(void) { RUN(verifier_map_ptr_mixing); } void test_verifier_map_ret_val(void) { RUN(verifier_map_ret_val); } void test_verifier_masking(void) { RUN(verifier_masking); } void test_verifier_meta_access(void) { RUN(verifier_meta_access); } diff --git a/tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c b/tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c new file mode 100644 index 000000000000..c5a7c1ddc562 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/map_ptr_mixing.c */ + +#include +#include +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + __array(values, struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); + }); +} map_in_map SEC(".maps"); + +void dummy_prog_42_socket(void); +void dummy_prog_24_socket(void); +void dummy_prog_loop1_socket(void); +void dummy_prog_loop2_socket(void); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 4); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog1_socket SEC(".maps") = { + .values = { + [0] = (void *)&dummy_prog_42_socket, + [1] = (void *)&dummy_prog_loop1_socket, + [2] = (void *)&dummy_prog_24_socket, + }, +}; + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 8); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog2_socket SEC(".maps") = { + .values = { + [1] = (void *)&dummy_prog_loop2_socket, + [2] = (void *)&dummy_prog_24_socket, + [7] = (void *)&dummy_prog_42_socket, + }, +}; + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_42_socket(void) +{ + asm volatile ("r0 = 42; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_24_socket(void) +{ + asm volatile ("r0 = 24; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop1_socket(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop2_socket(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog2_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog2_socket) + : __clobber_all); +} + +SEC("tc") +__description("calls: two calls returning different map pointers for lookup (hash, array)") +__success __retval(1) +__naked void pointers_for_lookup_hash_array(void) +{ + asm volatile (" \ + /* main prog */ \ + if r1 != 0 goto l0_%=; \ + call pointers_for_lookup_hash_array__1; \ + goto l1_%=; \ +l0_%=: call pointers_for_lookup_hash_array__2; \ +l1_%=: r1 = r0; \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + r2 = r10; \ + r2 += -8; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ + r0 = 1; \ +l2_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void pointers_for_lookup_hash_array__1(void) +{ + asm volatile (" \ + r0 = %[map_hash_48b] ll; \ + exit; \ +" : + : __imm_addr(map_hash_48b) + : __clobber_all); +} + +static __naked __noinline 
__attribute__((used)) +void pointers_for_lookup_hash_array__2(void) +{ + asm volatile (" \ + r0 = %[map_array_48b] ll; \ + exit; \ +" : + : __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("tc") +__description("calls: two calls returning different map pointers for lookup (hash, map in map)") +__failure __msg("only read from bpf_array is supported") +__naked void lookup_hash_map_in_map(void) +{ + asm volatile (" \ + /* main prog */ \ + if r1 != 0 goto l0_%=; \ + call lookup_hash_map_in_map__1; \ + goto l1_%=; \ +l0_%=: call lookup_hash_map_in_map__2; \ +l1_%=: r1 = r0; \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + r2 = r10; \ + r2 += -8; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ + r0 = 1; \ +l2_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void lookup_hash_map_in_map__1(void) +{ + asm volatile (" \ + r0 = %[map_array_48b] ll; \ + exit; \ +" : + : __imm_addr(map_array_48b) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void lookup_hash_map_in_map__2(void) +{ + asm volatile (" \ + r0 = %[map_in_map] ll; \ + exit; \ +" : + : __imm_addr(map_in_map) + : __clobber_all); +} + +SEC("socket") +__description("cond: two branches returning different map pointers for lookup (tail, tail)") +__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr") +__retval(42) +__naked void pointers_for_lookup_tail_tail_1(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ + if r6 != 0 goto l0_%=; \ + r2 = %[map_prog2_socket] ll; \ + goto l1_%=; \ +l0_%=: r2 = %[map_prog1_socket] ll; \ +l1_%=: r3 = 7; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_addr(map_prog2_socket), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +SEC("socket") +__description("cond: two branches returning same map pointers for lookup (tail, tail)") +__success __success_unpriv __retval(42) +__naked void pointers_for_lookup_tail_tail_2(void) +{ + asm volatile (" \ + r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ + if r6 == 0 goto l0_%=; \ + r2 = %[map_prog2_socket] ll; \ + goto l1_%=; \ +l0_%=: r2 = %[map_prog2_socket] ll; \ +l1_%=: r3 = 7; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog2_socket), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c deleted file mode 100644 index 1f2b8c4cb26d..000000000000 --- a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c +++ /dev/null @@ -1,100 +0,0 @@ -{ - "calls: two calls returning different map pointers for lookup (hash, array)", - .insns = { - /* main prog */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_CALL_REL(11), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_CALL_REL(12), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - /* subprog 1 */ - 
BPF_LD_MAP_FD(BPF_REG_0, 0), - BPF_EXIT_INSN(), - /* subprog 2 */ - BPF_LD_MAP_FD(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_hash_48b = { 13 }, - .fixup_map_array_48b = { 16 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "calls: two calls returning different map pointers for lookup (hash, map in map)", - .insns = { - /* main prog */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_CALL_REL(11), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_CALL_REL(12), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - /* subprog 1 */ - BPF_LD_MAP_FD(BPF_REG_0, 0), - BPF_EXIT_INSN(), - /* subprog 2 */ - BPF_LD_MAP_FD(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .fixup_map_in_map = { 16 }, - .fixup_map_array_48b = { 13 }, - .result = REJECT, - .errstr = "only read from bpf_array is supported", -}, -{ - "cond: two branches returning different map pointers for lookup (tail, tail)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 3), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5 }, - .fixup_prog2 = { 2 }, - .result_unpriv = REJECT, - .errstr_unpriv = "tail_call abusing map_ptr", - .result = ACCEPT, - .retval = 42, -}, -{ - "cond: two branches returning same map pointers for lookup (tail, tail)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, - offsetof(struct __sk_buff, mark)), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 3), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog2 = { 2, 5 }, - .result_unpriv = ACCEPT, - .result = ACCEPT, - .retval = 42, -}, -- cgit v1.2.3-70-g09d2 From 8be632795996c198a26b6f39727d9c92007bb6a5 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:25 +0300 Subject: selftests/bpf: verifier/ref_tracking converted to inline assembly Test verifier/ref_tracking automatically converted to use inline assembly. 
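Two support pieces in the new file are worth noting. The BPF_SK_LOOKUP() convenience used throughout the original tests is kept, but as an assembly-string macro that zeroes a struct bpf_sock_tuple on the stack and calls the requested lookup helper (abridged excerpt):

	#define BPF_SK_LOOKUP(func)					\
		/* struct bpf_sock_tuple tuple = {} */			\
		"r2 = 0;"						\
		"*(u32*)(r10 - 8) = r2;"				\
		...							\
		"*(u64*)(r10 - 48) = r2;"				\
		/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */	\
		"r2 = r10;"						\
		"r2 += -48;"						\
		"r3 = %[sizeof_bpf_sock_tuple];"			\
		"r4 = 0;"						\
		"r5 = 0;"						\
		"call %[" #func "];"

And because BTF FUNC records are not generated for kfuncs referenced only from inline assembly, a dummy __kfunc_btf_root() function calls bpf_key_put(), bpf_lookup_system_key() and bpf_lookup_user_key() from C so that libbpf can link the program.
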
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-16-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_ref_tracking.c | 1495 ++++++++++++++++++++ .../testing/selftests/bpf/verifier/ref_tracking.c | 1082 -------------- 3 files changed, 1497 insertions(+), 1082 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_ref_tracking.c delete mode 100644 tools/testing/selftests/bpf/verifier/ref_tracking.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 58202c3fca07..75efbc20fb8e 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -45,6 +45,7 @@ #include "verifier_raw_stack.skel.h" #include "verifier_raw_tp_writable.skel.h" #include "verifier_reg_equal.skel.h" +#include "verifier_ref_tracking.skel.h" #include "verifier_ringbuf.skel.h" #include "verifier_spill_fill.skel.h" #include "verifier_stack_ptr.skel.h" @@ -132,6 +133,7 @@ void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); } void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); } void test_verifier_reg_equal(void) { RUN(verifier_reg_equal); } +void test_verifier_ref_tracking(void) { RUN(verifier_ref_tracking); } void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } diff --git a/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c new file mode 100644 index 000000000000..c4c6da21265e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_ref_tracking.c @@ -0,0 +1,1495 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/ref_tracking.c */ + +#include +#include +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +#define BPF_SK_LOOKUP(func) \ + /* struct bpf_sock_tuple tuple = {} */ \ + "r2 = 0;" \ + "*(u32*)(r10 - 8) = r2;" \ + "*(u64*)(r10 - 16) = r2;" \ + "*(u64*)(r10 - 24) = r2;" \ + "*(u64*)(r10 - 32) = r2;" \ + "*(u64*)(r10 - 40) = r2;" \ + "*(u64*)(r10 - 48) = r2;" \ + /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \ + "r2 = r10;" \ + "r2 += -48;" \ + "r3 = %[sizeof_bpf_sock_tuple];"\ + "r4 = 0;" \ + "r5 = 0;" \ + "call %[" #func "];" + +struct bpf_key {} __attribute__((preserve_access_index)); + +extern void bpf_key_put(struct bpf_key *key) __ksym; +extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym; +extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym; + +/* BTF FUNC records are not generated for kfuncs referenced + * from inline assembly. These records are necessary for + * libbpf to link the program. The function below is a hack + * to ensure that BTF FUNC records are generated. 
+ */ +void __kfunc_btf_root(void) +{ + bpf_key_put(0); + bpf_lookup_system_key(0); + bpf_lookup_user_key(0, 0); +} + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 4096); +} map_ringbuf SEC(".maps"); + +void dummy_prog_42_tc(void); +void dummy_prog_24_tc(void); +void dummy_prog_loop1_tc(void); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 4); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog1_tc SEC(".maps") = { + .values = { + [0] = (void *)&dummy_prog_42_tc, + [1] = (void *)&dummy_prog_loop1_tc, + [2] = (void *)&dummy_prog_24_tc, + }, +}; + +SEC("tc") +__auxiliary +__naked void dummy_prog_42_tc(void) +{ + asm volatile ("r0 = 42; exit;"); +} + +SEC("tc") +__auxiliary +__naked void dummy_prog_24_tc(void) +{ + asm volatile ("r0 = 24; exit;"); +} + +SEC("tc") +__auxiliary +__naked void dummy_prog_loop1_tc(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_tc] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_tc) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak potential reference") +__failure __msg("Unreleased reference") +__naked void reference_tracking_leak_potential_reference(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; /* leak reference */ \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak potential reference to sock_common") +__failure __msg("Unreleased reference") +__naked void potential_reference_to_sock_common_1(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_skc_lookup_tcp) +" r6 = r0; /* leak reference */ \ + exit; \ +" : + : __imm(bpf_skc_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak potential reference on stack") +__failure __msg("Unreleased reference") +__naked void leak_potential_reference_on_stack(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r4 = r10; \ + r4 += -8; \ + *(u64*)(r4 + 0) = r0; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak potential reference on stack 2") +__failure __msg("Unreleased reference") +__naked void potential_reference_on_stack_2(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r4 = r10; \ + r4 += -8; \ + *(u64*)(r4 + 0) = r0; \ + r0 = 0; \ + r1 = 0; \ + *(u64*)(r4 + 0) = r1; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: zero potential reference") +__failure __msg("Unreleased reference") +__naked void reference_tracking_zero_potential_reference(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r0 = 0; /* leak reference */ \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: zero 
potential reference to sock_common") +__failure __msg("Unreleased reference") +__naked void potential_reference_to_sock_common_2(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_skc_lookup_tcp) +" r0 = 0; /* leak reference */ \ + exit; \ +" : + : __imm(bpf_skc_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: copy and zero potential references") +__failure __msg("Unreleased reference") +__naked void copy_and_zero_potential_references(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r7 = r0; \ + r0 = 0; \ + r7 = 0; /* leak reference */ \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: acquire/release user key reference") +__success +__naked void acquire_release_user_key_reference(void) +{ + asm volatile (" \ + r1 = -3; \ + r2 = 0; \ + call %[bpf_lookup_user_key]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + call %[bpf_key_put]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put), + __imm(bpf_lookup_user_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: acquire/release system key reference") +__success +__naked void acquire_release_system_key_reference(void) +{ + asm volatile (" \ + r1 = 1; \ + call %[bpf_lookup_system_key]; \ + if r0 == 0 goto l0_%=; \ + r1 = r0; \ + call %[bpf_key_put]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put), + __imm(bpf_lookup_system_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: release user key reference without check") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +__naked void user_key_reference_without_check(void) +{ + asm volatile (" \ + r1 = -3; \ + r2 = 0; \ + call %[bpf_lookup_user_key]; \ + r1 = r0; \ + call %[bpf_key_put]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put), + __imm(bpf_lookup_user_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: release system key reference without check") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +__naked void system_key_reference_without_check(void) +{ + asm volatile (" \ + r1 = 1; \ + call %[bpf_lookup_system_key]; \ + r1 = r0; \ + call %[bpf_key_put]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put), + __imm(bpf_lookup_system_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: release with NULL key pointer") +__failure __msg("Possibly NULL pointer passed to trusted arg0") +__naked void release_with_null_key_pointer(void) +{ + asm volatile (" \ + r1 = 0; \ + call %[bpf_key_put]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_key_put) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: leak potential reference to user key") +__failure __msg("Unreleased reference") +__naked void potential_reference_to_user_key(void) +{ + asm volatile (" \ + r1 = -3; \ + r2 = 0; \ + call %[bpf_lookup_user_key]; \ + exit; \ +" : + : __imm(bpf_lookup_user_key) + : __clobber_all); +} + +SEC("lsm.s/bpf") +__description("reference tracking: leak potential reference to system key") +__failure __msg("Unreleased reference") +__naked void potential_reference_to_system_key(void) +{ + asm volatile (" \ + r1 = 1; \ + call %[bpf_lookup_system_key]; \ + exit; \ +" : + : __imm(bpf_lookup_system_key) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release 
reference without check") +__failure __msg("type=sock_or_null expected=sock") +__naked void tracking_release_reference_without_check(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* reference in r0 may be NULL */ \ + r1 = r0; \ + r2 = 0; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference to sock_common without check") +__failure __msg("type=sock_common_or_null expected=sock") +__naked void to_sock_common_without_check(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_skc_lookup_tcp) +" /* reference in r0 may be NULL */ \ + r1 = r0; \ + r2 = 0; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_release), + __imm(bpf_skc_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference") +__success __retval(0) +__naked void reference_tracking_release_reference(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference to sock_common") +__success __retval(0) +__naked void release_reference_to_sock_common(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_skc_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_release), + __imm(bpf_skc_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference 2") +__success __retval(0) +__naked void reference_tracking_release_reference_2(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference twice") +__failure __msg("type=scalar expected=sock") +__naked void reference_tracking_release_reference_twice(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + r6 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference twice inside branch") +__failure __msg("type=scalar expected=sock") +__naked void release_reference_twice_inside_branch(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + r6 = r0; \ + if r0 == 0 goto l0_%=; /* goto end */ \ + call %[bpf_sk_release]; \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: alloc, check, free in one subbranch") +__failure __msg("Unreleased reference") +__flag(BPF_F_ANY_ALIGNMENT) 
+__naked void check_free_in_one_subbranch(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 16; \ + /* if (offsetof(skb, mark) > data_len) exit; */ \ + if r0 <= r3 goto l0_%=; \ + exit; \ +l0_%=: r6 = *(u32*)(r2 + %[__sk_buff_mark]); \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r6 == 0 goto l1_%=; /* mark == 0? */\ + /* Leak reference in R0 */ \ + exit; \ +l1_%=: if r0 == 0 goto l2_%=; /* sk NULL? */ \ + r1 = r0; \ + call %[bpf_sk_release]; \ +l2_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: alloc, check, free in both subbranches") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void check_free_in_both_subbranches(void) +{ + asm volatile (" \ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 16; \ + /* if (offsetof(skb, mark) > data_len) exit; */ \ + if r0 <= r3 goto l0_%=; \ + exit; \ +l0_%=: r6 = *(u32*)(r2 + %[__sk_buff_mark]); \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r6 == 0 goto l1_%=; /* mark == 0? */\ + if r0 == 0 goto l2_%=; /* sk NULL? */ \ + r1 = r0; \ + call %[bpf_sk_release]; \ +l2_%=: exit; \ +l1_%=: if r0 == 0 goto l3_%=; /* sk NULL? */ \ + r1 = r0; \ + call %[bpf_sk_release]; \ +l3_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: free reference in subprog") +__success __retval(0) +__naked void call_free_reference_in_subprog(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; /* unchecked reference */ \ + call call_free_reference_in_subprog__1; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void call_free_reference_in_subprog__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r2 = r1; \ + if r2 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_release) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: free reference in subprog and outside") +__failure __msg("type=scalar expected=sock") +__naked void reference_in_subprog_and_outside(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; /* unchecked reference */ \ + r6 = r0; \ + call reference_in_subprog_and_outside__1; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void reference_in_subprog_and_outside__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r2 = r1; \ + if r2 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_release) + : 
__clobber_all); +} + +SEC("tc") +__description("reference tracking in call: alloc & leak reference in subprog") +__failure __msg("Unreleased reference") +__naked void alloc_leak_reference_in_subprog(void) +{ + asm volatile (" \ + r4 = r10; \ + r4 += -8; \ + call alloc_leak_reference_in_subprog__1; \ + r1 = r0; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void alloc_leak_reference_in_subprog__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r6 = r4; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* spill unchecked sk_ptr into stack of caller */\ + *(u64*)(r6 + 0) = r0; \ + r1 = r0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: alloc in subprog, release outside") +__success __retval(POINTER_VALUE) +__naked void alloc_in_subprog_release_outside(void) +{ + asm volatile (" \ + r4 = r10; \ + call alloc_in_subprog_release_outside__1; \ + r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_release) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void alloc_in_subprog_release_outside__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" exit; /* return sk */ \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: sk_ptr leak into caller stack") +__failure __msg("Unreleased reference") +__naked void ptr_leak_into_caller_stack(void) +{ + asm volatile (" \ + r4 = r10; \ + r4 += -8; \ + call ptr_leak_into_caller_stack__1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void ptr_leak_into_caller_stack__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r5 = r10; \ + r5 += -8; \ + *(u64*)(r5 + 0) = r4; \ + call ptr_leak_into_caller_stack__2; \ + /* spill unchecked sk_ptr into stack of caller */\ + r5 = r10; \ + r5 += -8; \ + r4 = *(u64*)(r5 + 0); \ + *(u64*)(r4 + 0) = r0; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void ptr_leak_into_caller_stack__2(void) +{ + asm volatile (" \ + /* subprog 2 */ \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking in call: sk_ptr spill into caller stack") +__success __retval(0) +__naked void ptr_spill_into_caller_stack(void) +{ + asm volatile (" \ + r4 = r10; \ + r4 += -8; \ + call ptr_spill_into_caller_stack__1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void ptr_spill_into_caller_stack__1(void) +{ + asm volatile (" \ + /* subprog 1 */ \ + r5 = r10; \ + r5 += -8; \ + *(u64*)(r5 + 0) = r4; \ + call ptr_spill_into_caller_stack__2; \ + /* spill unchecked sk_ptr into stack of caller */\ + r5 = r10; \ + r5 += -8; \ + r4 = *(u64*)(r5 + 0); \ + *(u64*)(r4 + 0) = r0; \ + if r0 == 0 goto l0_%=; \ + /* now the sk_ptr is verified, free the reference */\ + r1 = *(u64*)(r4 + 0); \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_release) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void ptr_spill_into_caller_stack__2(void) +{ + asm volatile (" \ + /* subprog 2 */ \ +" 
BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: allow LD_ABS") +__success __retval(0) +__naked void reference_tracking_allow_ld_abs(void) +{ + asm volatile (" \ + r6 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: r0 = *(u8*)skb[0]; \ + r0 = *(u16*)skb[0]; \ + r0 = *(u32*)skb[0]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: forbid LD_ABS while holding reference") +__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references") +__naked void ld_abs_while_holding_reference(void) +{ + asm volatile (" \ + r6 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r0 = *(u8*)skb[0]; \ + r0 = *(u16*)skb[0]; \ + r0 = *(u32*)skb[0]; \ + r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: allow LD_IND") +__success __retval(1) +__naked void reference_tracking_allow_ld_ind(void) +{ + asm volatile (" \ + r6 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: r7 = 1; \ + .8byte %[ld_ind]; \ + r0 = r7; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)), + __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: forbid LD_IND while holding reference") +__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references") +__naked void ld_ind_while_holding_reference(void) +{ + asm volatile (" \ + r6 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r4 = r0; \ + r7 = 1; \ + .8byte %[ld_ind]; \ + r0 = r7; \ + r1 = r4; \ + if r1 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)), + __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: check reference or tail call") +__success __retval(0) +__naked void check_reference_or_tail_call(void) +{ + asm volatile (" \ + r7 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* if (sk) bpf_sk_release() */ \ + r1 = r0; \ + if r1 != 0 goto l0_%=; \ + /* bpf_tail_call() */ \ + r3 = 3; \ + r2 = %[map_prog1_tc] ll; \ + r1 = r7; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tail_call), + __imm_addr(map_prog1_tc), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: release reference then tail call") +__success __retval(0) +__naked void release_reference_then_tail_call(void) +{ + asm volatile (" \ + r7 = r1; \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* if (sk) bpf_sk_release() */ \ + r1 = r0; \ + if r1 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: /* bpf_tail_call() */ \ + r3 = 3; \ + r2 = 
%[map_prog1_tc] ll; \ + r1 = r7; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tail_call), + __imm_addr(map_prog1_tc), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak possible reference over tail call") +__failure __msg("tail_call would lead to reference leak") +__naked void possible_reference_over_tail_call(void) +{ + asm volatile (" \ + r7 = r1; \ + /* Look up socket and store in REG_6 */ \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" /* bpf_tail_call() */ \ + r6 = r0; \ + r3 = 3; \ + r2 = %[map_prog1_tc] ll; \ + r1 = r7; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + /* if (sk) bpf_sk_release() */ \ + r1 = r6; \ + if r1 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tail_call), + __imm_addr(map_prog1_tc), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: leak checked reference over tail call") +__failure __msg("tail_call would lead to reference leak") +__naked void checked_reference_over_tail_call(void) +{ + asm volatile (" \ + r7 = r1; \ + /* Look up socket and store in REG_6 */ \ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + /* if (!sk) goto end */ \ + if r0 == 0 goto l0_%=; \ + /* bpf_tail_call() */ \ + r3 = 0; \ + r2 = %[map_prog1_tc] ll; \ + r1 = r7; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + r1 = r6; \ +l0_%=: call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tail_call), + __imm_addr(map_prog1_tc), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: mangle and release sock_or_null") +__failure __msg("R1 pointer arithmetic on sock_or_null prohibited") +__naked void and_release_sock_or_null(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + r1 += 5; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: mangle and release sock") +__failure __msg("R1 pointer arithmetic on sock prohibited") +__naked void tracking_mangle_and_release_sock(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + r1 += 5; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: access member") +__success __retval(0) +__naked void reference_tracking_access_member(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + if r0 == 0 goto l0_%=; \ + r2 = *(u32*)(r0 + 4); \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: write to member") +__failure __msg("cannot write into sock") +__naked void reference_tracking_write_to_member(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + if r0 == 0 goto l0_%=; \ + 
r1 = r6; \ + r2 = 42 ll; \ + *(u32*)(r1 + %[bpf_sock_mark]) = r2; \ + r1 = r6; \ +l0_%=: call %[bpf_sk_release]; \ + r0 = 0 ll; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: invalid 64-bit access of member") +__failure __msg("invalid sock access off=0 size=8") +__naked void _64_bit_access_of_member(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + if r0 == 0 goto l0_%=; \ + r2 = *(u64*)(r0 + 0); \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: access after release") +__failure __msg("!read_ok") +__naked void reference_tracking_access_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r1 = r0; \ + if r0 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ + r2 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: direct access for lookup") +__success __retval(0) +__naked void tracking_direct_access_for_lookup(void) +{ + asm volatile (" \ + /* Check that the packet is at least 64B long */\ + r2 = *(u32*)(r1 + %[__sk_buff_data]); \ + r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r0 = r2; \ + r0 += 64; \ + if r0 > r3 goto l0_%=; \ + /* sk = sk_lookup_tcp(ctx, skb->data, ...) */ \ + r3 = %[sizeof_bpf_sock_tuple]; \ + r4 = 0; \ + r5 = 0; \ + call %[bpf_sk_lookup_tcp]; \ + r6 = r0; \ + if r0 == 0 goto l0_%=; \ + r2 = *(u32*)(r0 + 4); \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_tcp_sock() after release") +__failure __msg("invalid mem access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void bpf_tcp_sock_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r7 = r0; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + r0 = *(u32*)(r7 + %[bpf_tcp_sock_snd_cwnd]); \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_sk_fullsock() after release") +__failure __msg("invalid mem access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void bpf_sk_fullsock_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r7 = r0; \ + r1 = r6; \ + 
call %[bpf_sk_release]; \ + r0 = *(u32*)(r7 + %[bpf_sock_type]); \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_sk_fullsock(tp) after release") +__failure __msg("invalid mem access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void sk_fullsock_tp_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_fullsock]; \ + r1 = r6; \ + r6 = r0; \ + call %[bpf_sk_release]; \ + if r6 != 0 goto l2_%=; \ + exit; \ +l2_%=: r0 = *(u32*)(r6 + %[bpf_sock_type]); \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use sk after bpf_sk_release(tp)") +__failure __msg("invalid mem access") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void after_bpf_sk_release_tp(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + r0 = *(u32*)(r6 + %[bpf_sock_type]); \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)") +__success __retval(0) +__naked void after_bpf_sk_release_sk(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_get_listener_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r6; \ + r6 = r0; \ + call %[bpf_sk_release]; \ + r0 = *(u32*)(r6 + %[bpf_sock_src_port]); \ + exit; \ +" : + : __imm(bpf_get_listener_sock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: bpf_sk_release(listen_sk)") +__failure __msg("R1 must be referenced when passed to release function") +__naked void bpf_sk_release_listen_sk(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_get_listener_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + r0 = *(u32*)(r6 + %[bpf_sock_type]); \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_get_listener_sock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct 
bpf_sock_tuple)) + : __clobber_all); +} + +/* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */ +SEC("tc") +__description("reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)") +__failure __msg("invalid mem access") +__naked void and_bpf_tcp_sock_sk(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_sk_fullsock]; \ + r7 = r0; \ + r1 = r6; \ + call %[bpf_tcp_sock]; \ + r8 = r0; \ + if r7 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r0 = *(u32*)(r8 + %[bpf_tcp_sock_snd_cwnd]); \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: branch tracking valid pointer null comparison") +__success __retval(0) +__naked void tracking_valid_pointer_null_comparison(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + r3 = 1; \ + if r6 != 0 goto l0_%=; \ + r3 = 0; \ +l0_%=: if r6 == 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l1_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: branch tracking valid pointer value comparison") +__failure __msg("Unreleased reference") +__naked void tracking_valid_pointer_value_comparison(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r6 = r0; \ + r3 = 1; \ + if r6 == 0 goto l0_%=; \ + r3 = 0; \ + if r6 == 1234 goto l0_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ +l0_%=: exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: bpf_sk_release(btf_tcp_sock)") +__success +__retval(0) +__naked void sk_release_btf_tcp_sock(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_skc_to_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_skc_to_tcp_sock), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("reference tracking: use ptr from bpf_skc_to_tcp_sock() after release") +__failure __msg("invalid mem access") +__naked void to_tcp_sock_after_release(void) +{ + asm volatile ( + BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + call %[bpf_skc_to_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + exit; \ +l1_%=: r7 = r0; \ + r1 = r6; \ + call %[bpf_sk_release]; \ + r0 = *(u8*)(r7 + 0); \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm(bpf_skc_to_tcp_sock), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("socket") +__description("reference tracking: try to leak released ptr reg") +__success __failure_unpriv __msg_unpriv("R8 !read_ok") 
+__retval(0) +__naked void to_leak_released_ptr_reg(void) +{ + asm volatile (" \ + r0 = 0; \ + *(u32*)(r10 - 4) = r0; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r9 = r0; \ + r0 = 0; \ + r1 = %[map_ringbuf] ll; \ + r2 = 8; \ + r3 = 0; \ + call %[bpf_ringbuf_reserve]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r8 = r0; \ + r1 = r8; \ + r2 = 0; \ + call %[bpf_ringbuf_discard]; \ + r0 = 0; \ + *(u64*)(r9 + 0) = r8; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_ringbuf_discard), + __imm(bpf_ringbuf_reserve), + __imm_addr(map_array_48b), + __imm_addr(map_ringbuf) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c deleted file mode 100644 index 5a2e154dd1e0..000000000000 --- a/tools/testing/selftests/bpf/verifier/ref_tracking.c +++ /dev/null @@ -1,1082 +0,0 @@ -{ - "reference tracking: leak potential reference", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */ - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: leak potential reference to sock_common", - .insns = { - BPF_SK_LOOKUP(skc_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */ - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: leak potential reference on stack", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: leak potential reference on stack 2", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: zero potential reference", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */ - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: zero potential reference to sock_common", - .insns = { - BPF_SK_LOOKUP(skc_lookup_tcp), - BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */ - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: copy and zero potential references", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */ - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: acquire/release user key reference", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, -3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - 
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_LSM, - .kfunc = "bpf", - .expected_attach_type = BPF_LSM_MAC, - .flags = BPF_F_SLEEPABLE, - .fixup_kfunc_btf_id = { - { "bpf_lookup_user_key", 2 }, - { "bpf_key_put", 5 }, - }, - .result = ACCEPT, -}, -{ - "reference tracking: acquire/release system key reference", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_LSM, - .kfunc = "bpf", - .expected_attach_type = BPF_LSM_MAC, - .flags = BPF_F_SLEEPABLE, - .fixup_kfunc_btf_id = { - { "bpf_lookup_system_key", 1 }, - { "bpf_key_put", 4 }, - }, - .result = ACCEPT, -}, -{ - "reference tracking: release user key reference without check", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, -3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_LSM, - .kfunc = "bpf", - .expected_attach_type = BPF_LSM_MAC, - .flags = BPF_F_SLEEPABLE, - .errstr = "Possibly NULL pointer passed to trusted arg0", - .fixup_kfunc_btf_id = { - { "bpf_lookup_user_key", 2 }, - { "bpf_key_put", 4 }, - }, - .result = REJECT, -}, -{ - "reference tracking: release system key reference without check", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_LSM, - .kfunc = "bpf", - .expected_attach_type = BPF_LSM_MAC, - .flags = BPF_F_SLEEPABLE, - .errstr = "Possibly NULL pointer passed to trusted arg0", - .fixup_kfunc_btf_id = { - { "bpf_lookup_system_key", 1 }, - { "bpf_key_put", 3 }, - }, - .result = REJECT, -}, -{ - "reference tracking: release with NULL key pointer", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_LSM, - .kfunc = "bpf", - .expected_attach_type = BPF_LSM_MAC, - .flags = BPF_F_SLEEPABLE, - .errstr = "Possibly NULL pointer passed to trusted arg0", - .fixup_kfunc_btf_id = { - { "bpf_key_put", 1 }, - }, - .result = REJECT, -}, -{ - "reference tracking: leak potential reference to user key", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, -3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_LSM, - .kfunc = "bpf", - .expected_attach_type = BPF_LSM_MAC, - .flags = BPF_F_SLEEPABLE, - .errstr = "Unreleased reference", - .fixup_kfunc_btf_id = { - { "bpf_lookup_user_key", 2 }, - }, - .result = REJECT, -}, -{ - "reference tracking: leak potential reference to system key", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_LSM, - .kfunc = 
"bpf", - .expected_attach_type = BPF_LSM_MAC, - .flags = BPF_F_SLEEPABLE, - .errstr = "Unreleased reference", - .fixup_kfunc_btf_id = { - { "bpf_lookup_system_key", 1 }, - }, - .result = REJECT, -}, -{ - "reference tracking: release reference without check", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - /* reference in r0 may be NULL */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=sock_or_null expected=sock", - .result = REJECT, -}, -{ - "reference tracking: release reference to sock_common without check", - .insns = { - BPF_SK_LOOKUP(skc_lookup_tcp), - /* reference in r0 may be NULL */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=sock_common_or_null expected=sock", - .result = REJECT, -}, -{ - "reference tracking: release reference", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: release reference to sock_common", - .insns = { - BPF_SK_LOOKUP(skc_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: release reference 2", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: release reference twice", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=scalar expected=sock", - .result = REJECT, -}, -{ - "reference tracking: release reference twice inside branch", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */ - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=scalar expected=sock", - .result = REJECT, -}, -{ - "reference tracking: alloc, check, free in one subbranch", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16), - /* if (offsetof(skb, mark) > data_len) exit; */ - BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2, - offsetof(struct __sk_buff, mark)), - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? 
*/ - /* Leak reference in R0 */ - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "reference tracking: alloc, check, free in both subbranches", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 16), - /* if (offsetof(skb, mark) > data_len) exit; */ - BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2, - offsetof(struct __sk_buff, mark)), - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "reference tracking in call: free reference in subprog", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */ - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking in call: free reference in subprog and outside", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=scalar expected=sock", - .result = REJECT, -}, -{ - "reference tracking in call: alloc & leak reference in subprog", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_4), - BPF_SK_LOOKUP(sk_lookup_tcp), - /* spill unchecked sk_ptr into stack of caller */ - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking in call: alloc in subprog, release outside", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_EXIT_INSN(), /* 
return sk */ - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .retval = POINTER_VALUE, - .result = ACCEPT, -}, -{ - "reference tracking in call: sk_ptr leak into caller stack", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), - /* spill unchecked sk_ptr into stack of caller */ - BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 2 */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking in call: sk_ptr spill into caller stack", - .insns = { - BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - - /* subprog 1 */ - BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), - /* spill unchecked sk_ptr into stack of caller */ - BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_5, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - /* now the sk_ptr is verified, free the reference */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_4, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - - /* subprog 2 */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: allow LD_ABS", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LD_ABS(BPF_B, 0), - BPF_LD_ABS(BPF_H, 0), - BPF_LD_ABS(BPF_W, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: forbid LD_ABS while holding reference", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_LD_ABS(BPF_B, 0), - BPF_LD_ABS(BPF_H, 0), - BPF_LD_ABS(BPF_W, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references", - .result = REJECT, -}, -{ - "reference tracking: allow LD_IND", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_7, 1), - BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .retval = 1, -}, -{ - "reference tracking: forbid LD_IND while holding reference", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_4, 
BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_7, 1), - BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_4), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "BPF_LD_[ABS|IND] cannot be mixed with socket references", - .result = REJECT, -}, -{ - "reference tracking: check reference or tail call", - .insns = { - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), - BPF_SK_LOOKUP(sk_lookup_tcp), - /* if (sk) bpf_sk_release() */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7), - /* bpf_tail_call() */ - BPF_MOV64_IMM(BPF_REG_3, 3), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 17 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: release reference then tail call", - .insns = { - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), - BPF_SK_LOOKUP(sk_lookup_tcp), - /* if (sk) bpf_sk_release() */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - /* bpf_tail_call() */ - BPF_MOV64_IMM(BPF_REG_3, 3), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 18 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: leak possible reference over tail call", - .insns = { - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), - /* Look up socket and store in REG_6 */ - BPF_SK_LOOKUP(sk_lookup_tcp), - /* bpf_tail_call() */ - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 3), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - /* if (sk) bpf_sk_release() */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 16 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "tail_call would lead to reference leak", - .result = REJECT, -}, -{ - "reference tracking: leak checked reference over tail call", - .insns = { - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), - /* Look up socket and store in REG_6 */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* if (!sk) goto end */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - /* bpf_tail_call() */ - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 17 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "tail_call would lead to reference leak", - .result = REJECT, -}, -{ - "reference tracking: mangle and release sock_or_null", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "R1 
pointer arithmetic on sock_or_null prohibited", - .result = REJECT, -}, -{ - "reference tracking: mangle and release sock", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "R1 pointer arithmetic on sock prohibited", - .result = REJECT, -}, -{ - "reference tracking: access member", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: write to member", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_LD_IMM64(BPF_REG_2, 42), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_2, - offsetof(struct bpf_sock, mark)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LD_IMM64(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "cannot write into sock", - .result = REJECT, -}, -{ - "reference tracking: invalid 64-bit access of member", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "invalid sock access off=0 size=8", - .result = REJECT, -}, -{ - "reference tracking: access after release", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "!read_ok", - .result = REJECT, -}, -{ - "reference tracking: direct access for lookup", - .insns = { - /* Check that the packet is at least 64B long */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 64), - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 9), - /* sk = sk_lookup_tcp(ctx, skb->data, ...) 
*/ - BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_5, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: use ptr from bpf_tcp_sock() after release", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_tcp_sock, snd_cwnd)), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid mem access", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "reference tracking: use ptr from bpf_sk_fullsock() after release", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid mem access", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "reference tracking: use ptr from bpf_sk_fullsock(tp) after release", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid mem access", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "reference tracking: use sk after bpf_sk_release(tp)", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)), - 
BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid mem access", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_listener_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, src_port)), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: bpf_sk_release(listen_sk)", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_listener_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "R1 must be referenced when passed to release function", -}, -{ - /* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */ - "reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_8, offsetof(struct bpf_tcp_sock, snd_cwnd)), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid mem access", -}, -{ - "reference tracking: branch tracking valid pointer null comparison", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "reference tracking: branch tracking valid pointer value comparison", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 1234, 2), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - 
.prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "Unreleased reference", - .result = REJECT, -}, -{ - "reference tracking: bpf_sk_release(btf_tcp_sock)", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "unknown func", -}, -{ - "reference tracking: use ptr from bpf_skc_to_tcp_sock() after release", - .insns = { - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid mem access", - .result_unpriv = REJECT, - .errstr_unpriv = "unknown func", -}, -{ - "reference tracking: try to leak released ptr reg", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), - - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_ringbuf_reserve), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_ringbuf_discard), - BPF_MOV64_IMM(BPF_REG_0, 0), - - BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_8, 0), - BPF_EXIT_INSN() - }, - .fixup_map_array_48b = { 4 }, - .fixup_map_ringbuf = { 11 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R8 !read_ok" -}, -- cgit v1.2.3-70-g09d2 From 16a42573c253ad490a66cd0ca990f07297c0e221 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:26 +0300 Subject: selftests/bpf: verifier/regalloc converted to inline assembly Test verifier/regalloc automatically converted to use inline assembly. 
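
As a rough illustration of the conversion pattern (the test name and the trivial
two-instruction body below are made up for this example and are not taken from the
converted file), a macro-encoded entry and its inline-assembly counterpart look
approximately like this:

	/* Old style: instructions encoded with BPF_* macros in verifier/regalloc.c. */
	{
		"example: return zero",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
	},

	/* New style: the same program as a __naked function in progs/verifier_regalloc.c,
	 * with the expected verdict expressed through bpf_misc.h annotations.
	 */
	SEC("tracepoint")
	__description("example: return zero")
	__success
	__naked void example_return_zero(void)
	{
		asm volatile ("			\
		r0 = 0;				\
		exit;				\
	"	::: __clobber_all);
	}
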
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-17-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_regalloc.c | 364 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/regalloc.c | 277 ---------------- 3 files changed, 366 insertions(+), 277 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_regalloc.c delete mode 100644 tools/testing/selftests/bpf/verifier/regalloc.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 75efbc20fb8e..990efde06624 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -46,6 +46,7 @@ #include "verifier_raw_tp_writable.skel.h" #include "verifier_reg_equal.skel.h" #include "verifier_ref_tracking.skel.h" +#include "verifier_regalloc.skel.h" #include "verifier_ringbuf.skel.h" #include "verifier_spill_fill.skel.h" #include "verifier_stack_ptr.skel.h" @@ -134,6 +135,7 @@ void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); } void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); } void test_verifier_reg_equal(void) { RUN(verifier_reg_equal); } void test_verifier_ref_tracking(void) { RUN(verifier_ref_tracking); } +void test_verifier_regalloc(void) { RUN(verifier_regalloc); } void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } diff --git a/tools/testing/selftests/bpf/progs/verifier_regalloc.c b/tools/testing/selftests/bpf/progs/verifier_regalloc.c new file mode 100644 index 000000000000..ee5ddea87c91 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_regalloc.c @@ -0,0 +1,364 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/regalloc.c */ + +#include +#include +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("tracepoint") +__description("regalloc basic") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_basic(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 20 goto l0_%=; \ + if r2 s< 0 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc negative") +__failure __msg("invalid access to map value, value_size=48 off=48 size=1") +__naked void regalloc_negative(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 24 goto l0_%=; \ + if r2 s< 0 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r0 = *(u8*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + 
__imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc src_reg mark") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_src_reg_mark(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 20 goto l0_%=; \ + r3 = 0; \ + if r3 s>= r2 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc src_reg negative") +__failure __msg("invalid access to map value, value_size=48 off=44 size=8") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_src_reg_negative(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 22 goto l0_%=; \ + r3 = 0; \ + if r3 s>= r2 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc and spill") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_and_spill(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 20 goto l0_%=; \ + /* r0 has upper bound that should propagate into r2 */\ + *(u64*)(r10 - 8) = r2; /* spill r2 */ \ + r0 = 0; \ + r2 = 0; /* clear r0 and r2 */\ + r3 = *(u64*)(r10 - 8); /* fill r3 */ \ + if r0 s>= r3 goto l0_%=; \ + /* r3 has lower and upper bounds */ \ + r7 += r3; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc and spill negative") +__failure __msg("invalid access to map value, value_size=48 off=48 size=8") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_and_spill_negative(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r0 s> 48 goto l0_%=; \ + /* r0 has upper bound that should propagate into r2 */\ + *(u64*)(r10 - 8) = r2; /* spill r2 */ \ + r0 = 0; \ + r2 = 0; /* clear r0 and r2 */\ + r3 = *(u64*)(r10 - 8); /* fill r3 */\ + if r0 s>= r3 goto l0_%=; \ + /* r3 has lower and upper bounds */ \ + r7 += r3; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc three regs") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_three_regs(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + 
r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + r4 = r2; \ + if r0 s> 12 goto l0_%=; \ + if r2 s< 0 goto l0_%=; \ + r7 += r0; \ + r7 += r2; \ + r7 += r4; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("tracepoint") +__description("regalloc after call") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_after_call(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r8 = r0; \ + r9 = r0; \ + call regalloc_after_call__1; \ + if r8 s> 20 goto l0_%=; \ + if r9 s< 0 goto l0_%=; \ + r7 += r8; \ + r7 += r9; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void regalloc_after_call__1(void) +{ + asm volatile (" \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("regalloc in callee") +__success __flag(BPF_F_ANY_ALIGNMENT) +__naked void regalloc_in_callee(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r7 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r2 = r0; \ + r3 = r7; \ + call regalloc_in_callee__1; \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void regalloc_in_callee__1(void) +{ + asm volatile (" \ + if r1 s> 20 goto l0_%=; \ + if r2 s< 0 goto l0_%=; \ + r3 += r1; \ + r3 += r2; \ + r0 = *(u64*)(r3 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("regalloc, spill, JEQ") +__success +__naked void regalloc_spill_jeq(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + *(u64*)(r10 - 8) = r0; /* spill r0 */ \ + if r0 == 0 goto l0_%=; \ +l0_%=: /* The verifier will walk the rest twice with r0 == 0 and r0 == map_value */\ + call %[bpf_get_prandom_u32]; \ + r2 = r0; \ + if r2 == 20 goto l1_%=; \ +l1_%=: /* The verifier will walk the rest two more times with r0 == 20 and r0 == unknown */\ + r3 = *(u64*)(r10 - 8); /* fill r3 with map_value */\ + if r3 == 0 goto l2_%=; /* skip ldx if map_value == NULL */\ + /* Buggy verifier will think that r3 == 20 here */\ + r0 = *(u64*)(r3 + 0); /* read from map_value */\ +l2_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/regalloc.c b/tools/testing/selftests/bpf/verifier/regalloc.c deleted file mode 100644 index bb0dd89dd212..000000000000 --- a/tools/testing/selftests/bpf/verifier/regalloc.c +++ /dev/null @@ -1,277 +0,0 @@ -{ - "regalloc basic", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - 
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 20, 4), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc negative", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 24, 4), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=48 off=48 size=1", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "regalloc src_reg mark", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 20, 5), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_JMP_REG(BPF_JSGE, BPF_REG_3, BPF_REG_2, 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc src_reg negative", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 22, 5), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_JMP_REG(BPF_JSGE, BPF_REG_3, BPF_REG_2, 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=48 off=44 size=8", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc and spill", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), 
- BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 20, 7), - /* r0 has upper bound that should propagate into r2 */ - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), /* spill r2 */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_2, 0), /* clear r0 and r2 */ - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -8), /* fill r3 */ - BPF_JMP_REG(BPF_JSGE, BPF_REG_0, BPF_REG_3, 2), - /* r3 has lower and upper bounds */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_3), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc and spill negative", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 48, 7), - /* r0 has upper bound that should propagate into r2 */ - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), /* spill r2 */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_2, 0), /* clear r0 and r2 */ - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -8), /* fill r3 */ - BPF_JMP_REG(BPF_JSGE, BPF_REG_0, BPF_REG_3, 2), - /* r3 has lower and upper bounds */ - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_3), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = REJECT, - .errstr = "invalid access to map value, value_size=48 off=48 size=8", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc three regs", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 12, 5), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_4), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc after call", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 20, 4), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_9, 0, 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_8), - 
BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_9), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc in callee", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), - BPF_EXIT_INSN(), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 20, 5), - BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "regalloc, spill, JEQ", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), /* spill r0 */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 0), - /* The verifier will walk the rest twice with r0 == 0 and r0 == map_value */ - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 20, 0), - /* The verifier will walk the rest two more times with r0 == 20 and r0 == unknown */ - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -8), /* fill r3 with map_value */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1), /* skip ldx if map_value == NULL */ - /* Buggy verifier will think that r3 == 20 here */ - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), /* read from map_value */ - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 4 }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -- cgit v1.2.3-70-g09d2 From 65222842ca04be5df505592f9f845acce0463f8b Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:27 +0300 Subject: selftests/bpf: verifier/runtime_jit converted to inline assembly Test verifier/runtime_jit automatically converted to use inline assembly. 
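For readers following the series, the mechanical shape of the conversion is the same as for the other verifier suites: each entry of the old struct-based test array (a list of BPF_* instruction macros plus .result/.retval/.errstr expectation fields) becomes a __naked C function whose body is BPF inline assembly, with the expectations expressed through the __description/__success/__failure/__retval annotations from bpf_misc.h. A minimal sketch of the mapping is below; the test name and function name are hypothetical and not part of this patch.

	/* Old style: macro-encoded instructions plus expectation fields
	 * (hypothetical example for illustration, not taken from this patch).
	 */
	{
		"example: return constant",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 42),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
		.retval = 42,
	},

	/* New style: the same test as a __naked function with inline asm. */
	SEC("socket")
	__description("example: return constant")
	__success __success_unpriv __retval(42)
	__naked void example_return_constant(void)
	{
		asm volatile ("r0 = 42; exit;");
	}

Unprivileged expectations map the same way: .result_unpriv = REJECT together with .errstr_unpriv becomes __failure_unpriv __msg_unpriv("...") on the new-style function.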
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-18-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_runtime_jit.c | 360 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/runtime_jit.c | 231 ------------- 3 files changed, 362 insertions(+), 231 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_runtime_jit.c delete mode 100644 tools/testing/selftests/bpf/verifier/runtime_jit.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 990efde06624..2c1e07b59e8a 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -48,6 +48,7 @@ #include "verifier_ref_tracking.skel.h" #include "verifier_regalloc.skel.h" #include "verifier_ringbuf.skel.h" +#include "verifier_runtime_jit.skel.h" #include "verifier_spill_fill.skel.h" #include "verifier_stack_ptr.skel.h" #include "verifier_uninit.skel.h" @@ -137,6 +138,7 @@ void test_verifier_reg_equal(void) { RUN(verifier_reg_equal); } void test_verifier_ref_tracking(void) { RUN(verifier_ref_tracking); } void test_verifier_regalloc(void) { RUN(verifier_regalloc); } void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } +void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } void test_verifier_uninit(void) { RUN(verifier_uninit); } diff --git a/tools/testing/selftests/bpf/progs/verifier_runtime_jit.c b/tools/testing/selftests/bpf/progs/verifier_runtime_jit.c new file mode 100644 index 000000000000..27ebfc1fd9ee --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_runtime_jit.c @@ -0,0 +1,360 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/runtime_jit.c */ + +#include +#include +#include "bpf_misc.h" + +void dummy_prog_42_socket(void); +void dummy_prog_24_socket(void); +void dummy_prog_loop1_socket(void); +void dummy_prog_loop2_socket(void); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 4); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog1_socket SEC(".maps") = { + .values = { + [0] = (void *)&dummy_prog_42_socket, + [1] = (void *)&dummy_prog_loop1_socket, + [2] = (void *)&dummy_prog_24_socket, + }, +}; + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 8); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog2_socket SEC(".maps") = { + .values = { + [1] = (void *)&dummy_prog_loop2_socket, + [2] = (void *)&dummy_prog_24_socket, + [7] = (void *)&dummy_prog_42_socket, + }, +}; + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_42_socket(void) +{ + asm volatile ("r0 = 42; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_24_socket(void) +{ + asm volatile ("r0 = 24; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop1_socket(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop2_socket(void) +{ + asm volatile (" \ + 
r3 = 1; \ + r2 = %[map_prog2_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog2_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, prog once") +__success __success_unpriv __retval(42) +__naked void call_within_bounds_prog_once(void) +{ + asm volatile (" \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, prog loop") +__success __success_unpriv __retval(41) +__naked void call_within_bounds_prog_loop(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, no prog") +__success __success_unpriv __retval(1) +__naked void call_within_bounds_no_prog(void) +{ + asm volatile (" \ + r3 = 3; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 2") +__success __success_unpriv __retval(24) +__naked void call_within_bounds_key_2(void) +{ + asm volatile (" \ + r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 2 / key 2, first branch") +__success __success_unpriv __retval(24) +__naked void _2_key_2_first_branch(void) +{ + asm volatile (" \ + r0 = 13; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 2 / key 2, second branch") +__success __success_unpriv __retval(24) +__naked void _2_key_2_second_branch(void) +{ + asm volatile (" \ + r0 = 14; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 0 / key 2, first branch") +__success __success_unpriv __retval(24) +__naked void _0_key_2_first_branch(void) +{ + asm volatile (" \ + r0 = 13; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_const(__sk_buff_cb_0, 
offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, key 0 / key 2, second branch") +__success __success_unpriv __retval(42) +__naked void _0_key_2_second_branch(void) +{ + asm volatile (" \ + r0 = 14; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 2; \ + r2 = %[map_prog1_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, different maps, first branch") +__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr") +__retval(1) +__naked void bounds_different_maps_first_branch(void) +{ + asm volatile (" \ + r0 = 13; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 0; \ + r2 = %[map_prog2_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_addr(map_prog2_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call within bounds, different maps, second branch") +__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr") +__retval(42) +__naked void bounds_different_maps_second_branch(void) +{ + asm volatile (" \ + r0 = 14; \ + *(u8*)(r1 + %[__sk_buff_cb_0]) = r0; \ + r0 = *(u8*)(r1 + %[__sk_buff_cb_0]); \ + if r0 == 13 goto l0_%=; \ + r3 = 0; \ + r2 = %[map_prog1_socket] ll; \ + goto l1_%=; \ +l0_%=: r3 = 0; \ + r2 = %[map_prog2_socket] ll; \ +l1_%=: call %[bpf_tail_call]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket), + __imm_addr(map_prog2_socket), + __imm_const(__sk_buff_cb_0, offsetof(struct __sk_buff, cb[0])) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: tail_call out of bounds") +__success __success_unpriv __retval(2) +__naked void tail_call_out_of_bounds(void) +{ + asm volatile (" \ + r3 = 256; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 2; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: pass negative index to tail_call") +__success __success_unpriv __retval(2) +__naked void negative_index_to_tail_call(void) +{ + asm volatile (" \ + r3 = -1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 2; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("runtime/jit: pass > 32bit index to tail_call") +__success __success_unpriv __retval(42) +/* Verifier rewrite for unpriv skips tail call here. 
*/ +__retval_unpriv(2) +__naked void _32bit_index_to_tail_call(void) +{ + asm volatile (" \ + r3 = 0x100000000 ll; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 2; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/runtime_jit.c b/tools/testing/selftests/bpf/verifier/runtime_jit.c deleted file mode 100644 index 94c399d1faca..000000000000 --- a/tools/testing/selftests/bpf/verifier/runtime_jit.c +++ /dev/null @@ -1,231 +0,0 @@ -{ - "runtime/jit: tail_call within bounds, prog once", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 42, -}, -{ - "runtime/jit: tail_call within bounds, prog loop", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 41, -}, -{ - "runtime/jit: tail_call within bounds, no prog", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 3), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "runtime/jit: tail_call within bounds, key 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 24, -}, -{ - "runtime/jit: tail_call within bounds, key 2 / key 2, first branch", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 13), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5, 9 }, - .result = ACCEPT, - .retval = 24, -}, -{ - "runtime/jit: tail_call within bounds, key 2 / key 2, second branch", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 14), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5, 9 }, - .result = ACCEPT, - .retval = 24, -}, -{ - "runtime/jit: tail_call within bounds, key 0 / key 2, first branch", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 13), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - 
BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5, 9 }, - .result = ACCEPT, - .retval = 24, -}, -{ - "runtime/jit: tail_call within bounds, key 0 / key 2, second branch", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 14), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 2), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5, 9 }, - .result = ACCEPT, - .retval = 42, -}, -{ - "runtime/jit: tail_call within bounds, different maps, first branch", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 13), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5 }, - .fixup_prog2 = { 9 }, - .result_unpriv = REJECT, - .errstr_unpriv = "tail_call abusing map_ptr", - .result = ACCEPT, - .retval = 1, -}, -{ - "runtime/jit: tail_call within bounds, different maps, second branch", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 14), - BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, - offsetof(struct __sk_buff, cb[0])), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, cb[0])), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 5 }, - .fixup_prog2 = { 9 }, - .result_unpriv = REJECT, - .errstr_unpriv = "tail_call abusing map_ptr", - .result = ACCEPT, - .retval = 42, -}, -{ - "runtime/jit: tail_call out of bounds", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 256), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 2, -}, -{ - "runtime/jit: pass negative index to tail_call", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, -1), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .result = ACCEPT, - .retval = 2, -}, -{ - "runtime/jit: pass > 32bit index to tail_call", - .insns = { - BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 2 }, - .result = ACCEPT, - .retval = 42, - /* Verifier rewrite for unpriv skips tail call here. 
*/ - .retval_unpriv = 2, -}, -- cgit v1.2.3-70-g09d2 From 034d9ad25db3569bee306850c6a276bca0d1e12d Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:28 +0300 Subject: selftests/bpf: verifier/search_pruning converted to inline assembly Test verifier/search_pruning automatically converted to use inline assembly. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-19-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_search_pruning.c | 339 +++++++++++++++++++++ .../selftests/bpf/verifier/search_pruning.c | 266 ---------------- 3 files changed, 341 insertions(+), 266 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_search_pruning.c delete mode 100644 tools/testing/selftests/bpf/verifier/search_pruning.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 2c1e07b59e8a..fd4a6e7b41de 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -49,6 +49,7 @@ #include "verifier_regalloc.skel.h" #include "verifier_ringbuf.skel.h" #include "verifier_runtime_jit.skel.h" +#include "verifier_search_pruning.skel.h" #include "verifier_spill_fill.skel.h" #include "verifier_stack_ptr.skel.h" #include "verifier_uninit.skel.h" @@ -139,6 +140,7 @@ void test_verifier_ref_tracking(void) { RUN(verifier_ref_tracking); } void test_verifier_regalloc(void) { RUN(verifier_regalloc); } void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); } +void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } void test_verifier_uninit(void) { RUN(verifier_uninit); } diff --git a/tools/testing/selftests/bpf/progs/verifier_search_pruning.c b/tools/testing/selftests/bpf/progs/verifier_search_pruning.c new file mode 100644 index 000000000000..5a14498d352f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_search_pruning.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/search_pruning.c */ + +#include +#include +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +SEC("socket") +__description("pointer/scalar confusion in state equality check (way 1)") +__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value") +__retval(POINTER_VALUE) +__naked void state_equality_check_way_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 = *(u64*)(r0 + 0); \ + goto l1_%=; \ +l0_%=: r0 = r10; \ +l1_%=: goto l2_%=; \ +l2_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("pointer/scalar confusion in state equality check 
(way 2)") +__success __failure_unpriv __msg_unpriv("R0 leaks addr as return value") +__retval(POINTER_VALUE) +__naked void state_equality_check_way_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + r0 = r10; \ + goto l1_%=; \ +l0_%=: r0 = *(u64*)(r0 + 0); \ +l1_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("lwt_in") +__description("liveness pruning and write screening") +__failure __msg("R0 !read_ok") +__naked void liveness_pruning_and_write_screening(void) +{ + asm volatile (" \ + /* Get an unknown value */ \ + r2 = *(u32*)(r1 + 0); \ + /* branch conditions teach us nothing about R2 */\ + if r2 >= 0 goto l0_%=; \ + r0 = 0; \ +l0_%=: if r2 >= 0 goto l1_%=; \ + r0 = 0; \ +l1_%=: exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("varlen_map_value_access pruning") +__failure __msg("R0 unbounded memory access") +__failure_unpriv __msg_unpriv("R0 leaks addr") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void varlen_map_value_access_pruning(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r0 + 0); \ + w2 = %[max_entries]; \ + if r2 s> r1 goto l1_%=; \ + w1 = 0; \ +l1_%=: w1 <<= 2; \ + r0 += r1; \ + goto l2_%=; \ +l2_%=: r1 = %[test_val_foo]; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b), + __imm_const(max_entries, MAX_ENTRIES), + __imm_const(test_val_foo, offsetof(struct test_val, foo)) + : __clobber_all); +} + +SEC("tracepoint") +__description("search pruning: all branches should be verified (nop operation)") +__failure __msg("R6 invalid mem access 'scalar'") +__naked void should_be_verified_nop_operation(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r3 = *(u64*)(r0 + 0); \ + if r3 == 0xbeef goto l1_%=; \ + r4 = 0; \ + goto l2_%=; \ +l1_%=: r4 = 1; \ +l2_%=: *(u64*)(r10 - 16) = r4; \ + call %[bpf_ktime_get_ns]; \ + r5 = *(u64*)(r10 - 16); \ + if r5 == 0 goto l0_%=; \ + r6 = 0; \ + r1 = 0xdead; \ + *(u64*)(r6 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("search pruning: all branches should be verified (invalid stack access)") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid read from stack off -16+0 size 8") +__retval(0) +__naked void be_verified_invalid_stack_access(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r3 = *(u64*)(r0 + 0); \ + r4 = 0; \ + if r3 == 0xbeef goto l1_%=; \ + *(u64*)(r10 - 16) = r4; \ + goto l2_%=; \ +l1_%=: *(u64*)(r10 - 24) = r4; \ +l2_%=: call %[bpf_ktime_get_ns]; \ + r5 = *(u64*)(r10 - 16); \ +l0_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tracepoint") +__description("precision tracking for u32 spill/fill") +__failure __msg("R0 min value is outside of the allowed memory range") +__naked void 
tracking_for_u32_spill_fill(void) +{ + asm volatile (" \ + r7 = r1; \ + call %[bpf_get_prandom_u32]; \ + w6 = 32; \ + if r0 == 0 goto l0_%=; \ + w6 = 4; \ +l0_%=: /* Additional insns to introduce a pruning point. */\ + call %[bpf_get_prandom_u32]; \ + r3 = 0; \ + r3 = 0; \ + if r0 == 0 goto l1_%=; \ + r3 = 0; \ +l1_%=: /* u32 spill/fill */ \ + *(u32*)(r10 - 8) = r6; \ + r8 = *(u32*)(r10 - 8); \ + /* out-of-bound map value access for r6=32 */ \ + r1 = 0; \ + *(u64*)(r10 - 16) = r1; \ + r2 = r10; \ + r2 += -16; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r0 += r8; \ + r1 = *(u32*)(r0 + 0); \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("tracepoint") +__description("precision tracking for u32 spills, u64 fill") +__failure __msg("div by zero") +__naked void for_u32_spills_u64_fill(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + w7 = 0xffffffff; \ + /* Additional insns to introduce a pruning point. */\ + r3 = 1; \ + r3 = 1; \ + r3 = 1; \ + r3 = 1; \ + call %[bpf_get_prandom_u32]; \ + if r0 == 0 goto l0_%=; \ + r3 = 1; \ +l0_%=: w3 /= 0; \ + /* u32 spills, u64 fill */ \ + *(u32*)(r10 - 4) = r6; \ + *(u32*)(r10 - 8) = r7; \ + r8 = *(u64*)(r10 - 8); \ + /* if r8 != X goto pc+1 r8 known in fallthrough branch */\ + if r8 != 0xffffffff goto l1_%=; \ + r3 = 1; \ +l1_%=: /* if r8 == X goto pc+1 condition always true on first\ + * traversal, so starts backtracking to mark r8 as requiring\ + * precision. r7 marked as needing precision. r6 not marked\ + * since it's not tracked. \ + */ \ + if r8 == 0xffffffff goto l2_%=; \ + /* fails if r8 correctly marked unknown after fill. */\ + w3 /= 0; \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("allocated_stack") +__success __msg("processed 15 insns") +__success_unpriv __msg_unpriv("") __log_level(1) __retval(0) +__naked void allocated_stack(void) +{ + asm volatile (" \ + r6 = r1; \ + call %[bpf_get_prandom_u32]; \ + r7 = r0; \ + if r0 == 0 goto l0_%=; \ + r0 = 0; \ + *(u64*)(r10 - 8) = r6; \ + r6 = *(u64*)(r10 - 8); \ + *(u8*)(r10 - 9) = r7; \ + r7 = *(u8*)(r10 - 9); \ +l0_%=: if r0 != 0 goto l1_%=; \ +l1_%=: if r0 != 0 goto l2_%=; \ +l2_%=: if r0 != 0 goto l3_%=; \ +l3_%=: if r0 != 0 goto l4_%=; \ +l4_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +/* The test performs a conditional 64-bit write to a stack location + * fp[-8], this is followed by an unconditional 8-bit write to fp[-8], + * then data is read from fp[-8]. This sequence is unsafe. + * + * The test would be mistakenly marked as safe w/o dst register parent + * preservation in verifier.c:copy_register_state() function. + * + * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the + * checkpoint state after conditional 64-bit assignment. 
+ */ + +SEC("socket") +__description("write tracking and register parent chain bug") +/* in privileged mode reads from uninitialized stack locations are permitted */ +__success __failure_unpriv +__msg_unpriv("invalid read from stack off -8+1 size 8") +__retval(0) __flag(BPF_F_TEST_STATE_FREQ) +__naked void and_register_parent_chain_bug(void) +{ + asm volatile (" \ + /* r6 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r6 = r0; \ + /* r0 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + /* if r0 > r6 goto +1 */ \ + if r0 > r6 goto l0_%=; \ + /* *(u64 *)(r10 - 8) = 0xdeadbeef */ \ + r0 = 0xdeadbeef; \ + *(u64*)(r10 - 8) = r0; \ +l0_%=: r1 = 42; \ + *(u8*)(r10 - 8) = r1; \ + r2 = *(u64*)(r10 - 8); \ + /* exit(0) */ \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c deleted file mode 100644 index 745d6b5842fd..000000000000 --- a/tools/testing/selftests/bpf/verifier/search_pruning.c +++ /dev/null @@ -1,266 +0,0 @@ -{ - "pointer/scalar confusion in state equality check (way 1)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_JMP_A(1), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), - BPF_JMP_A(0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .retval = POINTER_VALUE, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 leaks addr as return value" -}, -{ - "pointer/scalar confusion in state equality check (way 2)", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), - BPF_JMP_A(1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .result = ACCEPT, - .retval = POINTER_VALUE, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 leaks addr as return value" -}, -{ - "liveness pruning and write screening", - .insns = { - /* Get an unknown value */ - BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), - /* branch conditions teach us nothing about R2 */ - BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "R0 !read_ok", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_LWT_IN, -}, -{ - "varlen_map_value_access pruning", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), - BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), - BPF_MOV32_IMM(BPF_REG_1, 0), - BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_JMP_IMM(BPF_JA, 0, 0, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)), - BPF_EXIT_INSN(), - }, - 
.fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .errstr = "R0 unbounded memory access", - .result_unpriv = REJECT, - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "search pruning: all branches should be verified (nop operation)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_A(1), - BPF_MOV64_IMM(BPF_REG_4, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16), - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2), - BPF_MOV64_IMM(BPF_REG_6, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr = "R6 invalid mem access 'scalar'", - .result = REJECT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "search pruning: all branches should be verified (invalid stack access)", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16), - BPF_JMP_A(1), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24), - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr_unpriv = "invalid read from stack off -16+0 size 8", - .result_unpriv = REJECT, - /* in privileged mode reads from uninitialized stack locations are permitted */ - .result = ACCEPT, -}, -{ - "precision tracking for u32 spill/fill", - .insns = { - BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV32_IMM(BPF_REG_6, 32), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_MOV32_IMM(BPF_REG_6, 4), - /* Additional insns to introduce a pruning point. */ - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_MOV64_IMM(BPF_REG_3, 0), - /* u32 spill/fill */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_10, -8), - /* out-of-bound map value access for r6=32 */ - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 15 }, - .result = REJECT, - .errstr = "R0 min value is outside of the allowed memory range", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "precision tracking for u32 spills, u64 fill", - .insns = { - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV32_IMM(BPF_REG_7, 0xffffffff), - /* Additional insns to introduce a pruning point. 
*/ - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0), - /* u32 spills, u64 fill */ - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4), - BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, -8), - /* if r8 != X goto pc+1 r8 known in fallthrough branch */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0xffffffff, 1), - BPF_MOV64_IMM(BPF_REG_3, 1), - /* if r8 == X goto pc+1 condition always true on first - * traversal, so starts backtracking to mark r8 as requiring - * precision. r7 marked as needing precision. r6 not marked - * since it's not tracked. - */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0xffffffff, 1), - /* fails if r8 correctly marked unknown after fill. */ - BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "div by zero", - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "allocated_stack", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8), - BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9), - BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = ACCEPT, - .insn_processed = 15, -}, -/* The test performs a conditional 64-bit write to a stack location - * fp[-8], this is followed by an unconditional 8-bit write to fp[-8], - * then data is read from fp[-8]. This sequence is unsafe. - * - * The test would be mistakenly marked as safe w/o dst register parent - * preservation in verifier.c:copy_register_state() function. - * - * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the - * checkpoint state after conditional 64-bit assignment. - */ -{ - "write tracking and register parent chain bug", - .insns = { - /* r6 = ktime_get_ns() */ - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* r0 = ktime_get_ns() */ - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - /* if r0 > r6 goto +1 */ - BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1), - /* *(u64 *)(r10 - 8) = 0xdeadbeef */ - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef), - /* r1 = 42 */ - BPF_MOV64_IMM(BPF_REG_1, 42), - /* *(u8 *)(r10 - 8) = r1 */ - BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8), - /* r2 = *(u64 *)(r10 - 8) */ - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8), - /* exit(0) */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .flags = BPF_F_TEST_STATE_FREQ, - .errstr_unpriv = "invalid read from stack off -8+1 size 8", - .result_unpriv = REJECT, - /* in privileged mode reads from uninitialized stack locations are permitted */ - .result = ACCEPT, -}, -- cgit v1.2.3-70-g09d2 From 426fc0e3fce272c92e804b2d844a41641f9b888e Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:29 +0300 Subject: selftests/bpf: verifier/sock converted to inline assembly Test verifier/sock automatically converted to use inline assembly. 
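One detail of the conversion worth calling out here: offsets that the macro form computed inline with offsetof() (for example into struct __sk_buff or struct bpf_sock) are passed into the asm template as named __imm_const operands, so the C-level offsetof() expressions are preserved and referenced as %[...] inside the assembly. A minimal sketch of that pattern follows; the test and function names are hypothetical and not part of this patch.

	/* Hypothetical example for illustration, not taken from this patch:
	 * load skb->sk through a named offset operand, NULL-check it and
	 * return 0.
	 */
	SEC("cgroup/skb")
	__description("example: read skb->sk via __imm_const offset")
	__success __success_unpriv __retval(0)
	__naked void example_read_skb_sk(void)
	{
		asm volatile ("					\
		r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
		if r1 != 0 goto l0_%=;				\
		r0 = 0;						\
		exit;						\
	l0_%=:	r0 = 0;						\
		exit;						\
	"	:
		: __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
		: __clobber_all);
	}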
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-20-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + tools/testing/selftests/bpf/progs/verifier_sock.c | 980 ++++++++++++++++++++++ tools/testing/selftests/bpf/verifier/sock.c | 706 ---------------- 3 files changed, 982 insertions(+), 706 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_sock.c delete mode 100644 tools/testing/selftests/bpf/verifier/sock.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index fd4a6e7b41de..906f1c532cb9 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -50,6 +50,7 @@ #include "verifier_ringbuf.skel.h" #include "verifier_runtime_jit.skel.h" #include "verifier_search_pruning.skel.h" +#include "verifier_sock.skel.h" #include "verifier_spill_fill.skel.h" #include "verifier_stack_ptr.skel.h" #include "verifier_uninit.skel.h" @@ -141,6 +142,7 @@ void test_verifier_regalloc(void) { RUN(verifier_regalloc); } void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); } void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); } +void test_verifier_sock(void) { RUN(verifier_sock); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } void test_verifier_uninit(void) { RUN(verifier_uninit); } diff --git a/tools/testing/selftests/bpf/progs/verifier_sock.c b/tools/testing/selftests/bpf/progs/verifier_sock.c new file mode 100644 index 000000000000..ee76b51005ab --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_sock.c @@ -0,0 +1,980 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/sock.c */ + +#include +#include +#include "bpf_misc.h" + +#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) +#define offsetofend(TYPE, MEMBER) \ + (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) + +struct { + __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} map_reuseport_array SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKHASH); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} map_sockhash SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} map_sockmap SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_XSKMAP); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} map_xskmap SEC(".maps"); + +struct val { + int cnt; + struct bpf_spin_lock l; +}; + +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(max_entries, 0); + __type(key, int); + __type(value, struct val); + __uint(map_flags, BPF_F_NO_PREALLOC); +} sk_storage_map SEC(".maps"); + +SEC("cgroup/skb") +__description("skb->sk: no NULL check") +__failure __msg("invalid mem access 'sock_common_or_null'") +__failure_unpriv +__naked void skb_sk_no_null_check(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + r0 = *(u32*)(r1 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("skb->sk: sk->family [non fullsock field]") +__success 
__success_unpriv __retval(0) +__naked void sk_family_non_fullsock_field_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_family]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("skb->sk: sk->type [fullsock field]") +__failure __msg("invalid sock_common access") +__failure_unpriv +__naked void sk_sk_type_fullsock_field_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_type]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_sk_fullsock(skb->sk): no !skb->sk check") +__failure __msg("type=sock_common_or_null expected=sock_common") +__failure_unpriv +__naked void sk_no_skb_sk_check_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + call %[bpf_sk_fullsock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): no NULL check on ret") +__failure __msg("invalid mem access 'sock_or_null'") +__failure_unpriv +__naked void no_null_check_on_ret_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + r0 = *(u32*)(r0 + %[bpf_sock_type]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->type [fullsock field]") +__success __success_unpriv __retval(0) +__naked void sk_sk_type_fullsock_field_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_type]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->family [non fullsock field]") +__success __success_unpriv __retval(0) +__naked void sk_family_non_fullsock_field_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_family]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->state [narrow load]") +__success __success_unpriv __retval(0) +__naked void sk_sk_state_narrow_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 
= 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_state]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)") +__success __success_unpriv __retval(0) +__naked void port_word_load_backward_compatibility(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_dst_port]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [half load]") +__success __success_unpriv __retval(0) +__naked void sk_dst_port_half_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)") +__failure __msg("invalid sock access") +__failure_unpriv +__naked void dst_port_half_load_invalid_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u16*)(r0 + %[__imm_0]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [byte load]") +__success __success_unpriv __retval(0) +__naked void sk_dst_port_byte_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r2 = *(u8*)(r0 + %[bpf_sock_dst_port]); \ + r2 = *(u8*)(r0 + %[__imm_0]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)") +__failure __msg("invalid sock access") +__failure_unpriv +__naked void dst_port_byte_load_invalid(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : 
__clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)") +__failure __msg("invalid sock access") +__failure_unpriv +__naked void dst_port_half_load_invalid_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]") +__success __success_unpriv __retval(0) +__naked void dst_ip6_load_2nd_byte(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[__imm_0]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->type [narrow load]") +__success __success_unpriv __retval(0) +__naked void sk_sk_type_narrow_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_type]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): sk->protocol [narrow load]") +__success __success_unpriv __retval(0) +__naked void sk_sk_protocol_narrow_load(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r0 + %[bpf_sock_protocol]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("sk_fullsock(skb->sk): beyond last field") +__failure __msg("invalid sock access") +__failure_unpriv +__naked void skb_sk_beyond_last_field_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]);\ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(skb->sk): no !skb->sk check") +__failure __msg("type=sock_common_or_null expected=sock_common") +__failure_unpriv +__naked void sk_no_skb_sk_check_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + call %[bpf_tcp_sock]; \ + r0 = 0; \ + exit; \ +" : + : 
__imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(skb->sk): no NULL check on ret") +__failure __msg("invalid mem access 'tcp_sock_or_null'") +__failure_unpriv +__naked void no_null_check_on_ret_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(skb->sk): tp->snd_cwnd") +__success __success_unpriv __retval(0) +__naked void skb_sk_tp_snd_cwnd_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(skb->sk): tp->bytes_acked") +__success __success_unpriv __retval(0) +__naked void skb_sk_tp_bytes_acked(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(skb->sk): beyond last field") +__failure __msg("invalid tcp_sock access") +__failure_unpriv +__naked void skb_sk_beyond_last_field_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]);\ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked)) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd") +__success __success_unpriv __retval(0) +__naked void skb_sk_tp_snd_cwnd_2(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_tcp_sock]; \ + if r0 != 0 goto l2_%=; \ + exit; \ +l2_%=: r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)), + __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)) + : __clobber_all); +} + +SEC("tc") +__description("bpf_sk_release(skb->sk)") +__failure __msg("R1 must be referenced when passed to release function") +__naked void bpf_sk_release_skb_sk(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + 
%[__sk_buff_sk]); \ + if r1 == 0 goto l0_%=; \ + call %[bpf_sk_release]; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_release), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("bpf_sk_release(bpf_sk_fullsock(skb->sk))") +__failure __msg("R1 must be referenced when passed to release function") +__naked void bpf_sk_fullsock_skb_sk(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_release), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("bpf_sk_release(bpf_tcp_sock(skb->sk))") +__failure __msg("R1 must be referenced when passed to release function") +__naked void bpf_tcp_sock_skb_sk(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_tcp_sock]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r1 = r0; \ + call %[bpf_sk_release]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_sk_release), + __imm(bpf_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL") +__success __retval(0) +__naked void sk_null_0_value_null(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r4 = 0; \ + r3 = 0; \ + r2 = r0; \ + r1 = %[sk_storage_map] ll; \ + call %[bpf_sk_storage_get]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_storage_get), + __imm_addr(sk_storage_map), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("sk_storage_get(map, skb->sk, 1, 1): value == 1") +__failure __msg("R3 type=scalar expected=fp") +__naked void sk_1_1_value_1(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r4 = 1; \ + r3 = 1; \ + r2 = r0; \ + r1 = %[sk_storage_map] ll; \ + call %[bpf_sk_storage_get]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_storage_get), + __imm_addr(sk_storage_map), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value") +__success __retval(0) +__naked void stack_value_1_stack_value(void) +{ + asm volatile (" \ + r2 = 0; \ + *(u64*)(r10 - 8) = r2; \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: call %[bpf_sk_fullsock]; \ + if r0 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r4 = 1; \ + r3 = r10; \ + r3 += -8; \ + r2 = r0; \ + r1 = %[sk_storage_map] ll; \ + call %[bpf_sk_storage_get]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_fullsock), + __imm(bpf_sk_storage_get), + __imm_addr(sk_storage_map), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +SEC("tc") +__description("bpf_map_lookup_elem(smap, &key)") +__failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem") +__naked void 
map_lookup_elem_smap_key(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[sk_storage_map] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(sk_storage_map) + : __clobber_all); +} + +SEC("xdp") +__description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id") +__success __retval(0) +__naked void xskmap_key_xs_queue_id(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_xskmap] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_xskmap), + __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id)) + : __clobber_all); +} + +SEC("sk_skb") +__description("bpf_map_lookup_elem(sockmap, &key)") +__failure __msg("Unreleased reference id=2 alloc_insn=6") +__naked void map_lookup_elem_sockmap_key(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_sockmap] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_sockmap) + : __clobber_all); +} + +SEC("sk_skb") +__description("bpf_map_lookup_elem(sockhash, &key)") +__failure __msg("Unreleased reference id=2 alloc_insn=6") +__naked void map_lookup_elem_sockhash_key(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_sockhash] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_sockhash) + : __clobber_all); +} + +SEC("sk_skb") +__description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)") +__success +__naked void field_bpf_sk_release_sk_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_sockmap] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = r0; \ + r0 = *(u32*)(r0 + %[bpf_sock_type]); \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_sk_release), + __imm_addr(map_sockmap), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("sk_skb") +__description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)") +__success +__naked void field_bpf_sk_release_sk_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_sockhash] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = r0; \ + r0 = *(u32*)(r0 + %[bpf_sock_type]); \ + call %[bpf_sk_release]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_sk_release), + __imm_addr(map_sockhash), + __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)) + : __clobber_all); +} + +SEC("sk_reuseport") +__description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)") +__success +__naked void ctx_reuseport_array_key_flags(void) +{ + asm volatile (" \ + r4 = 0; \ + r2 = 0; \ + *(u32*)(r10 - 4) = r2; \ + r3 = r10; \ + r3 += -4; \ + r2 = %[map_reuseport_array] ll; \ + call %[bpf_sk_select_reuseport]; \ + exit; \ +" : + : __imm(bpf_sk_select_reuseport), + __imm_addr(map_reuseport_array) + : __clobber_all); +} + +SEC("sk_reuseport") +__description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)") 
+__success +__naked void reuseport_ctx_sockmap_key_flags(void) +{ + asm volatile (" \ + r4 = 0; \ + r2 = 0; \ + *(u32*)(r10 - 4) = r2; \ + r3 = r10; \ + r3 += -4; \ + r2 = %[map_sockmap] ll; \ + call %[bpf_sk_select_reuseport]; \ + exit; \ +" : + : __imm(bpf_sk_select_reuseport), + __imm_addr(map_sockmap) + : __clobber_all); +} + +SEC("sk_reuseport") +__description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)") +__success +__naked void reuseport_ctx_sockhash_key_flags(void) +{ + asm volatile (" \ + r4 = 0; \ + r2 = 0; \ + *(u32*)(r10 - 4) = r2; \ + r3 = r10; \ + r3 += -4; \ + r2 = %[map_sockmap] ll; \ + call %[bpf_sk_select_reuseport]; \ + exit; \ +" : + : __imm(bpf_sk_select_reuseport), + __imm_addr(map_sockmap) + : __clobber_all); +} + +SEC("tc") +__description("mark null check on return value of bpf_skc_to helpers") +__failure __msg("invalid mem access") +__naked void of_bpf_skc_to_helpers(void) +{ + asm volatile (" \ + r1 = *(u64*)(r1 + %[__sk_buff_sk]); \ + if r1 != 0 goto l0_%=; \ + r0 = 0; \ + exit; \ +l0_%=: r6 = r1; \ + call %[bpf_skc_to_tcp_sock]; \ + r7 = r0; \ + r1 = r6; \ + call %[bpf_skc_to_tcp_request_sock]; \ + r8 = r0; \ + if r8 != 0 goto l1_%=; \ + r0 = 0; \ + exit; \ +l1_%=: r0 = *(u8*)(r7 + 0); \ + exit; \ +" : + : __imm(bpf_skc_to_tcp_request_sock), + __imm(bpf_skc_to_tcp_sock), + __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c deleted file mode 100644 index 108dd3ee1edd..000000000000 --- a/tools/testing/selftests/bpf/verifier/sock.c +++ /dev/null @@ -1,706 +0,0 @@ -{ - "skb->sk: no NULL check", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid mem access 'sock_common_or_null'", -}, -{ - "skb->sk: sk->family [non fullsock field]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sock, family)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "skb->sk: sk->type [fullsock field]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, offsetof(struct bpf_sock, type)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid sock_common access", -}, -{ - "bpf_sk_fullsock(skb->sk): no !skb->sk check", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "type=sock_common_or_null expected=sock_common", -}, -{ - "sk_fullsock(skb->sk): no NULL check on ret", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - 
BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid mem access 'sock_or_null'", -}, -{ - "sk_fullsock(skb->sk): sk->type [fullsock field]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->family [non fullsock field]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, family)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->state [narrow load]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, state)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->dst_port [half load]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, 
dst_port) + 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid sock access", -}, -{ - "sk_fullsock(skb->sk): sk->dst_port [byte load]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port)), - BPF_LDX_MEM(BPF_B, BPF_REG_2, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_port) + 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid sock access", -}, -{ - "sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, dst_port)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid sock access", -}, -{ - "sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->type [narrow load]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): sk->protocol [narrow load]", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - 
BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, protocol)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "sk_fullsock(skb->sk): beyond last field", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_sock, rx_queue_mapping)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid sock access", -}, -{ - "bpf_tcp_sock(skb->sk): no !skb->sk check", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "type=sock_common_or_null expected=sock_common", -}, -{ - "bpf_tcp_sock(skb->sk): no NULL check on ret", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid mem access 'tcp_sock_or_null'", -}, -{ - "bpf_tcp_sock(skb->sk): tp->snd_cwnd", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "bpf_tcp_sock(skb->sk): tp->bytes_acked", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, bytes_acked)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "bpf_tcp_sock(skb->sk): beyond last field", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, offsetofend(struct bpf_tcp_sock, bytes_acked)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = REJECT, - .errstr = "invalid tcp_sock access", -}, -{ - "bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - 
BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_tcp_sock, snd_cwnd)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .result = ACCEPT, -}, -{ - "bpf_sk_release(skb->sk)", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "R1 must be referenced when passed to release function", -}, -{ - "bpf_sk_release(bpf_sk_fullsock(skb->sk))", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "R1 must be referenced when passed to release function", -}, -{ - "bpf_sk_release(bpf_tcp_sock(skb->sk))", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_tcp_sock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "R1 must be referenced when passed to release function", -}, -{ - "sk_storage_get(map, skb->sk, NULL, 0): value == NULL", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_storage_get), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_sk_storage_map = { 11 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "sk_storage_get(map, skb->sk, 1, 1): value == 1", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_4, 1), - BPF_MOV64_IMM(BPF_REG_3, 1), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_storage_get), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_sk_storage_map = { 11 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "R3 type=scalar expected=fp", -}, -{ - "sk_storage_get(map, skb->sk, &stack_value, 1): 
stack_value", - .insns = { - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_EMIT_CALL(BPF_FUNC_sk_fullsock), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_4, 1), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_storage_get), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_sk_storage_map = { 14 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, -}, -{ - "bpf_map_lookup_elem(smap, &key)", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_sk_storage_map = { 3 }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "cannot pass map_type 24 into func bpf_map_lookup_elem", -}, -{ - "bpf_map_lookup_elem(xskmap, &key); xs->queue_id", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_xdp_sock, queue_id)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_xskmap = { 3 }, - .prog_type = BPF_PROG_TYPE_XDP, - .result = ACCEPT, -}, -{ - "bpf_map_lookup_elem(sockmap, &key)", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_sockmap = { 3 }, - .prog_type = BPF_PROG_TYPE_SK_SKB, - .result = REJECT, - .errstr = "Unreleased reference id=2 alloc_insn=5", -}, -{ - "bpf_map_lookup_elem(sockhash, &key)", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_sockhash = { 3 }, - .prog_type = BPF_PROG_TYPE_SK_SKB, - .result = REJECT, - .errstr = "Unreleased reference id=2 alloc_insn=5", -}, -{ - "bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .fixup_map_sockmap = { 3 }, - .prog_type = BPF_PROG_TYPE_SK_SKB, - .result = ACCEPT, -}, -{ - "bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 
-4), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_sock, type)), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_EXIT_INSN(), - }, - .fixup_map_sockhash = { 3 }, - .prog_type = BPF_PROG_TYPE_SK_SKB, - .result = ACCEPT, -}, -{ - "bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)", - .insns = { - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -4), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_select_reuseport), - BPF_EXIT_INSN(), - }, - .fixup_map_reuseport_array = { 4 }, - .prog_type = BPF_PROG_TYPE_SK_REUSEPORT, - .result = ACCEPT, -}, -{ - "bpf_sk_select_reuseport(ctx, sockmap, &key, flags)", - .insns = { - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -4), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_select_reuseport), - BPF_EXIT_INSN(), - }, - .fixup_map_sockmap = { 4 }, - .prog_type = BPF_PROG_TYPE_SK_REUSEPORT, - .result = ACCEPT, -}, -{ - "bpf_sk_select_reuseport(ctx, sockhash, &key, flags)", - .insns = { - BPF_MOV64_IMM(BPF_REG_4, 0), - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -4), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_EMIT_CALL(BPF_FUNC_sk_select_reuseport), - BPF_EXIT_INSN(), - }, - .fixup_map_sockmap = { 4 }, - .prog_type = BPF_PROG_TYPE_SK_REUSEPORT, - .result = ACCEPT, -}, -{ - "mark null check on return value of bpf_skc_to helpers", - .insns = { - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_request_sock), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = REJECT, - .errstr = "invalid mem access", - .result_unpriv = REJECT, - .errstr_unpriv = "unknown func", -}, -- cgit v1.2.3-70-g09d2 From f323a81806bd79adafa6e88607b388166e92a1b0 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:30 +0300 Subject: selftests/bpf: verifier/spin_lock converted to inline assembly Test verifier/spin_lock automatically converted to use inline assembly. 
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-21-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../selftests/bpf/progs/verifier_spin_lock.c | 533 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/spin_lock.c | 447 ----------------- 3 files changed, 535 insertions(+), 447 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_spin_lock.c delete mode 100644 tools/testing/selftests/bpf/verifier/spin_lock.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 906f1c532cb9..1582cb5bdcc4 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -52,6 +52,7 @@ #include "verifier_search_pruning.skel.h" #include "verifier_sock.skel.h" #include "verifier_spill_fill.skel.h" +#include "verifier_spin_lock.skel.h" #include "verifier_stack_ptr.skel.h" #include "verifier_uninit.skel.h" #include "verifier_value_adj_spill.skel.h" @@ -144,6 +145,7 @@ void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); } void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); } void test_verifier_sock(void) { RUN(verifier_sock); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } +void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } void test_verifier_uninit(void) { RUN(verifier_uninit); } void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); } diff --git a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c new file mode 100644 index 000000000000..9c1aa69650f8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/spin_lock.c */ + +#include +#include +#include "bpf_misc.h" + +struct val { + int cnt; + struct bpf_spin_lock l; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct val); +} map_spin_lock SEC(".maps"); + +SEC("cgroup/skb") +__description("spin_lock: test1 success") +__success __failure_unpriv __msg_unpriv("") +__retval(0) +__naked void spin_lock_test1_success(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 0); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test2 direct ld/st") +__failure __msg("cannot be accessed directly") +__failure_unpriv __msg_unpriv("") +__naked void lock_test2_direct_ld_st(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r1 + 0); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : 
__imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test3 direct ld/st") +__failure __msg("cannot be accessed directly") +__failure_unpriv __msg_unpriv("") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void lock_test3_direct_ld_st(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 1); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test4 direct ld/st") +__failure __msg("cannot be accessed directly") +__failure_unpriv __msg_unpriv("") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void lock_test4_direct_ld_st(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u16*)(r6 + 3); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test5 call within a locked region") +__failure __msg("calls are not allowed") +__failure_unpriv __msg_unpriv("") +__naked void call_within_a_locked_region(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + call %[bpf_get_prandom_u32]; \ + r1 = r6; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32), + __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test6 missing unlock") +__failure __msg("unlock is missing") +__failure_unpriv __msg_unpriv("") +__naked void spin_lock_test6_missing_unlock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 0); \ + if r0 != 0 goto l1_%=; \ + call %[bpf_spin_unlock]; \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test7 unlock without lock") +__failure __msg("without taking a lock") +__failure_unpriv __msg_unpriv("") +__naked void lock_test7_unlock_without_lock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + if r1 != 0 goto l1_%=; \ + call 
%[bpf_spin_lock]; \ +l1_%=: r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 0); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test8 double lock") +__failure __msg("calls are not allowed") +__failure_unpriv __msg_unpriv("") +__naked void spin_lock_test8_double_lock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r6; \ + r1 += 4; \ + r0 = *(u32*)(r6 + 0); \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test9 different lock") +__failure __msg("unlock of different lock") +__failure_unpriv __msg_unpriv("") +__naked void spin_lock_test9_different_lock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r7 = r0; \ + r1 = r6; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r1 = r7; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("cgroup/skb") +__description("spin_lock: test10 lock in subprog without unlock") +__failure __msg("unlock is missing") +__failure_unpriv __msg_unpriv("") +__naked void lock_in_subprog_without_unlock(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r6 = r0; \ + r1 = r0; \ + r1 += 4; \ + call lock_in_subprog_without_unlock__1; \ + r1 = r6; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +static __naked __noinline __attribute__((used)) +void lock_in_subprog_without_unlock__1(void) +{ + asm volatile (" \ + call %[bpf_spin_lock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_spin_lock) + : __clobber_all); +} + +SEC("tc") +__description("spin_lock: test11 ld_abs under lock") +__failure __msg("inside bpf_spin_lock") +__naked void test11_ld_abs_under_lock(void) +{ + asm volatile (" \ + r6 = r1; \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r7 = r0; \ + r1 = r0; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r0 = *(u8*)skb[0]; \ + r1 = r7; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +SEC("tc") +__description("spin_lock: regsafe compare reg->id for map value") +__failure __msg("bpf_spin_unlock of different lock") 
+__flag(BPF_F_TEST_STATE_FREQ) +__naked void reg_id_for_map_value(void) +{ + asm volatile (" \ + r6 = r1; \ + r6 = *(u32*)(r6 + %[__sk_buff_mark]); \ + r1 = %[map_spin_lock] ll; \ + r9 = r1; \ + r2 = 0; \ + *(u32*)(r10 - 4) = r2; \ + r2 = r10; \ + r2 += -4; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r7 = r0; \ + r1 = r9; \ + r2 = r10; \ + r2 += -4; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l1_%=; \ + exit; \ +l1_%=: r8 = r0; \ + r1 = r7; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + if r6 == 0 goto l2_%=; \ + goto l3_%=; \ +l2_%=: r7 = r8; \ +l3_%=: r1 = r7; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +/* Make sure that regsafe() compares ids for spin lock records using + * check_ids(): + * 1: r9 = map_lookup_elem(...) ; r9.id == 1 + * 2: r8 = map_lookup_elem(...) ; r8.id == 2 + * 3: r7 = ktime_get_ns() + * 4: r6 = ktime_get_ns() + * 5: if r6 > r7 goto <9> + * 6: spin_lock(r8) + * 7: r9 = r8 + * 8: goto <10> + * 9: spin_lock(r9) + * 10: spin_unlock(r9) ; r9.id == 1 || r9.id == 2 and lock is active, + * ; second visit to (10) should be considered safe + * ; if check_ids() is used. + * 11: exit(0) + */ + +SEC("cgroup/skb") +__description("spin_lock: regsafe() check_ids() similar id mappings") +__success __msg("29: safe") +__failure_unpriv __msg_unpriv("") +__log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ) +__naked void check_ids_similar_id_mappings(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u32*)(r10 - 4) = r1; \ + /* r9 = map_lookup_elem(...) */ \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r9 = r0; \ + /* r8 = map_lookup_elem(...) 
*/ \ + r2 = r10; \ + r2 += -4; \ + r1 = %[map_spin_lock] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l1_%=; \ + r8 = r0; \ + /* r7 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r7 = r0; \ + /* r6 = ktime_get_ns() */ \ + call %[bpf_ktime_get_ns]; \ + r6 = r0; \ + /* if r6 > r7 goto +5 ; no new information about the state is derived from\ + * ; this check, thus produced verifier states differ\ + * ; only in 'insn_idx' \ + * spin_lock(r8) \ + * r9 = r8 \ + * goto unlock \ + */ \ + if r6 > r7 goto l2_%=; \ + r1 = r8; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ + r9 = r8; \ + goto l3_%=; \ +l2_%=: /* spin_lock(r9) */ \ + r1 = r9; \ + r1 += 4; \ + call %[bpf_spin_lock]; \ +l3_%=: /* spin_unlock(r9) */ \ + r1 = r9; \ + r1 += 4; \ + call %[bpf_spin_unlock]; \ +l0_%=: /* exit(0) */ \ + r0 = 0; \ +l1_%=: exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm(bpf_map_lookup_elem), + __imm(bpf_spin_lock), + __imm(bpf_spin_unlock), + __imm_addr(map_spin_lock) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/spin_lock.c b/tools/testing/selftests/bpf/verifier/spin_lock.c deleted file mode 100644 index eaf114f07e2e..000000000000 --- a/tools/testing/selftests/bpf/verifier/spin_lock.c +++ /dev/null @@ -1,447 +0,0 @@ -{ - "spin_lock: test1 success", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test2 direct ld/st", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "cannot be accessed directly", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test3 direct ld/st", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - 
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "cannot be accessed directly", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "spin_lock: test4 direct ld/st", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 3), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "cannot be accessed directly", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "spin_lock: test5 call within a locked region", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "calls are not allowed", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test6 missing unlock", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "unlock is missing", - .result_unpriv = REJECT, - 
.errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test7 unlock without lock", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "without taking a lock", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test8 double lock", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "calls are not allowed", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test9 different lock", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3, 11 }, - .result = REJECT, - .errstr = "unlock of different lock", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test10 lock in subprog without unlock", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - 
BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3 }, - .result = REJECT, - .errstr = "unlock is missing", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, -}, -{ - "spin_lock: test11 ld_abs under lock", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_LD_ABS(BPF_B, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 4 }, - .result = REJECT, - .errstr = "inside bpf_spin_lock", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "spin_lock: regsafe compare reg->id for map value", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_6, offsetof(struct __sk_buff, mark)), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_1), - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_lock), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_spin_unlock), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 2 }, - .result = REJECT, - .errstr = "bpf_spin_unlock of different lock", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .flags = BPF_F_TEST_STATE_FREQ, -}, -/* Make sure that regsafe() compares ids for spin lock records using - * check_ids(): - * 1: r9 = map_lookup_elem(...) ; r9.id == 1 - * 2: r8 = map_lookup_elem(...) 
; r8.id == 2 - * 3: r7 = ktime_get_ns() - * 4: r6 = ktime_get_ns() - * 5: if r6 > r7 goto <9> - * 6: spin_lock(r8) - * 7: r9 = r8 - * 8: goto <10> - * 9: spin_lock(r9) - * 10: spin_unlock(r9) ; r9.id == 1 || r9.id == 2 and lock is active, - * ; second visit to (10) should be considered safe - * ; if check_ids() is used. - * 11: exit(0) - */ -{ - "spin_lock: regsafe() check_ids() similar id mappings", - .insns = { - BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0), - /* r9 = map_lookup_elem(...) */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 24), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), - /* r8 = map_lookup_elem(...) */ - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), - BPF_LD_MAP_FD(BPF_REG_1, - 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 18), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), - /* r7 = ktime_get_ns() */ - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - /* r6 = ktime_get_ns() */ - BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - /* if r6 > r7 goto +5 ; no new information about the state is derived from - * ; this check, thus produced verifier states differ - * ; only in 'insn_idx' - * spin_lock(r8) - * r9 = r8 - * goto unlock - */ - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 5), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_EMIT_CALL(BPF_FUNC_spin_lock), - BPF_MOV64_REG(BPF_REG_9, BPF_REG_8), - BPF_JMP_A(3), - /* spin_lock(r9) */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_EMIT_CALL(BPF_FUNC_spin_lock), - /* spin_unlock(r9) */ - BPF_MOV64_REG(BPF_REG_1, BPF_REG_9), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4), - BPF_EMIT_CALL(BPF_FUNC_spin_unlock), - /* exit(0) */ - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_spin_lock = { 3, 10 }, - .result = VERBOSE_ACCEPT, - .errstr = "28: safe", - .result_unpriv = REJECT, - .errstr_unpriv = "", - .prog_type = BPF_PROG_TYPE_CGROUP_SKB, - .flags = BPF_F_TEST_STATE_FREQ, -}, -- cgit v1.2.3-70-g09d2 From 81d1d6dd4037755b98bf9b9f9d0dbd715b1734e5 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:31 +0300 Subject: selftests/bpf: verifier/subreg converted to inline assembly Test verifier/subreg automatically converted to use inline assembly. 
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-22-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../testing/selftests/bpf/progs/verifier_subreg.c | 673 +++++++++++++++++++++ tools/testing/selftests/bpf/verifier/subreg.c | 533 ---------------- 3 files changed, 675 insertions(+), 533 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_subreg.c delete mode 100644 tools/testing/selftests/bpf/verifier/subreg.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 1582cb5bdcc4..8eb36cbf00bd 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -54,6 +54,7 @@ #include "verifier_spill_fill.skel.h" #include "verifier_spin_lock.skel.h" #include "verifier_stack_ptr.skel.h" +#include "verifier_subreg.skel.h" #include "verifier_uninit.skel.h" #include "verifier_value_adj_spill.skel.h" #include "verifier_value.skel.h" @@ -147,6 +148,7 @@ void test_verifier_sock(void) { RUN(verifier_sock); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } +void test_verifier_subreg(void) { RUN(verifier_subreg); } void test_verifier_uninit(void) { RUN(verifier_uninit); } void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); } void test_verifier_value(void) { RUN(verifier_value); } diff --git a/tools/testing/selftests/bpf/progs/verifier_subreg.c b/tools/testing/selftests/bpf/progs/verifier_subreg.c new file mode 100644 index 000000000000..8613ea160dcd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_subreg.c @@ -0,0 +1,673 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/subreg.c */ + +#include +#include +#include "bpf_misc.h" + +/* This file contains sub-register zero extension checks for insns defining + * sub-registers, meaning: + * - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width + * forms (BPF_END) could define sub-registers. + * - Narrow direct loads, BPF_B/H/W | BPF_LDX. + * - BPF_LD is not exposed to JIT back-ends, so no need for testing. + * + * "get_prandom_u32" is used to initialize low 32-bit of some registers to + * prevent potential optimizations done by verifier or JIT back-ends which could + * optimize register back into constant when range info shows one register is a + * constant. + */ + +SEC("socket") +__description("add32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void add32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x100000000 ll; \ + w0 += w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("add32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void add32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + /* An insn could have no effect on the low 32-bit, for example:\ + * a = a + 0 \ + * a = a | 0 \ + * a = a & -1 \ + * But, they should still zero high 32-bit. 
\ + */ \ + w0 += 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 += -2; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("sub32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void sub32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x1ffffffff ll; \ + w0 -= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("sub32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void sub32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 -= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 -= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mul32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void mul32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x100000001 ll; \ + w0 *= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mul32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void mul32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 *= 1; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 *= -1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("div32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void div32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = -1; \ + w0 /= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("div32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void div32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 /= 1; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 /= 2; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("or32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void or32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x100000001 ll; \ + w0 |= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("or32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void or32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 |= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 |= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("and32 reg zero extend check") +__success 
__success_unpriv __retval(0) +__naked void and32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x100000000 ll; \ + r1 |= r0; \ + r0 = 0x1ffffffff ll; \ + w0 &= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("and32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void and32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 &= -1; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 &= -2; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("lsh32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void lsh32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x100000000 ll; \ + r0 |= r1; \ + r1 = 1; \ + w0 <<= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("lsh32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void lsh32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 <<= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 <<= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("rsh32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void rsh32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r1 = 1; \ + w0 >>= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("rsh32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void rsh32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 >>= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 >>= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("neg32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void neg32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 = -w0; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mod32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void mod32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = -1; \ + w0 %%= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mod32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void mod32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 %%= 1; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 %%= 2; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : 
__imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("xor32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void xor32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = r0; \ + r0 = 0x100000000 ll; \ + w0 ^= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("xor32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void xor32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 ^= 1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mov32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void mov32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x100000000 ll; \ + r1 |= r0; \ + r0 = 0x100000000 ll; \ + w0 = w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("mov32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void mov32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 = 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 = 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("arsh32 reg zero extend check") +__success __success_unpriv __retval(0) +__naked void arsh32_reg_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r1 = 1; \ + w0 s>>= w1; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("arsh32 imm zero extend check") +__success __success_unpriv __retval(0) +__naked void arsh32_imm_zero_extend_check(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 s>>= 0; \ + r0 >>= 32; \ + r6 = r0; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + w0 s>>= 1; \ + r0 >>= 32; \ + r0 |= r6; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("end16 (to_le) reg zero extend check") +__success __success_unpriv __retval(0) +__naked void le_reg_zero_extend_check_1(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + r6 <<= 32; \ + call %[bpf_get_prandom_u32]; \ + r0 |= r6; \ + r0 = le16 r0; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("end32 (to_le) reg zero extend check") +__success __success_unpriv __retval(0) +__naked void le_reg_zero_extend_check_2(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + r6 <<= 32; \ + call %[bpf_get_prandom_u32]; \ + r0 |= r6; \ + r0 = le32 r0; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("end16 (to_be) reg zero extend check") +__success __success_unpriv __retval(0) +__naked void be_reg_zero_extend_check_1(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + r6 <<= 32; \ + call %[bpf_get_prandom_u32]; \ + r0 |= r6; \ + r0 = be16 r0; \ + r0 >>= 32; \ + exit; \ +" : + : 
__imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("end32 (to_be) reg zero extend check") +__success __success_unpriv __retval(0) +__naked void be_reg_zero_extend_check_2(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r6 = r0; \ + r6 <<= 32; \ + call %[bpf_get_prandom_u32]; \ + r0 |= r6; \ + r0 = be32 r0; \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("ldx_b zero extend check") +__success __success_unpriv __retval(0) +__naked void ldx_b_zero_extend_check(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -4; \ + r7 = 0xfaceb00c; \ + *(u32*)(r6 + 0) = r7; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r0 = *(u8*)(r6 + 0); \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("ldx_h zero extend check") +__success __success_unpriv __retval(0) +__naked void ldx_h_zero_extend_check(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -4; \ + r7 = 0xfaceb00c; \ + *(u32*)(r6 + 0) = r7; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r0 = *(u16*)(r6 + 0); \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("socket") +__description("ldx_w zero extend check") +__success __success_unpriv __retval(0) +__naked void ldx_w_zero_extend_check(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -4; \ + r7 = 0xfaceb00c; \ + *(u32*)(r6 + 0) = r7; \ + call %[bpf_get_prandom_u32]; \ + r1 = 0x1000000000 ll; \ + r0 |= r1; \ + r0 = *(u32*)(r6 + 0); \ + r0 >>= 32; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/subreg.c b/tools/testing/selftests/bpf/verifier/subreg.c deleted file mode 100644 index 4c4133c80440..000000000000 --- a/tools/testing/selftests/bpf/verifier/subreg.c +++ /dev/null @@ -1,533 +0,0 @@ -/* This file contains sub-register zero extension checks for insns defining - * sub-registers, meaning: - * - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width - * forms (BPF_END) could define sub-registers. - * - Narrow direct loads, BPF_B/H/W | BPF_LDX. - * - BPF_LD is not exposed to JIT back-ends, so no need for testing. - * - * "get_prandom_u32" is used to initialize low 32-bit of some registers to - * prevent potential optimizations done by verifier or JIT back-ends which could - * optimize register back into constant when range info shows one register is a - * constant. - */ -{ - "add32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL), - BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "add32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - /* An insn could have no effect on the low 32-bit, for example: - * a = a + 0 - * a = a | 0 - * a = a & -1 - * But, they should still zero high 32-bit. 
- */ - BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, -2), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "sub32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL), - BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "sub32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "mul32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL), - BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "mul32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, -1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "div32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, -1), - BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "div32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 2), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result 
= ACCEPT, - .retval = 0, -}, -{ - "or32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL), - BPF_ALU32_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "or32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "and32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0), - BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL), - BPF_ALU32_REG(BPF_AND, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "and32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -2), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "lsh32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_ALU32_REG(BPF_LSH, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "lsh32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "rsh32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_ALU32_REG(BPF_RSH, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, 
BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "rsh32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "neg32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_NEG, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "mod32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, -1), - BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "mod32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 2), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "xor32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), - BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL), - BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "xor32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_XOR, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "mov32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0), - BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL), - BPF_MOV32_REG(BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "mov32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_MOV32_IMM(BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - 
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_MOV32_IMM(BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "arsh32 reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "arsh32 imm zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 1), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "end16 (to_le) reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 16), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "end32 (to_le) reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 32), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "end16 (to_be) reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "end32 (to_be) reg zero extend check", - .insns = { - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6), - BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 32), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "ldx_b zero extend check", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4), - BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 
0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "ldx_h zero extend check", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4), - BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -{ - "ldx_w zero extend check", - .insns = { - BPF_MOV64_REG(BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4), - BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), - BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL), - BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = 0, -}, -- cgit v1.2.3-70-g09d2 From 82887c2568e4f78d5cfcb85bf3cdc056dfd97c85 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:32 +0300 Subject: selftests/bpf: verifier/unpriv converted to inline assembly Test verifier/unpriv semi-automatically converted to use inline assembly. The verifier/unpriv.c had to be split into two parts: - the bulk of the tests is in progs/verifier_unpriv.c; - the single test that needs the `struct bpf_perf_event_data` definition is in progs/verifier_unpriv_perf.c. The tests above can't be in a single file because: - the first requires inclusion of the filter.h header (to get access to the BPF_ST_MEM macro; the inline assembler does not support this instruction); - the second requires vmlinux.h, which contains definitions conflicting with filter.h.
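The BPF_ST_MEM limitation mentioned above is worked around in progs/verifier_unpriv.c by passing the encoded instruction in as an immediate and emitting it verbatim with a .8byte directive via the __imm_insn() helper, which is what makes the filter.h include (for the BPF_ST_MEM macro) necessary. The sketch below is only a minimal, hypothetical illustration of that pattern, assuming the bpf_misc.h helpers behave as in the rest of this series; the real use is the "unpriv: spill/fill of different pointers st" test in the diff below:

/* Minimal sketch: store the constant 42 to the stack with a BPF_ST_MEM
 * instruction.  The inline assembler has no mnemonic for BPF_ST_MEM, so the
 * encoded instruction is supplied as an immediate and emitted verbatim with
 * .8byte.  This program is illustrative only and is not part of the patch.
 */
SEC("socket")
__naked void st_mem_sketch(void)
{
	asm volatile ("					\
	r1 = r10;					\
	r1 += -8;					\
	.8byte %[st_mem];	/* *(u64 *)(r1 + 0) = 42 */	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_insn(st_mem, BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42))
	: __clobber_all);
}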
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-23-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 4 + .../testing/selftests/bpf/progs/verifier_unpriv.c | 726 +++++++++++++++++++++ .../selftests/bpf/progs/verifier_unpriv_perf.c | 34 + tools/testing/selftests/bpf/verifier/unpriv.c | 562 ---------------- 4 files changed, 764 insertions(+), 562 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_unpriv.c create mode 100644 tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c delete mode 100644 tools/testing/selftests/bpf/verifier/unpriv.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 8eb36cbf00bd..a2754ea49b19 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -56,6 +56,8 @@ #include "verifier_stack_ptr.skel.h" #include "verifier_subreg.skel.h" #include "verifier_uninit.skel.h" +#include "verifier_unpriv.skel.h" +#include "verifier_unpriv_perf.skel.h" #include "verifier_value_adj_spill.skel.h" #include "verifier_value.skel.h" #include "verifier_value_or_null.skel.h" @@ -150,6 +152,8 @@ void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } void test_verifier_subreg(void) { RUN(verifier_subreg); } void test_verifier_uninit(void) { RUN(verifier_uninit); } +void test_verifier_unpriv(void) { RUN(verifier_unpriv); } +void test_verifier_unpriv_perf(void) { RUN(verifier_unpriv_perf); } void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); } void test_verifier_value(void) { RUN(verifier_value); } void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); } diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv.c b/tools/testing/selftests/bpf/progs/verifier_unpriv.c new file mode 100644 index 000000000000..7ea535bfbacd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_unpriv.c @@ -0,0 +1,726 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/unpriv.c */ + +#include +#include +#include "../../../include/linux/filter.h" +#include "bpf_misc.h" + +#define BPF_SK_LOOKUP(func) \ + /* struct bpf_sock_tuple tuple = {} */ \ + "r2 = 0;" \ + "*(u32*)(r10 - 8) = r2;" \ + "*(u64*)(r10 - 16) = r2;" \ + "*(u64*)(r10 - 24) = r2;" \ + "*(u64*)(r10 - 32) = r2;" \ + "*(u64*)(r10 - 40) = r2;" \ + "*(u64*)(r10 - 48) = r2;" \ + /* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \ + "r2 = r10;" \ + "r2 += -48;" \ + "r3 = %[sizeof_bpf_sock_tuple];"\ + "r4 = 0;" \ + "r5 = 0;" \ + "call %[" #func "];" + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, long long); +} map_hash_8b SEC(".maps"); + +void dummy_prog_42_socket(void); +void dummy_prog_24_socket(void); +void dummy_prog_loop1_socket(void); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 4); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog1_socket SEC(".maps") = { + .values = { + [0] = (void *)&dummy_prog_42_socket, + [1] = (void *)&dummy_prog_loop1_socket, + [2] = (void *)&dummy_prog_24_socket, + }, +}; + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_42_socket(void) +{ + asm volatile ("r0 = 42; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void 
dummy_prog_24_socket(void) +{ + asm volatile ("r0 = 24; exit;"); +} + +SEC("socket") +__auxiliary __auxiliary_unpriv +__naked void dummy_prog_loop1_socket(void) +{ + asm volatile (" \ + r3 = 1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 41; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: return pointer") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(POINTER_VALUE) +__naked void unpriv_return_pointer(void) +{ + asm volatile (" \ + r0 = r10; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: add const to pointer") +__success __success_unpriv __retval(0) +__naked void unpriv_add_const_to_pointer(void) +{ + asm volatile (" \ + r1 += 8; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: add pointer to pointer") +__failure __msg("R1 pointer += pointer") +__failure_unpriv +__naked void unpriv_add_pointer_to_pointer(void) +{ + asm volatile (" \ + r1 += r10; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: neg pointer") +__success __failure_unpriv __msg_unpriv("R1 pointer arithmetic") +__retval(0) +__naked void unpriv_neg_pointer(void) +{ + asm volatile (" \ + r1 = -r1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp pointer with const") +__success __failure_unpriv __msg_unpriv("R1 pointer comparison") +__retval(0) +__naked void unpriv_cmp_pointer_with_const(void) +{ + asm volatile (" \ + if r1 == 0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp pointer with pointer") +__success __failure_unpriv __msg_unpriv("R10 pointer comparison") +__retval(0) +__naked void unpriv_cmp_pointer_with_pointer(void) +{ + asm volatile (" \ + if r1 == r10 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tracepoint") +__description("unpriv: check that printk is disallowed") +__success +__naked void check_that_printk_is_disallowed(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r1 = r10; \ + r1 += -8; \ + r2 = 8; \ + r3 = r1; \ + call %[bpf_trace_printk]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_trace_printk) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: pass pointer to helper function") +__success __failure_unpriv __msg_unpriv("R4 leaks addr") +__retval(0) +__naked void pass_pointer_to_helper_function(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + r3 = r2; \ + r4 = r2; \ + call %[bpf_map_update_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_update_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: indirectly pass pointer on stack to helper function") +__success __failure_unpriv +__msg_unpriv("invalid indirect read from stack R2 off -8+0 size 8") +__retval(0) +__naked void on_stack_to_helper_function(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r10; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: mangle pointer on stack 1") +__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled") +__retval(0) +__naked void mangle_pointer_on_stack_1(void) +{ + asm volatile (" \ + 
*(u64*)(r10 - 8) = r10; \ + r0 = 0; \ + *(u32*)(r10 - 8) = r0; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: mangle pointer on stack 2") +__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled") +__retval(0) +__naked void mangle_pointer_on_stack_2(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r10; \ + r0 = 0; \ + *(u8*)(r10 - 1) = r0; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: read pointer from stack in small chunks") +__failure __msg("invalid size") +__failure_unpriv +__naked void from_stack_in_small_chunks(void) +{ + asm volatile (" \ + *(u64*)(r10 - 8) = r10; \ + r0 = *(u32*)(r10 - 8); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: write pointer into ctx") +__failure __msg("invalid bpf_context access") +__failure_unpriv __msg_unpriv("R1 leaks addr") +__naked void unpriv_write_pointer_into_ctx(void) +{ + asm volatile (" \ + *(u64*)(r1 + 0) = r1; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: spill/fill of ctx") +__success __success_unpriv __retval(0) +__naked void unpriv_spill_fill_of_ctx(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r1; \ + r1 = *(u64*)(r6 + 0); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of ctx 2") +__success __retval(0) +__naked void spill_fill_of_ctx_2(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r1; \ + r1 = *(u64*)(r6 + 0); \ + call %[bpf_get_hash_recalc]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_hash_recalc) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of ctx 3") +__failure __msg("R1 type=fp expected=ctx") +__naked void spill_fill_of_ctx_3(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r1; \ + *(u64*)(r6 + 0) = r10; \ + r1 = *(u64*)(r6 + 0); \ + call %[bpf_get_hash_recalc]; \ + exit; \ +" : + : __imm(bpf_get_hash_recalc) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of ctx 4") +__failure __msg("R1 type=scalar expected=ctx") +__naked void spill_fill_of_ctx_4(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r1; \ + r0 = 1; \ + lock *(u64 *)(r10 - 8) += r0; \ + r1 = *(u64*)(r6 + 0); \ + call %[bpf_get_hash_recalc]; \ + exit; \ +" : + : __imm(bpf_get_hash_recalc) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx") +__failure __msg("same insn cannot be used with different pointers") +__naked void fill_of_different_pointers_stx(void) +{ + asm volatile (" \ + r3 = 42; \ + r6 = r10; \ + r6 += -8; \ + if r1 == 0 goto l0_%=; \ + r2 = r10; \ + r2 += -16; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: r1 = *(u64*)(r6 + 0); \ + *(u32*)(r1 + %[__sk_buff_mark]) = r3; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) + : __clobber_all); +} + +/* Same as above, but use BPF_ST_MEM to save 42 + * instead of BPF_STX_MEM. 
+ */ +SEC("tc") +__description("unpriv: spill/fill of different pointers st") +__failure __msg("same insn cannot be used with different pointers") +__naked void fill_of_different_pointers_st(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + if r1 == 0 goto l0_%=; \ + r2 = r10; \ + r2 += -16; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: r1 = *(u64*)(r6 + 0); \ + .8byte %[st_mem]; \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_insn(st_mem, + BPF_ST_MEM(BPF_W, BPF_REG_1, offsetof(struct __sk_buff, mark), 42)) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx - ctx and sock") +__failure __msg("type=ctx expected=sock") +__naked void pointers_stx_ctx_and_sock(void) +{ + asm volatile (" \ + r8 = r1; \ + /* struct bpf_sock *sock = bpf_sock_lookup(...); */\ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r2 = r0; \ + /* u64 foo; */ \ + /* void *target = &foo; */ \ + r6 = r10; \ + r6 += -8; \ + r1 = r8; \ + /* if (skb == NULL) *target = sock; */ \ + if r1 == 0 goto l0_%=; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: /* else *target = skb; */ \ + if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: /* struct __sk_buff *skb = *target; */ \ + r1 = *(u64*)(r6 + 0); \ + /* skb->mark = 42; */ \ + r3 = 42; \ + *(u32*)(r1 + %[__sk_buff_mark]) = r3; \ + /* if (sk) bpf_sk_release(sk) */ \ + if r1 == 0 goto l2_%=; \ + call %[bpf_sk_release]; \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx - leak sock") +__failure +//.errstr = "same insn cannot be used with different pointers", +__msg("Unreleased reference") +__naked void different_pointers_stx_leak_sock(void) +{ + asm volatile (" \ + r8 = r1; \ + /* struct bpf_sock *sock = bpf_sock_lookup(...); */\ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r2 = r0; \ + /* u64 foo; */ \ + /* void *target = &foo; */ \ + r6 = r10; \ + r6 += -8; \ + r1 = r8; \ + /* if (skb == NULL) *target = sock; */ \ + if r1 == 0 goto l0_%=; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: /* else *target = skb; */ \ + if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: /* struct __sk_buff *skb = *target; */ \ + r1 = *(u64*)(r6 + 0); \ + /* skb->mark = 42; */ \ + r3 = 42; \ + *(u32*)(r1 + %[__sk_buff_mark]) = r3; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx - sock and ctx (read)") +__failure __msg("same insn cannot be used with different pointers") +__naked void stx_sock_and_ctx_read(void) +{ + asm volatile (" \ + r8 = r1; \ + /* struct bpf_sock *sock = bpf_sock_lookup(...); */\ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r2 = r0; \ + /* u64 foo; */ \ + /* void *target = &foo; */ \ + r6 = r10; \ + r6 += -8; \ + r1 = r8; \ + /* if (skb) *target = skb */ \ + if r1 == 0 goto l0_%=; \ + *(u64*)(r6 + 0) = r1; \ +l0_%=: /* else *target = sock */ \ + if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r2; \ +l1_%=: /* struct bpf_sock *sk = *target; */ \ + r1 = *(u64*)(r6 + 0); \ + /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */\ + if r1 == 0 goto l2_%=; \ + r3 = *(u32*)(r1 + 
%[bpf_sock_mark]); \ + call %[bpf_sk_release]; \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("tc") +__description("unpriv: spill/fill of different pointers stx - sock and ctx (write)") +__failure +//.errstr = "same insn cannot be used with different pointers", +__msg("cannot write into sock") +__naked void stx_sock_and_ctx_write(void) +{ + asm volatile (" \ + r8 = r1; \ + /* struct bpf_sock *sock = bpf_sock_lookup(...); */\ +" BPF_SK_LOOKUP(bpf_sk_lookup_tcp) +" r2 = r0; \ + /* u64 foo; */ \ + /* void *target = &foo; */ \ + r6 = r10; \ + r6 += -8; \ + r1 = r8; \ + /* if (skb) *target = skb */ \ + if r1 == 0 goto l0_%=; \ + *(u64*)(r6 + 0) = r1; \ +l0_%=: /* else *target = sock */ \ + if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r2; \ +l1_%=: /* struct bpf_sock *sk = *target; */ \ + r1 = *(u64*)(r6 + 0); \ + /* if (sk) sk->mark = 42; bpf_sk_release(sk); */\ + if r1 == 0 goto l2_%=; \ + r3 = 42; \ + *(u32*)(r1 + %[bpf_sock_mark]) = r3; \ + call %[bpf_sk_release]; \ +l2_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_sk_lookup_tcp), + __imm(bpf_sk_release), + __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)), + __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: write pointer into map elem value") +__success __failure_unpriv __msg_unpriv("R0 leaks addr") +__retval(0) +__naked void pointer_into_map_elem_value(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_hash_8b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + *(u64*)(r0 + 0) = r0; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("alu32: mov u32 const") +__success __failure_unpriv __msg_unpriv("R7 invalid mem access 'scalar'") +__retval(0) +__naked void alu32_mov_u32_const(void) +{ + asm volatile (" \ + w7 = 0; \ + w7 &= 1; \ + w0 = w7; \ + if r0 == 0 goto l0_%=; \ + r0 = *(u64*)(r7 + 0); \ +l0_%=: exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: partial copy of pointer") +__success __failure_unpriv __msg_unpriv("R10 partial copy") +__retval(0) +__naked void unpriv_partial_copy_of_pointer(void) +{ + asm volatile (" \ + w1 = w10; \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: pass pointer to tail_call") +__success __failure_unpriv __msg_unpriv("R3 leaks addr into helper") +__retval(0) +__naked void pass_pointer_to_tail_call(void) +{ + asm volatile (" \ + r3 = r1; \ + r2 = %[map_prog1_socket] ll; \ + call %[bpf_tail_call]; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_tail_call), + __imm_addr(map_prog1_socket) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp map pointer with zero") +__success __failure_unpriv __msg_unpriv("R1 pointer comparison") +__retval(0) +__naked void cmp_map_pointer_with_zero(void) +{ + asm volatile (" \ + r1 = 0; \ + r1 = %[map_hash_8b] ll; \ + if r1 == 0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_addr(map_hash_8b) + : __clobber_all); +} + +SEC("socket") +__description("unpriv: write into frame pointer") +__failure __msg("frame pointer is read only") +__failure_unpriv +__naked void unpriv_write_into_frame_pointer(void) +{ + asm volatile (" \ + r10 = r1; \ + r0 = 0; \ + exit; 
\ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: spill/fill frame pointer") +__failure __msg("frame pointer is read only") +__failure_unpriv +__naked void unpriv_spill_fill_frame_pointer(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + *(u64*)(r6 + 0) = r10; \ + r10 = *(u64*)(r6 + 0); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp of frame pointer") +__success __failure_unpriv __msg_unpriv("R10 pointer comparison") +__retval(0) +__naked void unpriv_cmp_of_frame_pointer(void) +{ + asm volatile (" \ + if r10 == 0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: adding of fp, reg") +__success __failure_unpriv +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__retval(0) +__naked void unpriv_adding_of_fp_reg(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = 0; \ + r1 += r10; \ + *(u64*)(r1 - 8) = r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: adding of fp, imm") +__success __failure_unpriv +__msg_unpriv("R1 stack pointer arithmetic goes out of range") +__retval(0) +__naked void unpriv_adding_of_fp_imm(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = r10; \ + r1 += 0; \ + *(u64*)(r1 - 8) = r0; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unpriv: cmp of stack pointer") +__success __failure_unpriv __msg_unpriv("R2 pointer comparison") +__retval(0) +__naked void unpriv_cmp_of_stack_pointer(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + if r2 == 0 goto l0_%=; \ +l0_%=: r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c b/tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c new file mode 100644 index 000000000000..4d77407a0a79 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/unpriv.c */ + +#include "vmlinux.h" +#include +#include "bpf_misc.h" + +SEC("perf_event") +__description("unpriv: spill/fill of different pointers ldx") +__failure __msg("same insn cannot be used with different pointers") +__naked void fill_of_different_pointers_ldx(void) +{ + asm volatile (" \ + r6 = r10; \ + r6 += -8; \ + if r1 == 0 goto l0_%=; \ + r2 = r10; \ + r2 += %[__imm_0]; \ + *(u64*)(r6 + 0) = r2; \ +l0_%=: if r1 != 0 goto l1_%=; \ + *(u64*)(r6 + 0) = r1; \ +l1_%=: r1 = *(u64*)(r6 + 0); \ + r1 = *(u64*)(r1 + %[sample_period]); \ + r0 = 0; \ + exit; \ +" : + : __imm_const(__imm_0, + -(__s32) offsetof(struct bpf_perf_event_data, sample_period) - 8), + __imm_const(sample_period, + offsetof(struct bpf_perf_event_data, sample_period)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c deleted file mode 100644 index af0c0f336625..000000000000 --- a/tools/testing/selftests/bpf/verifier/unpriv.c +++ /dev/null @@ -1,562 +0,0 @@ -{ - "unpriv: return pointer", - .insns = { - BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 leaks addr", - .retval = POINTER_VALUE, -}, -{ - "unpriv: add const to pointer", - .insns = { - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, -}, -{ - "unpriv: add pointer to 
pointer", - .insns = { - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R1 pointer += pointer", -}, -{ - "unpriv: neg pointer", - .insns = { - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 pointer arithmetic", -}, -{ - "unpriv: cmp pointer with const", - .insns = { - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 pointer comparison", -}, -{ - "unpriv: cmp pointer with pointer", - .insns = { - BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R10 pointer comparison", -}, -{ - "unpriv: check that printk is disallowed", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), - BPF_MOV64_IMM(BPF_REG_2, 8), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_1), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_trace_printk), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "unknown func bpf_trace_printk#6", - .result_unpriv = REJECT, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_TRACEPOINT, -}, -{ - "unpriv: pass pointer to helper function", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_2), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_update_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr_unpriv = "R4 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: indirectly pass pointer on stack to helper function", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr_unpriv = "invalid indirect read from stack R2 off -8+0 size 8", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: mangle pointer on stack 1", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), - BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "attempt to corrupt spilled", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: mangle pointer on stack 2", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), - BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "attempt to corrupt spilled", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: read pointer from stack in small chunks", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "invalid size", - .result = REJECT, -}, -{ - "unpriv: write pointer into ctx", - .insns = { - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 
leaks addr", - .result_unpriv = REJECT, - .errstr = "invalid bpf_context access", - .result = REJECT, -}, -{ - "unpriv: spill/fill of ctx", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, -}, -{ - "unpriv: spill/fill of ctx 2", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of ctx 3", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R1 type=fp expected=ctx", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of ctx 4", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_RAW_INSN(BPF_STX | BPF_ATOMIC | BPF_DW, - BPF_REG_10, BPF_REG_0, -8, BPF_ADD), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_hash_recalc), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "R1 type=scalar expected=ctx", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of different pointers stx", - .insns = { - BPF_MOV64_IMM(BPF_REG_3, 42), - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, - offsetof(struct __sk_buff, mark)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "same insn cannot be used with different pointers", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - /* Same as above, but use BPF_ST_MEM to save 42 - * instead of BPF_STX_MEM. 
- */ - "unpriv: spill/fill of different pointers st", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_ST_MEM(BPF_W, BPF_REG_1, offsetof(struct __sk_buff, mark), 42), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "same insn cannot be used with different pointers", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of different pointers stx - ctx and sock", - .insns = { - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - /* struct bpf_sock *sock = bpf_sock_lookup(...); */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - /* u64 foo; */ - /* void *target = &foo; */ - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - /* if (skb == NULL) *target = sock; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - /* else *target = skb; */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - /* struct __sk_buff *skb = *target; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - /* skb->mark = 42; */ - BPF_MOV64_IMM(BPF_REG_3, 42), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, - offsetof(struct __sk_buff, mark)), - /* if (sk) bpf_sk_release(sk) */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "type=ctx expected=sock", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of different pointers stx - leak sock", - .insns = { - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - /* struct bpf_sock *sock = bpf_sock_lookup(...); */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - /* u64 foo; */ - /* void *target = &foo; */ - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - /* if (skb == NULL) *target = sock; */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - /* else *target = skb; */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - /* struct __sk_buff *skb = *target; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - /* skb->mark = 42; */ - BPF_MOV64_IMM(BPF_REG_3, 42), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, - offsetof(struct __sk_buff, mark)), - BPF_EXIT_INSN(), - }, - .result = REJECT, - //.errstr = "same insn cannot be used with different pointers", - .errstr = "Unreleased reference", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of different pointers stx - sock and ctx (read)", - .insns = { - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - /* struct bpf_sock *sock = bpf_sock_lookup(...); */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - /* u64 foo; */ - /* void *target = &foo; */ - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - /* if (skb) *target = skb */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - /* else *target = sock */ - 
BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - /* struct bpf_sock *sk = *target; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - /* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2), - BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, - offsetof(struct bpf_sock, mark)), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "same insn cannot be used with different pointers", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of different pointers stx - sock and ctx (write)", - .insns = { - BPF_MOV64_REG(BPF_REG_8, BPF_REG_1), - /* struct bpf_sock *sock = bpf_sock_lookup(...); */ - BPF_SK_LOOKUP(sk_lookup_tcp), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - /* u64 foo; */ - /* void *target = &foo; */ - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), - /* if (skb) *target = skb */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - /* else *target = sock */ - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - /* struct bpf_sock *sk = *target; */ - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - /* if (sk) sk->mark = 42; bpf_sk_release(sk); */ - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_MOV64_IMM(BPF_REG_3, 42), - BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3, - offsetof(struct bpf_sock, mark)), - BPF_EMIT_CALL(BPF_FUNC_sk_release), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - //.errstr = "same insn cannot be used with different pointers", - .errstr = "cannot write into sock", - .prog_type = BPF_PROG_TYPE_SCHED_CLS, -}, -{ - "unpriv: spill/fill of different pointers ldx", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, - -(__s32)offsetof(struct bpf_perf_event_data, - sample_period) - 8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, - offsetof(struct bpf_perf_event_data, sample_period)), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .result = REJECT, - .errstr = "same insn cannot be used with different pointers", - .prog_type = BPF_PROG_TYPE_PERF_EVENT, -}, -{ - "unpriv: write pointer into map elem value", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 3 }, - .errstr_unpriv = "R0 leaks addr", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "alu32: mov u32 const", - .insns = { - BPF_MOV32_IMM(BPF_REG_7, 0), - BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1), - BPF_MOV32_REG(BPF_REG_0, BPF_REG_7), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R7 invalid mem access 'scalar'", - .result_unpriv = REJECT, - .result = ACCEPT, - .retval = 0, -}, -{ - "unpriv: partial copy of pointer", - .insns = { - 
BPF_MOV32_REG(BPF_REG_1, BPF_REG_10), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R10 partial copy", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: pass pointer to tail_call", - .insns = { - BPF_MOV64_REG(BPF_REG_3, BPF_REG_1), - BPF_LD_MAP_FD(BPF_REG_2, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_prog1 = { 1 }, - .errstr_unpriv = "R3 leaks addr into helper", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: cmp map pointer with zero", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_8b = { 1 }, - .errstr_unpriv = "R1 pointer comparison", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: write into frame pointer", - .insns = { - BPF_MOV64_REG(BPF_REG_10, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "frame pointer is read only", - .result = REJECT, -}, -{ - "unpriv: spill/fill frame pointer", - .insns = { - BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr = "frame pointer is read only", - .result = REJECT, -}, -{ - "unpriv: cmp of frame pointer", - .insns = { - BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R10 pointer comparison", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: adding of fp, reg", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: adding of fp, imm", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0), - BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -{ - "unpriv: cmp of stack pointer", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .errstr_unpriv = "R2 pointer comparison", - .result_unpriv = REJECT, - .result = ACCEPT, -}, -- cgit v1.2.3-70-g09d2 From efe25a330b106f39e5d27760cc57def3e8c5a3f9 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:33 +0300 Subject: selftests/bpf: verifier/value_illegal_alu converted to inline assembly Test verifier/value_illegal_alu automatically converted to use inline assembly. 
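For readers following the series, a minimal sketch of the target form (illustrative only; it assumes the bpf_misc.h annotations used by these converted tests and is not a hunk from this patch). Each BPF_* macro-encoded instruction becomes its inline-assembly mnemonic inside a __naked function, e.g. BPF_MOV64_IMM(BPF_REG_0, 0) becomes "r0 = 0;":

    /* illustrative sketch; assumes the usual converted-test headers */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_misc.h"

    SEC("socket")
    __description("example: macro-encoded program in inline asm form")
    __success __retval(0)
    __naked void example_conversion(void)
    {
        /* BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN() become: */
        asm volatile ("                 \
        r0 = 0;                         \
        exit;                           \
    "   ::: __clobber_all);
    }

    char _license[] SEC("license") = "GPL";
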
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-24-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../bpf/progs/verifier_value_illegal_alu.c | 149 +++++++++++++++++++++ .../selftests/bpf/verifier/value_illegal_alu.c | 95 ------------- 3 files changed, 151 insertions(+), 95 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c delete mode 100644 tools/testing/selftests/bpf/verifier/value_illegal_alu.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index a2754ea49b19..8695d39f84dd 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -60,6 +60,7 @@ #include "verifier_unpriv_perf.skel.h" #include "verifier_value_adj_spill.skel.h" #include "verifier_value.skel.h" +#include "verifier_value_illegal_alu.skel.h" #include "verifier_value_or_null.skel.h" #include "verifier_var_off.skel.h" #include "verifier_xadd.skel.h" @@ -156,6 +157,7 @@ void test_verifier_unpriv(void) { RUN(verifier_unpriv); } void test_verifier_unpriv_perf(void) { RUN(verifier_unpriv_perf); } void test_verifier_value_adj_spill(void) { RUN(verifier_value_adj_spill); } void test_verifier_value(void) { RUN(verifier_value); } +void test_verifier_value_illegal_alu(void) { RUN(verifier_value_illegal_alu); } void test_verifier_value_or_null(void) { RUN(verifier_value_or_null); } void test_verifier_var_off(void) { RUN(verifier_var_off); } void test_verifier_xadd(void) { RUN(verifier_xadd); } diff --git a/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c b/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c new file mode 100644 index 000000000000..71814a753216 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/value_illegal_alu.c */ + +#include +#include +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("socket") +__description("map element value illegal alu op, 1") +__failure __msg("R0 bitwise operator &= on pointer") +__failure_unpriv +__naked void value_illegal_alu_op_1(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 &= 8; \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value illegal alu op, 2") +__failure __msg("R0 32-bit pointer arithmetic prohibited") +__failure_unpriv +__naked void value_illegal_alu_op_2(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + w0 += 0; \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value illegal alu op, 3") +__failure __msg("R0 pointer arithmetic with 
/= operator") +__failure_unpriv +__naked void value_illegal_alu_op_3(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 /= 42; \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value illegal alu op, 4") +__failure __msg("invalid mem access 'scalar'") +__failure_unpriv __msg_unpriv("R0 pointer arithmetic prohibited") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void value_illegal_alu_op_4(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 = be64 r0; \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +SEC("socket") +__description("map element value illegal alu op, 5") +__failure __msg("R0 invalid mem access 'scalar'") +__msg_unpriv("leaking pointer from stack off -8") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void value_illegal_alu_op_5(void) +{ + asm volatile (" \ + r2 = r10; \ + r2 += -8; \ + r1 = 0; \ + *(u64*)(r2 + 0) = r1; \ + r1 = %[map_hash_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r3 = 4096; \ + r2 = r10; \ + r2 += -8; \ + *(u64*)(r2 + 0) = r0; \ + lock *(u64 *)(r2 + 0) += r3; \ + r0 = *(u64*)(r2 + 0); \ + r1 = 22; \ + *(u64*)(r0 + 0) = r1; \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_hash_48b) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c deleted file mode 100644 index d6f29eb4bd57..000000000000 --- a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c +++ /dev/null @@ -1,95 +0,0 @@ -{ - "map element value illegal alu op, 1", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 bitwise operator &= on pointer", - .result = REJECT, -}, -{ - "map element value illegal alu op, 2", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 32-bit pointer arithmetic prohibited", - .result = REJECT, -}, -{ - "map element value illegal alu op, 3", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr = "R0 pointer arithmetic with /= operator", - .result = REJECT, -}, -{ - "map element 
value illegal alu op, 4", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", - .errstr = "invalid mem access 'scalar'", - .result = REJECT, - .result_unpriv = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "map element value illegal alu op, 5", - .insns = { - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_IMM(BPF_REG_3, 4096), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), - BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_2, BPF_REG_3, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 3 }, - .errstr_unpriv = "leaking pointer from stack off -8", - .errstr = "R0 invalid mem access 'scalar'", - .result = REJECT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -- cgit v1.2.3-70-g09d2 From 4db10a8243df665ced10b027c2d4862173a7a3ec Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 20:42:34 +0300 Subject: selftests/bpf: verifier/value_ptr_arith converted to inline assembly Test verifier/value_ptr_arith automatically converted to use inline assembly. Test cases "sanitation: alu with different scalars 2" and "sanitation: alu with different scalars 3" are updated to avoid -ENOENT as return value, as __retval() annotation only supports numeric literals. 
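To make the __retval() constraint concrete, here is a minimal sketch of the pattern (a hypothetical program, not the exact hunk below; it assumes EINVAL from <errno.h> plus the usual converted-test headers and license line). The negative errno produced at run time is added back as an immediate so the program exits with a literal 0 that __retval() can express, mirroring the __imm_const(einval, EINVAL) additions in the updated tests:

    SEC("socket")
    __description("example: fold a negative errno into a literal __retval")
    __success __retval(0)
    __naked void example_literal_retval(void)
    {
        asm volatile ("                                 \
        /* pretend a helper returned -EINVAL */         \
        r0 = %[einval];                                 \
        r0 *= -1;                                       \
        /* cancel it so __retval(0) holds */            \
        r0 += %[einval];                                \
        exit;                                           \
    "   :
        : __imm_const(einval, EINVAL)
        : __clobber_all);
    }
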
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421174234.2391278-25-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 34 +- .../selftests/bpf/progs/verifier_value_ptr_arith.c | 1423 ++++++++++++++++++++ .../selftests/bpf/verifier/value_ptr_arith.c | 1140 ---------------- 3 files changed, 1451 insertions(+), 1146 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c delete mode 100644 tools/testing/selftests/bpf/verifier/value_ptr_arith.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 8695d39f84dd..c8bab8b1a6a4 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -62,6 +62,7 @@ #include "verifier_value.skel.h" #include "verifier_value_illegal_alu.skel.h" #include "verifier_value_or_null.skel.h" +#include "verifier_value_ptr_arith.skel.h" #include "verifier_var_off.skel.h" #include "verifier_xadd.skel.h" #include "verifier_xdp.skel.h" @@ -164,29 +165,50 @@ void test_verifier_xadd(void) { RUN(verifier_xadd); } void test_verifier_xdp(void) { RUN(verifier_xdp); } void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); } -static int init_array_access_maps(struct bpf_object *obj) +static int init_test_val_map(struct bpf_object *obj, char *map_name) { - struct bpf_map *array_ro; struct test_val value = { .index = (6 + 1) * sizeof(int), .foo[6] = 0xabcdef12, }; + struct bpf_map *map; int err, key = 0; - array_ro = bpf_object__find_map_by_name(obj, "map_array_ro"); - if (!ASSERT_OK_PTR(array_ro, "lookup map_array_ro")) + map = bpf_object__find_map_by_name(obj, map_name); + if (!map) { + PRINT_FAIL("Can't find map '%s'\n", map_name); return -EINVAL; + } - err = bpf_map_update_elem(bpf_map__fd(array_ro), &key, &value, 0); - if (!ASSERT_OK(err, "map_array_ro update")) + err = bpf_map_update_elem(bpf_map__fd(map), &key, &value, 0); + if (err) { + PRINT_FAIL("Error while updating map '%s': %d\n", map_name, err); return err; + } return 0; } +static int init_array_access_maps(struct bpf_object *obj) +{ + return init_test_val_map(obj, "map_array_ro"); +} + void test_verifier_array_access(void) { run_tests_aux("verifier_array_access", verifier_array_access__elf_bytes, init_array_access_maps); } + +static int init_value_ptr_arith_maps(struct bpf_object *obj) +{ + return init_test_val_map(obj, "map_array_48b"); +} + +void test_verifier_value_ptr_arith(void) +{ + run_tests_aux("verifier_value_ptr_arith", + verifier_value_ptr_arith__elf_bytes, + init_value_ptr_arith_maps); +} diff --git a/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c new file mode 100644 index 000000000000..5ba6e53571c8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c @@ -0,0 +1,1423 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/value_ptr_arith.c */ + +#include +#include +#include +#include "bpf_misc.h" + +#define MAX_ENTRIES 11 + +struct test_val { + unsigned int index; + int foo[MAX_ENTRIES]; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct test_val); +} map_array_48b SEC(".maps"); + +struct other_val { + long long foo; + long long bar; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + 
__uint(max_entries, 1); + __type(key, long long); + __type(value, struct other_val); +} map_hash_16b SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, long long); + __type(value, struct test_val); +} map_hash_48b SEC(".maps"); + +SEC("socket") +__description("map access: known scalar += value_ptr unknown vs const") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void value_ptr_unknown_vs_const(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ + goto l4_%=; \ +l3_%=: r1 = 3; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr const vs unknown") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void value_ptr_const_vs_unknown(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 3; \ + goto l4_%=; \ +l3_%=: r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr const vs const (ne)") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void ptr_const_vs_const_ne(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 3; \ + goto l4_%=; \ +l3_%=: r1 = 5; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr const vs const (eq)") +__success __success_unpriv __retval(1) +__naked void ptr_const_vs_const_eq(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 
0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 5; \ + goto l4_%=; \ +l3_%=: r1 = 5; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr unknown vs unknown (eq)") +__success __success_unpriv __retval(1) +__naked void ptr_unknown_vs_unknown_eq(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ + goto l4_%=; \ +l3_%=: r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr unknown vs unknown (lt)") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void ptr_unknown_vs_unknown_lt(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 6; \ + r1 = -r1; \ + r1 &= 0x3; \ + goto l4_%=; \ +l3_%=: r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr unknown vs unknown (gt)") +__success __failure_unpriv +__msg_unpriv("R1 tried to add from different maps, paths or scalars") +__retval(1) +__naked void ptr_unknown_vs_unknown_gt(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r4 = *(u8*)(r0 + 0); \ + if r4 == 1 goto l3_%=; \ + r1 = 6; \ + r1 = -r1; \ + r1 &= 0x7; \ + goto l4_%=; \ +l3_%=: r1 = 6; \ + r1 = -r1; \ + r1 &= 0x3; \ +l4_%=: r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr from different maps") +__success __success_unpriv __retval(1) +__naked void value_ptr_from_different_maps(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 
== 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = 4; \ + r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar from different maps") +__success __failure_unpriv +__msg_unpriv("R0 min value is outside of the allowed memory range") +__retval(1) +__naked void known_scalar_from_different_maps(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_16b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = 4; \ + r0 -= r1; \ + r0 += r1; \ + r0 = *(u8*)(r0 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_16b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr from different maps, but same value properties") +__success __success_unpriv __retval(1) +__naked void maps_but_same_value_properties(void) +{ + asm volatile (" \ + r0 = *(u32*)(r1 + %[__sk_buff_len]); \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + if r0 == 1 goto l0_%=; \ + r1 = %[map_hash_48b] ll; \ + if r0 != 1 goto l1_%=; \ +l0_%=: r1 = %[map_array_48b] ll; \ +l1_%=: call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l2_%=; \ + r1 = 4; \ + r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l2_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_addr(map_hash_48b), + __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)) + : __clobber_all); +} + +SEC("socket") +__description("map access: mixing value pointer and scalar, 1") +__success __failure_unpriv __msg_unpriv("R2 pointer comparison prohibited") +__retval(0) +__naked void value_pointer_and_scalar_1(void) +{ + asm volatile (" \ + /* load map value pointer into r0 and r2 */ \ + r0 = 1; \ + r1 = %[map_array_48b] ll; \ + r2 = r10; \ + r2 += -16; \ + r6 = 0; \ + *(u64*)(r10 - 16) = r6; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: /* load some number from the map into r1 */ \ + r1 = *(u8*)(r0 + 0); \ + /* depending on r1, branch: */ \ + if r1 != 0 goto l1_%=; \ + /* branch A */ \ + r2 = r0; \ + r3 = 0; \ + goto l2_%=; \ +l1_%=: /* branch B */ \ + r2 = 0; \ + r3 = 0x100000; \ +l2_%=: /* common instruction */ \ + r2 += r3; \ + /* depending on r1, branch: */ \ + if r1 != 0 goto l3_%=; \ + /* branch A */ \ + goto l4_%=; \ +l3_%=: /* branch B */ \ + r0 = 0x13371337; \ + /* verifier follows fall-through */ \ + if r2 != 0x100000 goto l4_%=; \ + r0 = 0; \ + exit; \ +l4_%=: /* fake-dead code; targeted from branch A to \ + * prevent dead code sanitization \ + */ \ + r0 = *(u8*)(r0 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: mixing value pointer and scalar, 2") +__success __failure_unpriv __msg_unpriv("R0 invalid mem access 'scalar'") +__retval(0) +__naked void value_pointer_and_scalar_2(void) +{ + asm volatile (" \ 
+ /* load map value pointer into r0 and r2 */ \ + r0 = 1; \ + r1 = %[map_array_48b] ll; \ + r2 = r10; \ + r2 += -16; \ + r6 = 0; \ + *(u64*)(r10 - 16) = r6; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: /* load some number from the map into r1 */ \ + r1 = *(u8*)(r0 + 0); \ + /* depending on r1, branch: */ \ + if r1 == 0 goto l1_%=; \ + /* branch A */ \ + r2 = 0; \ + r3 = 0x100000; \ + goto l2_%=; \ +l1_%=: /* branch B */ \ + r2 = r0; \ + r3 = 0; \ +l2_%=: /* common instruction */ \ + r2 += r3; \ + /* depending on r1, branch: */ \ + if r1 != 0 goto l3_%=; \ + /* branch A */ \ + goto l4_%=; \ +l3_%=: /* branch B */ \ + r0 = 0x13371337; \ + /* verifier follows fall-through */ \ + if r2 != 0x100000 goto l4_%=; \ + r0 = 0; \ + exit; \ +l4_%=: /* fake-dead code; targeted from branch A to \ + * prevent dead code sanitization, rejected \ + * via branch B however \ + */ \ + r0 = *(u8*)(r0 + 0); \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("sanitation: alu with different scalars 1") +__success __success_unpriv __retval(0x100000) +__naked void alu_with_different_scalars_1(void) +{ + asm volatile (" \ + r0 = 1; \ + r1 = %[map_array_48b] ll; \ + r2 = r10; \ + r2 += -16; \ + r6 = 0; \ + *(u64*)(r10 - 16) = r6; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r1 = *(u32*)(r0 + 0); \ + if r1 == 0 goto l1_%=; \ + r2 = 0; \ + r3 = 0x100000; \ + goto l2_%=; \ +l1_%=: r2 = 42; \ + r3 = 0x100001; \ +l2_%=: r2 += r3; \ + r0 = r2; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("sanitation: alu with different scalars 2") +__success __success_unpriv __retval(0) +__naked void alu_with_different_scalars_2(void) +{ + asm volatile (" \ + r0 = 1; \ + r1 = %[map_array_48b] ll; \ + r6 = r1; \ + r2 = r10; \ + r2 += -16; \ + r7 = 0; \ + *(u64*)(r10 - 16) = r7; \ + call %[bpf_map_delete_elem]; \ + r7 = r0; \ + r1 = r6; \ + r2 = r10; \ + r2 += -16; \ + call %[bpf_map_delete_elem]; \ + r6 = r0; \ + r8 = r6; \ + r8 += r7; \ + r0 = r8; \ + r0 += %[einval]; \ + r0 += %[einval]; \ + exit; \ +" : + : __imm(bpf_map_delete_elem), + __imm_addr(map_array_48b), + __imm_const(einval, EINVAL) + : __clobber_all); +} + +SEC("socket") +__description("sanitation: alu with different scalars 3") +__success __success_unpriv __retval(0) +__naked void alu_with_different_scalars_3(void) +{ + asm volatile (" \ + r0 = %[einval]; \ + r0 *= -1; \ + r7 = r0; \ + r0 = %[einval]; \ + r0 *= -1; \ + r6 = r0; \ + r8 = r6; \ + r8 += r7; \ + r0 = r8; \ + r0 += %[einval]; \ + r0 += %[einval]; \ + exit; \ +" : + : __imm_const(einval, EINVAL) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, upper oob arith, test 1") +__success __failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__retval(1) +__naked void upper_oob_arith_test_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 48; \ + r0 += r1; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, upper oob arith, test 2") +__success __failure_unpriv +__msg_unpriv("R0 pointer 
arithmetic of map value goes out of range") +__retval(1) +__naked void upper_oob_arith_test_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 49; \ + r0 += r1; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, upper oob arith, test 3") +__success __success_unpriv __retval(1) +__naked void upper_oob_arith_test_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 47; \ + r0 += r1; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar, lower oob arith, test 1") +__failure __msg("R0 min value is outside of the allowed memory range") +__failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__naked void lower_oob_arith_test_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 47; \ + r0 += r1; \ + r1 = 48; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar, lower oob arith, test 2") +__success __failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__retval(1) +__naked void lower_oob_arith_test_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 47; \ + r0 += r1; \ + r1 = 48; \ + r0 -= r1; \ + r1 = 1; \ + r0 += r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar, lower oob arith, test 3") +__success __success_unpriv __retval(1) +__naked void lower_oob_arith_test_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 47; \ + r0 += r1; \ + r1 = 47; \ + r0 -= r1; \ + r0 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar += value_ptr") +__success __success_unpriv __retval(1) +__naked void access_known_scalar_value_ptr_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 4; \ + r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 1") +__success __success_unpriv __retval(1) +__naked void 
value_ptr_known_scalar_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 4; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 2") +__failure __msg("invalid access to map value") +__failure_unpriv +__naked void value_ptr_known_scalar_2_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 49; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 3") +__failure __msg("invalid access to map value") +__failure_unpriv +__naked void value_ptr_known_scalar_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = -1; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 4") +__success __success_unpriv __retval(1) +__naked void value_ptr_known_scalar_4(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 5; \ + r0 += r1; \ + r1 = -2; \ + r0 += r1; \ + r1 = -1; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 5") +__success __success_unpriv __retval(0xabcdef12) +__naked void value_ptr_known_scalar_5(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = %[__imm_0]; \ + r1 += r0; \ + r0 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_const(__imm_0, (6 + 1) * sizeof(int)) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += known scalar, 6") +__success __success_unpriv __retval(0xabcdef12) +__naked void value_ptr_known_scalar_6(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = %[__imm_0]; \ + r0 += r1; \ + r1 = %[__imm_1]; \ + r0 += r1; \ + r0 = *(u32*)(r0 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b), + __imm_const(__imm_0, (3 + 1) * sizeof(int)), + __imm_const(__imm_1, 3 * sizeof(int)) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += N, value_ptr -= N known scalar") +__success __success_unpriv __retval(0x12345678) +__naked void value_ptr_n_known_scalar(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if 
r0 == 0 goto l0_%=; \ + w1 = 0x12345678; \ + *(u32*)(r0 + 0) = r1; \ + r0 += 2; \ + r1 = 2; \ + r0 -= r1; \ + r0 = *(u32*)(r0 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar += value_ptr, 1") +__success __success_unpriv __retval(1) +__naked void unknown_scalar_value_ptr_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + r1 += r0; \ + r0 = *(u8*)(r1 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar += value_ptr, 2") +__success __success_unpriv __retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT) +__naked void unknown_scalar_value_ptr_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + r1 &= 31; \ + r1 += r0; \ + r0 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar += value_ptr, 3") +__success __failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT) +__naked void unknown_scalar_value_ptr_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = -1; \ + r0 += r1; \ + r1 = 1; \ + r0 += r1; \ + r1 = *(u32*)(r0 + 0); \ + r1 &= 31; \ + r1 += r0; \ + r0 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar += value_ptr, 4") +__failure __msg("R1 max value is outside of the allowed memory range") +__msg_unpriv("R1 pointer arithmetic of map value goes out of range") +__flag(BPF_F_ANY_ALIGNMENT) +__naked void unknown_scalar_value_ptr_4(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 19; \ + r0 += r1; \ + r1 = *(u32*)(r0 + 0); \ + r1 &= 31; \ + r1 += r0; \ + r0 = *(u32*)(r1 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += unknown scalar, 1") +__success __success_unpriv __retval(1) +__naked void value_ptr_unknown_scalar_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += unknown scalar, 2") +__success __success_unpriv __retval(0xabcdef12) __flag(BPF_F_ANY_ALIGNMENT) +__naked void value_ptr_unknown_scalar_2_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ 
+ r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u32*)(r0 + 0); \ + r1 &= 31; \ + r0 += r1; \ + r0 = *(u32*)(r0 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += unknown scalar, 3") +__success __success_unpriv __retval(1) +__naked void value_ptr_unknown_scalar_3(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u64*)(r0 + 0); \ + r2 = *(u64*)(r0 + 8); \ + r3 = *(u64*)(r0 + 16); \ + r1 &= 0xf; \ + r3 &= 1; \ + r3 |= 1; \ + if r2 > r3 goto l0_%=; \ + r0 += r3; \ + r0 = *(u8*)(r0 + 0); \ + r0 = 1; \ +l1_%=: exit; \ +l0_%=: r0 = 2; \ + goto l1_%=; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr += value_ptr") +__failure __msg("R0 pointer += pointer prohibited") +__failure_unpriv +__naked void access_value_ptr_value_ptr_1(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 += r0; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: known scalar -= value_ptr") +__failure __msg("R1 tried to subtract pointer from scalar") +__failure_unpriv +__naked void access_known_scalar_value_ptr_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 4; \ + r1 -= r0; \ + r0 = *(u8*)(r1 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar") +__failure __msg("R0 min value is outside of the allowed memory range") +__failure_unpriv +__naked void access_value_ptr_known_scalar(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 4; \ + r0 -= r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= known scalar, 2") +__success __success_unpriv __retval(1) +__naked void value_ptr_known_scalar_2_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = 6; \ + r2 = 4; \ + r0 += r1; \ + r0 -= r2; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: unknown scalar -= value_ptr") +__failure __msg("R1 tried to subtract pointer from scalar") +__failure_unpriv +__naked void access_unknown_scalar_value_ptr(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + 
r1 -= r0; \ + r0 = *(u8*)(r1 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= unknown scalar") +__failure __msg("R0 min value is negative") +__failure_unpriv +__naked void access_value_ptr_unknown_scalar(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + r0 -= r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= unknown scalar, 2") +__success __failure_unpriv +__msg_unpriv("R0 pointer arithmetic of map value goes out of range") +__retval(1) +__naked void value_ptr_unknown_scalar_2_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0xf; \ + r1 |= 0x7; \ + r0 += r1; \ + r1 = *(u8*)(r0 + 0); \ + r1 &= 0x7; \ + r0 -= r1; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: value_ptr -= value_ptr") +__failure __msg("R0 invalid mem access 'scalar'") +__msg_unpriv("R0 pointer -= pointer prohibited") +__naked void access_value_ptr_value_ptr_2(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 == 0 goto l0_%=; \ + r0 -= r0; \ + r1 = *(u8*)(r0 + 0); \ +l0_%=: r0 = 1; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("socket") +__description("map access: trying to leak tainted dst reg") +__failure __msg("math between map_value pointer and 4294967295 is not allowed") +__failure_unpriv +__naked void to_leak_tainted_dst_reg(void) +{ + asm volatile (" \ + r0 = 0; \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_array_48b] ll; \ + call %[bpf_map_lookup_elem]; \ + if r0 != 0 goto l0_%=; \ + exit; \ +l0_%=: r2 = r0; \ + w1 = 0xFFFFFFFF; \ + w1 = w1; \ + r2 -= r1; \ + *(u64*)(r0 + 0) = r2; \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_array_48b) + : __clobber_all); +} + +SEC("tc") +__description("32bit pkt_ptr -= scalar") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void _32bit_pkt_ptr_scalar(void) +{ + asm volatile (" \ + r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data]); \ + r6 = r7; \ + r6 += 40; \ + if r6 > r8 goto l0_%=; \ + w4 = w7; \ + w6 -= w4; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +SEC("tc") +__description("32bit scalar -= pkt_ptr") +__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT) +__naked void _32bit_scalar_pkt_ptr(void) +{ + asm volatile (" \ + r8 = *(u32*)(r1 + %[__sk_buff_data_end]); \ + r7 = *(u32*)(r1 + %[__sk_buff_data]); \ + r6 = r7; \ + r6 += 40; \ + if r6 > r8 goto l0_%=; \ + w4 = w6; \ + w4 -= w7; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, 
data)), + __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c deleted file mode 100644 index 249187d3c530..000000000000 --- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c +++ /dev/null @@ -1,1140 +0,0 @@ -{ - "map access: known scalar += value_ptr unknown vs const", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_IMM(BPF_REG_1, 3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr const vs unknown", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2), - BPF_MOV64_IMM(BPF_REG_1, 3), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr const vs const (ne)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2), - BPF_MOV64_IMM(BPF_REG_1, 3), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_IMM(BPF_REG_1, 5), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - 
BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr const vs const (eq)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 2), - BPF_MOV64_IMM(BPF_REG_1, 5), - BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_MOV64_IMM(BPF_REG_1, 5), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr unknown vs unknown (eq)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr unknown vs unknown (lt)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 tried to add from different maps, 
paths or scalars", - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr unknown vs unknown (gt)", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 1, 4), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_JMP_IMM(BPF_JA, 0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 tried to add from different maps, paths or scalars", - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr from different maps", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: value_ptr -= known scalar from different maps", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_16b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 min value is outside of the allowed memory range", - .retval = 1, -}, -{ - "map access: known scalar += value_ptr from different maps, but same value properties", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, - offsetof(struct __sk_buff, len)), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 1, 3), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), - BPF_LD_MAP_FD(BPF_REG_1, 0), - 
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_hash_48b = { 5 }, - .fixup_map_array_48b = { 8 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: mixing value pointer and scalar, 1", - .insns = { - // load map value pointer into r0 and r2 - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - // load some number from the map into r1 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - // depending on r1, branch: - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 3), - // branch A - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_JMP_A(2), - // branch B - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0x100000), - // common instruction - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - // depending on r1, branch: - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - // branch A - BPF_JMP_A(4), - // branch B - BPF_MOV64_IMM(BPF_REG_0, 0x13371337), - // verifier follows fall-through - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - // fake-dead code; targeted from branch A to - // prevent dead code sanitization - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 1 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R2 pointer comparison prohibited", - .retval = 0, -}, -{ - "map access: mixing value pointer and scalar, 2", - .insns = { - // load map value pointer into r0 and r2 - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - // load some number from the map into r1 - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - // depending on r1, branch: - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - // branch A - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0x100000), - BPF_JMP_A(2), - // branch B - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 0), - // common instruction - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - // depending on r1, branch: - BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), - // branch A - BPF_JMP_A(4), - // branch B - BPF_MOV64_IMM(BPF_REG_0, 0x13371337), - // verifier follows fall-through - BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0x100000, 2), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - // fake-dead code; targeted from branch A to - // prevent dead code sanitization, rejected - // via branch B however - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 1 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 invalid mem access 'scalar'", - .retval = 0, -}, -{ - "sanitation: alu with different scalars 1", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LD_MAP_FD(BPF_REG_ARG1, 0), - BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -16), - 
BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), - BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), - BPF_MOV64_IMM(BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_3, 0x100000), - BPF_JMP_A(2), - BPF_MOV64_IMM(BPF_REG_2, 42), - BPF_MOV64_IMM(BPF_REG_3, 0x100001), - BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 1 }, - .result = ACCEPT, - .retval = 0x100000, -}, -{ - "sanitation: alu with different scalars 2", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_ST_MEM(BPF_DW, BPF_REG_FP, -16, 0), - BPF_EMIT_CALL(BPF_FUNC_map_delete_elem), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), - BPF_EMIT_CALL(BPF_FUNC_map_delete_elem), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_6), - BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_8), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 1 }, - .result = ACCEPT, - .retval = -EINVAL * 2, -}, -{ - "sanitation: alu with different scalars 3", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, EINVAL), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, -1), - BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_0, EINVAL), - BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, -1), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_8, BPF_REG_6), - BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_7), - BPF_MOV64_REG(BPF_REG_0, BPF_REG_8), - BPF_EXIT_INSN(), - }, - .result = ACCEPT, - .retval = -EINVAL * 2, -}, -{ - "map access: value_ptr += known scalar, upper oob arith, test 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 48), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, -}, -{ - "map access: value_ptr += known scalar, upper oob arith, test 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 49), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, -}, -{ - "map access: value_ptr += known scalar, upper oob arith, test 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: value_ptr -= known scalar, lower oob arith, test 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 48), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 min value is outside of the allowed memory range", - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", -}, -{ - "map access: value_ptr -= known scalar, lower oob arith, test 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 48), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, -}, -{ - "map access: value_ptr -= known scalar, lower oob arith, test 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 47), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: known scalar += value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: value_ptr += known scalar, 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - 
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: value_ptr += known scalar, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 49), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "invalid access to map value", -}, -{ - "map access: value_ptr += known scalar, 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "invalid access to map value", -}, -{ - "map access: value_ptr += known scalar, 4", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_MOV64_IMM(BPF_REG_1, 5), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, -2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: value_ptr += known scalar, 5", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, (6 + 1) * sizeof(int)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 0xabcdef12, -}, -{ - "map access: value_ptr += known scalar, 6", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_1, (3 + 1) * sizeof(int)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 3 * sizeof(int)), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, 
BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 0xabcdef12, -}, -{ - "map access: value_ptr += N, value_ptr -= N known scalar", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV32_IMM(BPF_REG_1, 0x12345678), - BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), - BPF_MOV64_IMM(BPF_REG_1, 2), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 0x12345678, -}, -{ - "map access: unknown scalar += value_ptr, 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: unknown scalar += value_ptr, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 0xabcdef12, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "map access: unknown scalar += value_ptr, 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_MOV64_IMM(BPF_REG_1, -1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), - BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 0xabcdef12, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "map access: unknown scalar += value_ptr, 4", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), - BPF_MOV64_IMM(BPF_REG_1, 19), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), - 
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R1 max value is outside of the allowed memory range", - .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range", - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "map access: value_ptr += unknown scalar, 1", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: value_ptr += unknown scalar, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 31), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 0xabcdef12, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "map access: value_ptr += unknown scalar, 3", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), - BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 8), - BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 16), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 1), - BPF_ALU64_IMM(BPF_OR, BPF_REG_3, 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_IMM(BPF_REG_0, 2), - BPF_JMP_IMM(BPF_JA, 0, 0, -3), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: value_ptr += value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 pointer += pointer prohibited", -}, -{ - "map access: known scalar -= value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_SUB, 
BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R1 tried to subtract pointer from scalar", -}, -{ - "map access: value_ptr -= known scalar", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), - BPF_MOV64_IMM(BPF_REG_1, 4), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 min value is outside of the allowed memory range", -}, -{ - "map access: value_ptr -= known scalar, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), - BPF_MOV64_IMM(BPF_REG_1, 6), - BPF_MOV64_IMM(BPF_REG_2, 4), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .retval = 1, -}, -{ - "map access: unknown scalar -= value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R1 tried to subtract pointer from scalar", -}, -{ - "map access: value_ptr -= unknown scalar", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 min value is negative", -}, -{ - "map access: value_ptr -= unknown scalar, 2", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf), - BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - 
BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", - .retval = 1, -}, -{ - "map access: value_ptr -= value_ptr", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), - BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0), - BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), - BPF_MOV64_IMM(BPF_REG_0, 1), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 3 }, - .result = REJECT, - .errstr = "R0 invalid mem access 'scalar'", - .errstr_unpriv = "R0 pointer -= pointer prohibited", -}, -{ - "map access: trying to leak tainted dst reg", - .insns = { - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), - BPF_EXIT_INSN(), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV32_IMM(BPF_REG_1, 0xFFFFFFFF), - BPF_MOV32_REG(BPF_REG_1, BPF_REG_1), - BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), - BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map_array_48b = { 4 }, - .result = REJECT, - .errstr = "math between map_value pointer and 4294967295 is not allowed", -}, -{ - "32bit pkt_ptr -= scalar", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 40), - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_8, 2), - BPF_ALU32_REG(BPF_MOV, BPF_REG_4, BPF_REG_7), - BPF_ALU32_REG(BPF_SUB, BPF_REG_6, BPF_REG_4), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -{ - "32bit scalar -= pkt_ptr", - .insns = { - BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, - offsetof(struct __sk_buff, data_end)), - BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, - offsetof(struct __sk_buff, data)), - BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 40), - BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_8, 2), - BPF_ALU32_REG(BPF_MOV, BPF_REG_4, BPF_REG_6), - BPF_ALU32_REG(BPF_SUB, BPF_REG_4, BPF_REG_7), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .result = ACCEPT, - .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, -}, -- cgit v1.2.3-70-g09d2 From 7041101ff6c3073fd8f2e99920f535b111c929cb Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Thu, 20 Apr 2023 16:59:46 +0200 Subject: net/sched: sch_fq: fix integer overflow of "credit" if sch_fq is configured with "initial quantum" having values greater than INT_MAX, the first assignment of "credit" does signed integer overflow to a very negative value. In this situation, the syzkaller script provided by Cristoph triggers the CPU soft-lockup warning even with few sockets. It's not an infinite loop, but "credit" wasn't probably meant to be minus 2Gb for each new flow. Capping "initial quantum" to INT_MAX proved to fix the issue. 
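For illustration only (a user-space sketch, not the kernel code; the variable names merely mirror sch_fq's u32 "initial quantum" and the per-flow signed int "credit"), the problematic conversion looks like this:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int initial_quantum = 0x80000000u;	/* larger than INT_MAX, accepted before the fix */
	int credit = initial_quantum;			/* value-changing conversion: ends up hugely negative */

	printf("initial_quantum=%u -> credit=%d (INT_MAX=%d)\n",
	       initial_quantum, credit, INT_MAX);
	return credit < 0 ? 0 : 1;
}

With the fq_policy range added below, such a value is rejected at netlink validation time; the new tdc test "10f7" checks exactly that by expecting the qdisc add command to fail.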
v2: validation of "initial quantum" is done in fq_policy, instead of open coding in fq_change() _ suggested by Jakub Kicinski Reported-by: Christoph Paasch Link: https://github.com/multipath-tcp/mptcp_net-next/issues/377 Fixes: afe4fd062416 ("pkt_sched: fq: Fair Queue packet scheduler") Reviewed-by: Eric Dumazet Signed-off-by: Davide Caratti Link: https://lore.kernel.org/r/7b3a3c7e36d03068707a021760a194a8eb5ad41a.1682002300.git.dcaratti@redhat.com Signed-off-by: Jakub Kicinski --- net/sched/sch_fq.c | 6 +++++- .../selftests/tc-testing/tc-tests/qdiscs/fq.json | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) (limited to 'tools/testing') diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 48d14fb90ba0..f59a2cb2c803 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -779,13 +779,17 @@ static int fq_resize(struct Qdisc *sch, u32 log) return 0; } +static struct netlink_range_validation iq_range = { + .max = INT_MAX, +}; + static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = { [TCA_FQ_UNSPEC] = { .strict_start_type = TCA_FQ_TIMER_SLACK }, [TCA_FQ_PLIMIT] = { .type = NLA_U32 }, [TCA_FQ_FLOW_PLIMIT] = { .type = NLA_U32 }, [TCA_FQ_QUANTUM] = { .type = NLA_U32 }, - [TCA_FQ_INITIAL_QUANTUM] = { .type = NLA_U32 }, + [TCA_FQ_INITIAL_QUANTUM] = NLA_POLICY_FULL_RANGE(NLA_U32, &iq_range), [TCA_FQ_RATE_ENABLE] = { .type = NLA_U32 }, [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 }, [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 }, diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json index 8acb904d1419..3593fb8f79ad 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json +++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json @@ -114,6 +114,28 @@ "$IP link del dev $DUMMY type dummy" ] }, + { + "id": "10f7", + "name": "Create FQ with invalid initial_quantum setting", + "category": [ + "qdisc", + "fq" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true" + ], + "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq initial_quantum 0x80000000", + "expExitCode": "2", + "verifyCmd": "$TC qdisc show dev $DUMMY", + "matchPattern": "qdisc fq 1: root.*initial_quantum 2048Mb", + "matchCount": "0", + "teardown": [ + "$IP link del dev $DUMMY type dummy" + ] + }, { "id": "9398", "name": "Create FQ with maxrate setting", -- cgit v1.2.3-70-g09d2 From 35150203e30b52d657165e325e3abc3b29c2086d Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Fri, 21 Apr 2023 23:45:14 +0300 Subject: selftests/bpf: verifier/prevent_map_lookup converted to inline assembly Test verifier/prevent_map_lookup automatically converted to use inline assembly. This was a part of a series [1] but could not be applied becuase another patch from a series had to be witheld. 
[1] https://lore.kernel.org/bpf/20230421174234.2391278-1-eddyz87@gmail.com/ Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20230421204514.2450907-1-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 2 + .../bpf/progs/verifier_prevent_map_lookup.c | 61 ++++++++++++++++++++++ .../selftests/bpf/verifier/prevent_map_lookup.c | 29 ---------- 3 files changed, 63 insertions(+), 29 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c delete mode 100644 tools/testing/selftests/bpf/verifier/prevent_map_lookup.c (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index c8bab8b1a6a4..2497716ee379 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -42,6 +42,7 @@ #include "verifier_meta_access.skel.h" #include "verifier_netfilter_ctx.skel.h" #include "verifier_netfilter_retcode.skel.h" +#include "verifier_prevent_map_lookup.skel.h" #include "verifier_raw_stack.skel.h" #include "verifier_raw_tp_writable.skel.h" #include "verifier_reg_equal.skel.h" @@ -140,6 +141,7 @@ void test_verifier_masking(void) { RUN(verifier_masking); } void test_verifier_meta_access(void) { RUN(verifier_meta_access); } void test_verifier_netfilter_ctx(void) { RUN(verifier_netfilter_ctx); } void test_verifier_netfilter_retcode(void) { RUN(verifier_netfilter_retcode); } +void test_verifier_prevent_map_lookup(void) { RUN(verifier_prevent_map_lookup); } void test_verifier_raw_stack(void) { RUN(verifier_raw_stack); } void test_verifier_raw_tp_writable(void) { RUN(verifier_raw_tp_writable); } void test_verifier_reg_equal(void) { RUN(verifier_reg_equal); } diff --git a/tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c b/tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c new file mode 100644 index 000000000000..8d27c780996f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Converted from tools/testing/selftests/bpf/verifier/prevent_map_lookup.c */ + +#include +#include +#include "bpf_misc.h" + +struct { + __uint(type, BPF_MAP_TYPE_STACK_TRACE); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} map_stacktrace SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 8); + __uint(key_size, sizeof(int)); + __array(values, void (void)); +} map_prog2_socket SEC(".maps"); + +SEC("perf_event") +__description("prevent map lookup in stack trace") +__failure __msg("cannot pass map_type 7 into func bpf_map_lookup_elem") +__naked void map_lookup_in_stack_trace(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_stacktrace] ll; \ + call %[bpf_map_lookup_elem]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_stacktrace) + : __clobber_all); +} + +SEC("socket") +__description("prevent map lookup in prog array") +__failure __msg("cannot pass map_type 3 into func bpf_map_lookup_elem") +__failure_unpriv +__naked void map_lookup_in_prog_array(void) +{ + asm volatile (" \ + r1 = 0; \ + *(u64*)(r10 - 8) = r1; \ + r2 = r10; \ + r2 += -8; \ + r1 = %[map_prog2_socket] ll; \ + call %[bpf_map_lookup_elem]; \ + exit; \ +" : + : __imm(bpf_map_lookup_elem), + __imm_addr(map_prog2_socket) + : __clobber_all); +} + +char _license[] SEC("license") = 
"GPL"; diff --git a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c deleted file mode 100644 index fc4e301260f6..000000000000 --- a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c +++ /dev/null @@ -1,29 +0,0 @@ -{ - "prevent map lookup in stack trace", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_map_stacktrace = { 3 }, - .result = REJECT, - .errstr = "cannot pass map_type 7 into func bpf_map_lookup_elem", - .prog_type = BPF_PROG_TYPE_PERF_EVENT, -}, -{ - "prevent map lookup in prog array", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), - BPF_EXIT_INSN(), - }, - .fixup_prog2 = { 3 }, - .result = REJECT, - .errstr = "cannot pass map_type 3 into func bpf_map_lookup_elem", -}, -- cgit v1.2.3-70-g09d2 From 7eb060a51a3ba44acefafefd242dcbf12dd91fb9 Mon Sep 17 00:00:00 2001 From: Pedro Tammela Date: Sat, 22 Apr 2023 12:56:12 -0300 Subject: selftests: tc-testing: add more tests for sch_qfq The QFQ qdisc class has parameter bounds that are not being checked for correctness. Acked-by: Jamal Hadi Salim Signed-off-by: Pedro Tammela Signed-off-by: David S. Miller --- .../selftests/tc-testing/tc-tests/qdiscs/qfq.json | 72 ++++++++++++++++++++++ 1 file changed, 72 insertions(+) (limited to 'tools/testing') diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/qfq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/qfq.json index 330f1a25e0ab..147899a868d3 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/qfq.json +++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/qfq.json @@ -46,6 +46,30 @@ "$IP link del dev $DUMMY type dummy" ] }, + { + "id": "d364", + "name": "Test QFQ with max class weight setting", + "category": [ + "qdisc", + "qfq" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY handle 1: root qfq" + ], + "cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 qfq weight 9999", + "expExitCode": "2", + "verifyCmd": "$TC class show dev $DUMMY", + "matchPattern": "class qfq 1:1 root weight 9999 maxpkt", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DUMMY handle 1: root", + "$IP link del dev $DUMMY type dummy" + ] + }, { "id": "8452", "name": "Create QFQ with class maxpkt setting", @@ -70,6 +94,54 @@ "$IP link del dev $DUMMY type dummy" ] }, + { + "id": "22df", + "name": "Test QFQ class maxpkt setting lower bound", + "category": [ + "qdisc", + "qfq" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY handle 1: root qfq" + ], + "cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 qfq maxpkt 128", + "expExitCode": "2", + "verifyCmd": "$TC class show dev $DUMMY", + "matchPattern": "class qfq 1:1 root weight 1 maxpkt 128", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DUMMY handle 1: root", + "$IP link del dev $DUMMY type dummy" + ] + }, + { + "id": "92ee", + "name": "Test QFQ class maxpkt setting upper bound", + "category": [ + "qdisc", + "qfq" + ], + 
"plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "$IP link add dev $DUMMY type dummy || /bin/true", + "$TC qdisc add dev $DUMMY handle 1: root qfq" + ], + "cmdUnderTest": "$TC class add dev $DUMMY parent 1: classid 1:1 qfq maxpkt 99999", + "expExitCode": "2", + "verifyCmd": "$TC class show dev $DUMMY", + "matchPattern": "class qfq 1:1 root weight 1 maxpkt 99999", + "matchCount": "0", + "teardown": [ + "$TC qdisc del dev $DUMMY handle 1: root", + "$IP link del dev $DUMMY type dummy" + ] + }, { "id": "d920", "name": "Create QFQ with multiple class setting", -- cgit v1.2.3-70-g09d2 From 7deca5eae83389ca40ac1b1bde96e4af17cca84f Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Mon, 24 Apr 2023 13:43:21 -0700 Subject: bpf: Disable bpf_refcount_acquire kfunc calls until race conditions are fixed As reported by Kumar in [0], the shared ownership implementation for BPF programs has some race conditions which need to be addressed before it can safely be used. This patch does so in a minimal way instead of ripping out shared ownership entirely, as proper fixes for the issues raised will follow ASAP, at which point this patch's commit can be reverted to re-enable shared ownership. The patch removes the ability to call bpf_refcount_acquire_impl from BPF programs. Programs can only bump refcount and obtain a new owning reference using this kfunc, so removing the ability to call it effectively disables shared ownership. Instead of changing success / failure expectations for bpf_refcount-related selftests, this patch just disables them from running for now. [0]: https://lore.kernel.org/bpf/d7hyspcow5wtjcmw4fugdgyp3fwhljwuscp3xyut5qnwivyeru@ysdq543otzv2/ Reported-by: Kumar Kartikeya Dwivedi Signed-off-by: Dave Marchevsky Link: https://lore.kernel.org/r/20230424204321.2680232-1-davemarchevsky@fb.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 5 ++++- tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c | 2 -- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'tools/testing') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 0d73139ee4d8..5c4aa393f65a 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -10509,7 +10509,10 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ verbose(env, "arg#%d doesn't point to a type with bpf_refcount field\n", i); return -EINVAL; } - + if (rec->refcount_off >= 0) { + verbose(env, "bpf_refcount_acquire calls are disabled for now\n"); + return -EINVAL; + } meta->arg_refcount_acquire.btf = reg->btf; meta->arg_refcount_acquire.btf_id = reg->btf_id; break; diff --git a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c index 2ab23832062d..595cbf92bff5 100644 --- a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c +++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c @@ -9,10 +9,8 @@ void test_refcounted_kptr(void) { - RUN_TESTS(refcounted_kptr); } void test_refcounted_kptr_fail(void) { - RUN_TESTS(refcounted_kptr_fail); } -- cgit v1.2.3-70-g09d2 From be7dbd275dc6b911a5b9a22c4f9cb71b2c7fd847 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 24 Apr 2023 16:51:28 -0700 Subject: selftests/bpf: avoid mark_all_scalars_precise() trigger in one of iter tests iter_pass_iter_ptr_to_subprog subtest is relying on actual array size being passed as subprog parameter. 
This combined with recent fixes to precision tracking in conditional jumps ([0]) is now causing verifier to backtrack all the way to the point where sum() and fill() subprogs are called, at which point precision backtrack bails out and forces all the states to have precise SCALAR registers. This in turn causes each possible value of i within fill() and sum() subprogs to cause a different non-equivalent state, preventing iterator code to converge. For now, change the test to assume fixed size of passed in array. Once BPF verifier supports precision tracking across subprogram calls, these changes will be reverted as unnecessary. [0] 71b547f56124 ("bpf: Fix incorrect verifier pruning due to missing register precision taints") Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20230424235128.1941726-1-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/iters.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) (limited to 'tools/testing') diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index 6b9b3c56f009..be16143ae292 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -651,25 +651,29 @@ int iter_stack_array_loop(const void *ctx) return sum; } -static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul) +#define ARR_SZ 16 + +static __noinline void fill(struct bpf_iter_num *it, int *arr, int mul) { - int *t, i; + int *t; + __u64 i; while ((t = bpf_iter_num_next(it))) { i = *t; - if (i >= n) + if (i >= ARR_SZ) break; arr[i] = i * mul; } } -static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n) +static __noinline int sum(struct bpf_iter_num *it, int *arr) { - int *t, i, sum = 0;; + int *t, sum = 0;; + __u64 i; while ((t = bpf_iter_num_next(it))) { i = *t; - if (i >= n) + if (i >= ARR_SZ) break; sum += arr[i]; } @@ -681,7 +685,7 @@ SEC("raw_tp") __success int iter_pass_iter_ptr_to_subprog(const void *ctx) { - int arr1[16], arr2[32]; + int arr1[ARR_SZ], arr2[ARR_SZ]; struct bpf_iter_num it; int n, sum1, sum2; @@ -690,25 +694,25 @@ int iter_pass_iter_ptr_to_subprog(const void *ctx) /* fill arr1 */ n = ARRAY_SIZE(arr1); bpf_iter_num_new(&it, 0, n); - fill(&it, arr1, n, 2); + fill(&it, arr1, 2); bpf_iter_num_destroy(&it); /* fill arr2 */ n = ARRAY_SIZE(arr2); bpf_iter_num_new(&it, 0, n); - fill(&it, arr2, n, 10); + fill(&it, arr2, 10); bpf_iter_num_destroy(&it); /* sum arr1 */ n = ARRAY_SIZE(arr1); bpf_iter_num_new(&it, 0, n); - sum1 = sum(&it, arr1, n); + sum1 = sum(&it, arr1); bpf_iter_num_destroy(&it); /* sum arr2 */ n = ARRAY_SIZE(arr2); bpf_iter_num_new(&it, 0, n); - sum2 = sum(&it, arr2, n); + sum2 = sum(&it, arr2); bpf_iter_num_destroy(&it); bpf_printk("sum1=%d, sum2=%d", sum1, sum2); -- cgit v1.2.3-70-g09d2
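To make the workaround in the last patch concrete, here is a stripped-down sketch of the pattern (not part of the patch; the function and program names are made up for the sketch, it assumes the usual selftests/bpf build environment, and the kfunc declarations follow the signatures already used by iters.c). Bounding the induction variable against the compile-time ARR_SZ keeps the loop states equivalent, so the open-coded iterator converges even when the iterator pointer is passed to a subprogram:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym;
extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym;
extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym;

#define ARR_SZ 16

int arr[ARR_SZ];

/* bound i against a constant, not against a size passed in as an argument */
static __noinline void fill_const_bound(struct bpf_iter_num *it, int *a, int mul)
{
	int *t;
	__u64 i;

	while ((t = bpf_iter_num_next(it))) {
		i = *t;
		if (i >= ARR_SZ)
			break;
		a[i] = i * mul;
	}
}

SEC("raw_tp")
int iter_fill_sketch(const void *ctx)
{
	struct bpf_iter_num it;

	bpf_iter_num_new(&it, 0, ARR_SZ);
	fill_const_bound(&it, arr, 2);
	bpf_iter_num_destroy(&it);
	return 0;
}

char _license[] SEC("license") = "GPL";

Passing the bound as a separate subprog argument (as the test did before this change) should become acceptable again once the verifier tracks precision across subprogram calls, at which point, as the commit message notes, these changes can be reverted.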