From 8618f5ffba4d381610f6bb4c472a6148c2bfde96 Mon Sep 17 00:00:00 2001 From: Thomas Weißschuh Date: Mon, 25 Nov 2024 20:53:07 +0100 Subject: bpf, lsm: Remove getlsmprop hooks BTF IDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These hooks are not useful for BPF LSM currently. Furthermore a recent renaming introduced build warnings: BTFIDS vmlinux WARN: resolve_btfids: unresolved symbol bpf_lsm_task_getsecid_obj WARN: resolve_btfids: unresolved symbol bpf_lsm_current_getsecid_subj Link: https://lore.kernel.org/lkml/20241123-bpf_lsm_task_getsecid_obj-v1-1-0d0f94649e05@weissschuh.net/ Fixes: 37f670aacd48 ("lsm: use lsm_prop in security_current_getsecid") Signed-off-by: Thomas Weißschuh Link: https://lore.kernel.org/r/20241125-bpf_lsm_task_getsecid_obj-v2-1-c8395bde84e0@weissschuh.net Signed-off-by: Alexei Starovoitov --- kernel/bpf/bpf_lsm.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c index 3bc61628ab25..967492b65185 100644 --- a/kernel/bpf/bpf_lsm.c +++ b/kernel/bpf/bpf_lsm.c @@ -375,8 +375,6 @@ BTF_ID(func, bpf_lsm_socket_socketpair) BTF_ID(func, bpf_lsm_syslog) BTF_ID(func, bpf_lsm_task_alloc) -BTF_ID(func, bpf_lsm_current_getsecid_subj) -BTF_ID(func, bpf_lsm_task_getsecid_obj) BTF_ID(func, bpf_lsm_task_prctl) BTF_ID(func, bpf_lsm_task_setscheduler) BTF_ID(func, bpf_lsm_task_to_inode) -- cgit v1.2.3-70-g09d2 From 9f0fc98145218ff8f50d8cfa3b393785056c53e1 Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Mon, 18 Nov 2024 22:03:41 +0100 Subject: bpf, vsock: Fix poll() missing a queue When a verdict program simply passes a packet without redirection, sk_msg is enqueued on sk_psock::ingress_msg. Add a missing check to poll(). Fixes: 634f1a7110b4 ("vsock: support sockmap") Signed-off-by: Michal Luczaj Reviewed-by: Stefano Garzarella Reviewed-by: Luigi Leonardi Link: https://lore.kernel.org/r/20241118-vsock-bpf-poll-close-v1-1-f1b9669cacdc@rbox.co Signed-off-by: Alexei Starovoitov Acked-by: John Fastabend --- net/vmw_vsock/af_vsock.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 25b28b1434f5..725da7203f2d 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1054,6 +1054,9 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock, mask |= EPOLLRDHUP; } + if (sk_is_readable(sk)) + mask |= EPOLLIN | EPOLLRDNORM; + if (sock->type == SOCK_DGRAM) { /* For datagram sockets we can read if there is something in * the queue and write as long as the socket isn't shutdown for -- cgit v1.2.3-70-g09d2 From 9c2a2a45136de428b73907195a4a99eb78dc3aca Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Mon, 18 Nov 2024 22:03:42 +0100 Subject: selftest/bpf: Add test for af_vsock poll() Verify that vsock's poll() notices when sk_psock::ingress_msg isn't empty. 
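From user space, the expected behaviour boils down to poll() reporting readability once the verdict program has queued the byte on ingress_msg. A minimal helper sketch (standalone, not part of the selftest; fd and timeout names are placeholders):

  #include <poll.h>

  /* Return 0 once fd reports EPOLLIN, -1 on timeout or error. */
  static int wait_readable(int fd, int timeout_ms)
  {
          struct pollfd pfd = { .fd = fd, .events = POLLIN };

          if (poll(&pfd, 1, timeout_ms) <= 0)
                  return -1;
          return (pfd.revents & POLLIN) ? 0 : -1;
  }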
Signed-off-by: Michal Luczaj Acked-by: Stefano Garzarella Link: https://lore.kernel.org/r/20241118-vsock-bpf-poll-close-v1-2-f1b9669cacdc@rbox.co Signed-off-by: Alexei Starovoitov Acked-by: John Fastabend --- .../selftests/bpf/prog_tests/sockmap_basic.c | 46 ++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c index a2041f8e32eb..6a9aab2d051a 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -937,6 +937,50 @@ out: test_sockmap_pass_prog__destroy(skel); } +static void test_sockmap_skb_verdict_vsock_poll(void) +{ + struct test_sockmap_pass_prog *skel; + int err, map, conn, peer; + struct bpf_program *prog; + struct bpf_link *link; + char buf = 'x'; + int zero = 0; + + skel = test_sockmap_pass_prog__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + + if (create_pair(AF_VSOCK, SOCK_STREAM, &conn, &peer)) + goto destroy; + + prog = skel->progs.prog_skb_verdict; + map = bpf_map__fd(skel->maps.sock_map_rx); + link = bpf_program__attach_sockmap(prog, map); + if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap")) + goto close; + + err = bpf_map_update_elem(map, &zero, &conn, BPF_ANY); + if (!ASSERT_OK(err, "bpf_map_update_elem")) + goto detach; + + if (xsend(peer, &buf, 1, 0) != 1) + goto detach; + + err = poll_read(conn, IO_TIMEOUT_SEC); + if (!ASSERT_OK(err, "poll")) + goto detach; + + if (xrecv_nonblock(conn, &buf, 1, 0) != 1) + FAIL("xrecv_nonblock"); +detach: + bpf_link__detach(link); +close: + xclose(conn); + xclose(peer); +destroy: + test_sockmap_pass_prog__destroy(skel); +} + void test_sockmap_basic(void) { if (test__start_subtest("sockmap create_update_free")) @@ -997,4 +1041,6 @@ void test_sockmap_basic(void) test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKMAP); if (test__start_subtest("sockhash sk_msg attach sockhash helpers with link")) test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKHASH); + if (test__start_subtest("sockmap skb_verdict vsock poll")) + test_sockmap_skb_verdict_vsock_poll(); } -- cgit v1.2.3-70-g09d2 From 135ffc7becc82cfb84936ae133da7969220b43b2 Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Mon, 18 Nov 2024 22:03:43 +0100 Subject: bpf, vsock: Invoke proto::close on close() vsock defines a BPF callback to be invoked when close() is called. However, this callback is never actually executed. As a result, a closed vsock socket is not automatically removed from the sockmap/sockhash. Introduce a dummy vsock_close() and make vsock_release() call proto::close. Note: changes in __vsock_release() look messy, but it's only due to indent level reduction and variables xmas tree reorder. 
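From user space the effect can be sketched as follows (illustrative libbpf snippet, not part of this patch; the fd names are made up): once ->close is invoked on release, a closed vsock socket no longer occupies its sockmap slot, so re-inserting at the same key with BPF_NOEXIST succeeds instead of failing with EEXIST.

  #include <bpf/bpf.h>
  #include <unistd.h>

  static int reuse_slot_after_close(int map_fd, int sock_a, int sock_b)
  {
          const int key = 0;

          if (bpf_map_update_elem(map_fd, &key, &sock_a, BPF_NOEXIST))
                  return -1;
          close(sock_a);          /* proto::close now removes sock_a from the map */
          return bpf_map_update_elem(map_fd, &key, &sock_b, BPF_NOEXIST);
  }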
Fixes: 634f1a7110b4 ("vsock: support sockmap") Signed-off-by: Michal Luczaj Reviewed-by: Stefano Garzarella Reviewed-by: Luigi Leonardi Link: https://lore.kernel.org/r/20241118-vsock-bpf-poll-close-v1-3-f1b9669cacdc@rbox.co Signed-off-by: Alexei Starovoitov Acked-by: John Fastabend --- net/vmw_vsock/af_vsock.c | 67 +++++++++++++++++++++++++++++------------------- 1 file changed, 40 insertions(+), 27 deletions(-) diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 725da7203f2d..5cf8109f672a 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -117,12 +117,14 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr); static void vsock_sk_destruct(struct sock *sk); static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); +static void vsock_close(struct sock *sk, long timeout); /* Protocol family. */ struct proto vsock_proto = { .name = "AF_VSOCK", .owner = THIS_MODULE, .obj_size = sizeof(struct vsock_sock), + .close = vsock_close, #ifdef CONFIG_BPF_SYSCALL .psock_update_sk_prot = vsock_bpf_update_proto, #endif @@ -797,39 +799,37 @@ static bool sock_type_connectible(u16 type) static void __vsock_release(struct sock *sk, int level) { - if (sk) { - struct sock *pending; - struct vsock_sock *vsk; - - vsk = vsock_sk(sk); - pending = NULL; /* Compiler warning. */ + struct vsock_sock *vsk; + struct sock *pending; - /* When "level" is SINGLE_DEPTH_NESTING, use the nested - * version to avoid the warning "possible recursive locking - * detected". When "level" is 0, lock_sock_nested(sk, level) - * is the same as lock_sock(sk). - */ - lock_sock_nested(sk, level); + vsk = vsock_sk(sk); + pending = NULL; /* Compiler warning. */ - if (vsk->transport) - vsk->transport->release(vsk); - else if (sock_type_connectible(sk->sk_type)) - vsock_remove_sock(vsk); + /* When "level" is SINGLE_DEPTH_NESTING, use the nested + * version to avoid the warning "possible recursive locking + * detected". When "level" is 0, lock_sock_nested(sk, level) + * is the same as lock_sock(sk). + */ + lock_sock_nested(sk, level); - sock_orphan(sk); - sk->sk_shutdown = SHUTDOWN_MASK; + if (vsk->transport) + vsk->transport->release(vsk); + else if (sock_type_connectible(sk->sk_type)) + vsock_remove_sock(vsk); - skb_queue_purge(&sk->sk_receive_queue); + sock_orphan(sk); + sk->sk_shutdown = SHUTDOWN_MASK; - /* Clean up any sockets that never were accepted. */ - while ((pending = vsock_dequeue_accept(sk)) != NULL) { - __vsock_release(pending, SINGLE_DEPTH_NESTING); - sock_put(pending); - } + skb_queue_purge(&sk->sk_receive_queue); - release_sock(sk); - sock_put(sk); + /* Clean up any sockets that never were accepted. */ + while ((pending = vsock_dequeue_accept(sk)) != NULL) { + __vsock_release(pending, SINGLE_DEPTH_NESTING); + sock_put(pending); } + + release_sock(sk); + sock_put(sk); } static void vsock_sk_destruct(struct sock *sk) @@ -901,9 +901,22 @@ void vsock_data_ready(struct sock *sk) } EXPORT_SYMBOL_GPL(vsock_data_ready); +/* Dummy callback required by sockmap. + * See unconditional call of saved_close() in sock_map_close(). 
+ */ +static void vsock_close(struct sock *sk, long timeout) +{ +} + static int vsock_release(struct socket *sock) { - __vsock_release(sock->sk, 0); + struct sock *sk = sock->sk; + + if (!sk) + return 0; + + sk->sk_prot->close(sk, 0); + __vsock_release(sk, 0); sock->sk = NULL; sock->state = SS_FREE; -- cgit v1.2.3-70-g09d2 From 515745445e92500e8916ffe29cc4893ad97517b8 Mon Sep 17 00:00:00 2001 From: Michal Luczaj Date: Mon, 18 Nov 2024 22:03:44 +0100 Subject: selftest/bpf: Add test for vsock removal from sockmap on close() Make sure the proto::close callback gets invoked on vsock release. Signed-off-by: Michal Luczaj Acked-by: Stefano Garzarella Link: https://lore.kernel.org/r/20241118-vsock-bpf-poll-close-v1-4-f1b9669cacdc@rbox.co Signed-off-by: Alexei Starovoitov Acked-by: John Fastabend --- .../selftests/bpf/prog_tests/sockmap_basic.c | 31 ++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c index 6a9aab2d051a..fdff0652d7ef 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c @@ -108,6 +108,35 @@ out: close(s); } +static void test_sockmap_vsock_delete_on_close(void) +{ + int err, c, p, map; + const int zero = 0; + + err = create_pair(AF_VSOCK, SOCK_STREAM, &c, &p); + if (!ASSERT_OK(err, "create_pair(AF_VSOCK)")) + return; + + map = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, NULL, sizeof(int), + sizeof(int), 1, NULL); + if (!ASSERT_GE(map, 0, "bpf_map_create")) { + close(c); + goto out; + } + + err = bpf_map_update_elem(map, &zero, &c, BPF_NOEXIST); + close(c); + if (!ASSERT_OK(err, "bpf_map_update")) + goto out; + + err = bpf_map_update_elem(map, &zero, &p, BPF_NOEXIST); + ASSERT_OK(err, "after close(), bpf_map_update"); + +out: + close(p); + close(map); +} + static void test_skmsg_helpers(enum bpf_map_type map_type) { struct test_skmsg_load_helpers *skel; @@ -987,6 +1016,8 @@ void test_sockmap_basic(void) test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP); if (test__start_subtest("sockhash create_update_free")) test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH); + if (test__start_subtest("sockmap vsock delete on close")) + test_sockmap_vsock_delete_on_close(); if (test__start_subtest("sockmap sk_msg load helpers")) test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP); if (test__start_subtest("sockhash sk_msg load helpers")) -- cgit v1.2.3-70-g09d2 From 32cd3db7de97c0c7a018756ce66244342fd583f0 Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Fri, 22 Nov 2024 13:10:29 +0100 Subject: xsk: fix OOB map writes when deleting elements MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Jordy says: " In the xsk_map_delete_elem function an unsigned integer (map->max_entries) is compared with a user-controlled signed integer (k). Due to implicit type conversion, a large unsigned value for map->max_entries can bypass the intended bounds check: if (k >= map->max_entries) return -EINVAL; This allows k to hold a negative value (between -2147483648 and -2), which is then used as an array index in m->xsk_map[k], which results in an out-of-bounds access. spin_lock_bh(&m->lock); map_entry = &m->xsk_map[k]; // Out-of-bounds map_entry old_xs = unrcu_pointer(xchg(map_entry, NULL)); // Oob write if (old_xs) xsk_map_sock_delete(old_xs, map_entry); spin_unlock_bh(&m->lock); The xchg operation can then be used to cause an out-of-bounds write. 
Moreover, the invalid map_entry passed to xsk_map_sock_delete can lead to further memory corruption. " It indeed results in following splat: [76612.897343] BUG: unable to handle page fault for address: ffffc8fc2e461108 [76612.904330] #PF: supervisor write access in kernel mode [76612.909639] #PF: error_code(0x0002) - not-present page [76612.914855] PGD 0 P4D 0 [76612.917431] Oops: Oops: 0002 [#1] PREEMPT SMP [76612.921859] CPU: 11 UID: 0 PID: 10318 Comm: a.out Not tainted 6.12.0-rc1+ #470 [76612.929189] Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0008.031920191559 03/19/2019 [76612.939781] RIP: 0010:xsk_map_delete_elem+0x2d/0x60 [76612.944738] Code: 00 00 41 54 55 53 48 63 2e 3b 6f 24 73 38 4c 8d a7 f8 00 00 00 48 89 fb 4c 89 e7 e8 2d bf 05 00 48 8d b4 eb 00 01 00 00 31 ff <48> 87 3e 48 85 ff 74 05 e8 16 ff ff ff 4c 89 e7 e8 3e bc 05 00 31 [76612.963774] RSP: 0018:ffffc9002e407df8 EFLAGS: 00010246 [76612.969079] RAX: 0000000000000000 RBX: ffffc9002e461000 RCX: 0000000000000000 [76612.976323] RDX: 0000000000000001 RSI: ffffc8fc2e461108 RDI: 0000000000000000 [76612.983569] RBP: ffffffff80000001 R08: 0000000000000000 R09: 0000000000000007 [76612.990812] R10: ffffc9002e407e18 R11: ffff888108a38858 R12: ffffc9002e4610f8 [76612.998060] R13: ffff888108a38858 R14: 00007ffd1ae0ac78 R15: ffffc9002e4610c0 [76613.005303] FS: 00007f80b6f59740(0000) GS:ffff8897e0ec0000(0000) knlGS:0000000000000000 [76613.013517] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [76613.019349] CR2: ffffc8fc2e461108 CR3: 000000011e3ef001 CR4: 00000000007726f0 [76613.026595] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [76613.033841] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [76613.041086] PKRU: 55555554 [76613.043842] Call Trace: [76613.046331] [76613.048468] ? __die+0x20/0x60 [76613.051581] ? page_fault_oops+0x15a/0x450 [76613.055747] ? search_extable+0x22/0x30 [76613.059649] ? search_bpf_extables+0x5f/0x80 [76613.063988] ? exc_page_fault+0xa9/0x140 [76613.067975] ? asm_exc_page_fault+0x22/0x30 [76613.072229] ? xsk_map_delete_elem+0x2d/0x60 [76613.076573] ? xsk_map_delete_elem+0x23/0x60 [76613.080914] __sys_bpf+0x19b7/0x23c0 [76613.084555] __x64_sys_bpf+0x1a/0x20 [76613.088194] do_syscall_64+0x37/0xb0 [76613.091832] entry_SYSCALL_64_after_hwframe+0x4b/0x53 [76613.096962] RIP: 0033:0x7f80b6d1e88d [76613.100592] Code: 5b 41 5c c3 66 0f 1f 84 00 00 00 00 00 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 73 b5 0f 00 f7 d8 64 89 01 48 [76613.119631] RSP: 002b:00007ffd1ae0ac68 EFLAGS: 00000206 ORIG_RAX: 0000000000000141 [76613.131330] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f80b6d1e88d [76613.142632] RDX: 0000000000000098 RSI: 00007ffd1ae0ad20 RDI: 0000000000000003 [76613.153967] RBP: 00007ffd1ae0adc0 R08: 0000000000000000 R09: 0000000000000000 [76613.166030] R10: 00007f80b6f77040 R11: 0000000000000206 R12: 00007ffd1ae0aed8 [76613.177130] R13: 000055ddf42ce1e9 R14: 000055ddf42d0d98 R15: 00007f80b6fab040 [76613.188129] Fix this by simply changing key type from int to u32. 
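The conversion hazard can be reproduced in isolation with the toy program below (standalone, not kernel code; the constants are made up). When a signed key is compared against an unsigned bound, the key is converted to unsigned, so a sufficiently large max_entries lets a negative value pass the check, and the subsequent array access still uses the negative int.

  #include <stdio.h>

  int main(void)
  {
          unsigned int max_entries = 0x80000002u; /* hypothetical huge map size  */
          unsigned int key = 0x80000001u;         /* key bytes from user space   */
          int k = (int)key;                       /* old code: signed local      */

          if (k >= max_entries)                   /* k converts to 0x80000001u   */
                  printf("signed key rejected\n");
          else
                  printf("signed key accepted, index %d (negative => OOB)\n", k);

          if (key >= max_entries)                 /* fixed code: u32 local       */
                  printf("u32 key rejected\n");
          else
                  printf("u32 key accepted, index %u (valid slot)\n", key);
          return 0;
  }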
Fixes: fbfc504a24f5 ("bpf: introduce new bpf AF_XDP map type BPF_MAP_TYPE_XSKMAP") CC: stable@vger.kernel.org Reported-by: Jordy Zomer Suggested-by: Jordy Zomer Reviewed-by: Toke Høiland-Jørgensen Acked-by: John Fastabend Signed-off-by: Maciej Fijalkowski Link: https://lore.kernel.org/r/20241122121030.716788-2-maciej.fijalkowski@intel.com Signed-off-by: Alexei Starovoitov --- net/xdp/xskmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c index e1c526f97ce3..afa457506274 100644 --- a/net/xdp/xskmap.c +++ b/net/xdp/xskmap.c @@ -224,7 +224,7 @@ static long xsk_map_delete_elem(struct bpf_map *map, void *key) struct xsk_map *m = container_of(map, struct xsk_map, map); struct xdp_sock __rcu **map_entry; struct xdp_sock *old_xs; - int k = *(u32 *)key; + u32 k = *(u32 *)key; if (k >= map->max_entries) return -EINVAL; -- cgit v1.2.3-70-g09d2 From ab244dd7cf4c291f82faacdc50b45cc0f55b674d Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Fri, 22 Nov 2024 13:10:30 +0100 Subject: bpf: fix OOB devmap writes when deleting elements MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Jordy reported issue against XSKMAP which also applies to DEVMAP - the index used for accessing map entry, due to being a signed integer, causes the OOB writes. Fix is simple as changing the type from int to u32, however, when compared to XSKMAP case, one more thing needs to be addressed. When map is released from system via dev_map_free(), we iterate through all of the entries and an iterator variable is also an int, which implies OOB accesses. Again, change it to be u32. Example splat below: [ 160.724676] BUG: unable to handle page fault for address: ffffc8fc2c001000 [ 160.731662] #PF: supervisor read access in kernel mode [ 160.736876] #PF: error_code(0x0000) - not-present page [ 160.742095] PGD 0 P4D 0 [ 160.744678] Oops: Oops: 0000 [#1] PREEMPT SMP [ 160.749106] CPU: 1 UID: 0 PID: 520 Comm: kworker/u145:12 Not tainted 6.12.0-rc1+ #487 [ 160.757050] Hardware name: Intel Corporation S2600WFT/S2600WFT, BIOS SE5C620.86B.02.01.0008.031920191559 03/19/2019 [ 160.767642] Workqueue: events_unbound bpf_map_free_deferred [ 160.773308] RIP: 0010:dev_map_free+0x77/0x170 [ 160.777735] Code: 00 e8 fd 91 ed ff e8 b8 73 ed ff 41 83 7d 18 19 74 6e 41 8b 45 24 49 8b bd f8 00 00 00 31 db 85 c0 74 48 48 63 c3 48 8d 04 c7 <48> 8b 28 48 85 ed 74 30 48 8b 7d 18 48 85 ff 74 05 e8 b3 52 fa ff [ 160.796777] RSP: 0018:ffffc9000ee1fe38 EFLAGS: 00010202 [ 160.802086] RAX: ffffc8fc2c001000 RBX: 0000000080000000 RCX: 0000000000000024 [ 160.809331] RDX: 0000000000000000 RSI: 0000000000000024 RDI: ffffc9002c001000 [ 160.816576] RBP: 0000000000000000 R08: 0000000000000023 R09: 0000000000000001 [ 160.823823] R10: 0000000000000001 R11: 00000000000ee6b2 R12: dead000000000122 [ 160.831066] R13: ffff88810c928e00 R14: ffff8881002df405 R15: 0000000000000000 [ 160.838310] FS: 0000000000000000(0000) GS:ffff8897e0c40000(0000) knlGS:0000000000000000 [ 160.846528] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 160.852357] CR2: ffffc8fc2c001000 CR3: 0000000005c32006 CR4: 00000000007726f0 [ 160.859604] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 160.866847] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 160.874092] PKRU: 55555554 [ 160.876847] Call Trace: [ 160.879338] [ 160.881477] ? __die+0x20/0x60 [ 160.884586] ? page_fault_oops+0x15a/0x450 [ 160.888746] ? search_extable+0x22/0x30 [ 160.892647] ? 
search_bpf_extables+0x5f/0x80 [ 160.896988] ? exc_page_fault+0xa9/0x140 [ 160.900973] ? asm_exc_page_fault+0x22/0x30 [ 160.905232] ? dev_map_free+0x77/0x170 [ 160.909043] ? dev_map_free+0x58/0x170 [ 160.912857] bpf_map_free_deferred+0x51/0x90 [ 160.917196] process_one_work+0x142/0x370 [ 160.921272] worker_thread+0x29e/0x3b0 [ 160.925082] ? rescuer_thread+0x4b0/0x4b0 [ 160.929157] kthread+0xd4/0x110 [ 160.932355] ? kthread_park+0x80/0x80 [ 160.936079] ret_from_fork+0x2d/0x50 [ 160.943396] ? kthread_park+0x80/0x80 [ 160.950803] ret_from_fork_asm+0x11/0x20 [ 160.958482] Fixes: 546ac1ffb70d ("bpf: add devmap, a map for storing net device references") CC: stable@vger.kernel.org Reported-by: Jordy Zomer Suggested-by: Jordy Zomer Reviewed-by: Toke Høiland-Jørgensen Acked-by: John Fastabend Signed-off-by: Maciej Fijalkowski Link: https://lore.kernel.org/r/20241122121030.716788-3-maciej.fijalkowski@intel.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/devmap.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 7878be18e9d2..3aa002a47a96 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -184,7 +184,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) static void dev_map_free(struct bpf_map *map) { struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); - int i; + u32 i; /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, * so the programs (can be more than one that used this map) were @@ -821,7 +821,7 @@ static long dev_map_delete_elem(struct bpf_map *map, void *key) { struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); struct bpf_dtab_netdev *old_dev; - int k = *(u32 *)key; + u32 k = *(u32 *)key; if (k >= map->max_entries) return -EINVAL; @@ -838,7 +838,7 @@ static long dev_map_hash_delete_elem(struct bpf_map *map, void *key) { struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); struct bpf_dtab_netdev *old_dev; - int k = *(u32 *)key; + u32 k = *(u32 *)key; unsigned long flags; int ret = -ENOENT; -- cgit v1.2.3-70-g09d2 From ac9a48a6f1610b094072b815e884e1668aea4401 Mon Sep 17 00:00:00 2001 From: Larysa Zaremba Date: Fri, 22 Nov 2024 12:29:09 +0100 Subject: xsk: always clear DMA mapping information when unmapping the pool When the umem is shared, the DMA mapping is also shared between the xsk pools, therefore it should stay valid as long as at least 1 user remains. However, the pool also keeps the copies of DMA-related information that are initialized in the same way in xp_init_dma_info(), but cleared by xp_dma_unmap() only for the last remaining pool, this causes the problems below. The first one is that the commit adbf5a42341f ("ice: remove af_xdp_zc_qps bitmap") relies on pool->dev to determine the presence of a ZC pool on a given queue, avoiding internal bookkeeping. This works perfectly fine if the UMEM is not shared, but reliably fails otherwise as stated in the linked report. The second one is pool->dma_pages which is dynamically allocated and only freed in xp_dma_unmap(), this leads to a small memory leak. kmemleak does not catch it, but by printing the allocation results after terminating the userspace program it is possible to see that all addresses except the one belonging to the last detached pool are still accessible through the kmemleak dump functionality. 
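The ownership split can be modelled roughly as below (simplified standalone sketch, not the kernel structs): every pool keeps private copies of the DMA info next to a pointer to the shared, refcounted mapping, and before this change only the last user tore its copies down.

  #include <stdlib.h>

  struct dma_map { int users; };          /* shared, refcounted mapping state   */

  struct pool {
          struct dma_map *map;            /* shared between pools of one umem   */
          void **dma_pages;               /* per-pool copy, allocated per pool  */
  };

  static void pool_dma_unmap(struct pool *p)
  {
          if (--p->map->users == 0)
                  free(p->map);           /* destroy the shared mapping last    */
          free(p->dma_pages);             /* every pool must drop its own copy  */
          p->dma_pages = NULL;
  }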
Always clear the DMA mapping information from the pool and free pool->dma_pages when unmapping the pool, so that the only difference between results of the last remaining user's call and the ones before would be the destruction of the DMA mapping. Fixes: adbf5a42341f ("ice: remove af_xdp_zc_qps bitmap") Fixes: 921b68692abb ("xsk: Enable sharing of dma mappings") Reported-by: Alasdair McWilliam Closes: https://lore.kernel.org/PA4P194MB10056F208AF221D043F57A3D86512@PA4P194MB1005.EURP194.PROD.OUTLOOK.COM Acked-by: Maciej Fijalkowski Signed-off-by: Larysa Zaremba Link: https://lore.kernel.org/r/20241122112912.89881-1-larysa.zaremba@intel.com Signed-off-by: Alexei Starovoitov --- net/xdp/xsk_buff_pool.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index ae71da7d2cd6..1f7975b49657 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -387,10 +387,9 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs) return; } - if (!refcount_dec_and_test(&dma_map->users)) - return; + if (refcount_dec_and_test(&dma_map->users)) + __xp_dma_unmap(dma_map, attrs); - __xp_dma_unmap(dma_map, attrs); kvfree(pool->dma_pages); pool->dma_pages = NULL; pool->dma_pages_cnt = 0; -- cgit v1.2.3-70-g09d2 From ef3ba8c258ee368a5343fa9329df85b4bcb9e8b5 Mon Sep 17 00:00:00 2001 From: Amir Mohammadi Date: Thu, 21 Nov 2024 12:04:13 +0330 Subject: bpftool: fix potential NULL pointer dereferencing in prog_dump() A NULL pointer dereference could occur if ksyms is not properly checked before usage in the prog_dump() function. Fixes: b053b439b72a ("bpf: libbpf: bpftool: Print bpf_line_info during prog dump") Signed-off-by: Amir Mohammadi Reviewed-by: Quentin Monnet Acked-by: John Fastabend Link: https://lore.kernel.org/r/20241121083413.7214-1-amiremohamadi@yahoo.com Signed-off-by: Alexei Starovoitov --- tools/bpf/bpftool/prog.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 2ff949ea82fa..e71be67f1d86 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -822,11 +822,18 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode, printf("%s:\n", sym_name); } - if (disasm_print_insn(img, lens[i], opcodes, - name, disasm_opt, btf, - prog_linfo, ksyms[i], i, - linum)) - goto exit_free; + if (ksyms) { + if (disasm_print_insn(img, lens[i], opcodes, + name, disasm_opt, btf, + prog_linfo, ksyms[i], i, + linum)) + goto exit_free; + } else { + if (disasm_print_insn(img, lens[i], opcodes, + name, disasm_opt, btf, + NULL, 0, 0, false)) + goto exit_free; + } img += lens[i]; -- cgit v1.2.3-70-g09d2 From 6b64128a74ebcacc5a0de5a74834e3b9f47a354c Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 19 Nov 2024 17:18:19 +0100 Subject: selftests/bpf: Check for PREEMPTION instead of PREEMPT CONFIG_PREEMPT is a preemtion model the so called "Low-Latency Desktop". A different preemption model is PREEMPT_RT the so called "Real-Time". Both implement preemption in kernel and set CONFIG_PREEMPTION. There is also the so called "LAZY PREEMPT" which the "Scheduler controlled preemption model". Here we have also preemption in the kernel the rules are slightly different. Therefore the testsuite should not check for CONFIG_PREEMPT (as one model) but for CONFIG_PREEMPTION to figure out if preemption in the kernel is possible. 
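The programs below rely on a weak __kconfig extern, which libbpf resolves from the running kernel's configuration at load time. A minimal sketch of the pattern (section name and body are placeholders, assuming a generated vmlinux.h):

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>

  extern bool CONFIG_PREEMPTION __kconfig __weak;

  SEC("raw_tp/sys_enter")
  int probe(void *ctx)
  {
          if (!CONFIG_PREEMPTION)
                  return 0;       /* kernel preemption not available, skip */
          /* ... logic that depends on the kernel being preemptible ... */
          return 0;
  }

  char _license[] SEC("license") = "GPL";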
Signed-off-by: Sebastian Andrzej Siewior Link: https://lore.kernel.org/r/20241119161819.qvEcs-n_@linutronix.de Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/map_tests/task_storage_map.c | 4 ++-- tools/testing/selftests/bpf/prog_tests/task_local_storage.c | 2 +- tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c | 4 ++-- tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/bpf/map_tests/task_storage_map.c b/tools/testing/selftests/bpf/map_tests/task_storage_map.c index 62971dbf2996..a4121d2248ac 100644 --- a/tools/testing/selftests/bpf/map_tests/task_storage_map.c +++ b/tools/testing/selftests/bpf/map_tests/task_storage_map.c @@ -78,8 +78,8 @@ void test_task_storage_map_stress_lookup(void) CHECK(err, "open_and_load", "error %d\n", err); /* Only for a fully preemptible kernel */ - if (!skel->kconfig->CONFIG_PREEMPT) { - printf("%s SKIP (no CONFIG_PREEMPT)\n", __func__); + if (!skel->kconfig->CONFIG_PREEMPTION) { + printf("%s SKIP (no CONFIG_PREEMPTION)\n", __func__); read_bpf_task_storage_busy__destroy(skel); skips++; return; diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c index 60f474d965a9..42e822ea352f 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c +++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c @@ -197,7 +197,7 @@ static void test_nodeadlock(void) /* Unnecessary recursion and deadlock detection are reproducible * in the preemptible kernel. */ - if (!skel->kconfig->CONFIG_PREEMPT) { + if (!skel->kconfig->CONFIG_PREEMPTION) { test__skip(); goto done; } diff --git a/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c b/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c index 76556e0b42b2..69da05bb6c63 100644 --- a/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c +++ b/tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c @@ -4,7 +4,7 @@ #include #include -extern bool CONFIG_PREEMPT __kconfig __weak; +extern bool CONFIG_PREEMPTION __kconfig __weak; extern const int bpf_task_storage_busy __ksym; char _license[] SEC("license") = "GPL"; @@ -24,7 +24,7 @@ int BPF_PROG(read_bpf_task_storage_busy) { int *value; - if (!CONFIG_PREEMPT) + if (!CONFIG_PREEMPTION) return 0; if (bpf_get_current_pid_tgid() >> 32 != pid) diff --git a/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c b/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c index ea2dbb80f7b3..986829aaf73a 100644 --- a/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c +++ b/tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c @@ -10,7 +10,7 @@ char _license[] SEC("license") = "GPL"; #define EBUSY 16 #endif -extern bool CONFIG_PREEMPT __kconfig __weak; +extern bool CONFIG_PREEMPTION __kconfig __weak; int nr_get_errs = 0; int nr_del_errs = 0; @@ -29,7 +29,7 @@ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type, int ret, zero = 0; int *value; - if (!CONFIG_PREEMPT) + if (!CONFIG_PREEMPTION) return 0; task = bpf_get_current_task_btf(); -- cgit v1.2.3-70-g09d2 From ca70b8baf2bd125b2a4d96e76db79375c07d7ff2 Mon Sep 17 00:00:00 2001 From: Zijian Zhang Date: Wed, 16 Oct 2024 23:48:38 +0000 Subject: tcp_bpf: Fix the sk_mem_uncharge logic in tcp_bpf_sendmsg The current sk memory accounting logic in __SK_REDIRECT is pre-uncharging tosend bytes, which is either msg->sg.size or a smaller value 
apply_bytes. Potential problems with this strategy are as follows: - If the actual sent bytes are smaller than tosend, we need to charge some bytes back, as in line 487, which is okay but seems not clean. - When tosend is set to apply_bytes, as in line 417, and (ret < 0), we may miss uncharging (msg->sg.size - apply_bytes) bytes. [...] 415 tosend = msg->sg.size; 416 if (psock->apply_bytes && psock->apply_bytes < tosend) 417 tosend = psock->apply_bytes; [...] 443 sk_msg_return(sk, msg, tosend); 444 release_sock(sk); 446 origsize = msg->sg.size; 447 ret = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress, 448 msg, tosend, flags); 449 sent = origsize - msg->sg.size; [...] 454 lock_sock(sk); 455 if (unlikely(ret < 0)) { 456 int free = sk_msg_free_nocharge(sk, msg); 458 if (!cork) 459 *copied -= free; 460 } [...] 487 if (eval == __SK_REDIRECT) 488 sk_mem_charge(sk, tosend - sent); [...] When running the selftest test_txmsg_redir_wait_sndmem with txmsg_apply, the following warning will be reported: ------------[ cut here ]------------ WARNING: CPU: 6 PID: 57 at net/ipv4/af_inet.c:156 inet_sock_destruct+0x190/0x1a0 Modules linked in: CPU: 6 UID: 0 PID: 57 Comm: kworker/6:0 Not tainted 6.12.0-rc1.bm.1-amd64+ #43 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014 Workqueue: events sk_psock_destroy RIP: 0010:inet_sock_destruct+0x190/0x1a0 RSP: 0018:ffffad0a8021fe08 EFLAGS: 00010206 RAX: 0000000000000011 RBX: ffff9aab4475b900 RCX: ffff9aab481a0800 RDX: 0000000000000303 RSI: 0000000000000011 RDI: ffff9aab4475b900 RBP: ffff9aab4475b990 R08: 0000000000000000 R09: ffff9aab40050ec0 R10: 0000000000000000 R11: ffff9aae6fdb1d01 R12: ffff9aab49c60400 R13: ffff9aab49c60598 R14: ffff9aab49c60598 R15: dead000000000100 FS: 0000000000000000(0000) GS:ffff9aae6fd80000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007ffec7e47bd8 CR3: 00000001a1a1c004 CR4: 0000000000770ef0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: ? __warn+0x89/0x130 ? inet_sock_destruct+0x190/0x1a0 ? report_bug+0xfc/0x1e0 ? handle_bug+0x5c/0xa0 ? exc_invalid_op+0x17/0x70 ? asm_exc_invalid_op+0x1a/0x20 ? inet_sock_destruct+0x190/0x1a0 __sk_destruct+0x25/0x220 sk_psock_destroy+0x2b2/0x310 process_scheduled_works+0xa3/0x3e0 worker_thread+0x117/0x240 ? __pfx_worker_thread+0x10/0x10 kthread+0xcf/0x100 ? __pfx_kthread+0x10/0x10 ret_from_fork+0x31/0x40 ? __pfx_kthread+0x10/0x10 ret_from_fork_asm+0x1a/0x30 ---[ end trace 0000000000000000 ]--- In __SK_REDIRECT, a more concise way is delaying the uncharging after sent bytes are finalized, and uncharge this value. When (ret < 0), we shall invoke sk_msg_free. Same thing happens in case __SK_DROP, when tosend is set to apply_bytes, we may miss uncharging (msg->sg.size - apply_bytes) bytes. The same warning will be reported in selftest. [...] 468 case __SK_DROP: 469 default: 470 sk_msg_free_partial(sk, msg, tosend); 471 sk_msg_apply_bytes(psock, tosend); 472 *copied -= (tosend + delta); 473 return -EACCES; [...] So instead of sk_msg_free_partial we can do sk_msg_free here. 
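The imbalance can be followed with a small arithmetic model (standalone, illustrative numbers only): charge the full msg, pre-uncharge tosend, fail the redirect and free without charge, and (size - tosend) bytes stay accounted for pages that are already gone; uncharging only the bytes actually sent and then doing a full sk_msg_free leaves nothing behind.

  #include <stdio.h>

  int main(void)
  {
          int size = 8192, apply_bytes = 4096, sent = 0;  /* redirect failed */
          int tosend = apply_bytes && apply_bytes < size ? apply_bytes : size;
          int charged;

          charged = size;         /* accounted when the msg was built           */
          charged -= tosend;      /* old flow: sk_msg_return(tosend)            */
          /* old flow: sk_msg_free_nocharge() frees all pages, uncharges nothing */
          printf("old flow, stale accounting: %d bytes\n", charged);    /* 4096 */

          charged = size;
          charged -= sent;        /* new flow: uncharge exactly what was sent   */
          charged -= size - sent; /* new flow: sk_msg_free() uncharges the rest */
          printf("new flow, stale accounting: %d bytes\n", charged);    /* 0    */
          return 0;
  }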
Fixes: 604326b41a6f ("bpf, sockmap: convert to generic sk_msg interface") Fixes: 8ec95b94716a ("bpf, sockmap: Fix the sk->sk_forward_alloc warning of sk_stream_kill_queues") Signed-off-by: Zijian Zhang Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20241016234838.3167769-3-zijianzhang@bytedance.com --- net/ipv4/tcp_bpf.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 370993c03d31..99cef92e6290 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -441,7 +441,6 @@ more_data: cork = true; psock->cork = NULL; } - sk_msg_return(sk, msg, tosend); release_sock(sk); origsize = msg->sg.size; @@ -453,8 +452,9 @@ more_data: sock_put(sk_redir); lock_sock(sk); + sk_mem_uncharge(sk, sent); if (unlikely(ret < 0)) { - int free = sk_msg_free_nocharge(sk, msg); + int free = sk_msg_free(sk, msg); if (!cork) *copied -= free; @@ -468,7 +468,7 @@ more_data: break; case __SK_DROP: default: - sk_msg_free_partial(sk, msg, tosend); + sk_msg_free(sk, msg); sk_msg_apply_bytes(psock, tosend); *copied -= (tosend + delta); return -EACCES; @@ -484,11 +484,8 @@ more_data: } if (msg && msg->sg.data[msg->sg.start].page_link && - msg->sg.data[msg->sg.start].length) { - if (eval == __SK_REDIRECT) - sk_mem_charge(sk, tosend - sent); + msg->sg.data[msg->sg.start].length) goto more_data; - } } return ret; } -- cgit v1.2.3-70-g09d2 From 3448ad23b34e43a2526bd0f9e1221e8de876adec Mon Sep 17 00:00:00 2001 From: Zijian Zhang Date: Wed, 16 Oct 2024 23:48:37 +0000 Subject: selftests/bpf: Add apply_bytes test to test_txmsg_redir_wait_sndmem in test_sockmap Add this to more comprehensively test the socket memory accounting logic in the __SK_REDIRECT and __SK_DROP cases of tcp_bpf_sendmsg. We don't have test when apply_bytes are not zero in test_txmsg_redir_wait_sndmem. test_send_large has opt->rate=2, it will invoke sendmsg two times. Specifically, the first sendmsg will trigger the case where the ret value of tcp_bpf_sendmsg_redir is less than 0; while the second sendmsg happens after the 3 seconds timeout, and it will trigger __SK_DROP because socket c2 has been removed from the sockmap/hash. Signed-off-by: Zijian Zhang Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20241016234838.3167769-2-zijianzhang@bytedance.com --- tools/testing/selftests/bpf/test_sockmap.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index e5c7ecbe57e3..fd2da2234cc9 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -1579,8 +1579,12 @@ static void test_txmsg_redir(int cgrp, struct sockmap_options *opt) static void test_txmsg_redir_wait_sndmem(int cgrp, struct sockmap_options *opt) { - txmsg_redir = 1; opt->tx_wait_mem = true; + txmsg_redir = 1; + test_send_large(opt, cgrp); + + txmsg_redir = 1; + txmsg_apply = 4097; test_send_large(opt, cgrp); opt->tx_wait_mem = false; } -- cgit v1.2.3-70-g09d2 From 537a2525eaf76ea9b0dca62b994500d8670b39d5 Mon Sep 17 00:00:00 2001 From: Björn Töpel Date: Wed, 27 Nov 2024 11:17:46 +0100 Subject: tools: Override makefile ARCH variable if defined, but empty MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are a number of tools (bpftool, selftests), that require a "bootstrap" build. Here, a bootstrap build is a build host variant of a target. 
E.g., assume that you're performing a bpftool cross-build on x86 to riscv, a bootstrap build would then be an x86 variant of bpftool. The typical way to perform the host build variant, is to pass "ARCH=" in a sub-make. However, if a variable has been set with a command argument, then ordinary assignments in the makefile are ignored. This side-effect results in that ARCH, and variables depending on ARCH are not set. Workaround by overriding ARCH to the host arch, if ARCH is empty. Fixes: 8859b0da5aac ("tools/bpftool: Fix cross-build") Signed-off-by: Björn Töpel Signed-off-by: Daniel Borkmann Tested-by: Alexandre Ghiti Reviewed-by: Jean-Philippe Brucker Reviewed-by: Namhyung Kim Reviewed-by: Toke Høiland-Jørgensen Acked-by: Quentin Monnet Acked-by: Jiri Olsa Cc: Arnaldo Carvalho de Melo Link: https://lore.kernel.org/bpf/20241127101748.165693-1-bjorn@kernel.org --- tools/scripts/Makefile.arch | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/scripts/Makefile.arch b/tools/scripts/Makefile.arch index f6a50f06dfc4..eabfe9f411d9 100644 --- a/tools/scripts/Makefile.arch +++ b/tools/scripts/Makefile.arch @@ -7,8 +7,8 @@ HOSTARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \ -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \ -e s/riscv.*/riscv/ -e s/loongarch.*/loongarch/) -ifndef ARCH -ARCH := $(HOSTARCH) +ifeq ($(strip $(ARCH)),) +override ARCH := $(HOSTARCH) endif SRCARCH := $(ARCH) -- cgit v1.2.3-70-g09d2 From 12659d28615d606b36e382f4de2dd05550d202af Mon Sep 17 00:00:00 2001 From: Tao Lyu Date: Mon, 2 Dec 2024 16:02:37 -0800 Subject: bpf: Ensure reg is PTR_TO_STACK in process_iter_arg Currently, KF_ARG_PTR_TO_ITER handling missed checking the reg->type and ensuring it is PTR_TO_STACK. Instead of enforcing this in the caller of process_iter_arg, move the check into it instead so that all callers will gain the check by default. This is similar to process_dynptr_func. An existing selftest in verifier_bits_iter.c fails due to this change, but it's because it was passing a NULL pointer into iter_next helper and getting an error further down the checks, but probably meant to pass an uninitialized iterator on the stack (as is done in the subsequent test below it). We will gain coverage for non-PTR_TO_STACK arguments in later patches hence just change the declaration to zero-ed stack object. Fixes: 06accc8779c1 ("bpf: add support for open-coded iterator loops") Suggested-by: Andrii Nakryiko Signed-off-by: Tao Lyu [ Kartikeya: move check into process_iter_arg, rewrite commit log ] Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20241203000238.3602922-2-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 5 +++++ tools/testing/selftests/bpf/progs/verifier_bits_iter.c | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 1c4ebb326785..358a3566bb60 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -8189,6 +8189,11 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id const struct btf_type *t; int spi, err, i, nr_slots, btf_id; + if (reg->type != PTR_TO_STACK) { + verbose(env, "arg#%d expected pointer to an iterator on stack\n", regno - 1); + return -EINVAL; + } + /* For iter_{new,next,destroy} functions, btf_check_iter_kfuncs() * ensures struct convention, so we wouldn't need to do any BTF * validation here. 
But given iter state can be passed as a parameter diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c index 7c881bca9af5..a7a6ae6c162f 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c +++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c @@ -35,9 +35,9 @@ __description("uninitialized iter in ->next()") __failure __msg("expected an initialized iter_bits as arg #1") int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp) { - struct bpf_iter_bits *it = NULL; + struct bpf_iter_bits it = {}; - bpf_iter_bits_next(it); + bpf_iter_bits_next(&it); return 0; } -- cgit v1.2.3-70-g09d2 From 7f71197001e35f46aca97204986edf9301f8dd09 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Mon, 2 Dec 2024 16:02:38 -0800 Subject: selftests/bpf: Add tests for iter arg check Add selftests to cover argument type check for iterator kfuncs, and cover all three kinds (new, next, destroy). Without the fix in the previous patch, the selftest would not cause a verifier error. Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20241203000238.3602922-3-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/iters.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index ef70b88bccb2..7c969c127573 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -1486,4 +1486,30 @@ int iter_subprog_check_stacksafe(const void *ctx) return 0; } +struct bpf_iter_num global_it; + +SEC("raw_tp") +__failure __msg("arg#0 expected pointer to an iterator on stack") +int iter_new_bad_arg(const void *ctx) +{ + bpf_iter_num_new(&global_it, 0, 1); + return 0; +} + +SEC("raw_tp") +__failure __msg("arg#0 expected pointer to an iterator on stack") +int iter_next_bad_arg(const void *ctx) +{ + bpf_iter_num_next(&global_it); + return 0; +} + +SEC("raw_tp") +__failure __msg("arg#0 expected pointer to an iterator on stack") +int iter_destroy_bad_arg(const void *ctx) +{ + bpf_iter_num_destroy(&global_it); + return 0; +} + char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From bd74e238ae6944b462f57ce8752440a011ba4530 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Mon, 2 Dec 2024 16:22:35 -0800 Subject: bpf: Zero index arg error string for dynptr and iter Andrii spotted that process_dynptr_func's rejection of incorrect argument register type will print an error string where argument numbers are not zero-indexed, unlike elsewhere in the verifier. Fix this by subtracting 1 from regno. The same scenario exists for iterator messages. Fix selftest error strings that match on the exact argument number while we're at it to ensure clean bisection. 
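For reference, the mapping relied on here is the BPF calling convention: R1..R5 carry a kfunc's 1st..5th argument, so the zero-indexed argument number is simply regno - 1 (tiny standalone illustration):

  #include <stdio.h>

  int main(void)
  {
          /* R1..R5 hold the 1st..5th argument of a helper or kfunc. */
          for (int regno = 1; regno <= 5; regno++)
                  printf("R%d carries arg#%d (zero-indexed)\n", regno, regno - 1);
          return 0;
  }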
Suggested-by: Andrii Nakryiko Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20241203002235.3776418-1-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 12 ++++++------ tools/testing/selftests/bpf/progs/dynptr_fail.c | 22 +++++++++++----------- .../selftests/bpf/progs/iters_state_safety.c | 14 +++++++------- .../selftests/bpf/progs/iters_testmod_seq.c | 4 ++-- .../selftests/bpf/progs/test_kfunc_dynptr_param.c | 2 +- .../selftests/bpf/progs/verifier_bits_iter.c | 4 ++-- 6 files changed, 29 insertions(+), 29 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 358a3566bb60..2fd35465d650 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -8071,7 +8071,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn if (reg->type != PTR_TO_STACK && reg->type != CONST_PTR_TO_DYNPTR) { verbose(env, "arg#%d expected pointer to stack or const struct bpf_dynptr\n", - regno); + regno - 1); return -EINVAL; } @@ -8125,7 +8125,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn if (!is_dynptr_reg_valid_init(env, reg)) { verbose(env, "Expected an initialized dynptr as arg #%d\n", - regno); + regno - 1); return -EINVAL; } @@ -8133,7 +8133,7 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn if (!is_dynptr_type_expected(env, reg, arg_type & ~MEM_RDONLY)) { verbose(env, "Expected a dynptr of type %s as arg #%d\n", - dynptr_type_str(arg_to_dynptr_type(arg_type)), regno); + dynptr_type_str(arg_to_dynptr_type(arg_type)), regno - 1); return -EINVAL; } @@ -8202,7 +8202,7 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id */ btf_id = btf_check_iter_arg(meta->btf, meta->func_proto, regno - 1); if (btf_id < 0) { - verbose(env, "expected valid iter pointer as arg #%d\n", regno); + verbose(env, "expected valid iter pointer as arg #%d\n", regno - 1); return -EINVAL; } t = btf_type_by_id(meta->btf, btf_id); @@ -8212,7 +8212,7 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id /* bpf_iter__new() expects pointer to uninit iter state */ if (!is_iter_reg_valid_uninit(env, reg, nr_slots)) { verbose(env, "expected uninitialized iter_%s as arg #%d\n", - iter_type_str(meta->btf, btf_id), regno); + iter_type_str(meta->btf, btf_id), regno - 1); return -EINVAL; } @@ -8236,7 +8236,7 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id break; case -EINVAL: verbose(env, "expected an initialized iter_%s as arg #%d\n", - iter_type_str(meta->btf, btf_id), regno); + iter_type_str(meta->btf, btf_id), regno - 1); return err; case -EPROTO: verbose(env, "expected an RCU CS when using %s\n", meta->func_name); diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 8f36c9de7591..dfd817d0348c 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -149,7 +149,7 @@ int ringbuf_release_uninit_dynptr(void *ctx) /* A dynptr can't be used after it has been invalidated */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #3") +__failure __msg("Expected an initialized dynptr as arg #2") int use_after_invalid(void *ctx) { struct bpf_dynptr ptr; @@ -428,7 +428,7 @@ int invalid_helper2(void *ctx) /* A bpf_dynptr is invalidated if it's been written into */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #1") 
+__failure __msg("Expected an initialized dynptr as arg #0") int invalid_write1(void *ctx) { struct bpf_dynptr ptr; @@ -1407,7 +1407,7 @@ int invalid_slice_rdwr_rdonly(struct __sk_buff *skb) /* bpf_dynptr_adjust can only be called on initialized dynptrs */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #1") +__failure __msg("Expected an initialized dynptr as arg #0") int dynptr_adjust_invalid(void *ctx) { struct bpf_dynptr ptr = {}; @@ -1420,7 +1420,7 @@ int dynptr_adjust_invalid(void *ctx) /* bpf_dynptr_is_null can only be called on initialized dynptrs */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #1") +__failure __msg("Expected an initialized dynptr as arg #0") int dynptr_is_null_invalid(void *ctx) { struct bpf_dynptr ptr = {}; @@ -1433,7 +1433,7 @@ int dynptr_is_null_invalid(void *ctx) /* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #1") +__failure __msg("Expected an initialized dynptr as arg #0") int dynptr_is_rdonly_invalid(void *ctx) { struct bpf_dynptr ptr = {}; @@ -1446,7 +1446,7 @@ int dynptr_is_rdonly_invalid(void *ctx) /* bpf_dynptr_size can only be called on initialized dynptrs */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #1") +__failure __msg("Expected an initialized dynptr as arg #0") int dynptr_size_invalid(void *ctx) { struct bpf_dynptr ptr = {}; @@ -1459,7 +1459,7 @@ int dynptr_size_invalid(void *ctx) /* Only initialized dynptrs can be cloned */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #1") +__failure __msg("Expected an initialized dynptr as arg #0") int clone_invalid1(void *ctx) { struct bpf_dynptr ptr1 = {}; @@ -1493,7 +1493,7 @@ int clone_invalid2(struct xdp_md *xdp) /* Invalidating a dynptr should invalidate its clones */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #3") +__failure __msg("Expected an initialized dynptr as arg #2") int clone_invalidate1(void *ctx) { struct bpf_dynptr clone; @@ -1514,7 +1514,7 @@ int clone_invalidate1(void *ctx) /* Invalidating a dynptr should invalidate its parent */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #3") +__failure __msg("Expected an initialized dynptr as arg #2") int clone_invalidate2(void *ctx) { struct bpf_dynptr ptr; @@ -1535,7 +1535,7 @@ int clone_invalidate2(void *ctx) /* Invalidating a dynptr should invalidate its siblings */ SEC("?raw_tp") -__failure __msg("Expected an initialized dynptr as arg #3") +__failure __msg("Expected an initialized dynptr as arg #2") int clone_invalidate3(void *ctx) { struct bpf_dynptr ptr; @@ -1723,7 +1723,7 @@ __noinline long global_call_bpf_dynptr(const struct bpf_dynptr *dynptr) } SEC("?raw_tp") -__failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr") +__failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr") int test_dynptr_reg_type(void *ctx) { struct task_struct *current = NULL; diff --git a/tools/testing/selftests/bpf/progs/iters_state_safety.c b/tools/testing/selftests/bpf/progs/iters_state_safety.c index d47e59aba6de..f41257eadbb2 100644 --- a/tools/testing/selftests/bpf/progs/iters_state_safety.c +++ b/tools/testing/selftests/bpf/progs/iters_state_safety.c @@ -73,7 +73,7 @@ int create_and_forget_to_destroy_fail(void *ctx) } SEC("?raw_tp") -__failure __msg("expected an initialized iter_num as arg #1") +__failure __msg("expected an initialized iter_num as arg #0") int 
destroy_without_creating_fail(void *ctx) { /* init with zeros to stop verifier complaining about uninit stack */ @@ -91,7 +91,7 @@ int destroy_without_creating_fail(void *ctx) } SEC("?raw_tp") -__failure __msg("expected an initialized iter_num as arg #1") +__failure __msg("expected an initialized iter_num as arg #0") int compromise_iter_w_direct_write_fail(void *ctx) { struct bpf_iter_num iter; @@ -143,7 +143,7 @@ int compromise_iter_w_direct_write_and_skip_destroy_fail(void *ctx) } SEC("?raw_tp") -__failure __msg("expected an initialized iter_num as arg #1") +__failure __msg("expected an initialized iter_num as arg #0") int compromise_iter_w_helper_write_fail(void *ctx) { struct bpf_iter_num iter; @@ -230,7 +230,7 @@ int valid_stack_reuse(void *ctx) } SEC("?raw_tp") -__failure __msg("expected uninitialized iter_num as arg #1") +__failure __msg("expected uninitialized iter_num as arg #0") int double_create_fail(void *ctx) { struct bpf_iter_num iter; @@ -258,7 +258,7 @@ int double_create_fail(void *ctx) } SEC("?raw_tp") -__failure __msg("expected an initialized iter_num as arg #1") +__failure __msg("expected an initialized iter_num as arg #0") int double_destroy_fail(void *ctx) { struct bpf_iter_num iter; @@ -284,7 +284,7 @@ int double_destroy_fail(void *ctx) } SEC("?raw_tp") -__failure __msg("expected an initialized iter_num as arg #1") +__failure __msg("expected an initialized iter_num as arg #0") int next_without_new_fail(void *ctx) { struct bpf_iter_num iter; @@ -305,7 +305,7 @@ int next_without_new_fail(void *ctx) } SEC("?raw_tp") -__failure __msg("expected an initialized iter_num as arg #1") +__failure __msg("expected an initialized iter_num as arg #0") int next_after_destroy_fail(void *ctx) { struct bpf_iter_num iter; diff --git a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c index 4a176e6aede8..6543d5b6e0a9 100644 --- a/tools/testing/selftests/bpf/progs/iters_testmod_seq.c +++ b/tools/testing/selftests/bpf/progs/iters_testmod_seq.c @@ -79,7 +79,7 @@ int testmod_seq_truncated(const void *ctx) SEC("?raw_tp") __failure -__msg("expected an initialized iter_testmod_seq as arg #2") +__msg("expected an initialized iter_testmod_seq as arg #1") int testmod_seq_getter_before_bad(const void *ctx) { struct bpf_iter_testmod_seq it; @@ -89,7 +89,7 @@ int testmod_seq_getter_before_bad(const void *ctx) SEC("?raw_tp") __failure -__msg("expected an initialized iter_testmod_seq as arg #2") +__msg("expected an initialized iter_testmod_seq as arg #1") int testmod_seq_getter_after_bad(const void *ctx) { struct bpf_iter_testmod_seq it; diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c index e68667aec6a6..cd4d752bd089 100644 --- a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c +++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c @@ -45,7 +45,7 @@ int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size) } SEC("?lsm.s/bpf") -__failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr") +__failure __msg("arg#0 expected pointer to stack or const struct bpf_dynptr") int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size) { unsigned long val = 0; diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c index a7a6ae6c162f..8bcddadfc4da 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c +++ 
b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c @@ -32,7 +32,7 @@ int BPF_PROG(no_destroy, struct bpf_iter_meta *meta, struct cgroup *cgrp) SEC("iter/cgroup") __description("uninitialized iter in ->next()") -__failure __msg("expected an initialized iter_bits as arg #1") +__failure __msg("expected an initialized iter_bits as arg #0") int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp) { struct bpf_iter_bits it = {}; @@ -43,7 +43,7 @@ int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp) SEC("iter/cgroup") __description("uninitialized iter in ->destroy()") -__failure __msg("expected an initialized iter_bits as arg #1") +__failure __msg("expected an initialized iter_bits as arg #0") int BPF_PROG(destroy_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp) { struct bpf_iter_bits it = {}; -- cgit v1.2.3-70-g09d2 From 5a6ea7022ff4d2a65ae328619c586d6a8909b48b Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 3 Dec 2024 10:22:22 -0800 Subject: samples/bpf: Remove unnecessary -I flags from libbpf EXTRA_CFLAGS Commit [0] breaks samples/bpf build: $ make M=samples/bpf ... make -C /path/to/kernel/samples/bpf/../../tools/lib/bpf \ ... EXTRA_CFLAGS=" \ ... -fsanitize=bounds \ -I/path/to/kernel/usr/include \ ... /path/to/kernel/samples/bpf/libbpf/libbpf.a install_headers CC /path/to/kernel/samples/bpf/libbpf/staticobjs/libbpf.o In file included from libbpf.c:29: /path/to/kernel/tools/include/linux/err.h:35:8: error: 'inline' can only appear on functions 35 | static inline void * __must_check ERR_PTR(long error_) | ^ The error is caused by `objtree` variable changing definition from `.` (dot) to an absolute path: - The variable TPROGS_CFLAGS is constructed as follows: ... TPROGS_CFLAGS += -I$(objtree)/usr/include - It is passed as EXTRA_CFLAGS for libbpf compilation: $(LIBBPF): ... ... $(MAKE) -C $(LIBBPF_SRC) RM='rm -rf' EXTRA_CFLAGS="$(TPROGS_CFLAGS)" - Before commit [0], the line passed to libbpf makefile was '-I./usr/include', where '.' referred to LIBBPF_SRC due to -C flag. The directory $(LIBBPF_SRC)/usr/include does not exist and thus was never resolved by C compiler. - After commit [0], the line passed to libbpf makefile became: '/usr/include', this directory exists and is resolved by C compiler. - Both 'tools/include' and 'usr/include' define files err.h and types.h. - libbpf expects headers like 'linux/err.h' and 'linux/types.h' defined in 'tools/include', not 'usr/include', hence the compilation error. This commit removes unnecessary -I flags from libbpf compilation. (libbpf sets up the necessary includes at lib/bpf/Makefile:63). 
Changes v1 [1] -> v2: - dropped unnecessary replacement of KBUILD_OUTPUT with $(objtree) (Andrii) Changes v2 [2] -> v3: - make sure --sysroot option is set for libbpf's EXTRA_CFLAGS, if $(SYSROOT) is set (Stanislav) [0] commit 13b25489b6f8 ("kbuild: change working directory to external module directory with M=") [1] https://lore.kernel.org/bpf/20241202212154.3174402-1-eddyz87@gmail.com/ [2] https://lore.kernel.org/bpf/20241202234741.3492084-1-eddyz87@gmail.com/ Fixes: 13b25489b6f8 ("kbuild: change working directory to external module directory with M=") Signed-off-by: Eduard Zingerman Signed-off-by: Andrii Nakryiko Acked-by: Stanislav Fomichev Link: https://lore.kernel.org/bpf/20241203182222.3915763-1-eddyz87@gmail.com --- samples/bpf/Makefile | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index bcf103a4c14f..96a05e70ace3 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -146,13 +146,14 @@ ifeq ($(ARCH), x86) BPF_EXTRA_CFLAGS += -fcf-protection endif -TPROGS_CFLAGS += -Wall -O2 -TPROGS_CFLAGS += -Wmissing-prototypes -TPROGS_CFLAGS += -Wstrict-prototypes -TPROGS_CFLAGS += $(call try-run,\ +COMMON_CFLAGS += -Wall -O2 +COMMON_CFLAGS += -Wmissing-prototypes +COMMON_CFLAGS += -Wstrict-prototypes +COMMON_CFLAGS += $(call try-run,\ printf "int main() { return 0; }" |\ $(CC) -Werror -fsanitize=bounds -x c - -o "$$TMP",-fsanitize=bounds,) +TPROGS_CFLAGS += $(COMMON_CFLAGS) TPROGS_CFLAGS += -I$(objtree)/usr/include TPROGS_CFLAGS += -I$(srctree)/tools/testing/selftests/bpf/ TPROGS_CFLAGS += -I$(LIBBPF_INCLUDE) @@ -162,7 +163,7 @@ TPROGS_CFLAGS += -I$(srctree)/tools/lib TPROGS_CFLAGS += -DHAVE_ATTR_TEST=0 ifdef SYSROOT -TPROGS_CFLAGS += --sysroot=$(SYSROOT) +COMMON_CFLAGS += --sysroot=$(SYSROOT) TPROGS_LDFLAGS := -L$(SYSROOT)/usr/lib endif @@ -229,7 +230,7 @@ clean: $(LIBBPF): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUTPUT) # Fix up variables inherited from Kbuild that tools/ build system won't like - $(MAKE) -C $(LIBBPF_SRC) RM='rm -rf' EXTRA_CFLAGS="$(TPROGS_CFLAGS)" \ + $(MAKE) -C $(LIBBPF_SRC) RM='rm -rf' EXTRA_CFLAGS="$(COMMON_CFLAGS)" \ LDFLAGS="$(TPROGS_LDFLAGS)" srctree=$(BPF_SAMPLES_PATH)/../../ \ O= OUTPUT=$(LIBBPF_OUTPUT)/ DESTDIR=$(LIBBPF_DESTDIR) prefix= \ $@ install_headers -- cgit v1.2.3-70-g09d2 From 69772f509e084ec6bca12dbcdeeeff41b0103774 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Tue, 3 Dec 2024 20:47:53 -0800 Subject: bpf: Don't mark STACK_INVALID as STACK_MISC in mark_stack_slot_misc Inside mark_stack_slot_misc, we should not upgrade STACK_INVALID to STACK_MISC when allow_ptr_leaks is false, since invalid contents shouldn't be read unless the program has the relevant capabilities. The relaxation only makes sense when env->allow_ptr_leaks is true. However, such conversion in privileged mode becomes unnecessary, as invalid slots can be read without being upgraded to STACK_MISC. Currently, the condition is inverted (i.e. checking for true instead of false), simply remove it to restore correct behavior. 
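The policy this restores can be illustrated with a small unprivileged program sketch (simplified, made-up names; the exact verifier message may vary): a stack slot that was never written must stay unreadable without the relevant capabilities, so it cannot be silently promoted to STACK_MISC.

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("socket")
  int read_uninit_slot(void *ctx)
  {
          long uninit;                            /* never written: slot stays STACK_INVALID     */
          long v = *(volatile long *)&uninit;     /* unprivileged load: rejected as invalid read */

          return v & 1;
  }

  char _license[] SEC("license") = "GPL";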
Fixes: eaf18febd6eb ("bpf: preserve STACK_ZERO slots on partial reg spills") Acked-by: Andrii Nakryiko Reported-by: Tao Lyu Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20241204044757.1483141-2-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 2fd35465d650..f18aad339de8 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1202,14 +1202,17 @@ static bool is_spilled_scalar_reg64(const struct bpf_stack_state *stack) /* Mark stack slot as STACK_MISC, unless it is already STACK_INVALID, in which * case they are equivalent, or it's STACK_ZERO, in which case we preserve * more precise STACK_ZERO. - * Note, in uprivileged mode leaving STACK_INVALID is wrong, so we take - * env->allow_ptr_leaks into account and force STACK_MISC, if necessary. + * Regardless of allow_ptr_leaks setting (i.e., privileged or unprivileged + * mode), we won't promote STACK_INVALID to STACK_MISC. In privileged case it is + * unnecessary as both are considered equivalent when loading data and pruning, + * in case of unprivileged mode it will be incorrect to allow reads of invalid + * slots. */ static void mark_stack_slot_misc(struct bpf_verifier_env *env, u8 *stype) { if (*stype == STACK_ZERO) return; - if (env->allow_ptr_leaks && *stype == STACK_INVALID) + if (*stype == STACK_INVALID) return; *stype = STACK_MISC; } -- cgit v1.2.3-70-g09d2 From b0e66977dc072906bb76555fb1a64261d7f63d0f Mon Sep 17 00:00:00 2001 From: Tao Lyu Date: Tue, 3 Dec 2024 20:47:54 -0800 Subject: bpf: Fix narrow scalar spill onto 64-bit spilled scalar slots When CAP_PERFMON and CAP_SYS_ADMIN (allow_ptr_leaks) are disabled, the verifier aims to reject partial overwrite on an 8-byte stack slot that contains a spilled pointer. However, in such a scenario, it rejects all partial stack overwrites as long as the targeted stack slot is a spilled register, because it does not check if the stack slot is a spilled pointer. Incomplete checks will result in the rejection of valid programs, which spill narrower scalar values onto scalar slots, as shown below. 0: R1=ctx() R10=fp0 ; asm volatile ( @ repro.bpf.c:679 0: (7a) *(u64 *)(r10 -8) = 1 ; R10=fp0 fp-8_w=1 1: (62) *(u32 *)(r10 -8) = 1 attempt to corrupt spilled pointer on stack processed 2 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0. Fix this by expanding the check to not consider spilled scalar registers when rejecting the write into the stack. Previous discussion on this patch is at link [0]. 
[0]: https://lore.kernel.org/bpf/20240403202409.2615469-1-tao.lyu@epfl.ch Fixes: ab125ed3ec1c ("bpf: fix check for attempt to corrupt spilled pointer") Acked-by: Eduard Zingerman Acked-by: Andrii Nakryiko Signed-off-by: Tao Lyu Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20241204044757.1483141-3-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f18aad339de8..01fbef9576e0 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4703,6 +4703,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env, */ if (!env->allow_ptr_leaks && is_spilled_reg(&state->stack[spi]) && + !is_spilled_scalar_reg(&state->stack[spi]) && size != BPF_REG_SIZE) { verbose(env, "attempt to corrupt spilled pointer on stack\n"); return -EACCES; -- cgit v1.2.3-70-g09d2 From adfdd9c68566120debc622712888c4d084539081 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 3 Dec 2024 20:47:55 -0800 Subject: selftests/bpf: Introduce __caps_unpriv annotation for tests Add a __caps_unpriv annotation so that tests requiring specific capabilities while dropping the rest can conveniently specify them during selftest declaration instead of munging with capabilities at runtime from the testing binary. While at it, let us convert test_verifier_mtu to use this new support instead. Since we do not want to include linux/capability.h, we only defined the four main capabilities BPF subsystem deals with in bpf_misc.h for use in tests. If the user passes a CAP_SYS_NICE or anything else that's not defined in the header, capability parsing code will return a warning. Also reject strtol returning 0. CAP_CHOWN = 0 but we'll never need to use it, and strtol doesn't errno on failed conversion. Fail the test in such a case. The original diff for this idea is available at link [0]. 
[0]: https://lore.kernel.org/bpf/a1e48f5d9ae133e19adc6adf27e19d585e06bab4.camel@gmail.com Signed-off-by: Eduard Zingerman [ Kartikeya: rebase on bpf-next, add warn to parse_caps, convert test_verifier_mtu ] Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20241204044757.1483141-4-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/verifier.c | 19 +--------- tools/testing/selftests/bpf/progs/bpf_misc.h | 12 ++++++ tools/testing/selftests/bpf/progs/verifier_mtu.c | 4 +- tools/testing/selftests/bpf/test_loader.c | 46 +++++++++++++++++++++++ 4 files changed, 62 insertions(+), 19 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index d9f65adb456b..3ee40ee9413a 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -225,24 +225,7 @@ void test_verifier_xdp(void) { RUN(verifier_xdp); } void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); } void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); } void test_verifier_lsm(void) { RUN(verifier_lsm); } - -void test_verifier_mtu(void) -{ - __u64 caps = 0; - int ret; - - /* In case CAP_BPF and CAP_PERFMON is not set */ - ret = cap_enable_effective(1ULL << CAP_BPF | 1ULL << CAP_NET_ADMIN, &caps); - if (!ASSERT_OK(ret, "set_cap_bpf_cap_net_admin")) - return; - ret = cap_disable_effective(1ULL << CAP_SYS_ADMIN | 1ULL << CAP_PERFMON, NULL); - if (!ASSERT_OK(ret, "disable_cap_sys_admin")) - goto restore_cap; - RUN(verifier_mtu); -restore_cap: - if (caps) - cap_enable_effective(caps, NULL); -} +void test_verifier_mtu(void) { RUN(verifier_mtu); } static int init_test_val_map(struct bpf_object *obj, char *map_name) { diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index eccaf955e394..f45f4352feeb 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -5,6 +5,10 @@ #define XSTR(s) STR(s) #define STR(s) #s +/* Expand a macro and then stringize the expansion */ +#define QUOTE(str) #str +#define EXPAND_QUOTE(str) QUOTE(str) + /* This set of attributes controls behavior of the * test_loader.c:test_loader__run_subtests(). * @@ -106,6 +110,7 @@ * __arch_* Specify on which architecture the test case should be tested. * Several __arch_* annotations could be specified at once. * When test case is not run on current arch it is marked as skipped. + * __caps_unpriv Specify the capabilities that should be set when running the test. 
*/ #define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" XSTR(__COUNTER__) "=" msg))) #define __xlated(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated=" XSTR(__COUNTER__) "=" msg))) @@ -129,6 +134,13 @@ #define __arch_x86_64 __arch("X86_64") #define __arch_arm64 __arch("ARM64") #define __arch_riscv64 __arch("RISCV64") +#define __caps_unpriv(caps) __attribute__((btf_decl_tag("comment:test_caps_unpriv=" EXPAND_QUOTE(caps)))) + +/* Define common capabilities tested using __caps_unpriv */ +#define CAP_NET_ADMIN 12 +#define CAP_SYS_ADMIN 21 +#define CAP_PERFMON 38 +#define CAP_BPF 39 /* Convenience macro for use with 'asm volatile' blocks */ #define __naked __attribute__((naked)) diff --git a/tools/testing/selftests/bpf/progs/verifier_mtu.c b/tools/testing/selftests/bpf/progs/verifier_mtu.c index 70c7600a26a0..4ccf1ebc42d1 100644 --- a/tools/testing/selftests/bpf/progs/verifier_mtu.c +++ b/tools/testing/selftests/bpf/progs/verifier_mtu.c @@ -6,7 +6,9 @@ SEC("tc/ingress") __description("uninit/mtu: write rejected") -__failure __msg("invalid indirect read from stack") +__success +__caps_unpriv(CAP_BPF|CAP_NET_ADMIN) +__failure_unpriv __msg_unpriv("invalid indirect read from stack") int tc_uninit_mtu(struct __sk_buff *ctx) { __u32 mtu; diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index 3e9b009580d4..53b06647cf57 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -36,6 +36,7 @@ #define TEST_TAG_ARCH "comment:test_arch=" #define TEST_TAG_JITED_PFX "comment:test_jited=" #define TEST_TAG_JITED_PFX_UNPRIV "comment:test_jited_unpriv=" +#define TEST_TAG_CAPS_UNPRIV "comment:test_caps_unpriv=" /* Warning: duplicated in bpf_misc.h */ #define POINTER_VALUE 0xcafe4all @@ -74,6 +75,7 @@ struct test_subspec { struct expected_msgs jited; int retval; bool execute; + __u64 caps; }; struct test_spec { @@ -276,6 +278,37 @@ static int parse_int(const char *str, int *val, const char *name) return 0; } +static int parse_caps(const char *str, __u64 *val, const char *name) +{ + int cap_flag = 0; + char *token = NULL, *saveptr = NULL; + + char *str_cpy = strdup(str); + if (str_cpy == NULL) { + PRINT_FAIL("Memory allocation failed\n"); + return -EINVAL; + } + + token = strtok_r(str_cpy, "|", &saveptr); + while (token != NULL) { + errno = 0; + if (!strncmp("CAP_", token, sizeof("CAP_") - 1)) { + PRINT_FAIL("define %s constant in bpf_misc.h, failed to parse caps\n", token); + return -EINVAL; + } + cap_flag = strtol(token, NULL, 10); + if (!cap_flag || errno) { + PRINT_FAIL("failed to parse caps %s\n", name); + return -EINVAL; + } + *val |= (1ULL << cap_flag); + token = strtok_r(NULL, "|", &saveptr); + } + + free(str_cpy); + return 0; +} + static int parse_retval(const char *str, int *val, const char *name) { struct { @@ -541,6 +574,12 @@ static int parse_test_spec(struct test_loader *tester, jit_on_next_line = true; } else if (str_has_pfx(s, TEST_BTF_PATH)) { spec->btf_custom_path = s + sizeof(TEST_BTF_PATH) - 1; + } else if (str_has_pfx(s, TEST_TAG_CAPS_UNPRIV)) { + val = s + sizeof(TEST_TAG_CAPS_UNPRIV) - 1; + err = parse_caps(val, &spec->unpriv.caps, "test caps"); + if (err) + goto cleanup; + spec->mode_mask |= UNPRIV; } } @@ -917,6 +956,13 @@ void run_subtest(struct test_loader *tester, test__end_subtest(); return; } + if (subspec->caps) { + err = cap_enable_effective(subspec->caps, NULL); + if (err) { + PRINT_FAIL("failed to set capabilities: %i, %s\n", err, strerror(err)); + 
goto subtest_cleanup; + } + } } /* Implicitly reset to NULL if next test case doesn't specify */ -- cgit v1.2.3-70-g09d2 From f513c3635078fc184af66b1af9e4f2b2d19b7d79 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Tue, 3 Dec 2024 20:47:56 -0800 Subject: selftests/bpf: Add test for reading from STACK_INVALID slots Ensure that when CAP_PERFMON is dropped, and the verifier sees allow_ptr_leaks as false, we are not permitted to read from a STACK_INVALID slot. Without the fix, the test will report unexpected success in loading. Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20241204044757.1483141-5-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- .../testing/selftests/bpf/progs/verifier_spill_fill.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c index 671d9f415dbf..bab6d789ba00 100644 --- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c +++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c @@ -1244,4 +1244,22 @@ __naked void old_stack_misc_vs_cur_ctx_ptr(void) : __clobber_all); } +SEC("socket") +__description("stack_noperfmon: reject read of invalid slots") +__success +__caps_unpriv(CAP_BPF) +__failure_unpriv __msg_unpriv("invalid read from stack off -8+1 size 8") +__naked void stack_noperfmon_reject_invalid_read(void) +{ + asm volatile (" \ + r2 = 1; \ + r6 = r10; \ + r6 += -8; \ + *(u8 *)(r6 + 0) = r2; \ + r2 = *(u64 *)(r6 + 0); \ + r0 = 0; \ + exit; \ +" ::: __clobber_all); +} + char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From 19b6dbc006ecc1f2c8d7fa2d5afbfb7e7eba7920 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Tue, 3 Dec 2024 20:47:57 -0800 Subject: selftests/bpf: Add test for narrow spill into 64-bit spilled scalar Add a test case to verify that without CAP_PERFMON, the test now succeeds instead of failing due to a verification error. Acked-by: Eduard Zingerman Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20241204044757.1483141-6-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/verifier_spill_fill.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c index bab6d789ba00..1e5a511e8494 100644 --- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c +++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c @@ -1262,4 +1262,21 @@ __naked void stack_noperfmon_reject_invalid_read(void) " ::: __clobber_all); } +SEC("socket") +__description("stack_noperfmon: narrow spill onto 64-bit scalar spilled slots") +__success +__caps_unpriv(CAP_BPF) +__success_unpriv +__naked void stack_noperfmon_spill_32bit_onto_64bit_slot(void) +{ + asm volatile(" \ + r0 = 0; \ + *(u64 *)(r10 - 8) = r0; \ + *(u32 *)(r10 - 8) = r0; \ + exit; \ +" : + : + : __clobber_all); +} + char _license[] SEC("license") = "GPL"; -- cgit v1.2.3-70-g09d2 From 156c977c539e87e173f505b23989d7b0ec0bc7d8 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 6 Dec 2024 19:06:14 +0800 Subject: bpf: Remove unnecessary check when updating LPM trie MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When "node->prefixlen == matchlen" is true, it means that the node is fully matched. 
If "node->prefixlen == key->prefixlen" is false, it means the prefix length of key is greater than the prefix length of node, otherwise, matchlen will not be equal with node->prefixlen. However, it also implies that the prefix length of node must be less than max_prefixlen. Therefore, "node->prefixlen == trie->max_prefixlen" will always be false when the check of "node->prefixlen == key->prefixlen" returns false. Remove this unnecessary comparison. Reviewed-by: Toke Høiland-Jørgensen Acked-by: Daniel Borkmann Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20241206110622.1161752-2-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/lpm_trie.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index 9b60eda0f727..73fd593d3745 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -364,8 +364,7 @@ static long trie_update_elem(struct bpf_map *map, matchlen = longest_prefix_match(trie, node, key); if (node->prefixlen != matchlen || - node->prefixlen == key->prefixlen || - node->prefixlen == trie->max_prefixlen) + node->prefixlen == key->prefixlen) break; next_bit = extract_bit(key->data, node->prefixlen); -- cgit v1.2.3-70-g09d2 From 3d5611b4d7efbefb85a74fcdbc35c603847cc022 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 6 Dec 2024 19:06:15 +0800 Subject: bpf: Remove unnecessary kfree(im_node) in lpm_trie_update_elem MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is no need to call kfree(im_node) when updating element fails, because im_node must be NULL. Remove the unnecessary kfree() for im_node. Reviewed-by: Toke Høiland-Jørgensen Acked-by: Daniel Borkmann Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20241206110622.1161752-3-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/lpm_trie.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index 73fd593d3745..b5e281a55760 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -315,7 +315,7 @@ static long trie_update_elem(struct bpf_map *map, void *_key, void *value, u64 flags) { struct lpm_trie *trie = container_of(map, struct lpm_trie, map); - struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL; + struct lpm_trie_node *node, *im_node, *new_node = NULL; struct lpm_trie_node *free_node = NULL; struct lpm_trie_node __rcu **slot; struct bpf_lpm_trie_key_u8 *key = _key; @@ -431,9 +431,7 @@ out: if (ret) { if (new_node) trie->n_entries--; - kfree(new_node); - kfree(im_node); } spin_unlock_irqrestore(&trie->lock, irq_flags); -- cgit v1.2.3-70-g09d2 From eae6a075e9537dd69891cf77ca5a88fa8a28b4a1 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 6 Dec 2024 19:06:16 +0800 Subject: bpf: Handle BPF_EXIST and BPF_NOEXIST for LPM trie MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the currently missing handling for the BPF_EXIST and BPF_NOEXIST flags. These flags can be specified by users and are relevant since LPM trie supports exact matches during update. 
Fixes: b95a5c4db09b ("bpf: add a longest prefix match trie map implementation") Reviewed-by: Toke Høiland-Jørgensen Acked-by: Daniel Borkmann Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20241206110622.1161752-4-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/lpm_trie.c | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index b5e281a55760..be5bf0389532 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -375,6 +375,10 @@ static long trie_update_elem(struct bpf_map *map, * simply assign the @new_node to that slot and be done. */ if (!node) { + if (flags == BPF_EXIST) { + ret = -ENOENT; + goto out; + } rcu_assign_pointer(*slot, new_node); goto out; } @@ -383,18 +387,31 @@ static long trie_update_elem(struct bpf_map *map, * which already has the correct data array set. */ if (node->prefixlen == matchlen) { + if (!(node->flags & LPM_TREE_NODE_FLAG_IM)) { + if (flags == BPF_NOEXIST) { + ret = -EEXIST; + goto out; + } + trie->n_entries--; + } else if (flags == BPF_EXIST) { + ret = -ENOENT; + goto out; + } + new_node->child[0] = node->child[0]; new_node->child[1] = node->child[1]; - if (!(node->flags & LPM_TREE_NODE_FLAG_IM)) - trie->n_entries--; - rcu_assign_pointer(*slot, new_node); free_node = node; goto out; } + if (flags == BPF_EXIST) { + ret = -ENOENT; + goto out; + } + /* If the new node matches the prefix completely, it must be inserted * as an ancestor. Simply insert it between @node and *@slot. */ -- cgit v1.2.3-70-g09d2 From 532d6b36b2bfac5514426a97a4df8d103d700d43 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 6 Dec 2024 19:06:17 +0800 Subject: bpf: Handle in-place update for full LPM trie correctly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a LPM trie is full, in-place updates of existing elements incorrectly return -ENOSPC. Fix this by deferring the check of trie->n_entries. For new insertions, n_entries must not exceed max_entries. However, in-place updates are allowed even when the trie is full. 
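As a user-space sketch of the corrected behavior (the helper name and map sizes are illustrative, not part of the patch): once the trie holds max_entries prefixes, replacing the value of an existing prefix still succeeds, while inserting a new prefix keeps returning -ENOSPC.

    #include <errno.h>
    #include <unistd.h>
    #include <bpf/bpf.h>

    struct lpm_key {
            __u32 prefixlen;
            __u32 data;
    };

    static int lpm_full_trie_demo(void)
    {
            LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
            struct lpm_key key = { .prefixlen = 32 };
            __u64 value = 0;
            int fd, i;

            fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, sizeof(key),
                                sizeof(value), 4, &opts);
            if (fd < 0)
                    return fd;

            /* Fill the trie up to max_entries with distinct /32 prefixes. */
            for (i = 0; i < 4; i++) {
                    key.data = i;
                    value = i;
                    if (bpf_map_update_elem(fd, &key, &value, BPF_ANY))
                            goto err;
            }

            /* In-place update of an existing prefix: previously -ENOSPC,
             * succeeds with this fix since no new entry is created.
             */
            key.data = 0;
            value = 100;
            if (bpf_map_update_elem(fd, &key, &value, BPF_ANY))
                    goto err;

            /* A genuinely new prefix still does not fit. */
            key.data = 4;
            if (bpf_map_update_elem(fd, &key, &value, BPF_ANY) != -ENOSPC)
                    goto err;

            close(fd);
            return 0;
    err:
            close(fd);
            return -1;
    }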
Fixes: b95a5c4db09b ("bpf: add a longest prefix match trie map implementation") Reviewed-by: Toke Høiland-Jørgensen Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20241206110622.1161752-5-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/lpm_trie.c | 44 +++++++++++++++++++++----------------------- 1 file changed, 21 insertions(+), 23 deletions(-) diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index be5bf0389532..df6cc0a1c9bf 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -310,6 +310,16 @@ static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie, return node; } +static int trie_check_add_elem(struct lpm_trie *trie, u64 flags) +{ + if (flags == BPF_EXIST) + return -ENOENT; + if (trie->n_entries == trie->map.max_entries) + return -ENOSPC; + trie->n_entries++; + return 0; +} + /* Called from syscall or from eBPF program */ static long trie_update_elem(struct bpf_map *map, void *_key, void *value, u64 flags) @@ -333,20 +343,12 @@ static long trie_update_elem(struct bpf_map *map, spin_lock_irqsave(&trie->lock, irq_flags); /* Allocate and fill a new node */ - - if (trie->n_entries == trie->map.max_entries) { - ret = -ENOSPC; - goto out; - } - new_node = lpm_trie_node_alloc(trie, value); if (!new_node) { ret = -ENOMEM; goto out; } - trie->n_entries++; - new_node->prefixlen = key->prefixlen; RCU_INIT_POINTER(new_node->child[0], NULL); RCU_INIT_POINTER(new_node->child[1], NULL); @@ -375,10 +377,10 @@ static long trie_update_elem(struct bpf_map *map, * simply assign the @new_node to that slot and be done. */ if (!node) { - if (flags == BPF_EXIST) { - ret = -ENOENT; + ret = trie_check_add_elem(trie, flags); + if (ret) goto out; - } + rcu_assign_pointer(*slot, new_node); goto out; } @@ -392,10 +394,10 @@ static long trie_update_elem(struct bpf_map *map, ret = -EEXIST; goto out; } - trie->n_entries--; - } else if (flags == BPF_EXIST) { - ret = -ENOENT; - goto out; + } else { + ret = trie_check_add_elem(trie, flags); + if (ret) + goto out; } new_node->child[0] = node->child[0]; @@ -407,10 +409,9 @@ static long trie_update_elem(struct bpf_map *map, goto out; } - if (flags == BPF_EXIST) { - ret = -ENOENT; + ret = trie_check_add_elem(trie, flags); + if (ret) goto out; - } /* If the new node matches the prefix completely, it must be inserted * as an ancestor. Simply insert it between @node and *@slot. @@ -424,6 +425,7 @@ static long trie_update_elem(struct bpf_map *map, im_node = lpm_trie_node_alloc(trie, NULL); if (!im_node) { + trie->n_entries--; ret = -ENOMEM; goto out; } @@ -445,12 +447,8 @@ static long trie_update_elem(struct bpf_map *map, rcu_assign_pointer(*slot, im_node); out: - if (ret) { - if (new_node) - trie->n_entries--; + if (ret) kfree(new_node); - } - spin_unlock_irqrestore(&trie->lock, irq_flags); kfree_rcu(free_node, rcu); -- cgit v1.2.3-70-g09d2 From 27abc7b3fa2e09bbe41e2924d328121546865eda Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 6 Dec 2024 19:06:18 +0800 Subject: bpf: Fix exact match conditions in trie_get_next_key() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit trie_get_next_key() uses node->prefixlen == key->prefixlen to identify an exact match, However, it is incorrect because when the target key doesn't fully match the found node (e.g., node->prefixlen != matchlen), these two nodes may also have the same prefixlen. It will return expected result when the passed key exist in the trie. 
However when a recently-deleted key or nonexistent key is passed to trie_get_next_key(), it may skip keys and return incorrect result. Fix it by using node->prefixlen == matchlen to identify exact matches. When the condition is true after the search, it also implies node->prefixlen equals key->prefixlen, otherwise, the search would return NULL instead. Fixes: b471f2f1de8b ("bpf: implement MAP_GET_NEXT_KEY command for LPM_TRIE map") Reviewed-by: Toke Høiland-Jørgensen Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20241206110622.1161752-6-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/lpm_trie.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index df6cc0a1c9bf..9ba6ae145239 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -645,7 +645,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key) struct lpm_trie_node **node_stack = NULL; int err = 0, stack_ptr = -1; unsigned int next_bit; - size_t matchlen; + size_t matchlen = 0; /* The get_next_key follows postorder. For the 4 node example in * the top of this file, the trie_get_next_key() returns the following @@ -684,7 +684,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key) next_bit = extract_bit(key->data, node->prefixlen); node = rcu_dereference(node->child[next_bit]); } - if (!node || node->prefixlen != key->prefixlen || + if (!node || node->prefixlen != matchlen || (node->flags & LPM_TREE_NODE_FLAG_IM)) goto find_leftmost; -- cgit v1.2.3-70-g09d2 From 3d8dc43eb2a3d179809f5fc27c88c93a57ea123d Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 6 Dec 2024 19:06:19 +0800 Subject: bpf: Switch to bpf mem allocator for LPM trie MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Multiple syzbot warnings have been reported. These warnings are mainly about the lock order between trie->lock and kmalloc()'s internal lock. See report [1] as an example: ====================================================== WARNING: possible circular locking dependency detected 6.10.0-rc7-syzkaller-00003-g4376e966ecb7 #0 Not tainted ------------------------------------------------------ syz.3.2069/15008 is trying to acquire lock: ffff88801544e6d8 (&n->list_lock){-.-.}-{2:2}, at: get_partial_node ... but task is already holding lock: ffff88802dcc89f8 (&trie->lock){-.-.}-{2:2}, at: trie_update_elem ... which lock already depends on the new lock. the existing dependency chain (in reverse order) is: -> #1 (&trie->lock){-.-.}-{2:2}: __raw_spin_lock_irqsave _raw_spin_lock_irqsave+0x3a/0x60 trie_delete_elem+0xb0/0x820 ___bpf_prog_run+0x3e51/0xabd0 __bpf_prog_run32+0xc1/0x100 bpf_dispatcher_nop_func ...... bpf_trace_run2+0x231/0x590 __bpf_trace_contention_end+0xca/0x110 trace_contention_end.constprop.0+0xea/0x170 __pv_queued_spin_lock_slowpath+0x28e/0xcc0 pv_queued_spin_lock_slowpath queued_spin_lock_slowpath queued_spin_lock do_raw_spin_lock+0x210/0x2c0 __raw_spin_lock_irqsave _raw_spin_lock_irqsave+0x42/0x60 __put_partials+0xc3/0x170 qlink_free qlist_free_all+0x4e/0x140 kasan_quarantine_reduce+0x192/0x1e0 __kasan_slab_alloc+0x69/0x90 kasan_slab_alloc slab_post_alloc_hook slab_alloc_node kmem_cache_alloc_node_noprof+0x153/0x310 __alloc_skb+0x2b1/0x380 ...... 
-> #0 (&n->list_lock){-.-.}-{2:2}: check_prev_add check_prevs_add validate_chain __lock_acquire+0x2478/0x3b30 lock_acquire lock_acquire+0x1b1/0x560 __raw_spin_lock_irqsave _raw_spin_lock_irqsave+0x3a/0x60 get_partial_node.part.0+0x20/0x350 get_partial_node get_partial ___slab_alloc+0x65b/0x1870 __slab_alloc.constprop.0+0x56/0xb0 __slab_alloc_node slab_alloc_node __do_kmalloc_node __kmalloc_node_noprof+0x35c/0x440 kmalloc_node_noprof bpf_map_kmalloc_node+0x98/0x4a0 lpm_trie_node_alloc trie_update_elem+0x1ef/0xe00 bpf_map_update_value+0x2c1/0x6c0 map_update_elem+0x623/0x910 __sys_bpf+0x90c/0x49a0 ... other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&trie->lock); lock(&n->list_lock); lock(&trie->lock); lock(&n->list_lock); *** DEADLOCK *** [1]: https://syzkaller.appspot.com/bug?extid=9045c0a3d5a7f1b119f7 A bpf program attached to trace_contention_end() triggers after acquiring &n->list_lock. The program invokes trie_delete_elem(), which then acquires trie->lock. However, it is possible that another process is invoking trie_update_elem(). trie_update_elem() will acquire trie->lock first, then invoke kmalloc_node(). kmalloc_node() may invoke get_partial_node() and try to acquire &n->list_lock (not necessarily the same lock object). Therefore, lockdep warns about the circular locking dependency. Invoking kmalloc() before acquiring trie->lock could fix the warning. However, since BPF programs can be invoked from any context (e.g., through kprobe/tracepoint/fentry), there may still be lock ordering problems for internal locks in kmalloc() or trie->lock itself. To eliminate these potential lock ordering problems with kmalloc()'s internal locks, replace kmalloc()/kfree()/kfree_rcu() with equivalent BPF memory allocator APIs that can be invoked in any context. The lock ordering problems with trie->lock (e.g., reentrance) will be handled separately. Three aspects of this change require explanation: 1. Intermediate and leaf nodes are allocated from the same allocator. Since the value size of LPM trie is usually small, using a single allocator reduces the memory overhead of the BPF memory allocator. 2. Leaf nodes are allocated before disabling IRQs. This handles cases where leaf_size is large (e.g., > 4KB - 8) and updates require intermediate node allocation. If leaf nodes were allocated in the IRQ-disabled region, the free objects in the BPF memory allocator would not be refilled in time and the intermediate node allocation may fail. 3. Paired migrate_{disable|enable}() calls for node alloc and free. The BPF memory allocator uses per-CPU structs internally, so these paired calls are necessary to guarantee correctness.
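For context, the trigger boils down to a pattern like the following (a hedged sketch, not the syzkaller reproducer; the map definition, key layout and program name are illustrative): a program attached to the lock contention_end tracepoint deletes from an LPM trie, so trie->lock is acquired while the task may already hold slab-internal locks.

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    struct lpm_key {
            __u32 prefixlen;
            __u32 data;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_LPM_TRIE);
            __uint(map_flags, BPF_F_NO_PREALLOC);
            __uint(max_entries, 16);
            __type(key, struct lpm_key);
            __type(value, __u32);
    } lpm SEC(".maps");

    SEC("tp_btf/contention_end")
    int BPF_PROG(on_contention_end, void *lock, int ret)
    {
            struct lpm_key key = { .prefixlen = 32, .data = 0 };

            /* Runs while the kernel may already hold slab locks such as
             * &n->list_lock; deleting here acquires trie->lock, producing the
             * inverted ordering that lockdep reports above.
             */
            bpf_map_delete_elem(&lpm, &key);
            return 0;
    }

    char _license[] SEC("license") = "GPL";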
Reviewed-by: Toke Høiland-Jørgensen Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20241206110622.1161752-7-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/lpm_trie.c | 71 ++++++++++++++++++++++++++++++++++----------------- 1 file changed, 48 insertions(+), 23 deletions(-) diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index 9ba6ae145239..f850360e75ce 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -15,6 +15,7 @@ #include #include #include +#include /* Intermediate node */ #define LPM_TREE_NODE_FLAG_IM BIT(0) @@ -22,7 +23,6 @@ struct lpm_trie_node; struct lpm_trie_node { - struct rcu_head rcu; struct lpm_trie_node __rcu *child[2]; u32 prefixlen; u32 flags; @@ -32,6 +32,7 @@ struct lpm_trie_node { struct lpm_trie { struct bpf_map map; struct lpm_trie_node __rcu *root; + struct bpf_mem_alloc ma; size_t n_entries; size_t max_prefixlen; size_t data_size; @@ -287,17 +288,18 @@ static void *trie_lookup_elem(struct bpf_map *map, void *_key) return found->data + trie->data_size; } -static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie, - const void *value) +static struct lpm_trie_node *lpm_trie_node_alloc(struct lpm_trie *trie, + const void *value, + bool disable_migration) { struct lpm_trie_node *node; - size_t size = sizeof(struct lpm_trie_node) + trie->data_size; - if (value) - size += trie->map.value_size; + if (disable_migration) + migrate_disable(); + node = bpf_mem_cache_alloc(&trie->ma); + if (disable_migration) + migrate_enable(); - node = bpf_map_kmalloc_node(&trie->map, size, GFP_NOWAIT | __GFP_NOWARN, - trie->map.numa_node); if (!node) return NULL; @@ -325,7 +327,7 @@ static long trie_update_elem(struct bpf_map *map, void *_key, void *value, u64 flags) { struct lpm_trie *trie = container_of(map, struct lpm_trie, map); - struct lpm_trie_node *node, *im_node, *new_node = NULL; + struct lpm_trie_node *node, *im_node, *new_node; struct lpm_trie_node *free_node = NULL; struct lpm_trie_node __rcu **slot; struct bpf_lpm_trie_key_u8 *key = _key; @@ -340,14 +342,14 @@ static long trie_update_elem(struct bpf_map *map, if (key->prefixlen > trie->max_prefixlen) return -EINVAL; - spin_lock_irqsave(&trie->lock, irq_flags); + /* Allocate and fill a new node. Need to disable migration before + * invoking bpf_mem_cache_alloc(). 
+ */ + new_node = lpm_trie_node_alloc(trie, value, true); + if (!new_node) + return -ENOMEM; - /* Allocate and fill a new node */ - new_node = lpm_trie_node_alloc(trie, value); - if (!new_node) { - ret = -ENOMEM; - goto out; - } + spin_lock_irqsave(&trie->lock, irq_flags); new_node->prefixlen = key->prefixlen; RCU_INIT_POINTER(new_node->child[0], NULL); @@ -423,7 +425,8 @@ static long trie_update_elem(struct bpf_map *map, goto out; } - im_node = lpm_trie_node_alloc(trie, NULL); + /* migration is disabled within the locked scope */ + im_node = lpm_trie_node_alloc(trie, NULL, false); if (!im_node) { trie->n_entries--; ret = -ENOMEM; @@ -447,10 +450,13 @@ static long trie_update_elem(struct bpf_map *map, rcu_assign_pointer(*slot, im_node); out: - if (ret) - kfree(new_node); spin_unlock_irqrestore(&trie->lock, irq_flags); - kfree_rcu(free_node, rcu); + + migrate_disable(); + if (ret) + bpf_mem_cache_free(&trie->ma, new_node); + bpf_mem_cache_free_rcu(&trie->ma, free_node); + migrate_enable(); return ret; } @@ -548,8 +554,11 @@ static long trie_delete_elem(struct bpf_map *map, void *_key) out: spin_unlock_irqrestore(&trie->lock, irq_flags); - kfree_rcu(free_parent, rcu); - kfree_rcu(free_node, rcu); + + migrate_disable(); + bpf_mem_cache_free_rcu(&trie->ma, free_parent); + bpf_mem_cache_free_rcu(&trie->ma, free_node); + migrate_enable(); return ret; } @@ -571,6 +580,8 @@ out: static struct bpf_map *trie_alloc(union bpf_attr *attr) { struct lpm_trie *trie; + size_t leaf_size; + int err; /* check sanity of attributes */ if (attr->max_entries == 0 || @@ -595,7 +606,17 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr) spin_lock_init(&trie->lock); + /* Allocate intermediate and leaf nodes from the same allocator */ + leaf_size = sizeof(struct lpm_trie_node) + trie->data_size + + trie->map.value_size; + err = bpf_mem_alloc_init(&trie->ma, leaf_size, false); + if (err) + goto free_out; return &trie->map; + +free_out: + bpf_map_area_free(trie); + return ERR_PTR(err); } static void trie_free(struct bpf_map *map) @@ -627,13 +648,17 @@ static void trie_free(struct bpf_map *map) continue; } - kfree(node); + /* No bpf program may access the map, so freeing the + * node without waiting for the extra RCU GP. + */ + bpf_mem_cache_raw_free(node); RCU_INIT_POINTER(*slot, NULL); break; } } out: + bpf_mem_alloc_destroy(&trie->ma); bpf_map_area_free(trie); } -- cgit v1.2.3-70-g09d2 From 6a5c63d43c0216d64915baa0e0eacf2beb66b271 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 6 Dec 2024 19:06:20 +0800 Subject: bpf: Use raw_spinlock_t for LPM trie After switching from kmalloc() to the bpf memory allocator, there will be no blocking operation during the update of LPM trie. Therefore, change trie->lock from spinlock_t to raw_spinlock_t to make LPM trie usable in atomic context, even on RT kernels. The max value of prefixlen is 2048. Therefore, update or deletion operations will find the target after at most 2048 comparisons. Constructing a test case which updates an element after 2048 comparisons under a 8 CPU VM, and the average time and the maximal time for such update operation is about 210us and 900us. 
Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20241206110622.1161752-8-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/lpm_trie.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index f850360e75ce..f8bc1e096182 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -36,7 +36,7 @@ struct lpm_trie { size_t n_entries; size_t max_prefixlen; size_t data_size; - spinlock_t lock; + raw_spinlock_t lock; }; /* This trie implements a longest prefix match algorithm that can be used to @@ -349,7 +349,7 @@ static long trie_update_elem(struct bpf_map *map, if (!new_node) return -ENOMEM; - spin_lock_irqsave(&trie->lock, irq_flags); + raw_spin_lock_irqsave(&trie->lock, irq_flags); new_node->prefixlen = key->prefixlen; RCU_INIT_POINTER(new_node->child[0], NULL); @@ -450,7 +450,7 @@ static long trie_update_elem(struct bpf_map *map, rcu_assign_pointer(*slot, im_node); out: - spin_unlock_irqrestore(&trie->lock, irq_flags); + raw_spin_unlock_irqrestore(&trie->lock, irq_flags); migrate_disable(); if (ret) @@ -477,7 +477,7 @@ static long trie_delete_elem(struct bpf_map *map, void *_key) if (key->prefixlen > trie->max_prefixlen) return -EINVAL; - spin_lock_irqsave(&trie->lock, irq_flags); + raw_spin_lock_irqsave(&trie->lock, irq_flags); /* Walk the tree looking for an exact key/length match and keeping * track of the path we traverse. We will need to know the node @@ -553,7 +553,7 @@ static long trie_delete_elem(struct bpf_map *map, void *_key) free_node = node; out: - spin_unlock_irqrestore(&trie->lock, irq_flags); + raw_spin_unlock_irqrestore(&trie->lock, irq_flags); migrate_disable(); bpf_mem_cache_free_rcu(&trie->ma, free_parent); @@ -604,7 +604,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr) offsetof(struct bpf_lpm_trie_key_u8, data); trie->max_prefixlen = trie->data_size * 8; - spin_lock_init(&trie->lock); + raw_spin_lock_init(&trie->lock); /* Allocate intermediate and leaf nodes from the same allocator */ leaf_size = sizeof(struct lpm_trie_node) + trie->data_size + -- cgit v1.2.3-70-g09d2 From 3e18f5f1e5a152a51dbb6c234e2a0421aec11e31 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 6 Dec 2024 19:06:21 +0800 Subject: selftests/bpf: Move test_lpm_map.c to map_tests Move test_lpm_map.c to map_tests/ to include LPM trie test cases in regular test_maps run. Most code remains unchanged, including the use of assert(). Only reduce n_lookups from 64K to 512, which decreases test_lpm_map runtime from 37s to 0.7s. 
Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20241206110622.1161752-9-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/.gitignore | 1 - tools/testing/selftests/bpf/Makefile | 2 +- .../bpf/map_tests/lpm_trie_map_basic_ops.c | 793 ++++++++++++++++++++ tools/testing/selftests/bpf/test_lpm_map.c | 797 --------------------- 4 files changed, 794 insertions(+), 799 deletions(-) create mode 100644 tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c delete mode 100644 tools/testing/selftests/bpf/test_lpm_map.c diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index c2a1842c3d8b..e9c377001f93 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -5,7 +5,6 @@ bpf-syscall* test_verifier test_maps test_lru_map -test_lpm_map test_tag FEATURE-DUMP.libbpf FEATURE-DUMP.selftests diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 6ad3b1ba1920..7eeb3cbe18c7 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -83,7 +83,7 @@ CLANG_CPUV4 := 1 endif # Order correspond to 'make run_tests' order -TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ +TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_progs \ test_sockmap \ test_tcpnotify_user test_sysctl \ test_progs-no_alu32 diff --git a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c new file mode 100644 index 000000000000..f375c89d78a4 --- /dev/null +++ b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c @@ -0,0 +1,793 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Randomized tests for eBPF longest-prefix-match maps + * + * This program runs randomized tests against the lpm-bpf-map. It implements a + * "Trivial Longest Prefix Match" (tlpm) based on simple, linear, singly linked + * lists. The implementation should be pretty straightforward. + * + * Based on tlpm, this inserts randomized data into bpf-lpm-maps and verifies + * the trie-based bpf-map implementation behaves the same way as tlpm. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "bpf_util.h" + +struct tlpm_node { + struct tlpm_node *next; + size_t n_bits; + uint8_t key[]; +}; + +static struct tlpm_node *tlpm_match(struct tlpm_node *list, + const uint8_t *key, + size_t n_bits); + +static struct tlpm_node *tlpm_add(struct tlpm_node *list, + const uint8_t *key, + size_t n_bits) +{ + struct tlpm_node *node; + size_t n; + + n = (n_bits + 7) / 8; + + /* 'overwrite' an equivalent entry if one already exists */ + node = tlpm_match(list, key, n_bits); + if (node && node->n_bits == n_bits) { + memcpy(node->key, key, n); + return list; + } + + /* add new entry with @key/@n_bits to @list and return new head */ + + node = malloc(sizeof(*node) + n); + assert(node); + + node->next = list; + node->n_bits = n_bits; + memcpy(node->key, key, n); + + return node; +} + +static void tlpm_clear(struct tlpm_node *list) +{ + struct tlpm_node *node; + + /* free all entries in @list */ + + while ((node = list)) { + list = list->next; + free(node); + } +} + +static struct tlpm_node *tlpm_match(struct tlpm_node *list, + const uint8_t *key, + size_t n_bits) +{ + struct tlpm_node *best = NULL; + size_t i; + + /* Perform longest prefix-match on @key/@n_bits. That is, iterate all + * entries and match each prefix against @key. Remember the "best" + * entry we find (i.e., the longest prefix that matches) and return it + * to the caller when done. + */ + + for ( ; list; list = list->next) { + for (i = 0; i < n_bits && i < list->n_bits; ++i) { + if ((key[i / 8] & (1 << (7 - i % 8))) != + (list->key[i / 8] & (1 << (7 - i % 8)))) + break; + } + + if (i >= list->n_bits) { + if (!best || i > best->n_bits) + best = list; + } + } + + return best; +} + +static struct tlpm_node *tlpm_delete(struct tlpm_node *list, + const uint8_t *key, + size_t n_bits) +{ + struct tlpm_node *best = tlpm_match(list, key, n_bits); + struct tlpm_node *node; + + if (!best || best->n_bits != n_bits) + return list; + + if (best == list) { + node = best->next; + free(best); + return node; + } + + for (node = list; node; node = node->next) { + if (node->next == best) { + node->next = best->next; + free(best); + return list; + } + } + /* should never get here */ + assert(0); + return list; +} + +static void test_lpm_basic(void) +{ + struct tlpm_node *list = NULL, *t1, *t2; + + /* very basic, static tests to verify tlpm works as expected */ + + assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8)); + + t1 = list = tlpm_add(list, (uint8_t[]){ 0xff }, 8); + assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8)); + assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16)); + assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0x00 }, 16)); + assert(!tlpm_match(list, (uint8_t[]){ 0x7f }, 8)); + assert(!tlpm_match(list, (uint8_t[]){ 0xfe }, 8)); + assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 7)); + + t2 = list = tlpm_add(list, (uint8_t[]){ 0xff, 0xff }, 16); + assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8)); + assert(t2 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16)); + assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 15)); + assert(!tlpm_match(list, (uint8_t[]){ 0x7f, 0xff }, 16)); + + list = tlpm_delete(list, (uint8_t[]){ 0xff, 0xff }, 16); + assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8)); + assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16)); + + list = tlpm_delete(list, (uint8_t[]){ 0xff }, 8); + assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8)); + + 
tlpm_clear(list); +} + +static void test_lpm_order(void) +{ + struct tlpm_node *t1, *t2, *l1 = NULL, *l2 = NULL; + size_t i, j; + + /* Verify the tlpm implementation works correctly regardless of the + * order of entries. Insert a random set of entries into @l1, and copy + * the same data in reverse order into @l2. Then verify a lookup of + * random keys will yield the same result in both sets. + */ + + for (i = 0; i < (1 << 12); ++i) + l1 = tlpm_add(l1, (uint8_t[]){ + rand() % 0xff, + rand() % 0xff, + }, rand() % 16 + 1); + + for (t1 = l1; t1; t1 = t1->next) + l2 = tlpm_add(l2, t1->key, t1->n_bits); + + for (i = 0; i < (1 << 8); ++i) { + uint8_t key[] = { rand() % 0xff, rand() % 0xff }; + + t1 = tlpm_match(l1, key, 16); + t2 = tlpm_match(l2, key, 16); + + assert(!t1 == !t2); + if (t1) { + assert(t1->n_bits == t2->n_bits); + for (j = 0; j < t1->n_bits; ++j) + assert((t1->key[j / 8] & (1 << (7 - j % 8))) == + (t2->key[j / 8] & (1 << (7 - j % 8)))); + } + } + + tlpm_clear(l1); + tlpm_clear(l2); +} + +static void test_lpm_map(int keysize) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); + volatile size_t n_matches, n_matches_after_delete; + size_t i, j, n_nodes, n_lookups; + struct tlpm_node *t, *list = NULL; + struct bpf_lpm_trie_key_u8 *key; + uint8_t *data, *value; + int r, map; + + /* Compare behavior of tlpm vs. bpf-lpm. Create a randomized set of + * prefixes and insert it into both tlpm and bpf-lpm. Then run some + * randomized lookups and verify both maps return the same result. + */ + + n_matches = 0; + n_matches_after_delete = 0; + n_nodes = 1 << 8; + n_lookups = 1 << 9; + + data = alloca(keysize); + memset(data, 0, keysize); + + value = alloca(keysize + 1); + memset(value, 0, keysize + 1); + + key = alloca(sizeof(*key) + keysize); + memset(key, 0, sizeof(*key) + keysize); + + map = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, + sizeof(*key) + keysize, + keysize + 1, + 4096, + &opts); + assert(map >= 0); + + for (i = 0; i < n_nodes; ++i) { + for (j = 0; j < keysize; ++j) + value[j] = rand() & 0xff; + value[keysize] = rand() % (8 * keysize + 1); + + list = tlpm_add(list, value, value[keysize]); + + key->prefixlen = value[keysize]; + memcpy(key->data, value, keysize); + r = bpf_map_update_elem(map, key, value, 0); + assert(!r); + } + + for (i = 0; i < n_lookups; ++i) { + for (j = 0; j < keysize; ++j) + data[j] = rand() & 0xff; + + t = tlpm_match(list, data, 8 * keysize); + + key->prefixlen = 8 * keysize; + memcpy(key->data, data, keysize); + r = bpf_map_lookup_elem(map, key, value); + assert(!r || errno == ENOENT); + assert(!t == !!r); + + if (t) { + ++n_matches; + assert(t->n_bits == value[keysize]); + for (j = 0; j < t->n_bits; ++j) + assert((t->key[j / 8] & (1 << (7 - j % 8))) == + (value[j / 8] & (1 << (7 - j % 8)))); + } + } + + /* Remove the first half of the elements in the tlpm and the + * corresponding nodes from the bpf-lpm. Then run the same + * large number of random lookups in both and make sure they match. + * Note: we need to count the number of nodes actually inserted + * since there may have been duplicates. 
+ */ + for (i = 0, t = list; t; i++, t = t->next) + ; + for (j = 0; j < i / 2; ++j) { + key->prefixlen = list->n_bits; + memcpy(key->data, list->key, keysize); + r = bpf_map_delete_elem(map, key); + assert(!r); + list = tlpm_delete(list, list->key, list->n_bits); + assert(list); + } + for (i = 0; i < n_lookups; ++i) { + for (j = 0; j < keysize; ++j) + data[j] = rand() & 0xff; + + t = tlpm_match(list, data, 8 * keysize); + + key->prefixlen = 8 * keysize; + memcpy(key->data, data, keysize); + r = bpf_map_lookup_elem(map, key, value); + assert(!r || errno == ENOENT); + assert(!t == !!r); + + if (t) { + ++n_matches_after_delete; + assert(t->n_bits == value[keysize]); + for (j = 0; j < t->n_bits; ++j) + assert((t->key[j / 8] & (1 << (7 - j % 8))) == + (value[j / 8] & (1 << (7 - j % 8)))); + } + } + + close(map); + tlpm_clear(list); + + /* With 255 random nodes in the map, we are pretty likely to match + * something on every lookup. For statistics, use this: + * + * printf(" nodes: %zu\n" + * " lookups: %zu\n" + * " matches: %zu\n" + * "matches(delete): %zu\n", + * n_nodes, n_lookups, n_matches, n_matches_after_delete); + */ +} + +/* Test the implementation with some 'real world' examples */ + +static void test_lpm_ipaddr(void) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); + struct bpf_lpm_trie_key_u8 *key_ipv4; + struct bpf_lpm_trie_key_u8 *key_ipv6; + size_t key_size_ipv4; + size_t key_size_ipv6; + int map_fd_ipv4; + int map_fd_ipv6; + __u64 value; + + key_size_ipv4 = sizeof(*key_ipv4) + sizeof(__u32); + key_size_ipv6 = sizeof(*key_ipv6) + sizeof(__u32) * 4; + key_ipv4 = alloca(key_size_ipv4); + key_ipv6 = alloca(key_size_ipv6); + + map_fd_ipv4 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, + key_size_ipv4, sizeof(value), + 100, &opts); + assert(map_fd_ipv4 >= 0); + + map_fd_ipv6 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, + key_size_ipv6, sizeof(value), + 100, &opts); + assert(map_fd_ipv6 >= 0); + + /* Fill data some IPv4 and IPv6 address ranges */ + value = 1; + key_ipv4->prefixlen = 16; + inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); + assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); + + value = 2; + key_ipv4->prefixlen = 24; + inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); + assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); + + value = 3; + key_ipv4->prefixlen = 24; + inet_pton(AF_INET, "192.168.128.0", key_ipv4->data); + assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); + + value = 5; + key_ipv4->prefixlen = 24; + inet_pton(AF_INET, "192.168.1.0", key_ipv4->data); + assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); + + value = 4; + key_ipv4->prefixlen = 23; + inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); + assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); + + value = 0xdeadbeef; + key_ipv6->prefixlen = 64; + inet_pton(AF_INET6, "2a00:1450:4001:814::200e", key_ipv6->data); + assert(bpf_map_update_elem(map_fd_ipv6, key_ipv6, &value, 0) == 0); + + /* Set tprefixlen to maximum for lookups */ + key_ipv4->prefixlen = 32; + key_ipv6->prefixlen = 128; + + /* Test some lookups that should come back with a value */ + inet_pton(AF_INET, "192.168.128.23", key_ipv4->data); + assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0); + assert(value == 3); + + inet_pton(AF_INET, "192.168.0.1", key_ipv4->data); + assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0); + assert(value == 2); + + inet_pton(AF_INET6, "2a00:1450:4001:814::", key_ipv6->data); + 
assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0); + assert(value == 0xdeadbeef); + + inet_pton(AF_INET6, "2a00:1450:4001:814::1", key_ipv6->data); + assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0); + assert(value == 0xdeadbeef); + + /* Test some lookups that should not match any entry */ + inet_pton(AF_INET, "10.0.0.1", key_ipv4->data); + assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -ENOENT); + + inet_pton(AF_INET, "11.11.11.11", key_ipv4->data); + assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -ENOENT); + + inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data); + assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -ENOENT); + + close(map_fd_ipv4); + close(map_fd_ipv6); +} + +static void test_lpm_delete(void) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); + struct bpf_lpm_trie_key_u8 *key; + size_t key_size; + int map_fd; + __u64 value; + + key_size = sizeof(*key) + sizeof(__u32); + key = alloca(key_size); + + map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, + key_size, sizeof(value), + 100, &opts); + assert(map_fd >= 0); + + /* Add nodes: + * 192.168.0.0/16 (1) + * 192.168.0.0/24 (2) + * 192.168.128.0/24 (3) + * 192.168.1.0/24 (4) + * + * (1) + * / \ + * (IM) (3) + * / \ + * (2) (4) + */ + value = 1; + key->prefixlen = 16; + inet_pton(AF_INET, "192.168.0.0", key->data); + assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0); + + value = 2; + key->prefixlen = 24; + inet_pton(AF_INET, "192.168.0.0", key->data); + assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0); + + value = 3; + key->prefixlen = 24; + inet_pton(AF_INET, "192.168.128.0", key->data); + assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0); + + value = 4; + key->prefixlen = 24; + inet_pton(AF_INET, "192.168.1.0", key->data); + assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0); + + /* remove non-existent node */ + key->prefixlen = 32; + inet_pton(AF_INET, "10.0.0.1", key->data); + assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT); + + key->prefixlen = 30; // unused prefix so far + inet_pton(AF_INET, "192.255.0.0", key->data); + assert(bpf_map_delete_elem(map_fd, key) == -ENOENT); + + key->prefixlen = 16; // same prefix as the root node + inet_pton(AF_INET, "192.255.0.0", key->data); + assert(bpf_map_delete_elem(map_fd, key) == -ENOENT); + + /* assert initial lookup */ + key->prefixlen = 32; + inet_pton(AF_INET, "192.168.0.1", key->data); + assert(bpf_map_lookup_elem(map_fd, key, &value) == 0); + assert(value == 2); + + /* remove leaf node */ + key->prefixlen = 24; + inet_pton(AF_INET, "192.168.0.0", key->data); + assert(bpf_map_delete_elem(map_fd, key) == 0); + + key->prefixlen = 32; + inet_pton(AF_INET, "192.168.0.1", key->data); + assert(bpf_map_lookup_elem(map_fd, key, &value) == 0); + assert(value == 1); + + /* remove leaf (and intermediary) node */ + key->prefixlen = 24; + inet_pton(AF_INET, "192.168.1.0", key->data); + assert(bpf_map_delete_elem(map_fd, key) == 0); + + key->prefixlen = 32; + inet_pton(AF_INET, "192.168.1.1", key->data); + assert(bpf_map_lookup_elem(map_fd, key, &value) == 0); + assert(value == 1); + + /* remove root node */ + key->prefixlen = 16; + inet_pton(AF_INET, "192.168.0.0", key->data); + assert(bpf_map_delete_elem(map_fd, key) == 0); + + key->prefixlen = 32; + inet_pton(AF_INET, "192.168.128.1", key->data); + assert(bpf_map_lookup_elem(map_fd, key, &value) == 0); + assert(value == 3); + + /* remove last node */ + key->prefixlen = 24; + inet_pton(AF_INET, 
"192.168.128.0", key->data); + assert(bpf_map_delete_elem(map_fd, key) == 0); + + key->prefixlen = 32; + inet_pton(AF_INET, "192.168.128.1", key->data); + assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT); + + close(map_fd); +} + +static void test_lpm_get_next_key(void) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); + struct bpf_lpm_trie_key_u8 *key_p, *next_key_p; + size_t key_size; + __u32 value = 0; + int map_fd; + + key_size = sizeof(*key_p) + sizeof(__u32); + key_p = alloca(key_size); + next_key_p = alloca(key_size); + + map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, sizeof(value), 100, &opts); + assert(map_fd >= 0); + + /* empty tree. get_next_key should return ENOENT */ + assert(bpf_map_get_next_key(map_fd, NULL, key_p) == -ENOENT); + + /* get and verify the first key, get the second one should fail. */ + key_p->prefixlen = 16; + inet_pton(AF_INET, "192.168.0.0", key_p->data); + assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0); + + memset(key_p, 0, key_size); + assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0); + assert(key_p->prefixlen == 16 && key_p->data[0] == 192 && + key_p->data[1] == 168); + + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT); + + /* no exact matching key should get the first one in post order. */ + key_p->prefixlen = 8; + assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0); + assert(key_p->prefixlen == 16 && key_p->data[0] == 192 && + key_p->data[1] == 168); + + /* add one more element (total two) */ + key_p->prefixlen = 24; + inet_pton(AF_INET, "192.168.128.0", key_p->data); + assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0); + + memset(key_p, 0, key_size); + assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0); + assert(key_p->prefixlen == 24 && key_p->data[0] == 192 && + key_p->data[1] == 168 && key_p->data[2] == 128); + + memset(next_key_p, 0, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168); + + memcpy(key_p, next_key_p, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT); + + /* Add one more element (total three) */ + key_p->prefixlen = 24; + inet_pton(AF_INET, "192.168.0.0", key_p->data); + assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0); + + memset(key_p, 0, key_size); + assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0); + assert(key_p->prefixlen == 24 && key_p->data[0] == 192 && + key_p->data[1] == 168 && key_p->data[2] == 0); + + memset(next_key_p, 0, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168 && next_key_p->data[2] == 128); + + memcpy(key_p, next_key_p, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168); + + memcpy(key_p, next_key_p, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT); + + /* Add one more element (total four) */ + key_p->prefixlen = 24; + inet_pton(AF_INET, "192.168.1.0", key_p->data); + assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0); + + memset(key_p, 0, key_size); + assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0); + assert(key_p->prefixlen == 24 && key_p->data[0] == 192 && + key_p->data[1] == 168 && key_p->data[2] == 0); + + memset(next_key_p, 0, key_size); + 
assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168 && next_key_p->data[2] == 1); + + memcpy(key_p, next_key_p, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168 && next_key_p->data[2] == 128); + + memcpy(key_p, next_key_p, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168); + + memcpy(key_p, next_key_p, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT); + + /* Add one more element (total five) */ + key_p->prefixlen = 28; + inet_pton(AF_INET, "192.168.1.128", key_p->data); + assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0); + + memset(key_p, 0, key_size); + assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0); + assert(key_p->prefixlen == 24 && key_p->data[0] == 192 && + key_p->data[1] == 168 && key_p->data[2] == 0); + + memset(next_key_p, 0, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 28 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168 && next_key_p->data[2] == 1 && + next_key_p->data[3] == 128); + + memcpy(key_p, next_key_p, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168 && next_key_p->data[2] == 1); + + memcpy(key_p, next_key_p, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168 && next_key_p->data[2] == 128); + + memcpy(key_p, next_key_p, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168); + + memcpy(key_p, next_key_p, key_size); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT); + + /* no exact matching key should return the first one in post order */ + key_p->prefixlen = 22; + inet_pton(AF_INET, "192.168.1.0", key_p->data); + assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); + assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 && + next_key_p->data[1] == 168 && next_key_p->data[2] == 0); + + close(map_fd); +} + +#define MAX_TEST_KEYS 4 +struct lpm_mt_test_info { + int cmd; /* 0: update, 1: delete, 2: lookup, 3: get_next_key */ + int iter; + int map_fd; + struct { + __u32 prefixlen; + __u32 data; + } key[MAX_TEST_KEYS]; +}; + +static void *lpm_test_command(void *arg) +{ + int i, j, ret, iter, key_size; + struct lpm_mt_test_info *info = arg; + struct bpf_lpm_trie_key_u8 *key_p; + + key_size = sizeof(*key_p) + sizeof(__u32); + key_p = alloca(key_size); + for (iter = 0; iter < info->iter; iter++) + for (i = 0; i < MAX_TEST_KEYS; i++) { + /* first half of iterations in forward order, + * and second half in backward order. + */ + j = (iter < (info->iter / 2)) ? 
i : MAX_TEST_KEYS - i - 1; + key_p->prefixlen = info->key[j].prefixlen; + memcpy(key_p->data, &info->key[j].data, sizeof(__u32)); + if (info->cmd == 0) { + __u32 value = j; + /* update must succeed */ + assert(bpf_map_update_elem(info->map_fd, key_p, &value, 0) == 0); + } else if (info->cmd == 1) { + ret = bpf_map_delete_elem(info->map_fd, key_p); + assert(ret == 0 || errno == ENOENT); + } else if (info->cmd == 2) { + __u32 value; + ret = bpf_map_lookup_elem(info->map_fd, key_p, &value); + assert(ret == 0 || errno == ENOENT); + } else { + struct bpf_lpm_trie_key_u8 *next_key_p = alloca(key_size); + ret = bpf_map_get_next_key(info->map_fd, key_p, next_key_p); + assert(ret == 0 || errno == ENOENT || errno == ENOMEM); + } + } + + // Pass successful exit info back to the main thread + pthread_exit((void *)info); +} + +static void setup_lpm_mt_test_info(struct lpm_mt_test_info *info, int map_fd) +{ + info->iter = 2000; + info->map_fd = map_fd; + info->key[0].prefixlen = 16; + inet_pton(AF_INET, "192.168.0.0", &info->key[0].data); + info->key[1].prefixlen = 24; + inet_pton(AF_INET, "192.168.0.0", &info->key[1].data); + info->key[2].prefixlen = 24; + inet_pton(AF_INET, "192.168.128.0", &info->key[2].data); + info->key[3].prefixlen = 24; + inet_pton(AF_INET, "192.168.1.0", &info->key[3].data); +} + +static void test_lpm_multi_thread(void) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); + struct lpm_mt_test_info info[4]; + size_t key_size, value_size; + pthread_t thread_id[4]; + int i, map_fd; + void *ret; + + /* create a trie */ + value_size = sizeof(__u32); + key_size = sizeof(struct bpf_lpm_trie_key_hdr) + value_size; + map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, value_size, 100, &opts); + + /* create 4 threads to test update, delete, lookup and get_next_key */ + setup_lpm_mt_test_info(&info[0], map_fd); + for (i = 0; i < 4; i++) { + if (i != 0) + memcpy(&info[i], &info[0], sizeof(info[i])); + info[i].cmd = i; + assert(pthread_create(&thread_id[i], NULL, &lpm_test_command, &info[i]) == 0); + } + + for (i = 0; i < 4; i++) + assert(pthread_join(thread_id[i], &ret) == 0 && ret == (void *)&info[i]); + + close(map_fd); +} + +void test_lpm_trie_map_basic_ops(void) +{ + int i; + + /* we want predictable, pseudo random tests */ + srand(0xf00ba1); + + test_lpm_basic(); + test_lpm_order(); + + /* Test with 8, 16, 24, 32, ... 128 bit prefix length */ + for (i = 1; i <= 16; ++i) + test_lpm_map(i); + + test_lpm_ipaddr(); + test_lpm_delete(); + test_lpm_get_next_key(); + test_lpm_multi_thread(); + + printf("%s: PASS\n", __func__); +} diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c deleted file mode 100644 index d98c72dc563e..000000000000 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ /dev/null @@ -1,797 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Randomized tests for eBPF longest-prefix-match maps - * - * This program runs randomized tests against the lpm-bpf-map. It implements a - * "Trivial Longest Prefix Match" (tlpm) based on simple, linear, singly linked - * lists. The implementation should be pretty straightforward. - * - * Based on tlpm, this inserts randomized data into bpf-lpm-maps and verifies - * the trie-based bpf-map implementation behaves the same way as tlpm. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "bpf_util.h" - -struct tlpm_node { - struct tlpm_node *next; - size_t n_bits; - uint8_t key[]; -}; - -static struct tlpm_node *tlpm_match(struct tlpm_node *list, - const uint8_t *key, - size_t n_bits); - -static struct tlpm_node *tlpm_add(struct tlpm_node *list, - const uint8_t *key, - size_t n_bits) -{ - struct tlpm_node *node; - size_t n; - - n = (n_bits + 7) / 8; - - /* 'overwrite' an equivalent entry if one already exists */ - node = tlpm_match(list, key, n_bits); - if (node && node->n_bits == n_bits) { - memcpy(node->key, key, n); - return list; - } - - /* add new entry with @key/@n_bits to @list and return new head */ - - node = malloc(sizeof(*node) + n); - assert(node); - - node->next = list; - node->n_bits = n_bits; - memcpy(node->key, key, n); - - return node; -} - -static void tlpm_clear(struct tlpm_node *list) -{ - struct tlpm_node *node; - - /* free all entries in @list */ - - while ((node = list)) { - list = list->next; - free(node); - } -} - -static struct tlpm_node *tlpm_match(struct tlpm_node *list, - const uint8_t *key, - size_t n_bits) -{ - struct tlpm_node *best = NULL; - size_t i; - - /* Perform longest prefix-match on @key/@n_bits. That is, iterate all - * entries and match each prefix against @key. Remember the "best" - * entry we find (i.e., the longest prefix that matches) and return it - * to the caller when done. - */ - - for ( ; list; list = list->next) { - for (i = 0; i < n_bits && i < list->n_bits; ++i) { - if ((key[i / 8] & (1 << (7 - i % 8))) != - (list->key[i / 8] & (1 << (7 - i % 8)))) - break; - } - - if (i >= list->n_bits) { - if (!best || i > best->n_bits) - best = list; - } - } - - return best; -} - -static struct tlpm_node *tlpm_delete(struct tlpm_node *list, - const uint8_t *key, - size_t n_bits) -{ - struct tlpm_node *best = tlpm_match(list, key, n_bits); - struct tlpm_node *node; - - if (!best || best->n_bits != n_bits) - return list; - - if (best == list) { - node = best->next; - free(best); - return node; - } - - for (node = list; node; node = node->next) { - if (node->next == best) { - node->next = best->next; - free(best); - return list; - } - } - /* should never get here */ - assert(0); - return list; -} - -static void test_lpm_basic(void) -{ - struct tlpm_node *list = NULL, *t1, *t2; - - /* very basic, static tests to verify tlpm works as expected */ - - assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8)); - - t1 = list = tlpm_add(list, (uint8_t[]){ 0xff }, 8); - assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8)); - assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16)); - assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0x00 }, 16)); - assert(!tlpm_match(list, (uint8_t[]){ 0x7f }, 8)); - assert(!tlpm_match(list, (uint8_t[]){ 0xfe }, 8)); - assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 7)); - - t2 = list = tlpm_add(list, (uint8_t[]){ 0xff, 0xff }, 16); - assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8)); - assert(t2 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16)); - assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 15)); - assert(!tlpm_match(list, (uint8_t[]){ 0x7f, 0xff }, 16)); - - list = tlpm_delete(list, (uint8_t[]){ 0xff, 0xff }, 16); - assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8)); - assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16)); - - list = tlpm_delete(list, (uint8_t[]){ 0xff }, 8); - assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8)); - - 
tlpm_clear(list); -} - -static void test_lpm_order(void) -{ - struct tlpm_node *t1, *t2, *l1 = NULL, *l2 = NULL; - size_t i, j; - - /* Verify the tlpm implementation works correctly regardless of the - * order of entries. Insert a random set of entries into @l1, and copy - * the same data in reverse order into @l2. Then verify a lookup of - * random keys will yield the same result in both sets. - */ - - for (i = 0; i < (1 << 12); ++i) - l1 = tlpm_add(l1, (uint8_t[]){ - rand() % 0xff, - rand() % 0xff, - }, rand() % 16 + 1); - - for (t1 = l1; t1; t1 = t1->next) - l2 = tlpm_add(l2, t1->key, t1->n_bits); - - for (i = 0; i < (1 << 8); ++i) { - uint8_t key[] = { rand() % 0xff, rand() % 0xff }; - - t1 = tlpm_match(l1, key, 16); - t2 = tlpm_match(l2, key, 16); - - assert(!t1 == !t2); - if (t1) { - assert(t1->n_bits == t2->n_bits); - for (j = 0; j < t1->n_bits; ++j) - assert((t1->key[j / 8] & (1 << (7 - j % 8))) == - (t2->key[j / 8] & (1 << (7 - j % 8)))); - } - } - - tlpm_clear(l1); - tlpm_clear(l2); -} - -static void test_lpm_map(int keysize) -{ - LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); - volatile size_t n_matches, n_matches_after_delete; - size_t i, j, n_nodes, n_lookups; - struct tlpm_node *t, *list = NULL; - struct bpf_lpm_trie_key_u8 *key; - uint8_t *data, *value; - int r, map; - - /* Compare behavior of tlpm vs. bpf-lpm. Create a randomized set of - * prefixes and insert it into both tlpm and bpf-lpm. Then run some - * randomized lookups and verify both maps return the same result. - */ - - n_matches = 0; - n_matches_after_delete = 0; - n_nodes = 1 << 8; - n_lookups = 1 << 16; - - data = alloca(keysize); - memset(data, 0, keysize); - - value = alloca(keysize + 1); - memset(value, 0, keysize + 1); - - key = alloca(sizeof(*key) + keysize); - memset(key, 0, sizeof(*key) + keysize); - - map = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, - sizeof(*key) + keysize, - keysize + 1, - 4096, - &opts); - assert(map >= 0); - - for (i = 0; i < n_nodes; ++i) { - for (j = 0; j < keysize; ++j) - value[j] = rand() & 0xff; - value[keysize] = rand() % (8 * keysize + 1); - - list = tlpm_add(list, value, value[keysize]); - - key->prefixlen = value[keysize]; - memcpy(key->data, value, keysize); - r = bpf_map_update_elem(map, key, value, 0); - assert(!r); - } - - for (i = 0; i < n_lookups; ++i) { - for (j = 0; j < keysize; ++j) - data[j] = rand() & 0xff; - - t = tlpm_match(list, data, 8 * keysize); - - key->prefixlen = 8 * keysize; - memcpy(key->data, data, keysize); - r = bpf_map_lookup_elem(map, key, value); - assert(!r || errno == ENOENT); - assert(!t == !!r); - - if (t) { - ++n_matches; - assert(t->n_bits == value[keysize]); - for (j = 0; j < t->n_bits; ++j) - assert((t->key[j / 8] & (1 << (7 - j % 8))) == - (value[j / 8] & (1 << (7 - j % 8)))); - } - } - - /* Remove the first half of the elements in the tlpm and the - * corresponding nodes from the bpf-lpm. Then run the same - * large number of random lookups in both and make sure they match. - * Note: we need to count the number of nodes actually inserted - * since there may have been duplicates. 
- */ - for (i = 0, t = list; t; i++, t = t->next) - ; - for (j = 0; j < i / 2; ++j) { - key->prefixlen = list->n_bits; - memcpy(key->data, list->key, keysize); - r = bpf_map_delete_elem(map, key); - assert(!r); - list = tlpm_delete(list, list->key, list->n_bits); - assert(list); - } - for (i = 0; i < n_lookups; ++i) { - for (j = 0; j < keysize; ++j) - data[j] = rand() & 0xff; - - t = tlpm_match(list, data, 8 * keysize); - - key->prefixlen = 8 * keysize; - memcpy(key->data, data, keysize); - r = bpf_map_lookup_elem(map, key, value); - assert(!r || errno == ENOENT); - assert(!t == !!r); - - if (t) { - ++n_matches_after_delete; - assert(t->n_bits == value[keysize]); - for (j = 0; j < t->n_bits; ++j) - assert((t->key[j / 8] & (1 << (7 - j % 8))) == - (value[j / 8] & (1 << (7 - j % 8)))); - } - } - - close(map); - tlpm_clear(list); - - /* With 255 random nodes in the map, we are pretty likely to match - * something on every lookup. For statistics, use this: - * - * printf(" nodes: %zu\n" - * " lookups: %zu\n" - * " matches: %zu\n" - * "matches(delete): %zu\n", - * n_nodes, n_lookups, n_matches, n_matches_after_delete); - */ -} - -/* Test the implementation with some 'real world' examples */ - -static void test_lpm_ipaddr(void) -{ - LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); - struct bpf_lpm_trie_key_u8 *key_ipv4; - struct bpf_lpm_trie_key_u8 *key_ipv6; - size_t key_size_ipv4; - size_t key_size_ipv6; - int map_fd_ipv4; - int map_fd_ipv6; - __u64 value; - - key_size_ipv4 = sizeof(*key_ipv4) + sizeof(__u32); - key_size_ipv6 = sizeof(*key_ipv6) + sizeof(__u32) * 4; - key_ipv4 = alloca(key_size_ipv4); - key_ipv6 = alloca(key_size_ipv6); - - map_fd_ipv4 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, - key_size_ipv4, sizeof(value), - 100, &opts); - assert(map_fd_ipv4 >= 0); - - map_fd_ipv6 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, - key_size_ipv6, sizeof(value), - 100, &opts); - assert(map_fd_ipv6 >= 0); - - /* Fill data some IPv4 and IPv6 address ranges */ - value = 1; - key_ipv4->prefixlen = 16; - inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); - assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); - - value = 2; - key_ipv4->prefixlen = 24; - inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); - assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); - - value = 3; - key_ipv4->prefixlen = 24; - inet_pton(AF_INET, "192.168.128.0", key_ipv4->data); - assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); - - value = 5; - key_ipv4->prefixlen = 24; - inet_pton(AF_INET, "192.168.1.0", key_ipv4->data); - assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); - - value = 4; - key_ipv4->prefixlen = 23; - inet_pton(AF_INET, "192.168.0.0", key_ipv4->data); - assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0); - - value = 0xdeadbeef; - key_ipv6->prefixlen = 64; - inet_pton(AF_INET6, "2a00:1450:4001:814::200e", key_ipv6->data); - assert(bpf_map_update_elem(map_fd_ipv6, key_ipv6, &value, 0) == 0); - - /* Set tprefixlen to maximum for lookups */ - key_ipv4->prefixlen = 32; - key_ipv6->prefixlen = 128; - - /* Test some lookups that should come back with a value */ - inet_pton(AF_INET, "192.168.128.23", key_ipv4->data); - assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0); - assert(value == 3); - - inet_pton(AF_INET, "192.168.0.1", key_ipv4->data); - assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0); - assert(value == 2); - - inet_pton(AF_INET6, "2a00:1450:4001:814::", key_ipv6->data); - 
assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0); - assert(value == 0xdeadbeef); - - inet_pton(AF_INET6, "2a00:1450:4001:814::1", key_ipv6->data); - assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0); - assert(value == 0xdeadbeef); - - /* Test some lookups that should not match any entry */ - inet_pton(AF_INET, "10.0.0.1", key_ipv4->data); - assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -ENOENT); - - inet_pton(AF_INET, "11.11.11.11", key_ipv4->data); - assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -ENOENT); - - inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data); - assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -ENOENT); - - close(map_fd_ipv4); - close(map_fd_ipv6); -} - -static void test_lpm_delete(void) -{ - LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); - struct bpf_lpm_trie_key_u8 *key; - size_t key_size; - int map_fd; - __u64 value; - - key_size = sizeof(*key) + sizeof(__u32); - key = alloca(key_size); - - map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, - key_size, sizeof(value), - 100, &opts); - assert(map_fd >= 0); - - /* Add nodes: - * 192.168.0.0/16 (1) - * 192.168.0.0/24 (2) - * 192.168.128.0/24 (3) - * 192.168.1.0/24 (4) - * - * (1) - * / \ - * (IM) (3) - * / \ - * (2) (4) - */ - value = 1; - key->prefixlen = 16; - inet_pton(AF_INET, "192.168.0.0", key->data); - assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0); - - value = 2; - key->prefixlen = 24; - inet_pton(AF_INET, "192.168.0.0", key->data); - assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0); - - value = 3; - key->prefixlen = 24; - inet_pton(AF_INET, "192.168.128.0", key->data); - assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0); - - value = 4; - key->prefixlen = 24; - inet_pton(AF_INET, "192.168.1.0", key->data); - assert(bpf_map_update_elem(map_fd, key, &value, 0) == 0); - - /* remove non-existent node */ - key->prefixlen = 32; - inet_pton(AF_INET, "10.0.0.1", key->data); - assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT); - - key->prefixlen = 30; // unused prefix so far - inet_pton(AF_INET, "192.255.0.0", key->data); - assert(bpf_map_delete_elem(map_fd, key) == -ENOENT); - - key->prefixlen = 16; // same prefix as the root node - inet_pton(AF_INET, "192.255.0.0", key->data); - assert(bpf_map_delete_elem(map_fd, key) == -ENOENT); - - /* assert initial lookup */ - key->prefixlen = 32; - inet_pton(AF_INET, "192.168.0.1", key->data); - assert(bpf_map_lookup_elem(map_fd, key, &value) == 0); - assert(value == 2); - - /* remove leaf node */ - key->prefixlen = 24; - inet_pton(AF_INET, "192.168.0.0", key->data); - assert(bpf_map_delete_elem(map_fd, key) == 0); - - key->prefixlen = 32; - inet_pton(AF_INET, "192.168.0.1", key->data); - assert(bpf_map_lookup_elem(map_fd, key, &value) == 0); - assert(value == 1); - - /* remove leaf (and intermediary) node */ - key->prefixlen = 24; - inet_pton(AF_INET, "192.168.1.0", key->data); - assert(bpf_map_delete_elem(map_fd, key) == 0); - - key->prefixlen = 32; - inet_pton(AF_INET, "192.168.1.1", key->data); - assert(bpf_map_lookup_elem(map_fd, key, &value) == 0); - assert(value == 1); - - /* remove root node */ - key->prefixlen = 16; - inet_pton(AF_INET, "192.168.0.0", key->data); - assert(bpf_map_delete_elem(map_fd, key) == 0); - - key->prefixlen = 32; - inet_pton(AF_INET, "192.168.128.1", key->data); - assert(bpf_map_lookup_elem(map_fd, key, &value) == 0); - assert(value == 3); - - /* remove last node */ - key->prefixlen = 24; - inet_pton(AF_INET, 
"192.168.128.0", key->data); - assert(bpf_map_delete_elem(map_fd, key) == 0); - - key->prefixlen = 32; - inet_pton(AF_INET, "192.168.128.1", key->data); - assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT); - - close(map_fd); -} - -static void test_lpm_get_next_key(void) -{ - LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); - struct bpf_lpm_trie_key_u8 *key_p, *next_key_p; - size_t key_size; - __u32 value = 0; - int map_fd; - - key_size = sizeof(*key_p) + sizeof(__u32); - key_p = alloca(key_size); - next_key_p = alloca(key_size); - - map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, sizeof(value), 100, &opts); - assert(map_fd >= 0); - - /* empty tree. get_next_key should return ENOENT */ - assert(bpf_map_get_next_key(map_fd, NULL, key_p) == -ENOENT); - - /* get and verify the first key, get the second one should fail. */ - key_p->prefixlen = 16; - inet_pton(AF_INET, "192.168.0.0", key_p->data); - assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0); - - memset(key_p, 0, key_size); - assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0); - assert(key_p->prefixlen == 16 && key_p->data[0] == 192 && - key_p->data[1] == 168); - - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT); - - /* no exact matching key should get the first one in post order. */ - key_p->prefixlen = 8; - assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0); - assert(key_p->prefixlen == 16 && key_p->data[0] == 192 && - key_p->data[1] == 168); - - /* add one more element (total two) */ - key_p->prefixlen = 24; - inet_pton(AF_INET, "192.168.128.0", key_p->data); - assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0); - - memset(key_p, 0, key_size); - assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0); - assert(key_p->prefixlen == 24 && key_p->data[0] == 192 && - key_p->data[1] == 168 && key_p->data[2] == 128); - - memset(next_key_p, 0, key_size); - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); - assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 && - next_key_p->data[1] == 168); - - memcpy(key_p, next_key_p, key_size); - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT); - - /* Add one more element (total three) */ - key_p->prefixlen = 24; - inet_pton(AF_INET, "192.168.0.0", key_p->data); - assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0); - - memset(key_p, 0, key_size); - assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0); - assert(key_p->prefixlen == 24 && key_p->data[0] == 192 && - key_p->data[1] == 168 && key_p->data[2] == 0); - - memset(next_key_p, 0, key_size); - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); - assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 && - next_key_p->data[1] == 168 && next_key_p->data[2] == 128); - - memcpy(key_p, next_key_p, key_size); - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); - assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 && - next_key_p->data[1] == 168); - - memcpy(key_p, next_key_p, key_size); - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT); - - /* Add one more element (total four) */ - key_p->prefixlen = 24; - inet_pton(AF_INET, "192.168.1.0", key_p->data); - assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0); - - memset(key_p, 0, key_size); - assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0); - assert(key_p->prefixlen == 24 && key_p->data[0] == 192 && - key_p->data[1] == 168 && key_p->data[2] == 0); - - memset(next_key_p, 0, key_size); - 
assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); - assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 && - next_key_p->data[1] == 168 && next_key_p->data[2] == 1); - - memcpy(key_p, next_key_p, key_size); - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); - assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 && - next_key_p->data[1] == 168 && next_key_p->data[2] == 128); - - memcpy(key_p, next_key_p, key_size); - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); - assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 && - next_key_p->data[1] == 168); - - memcpy(key_p, next_key_p, key_size); - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT); - - /* Add one more element (total five) */ - key_p->prefixlen = 28; - inet_pton(AF_INET, "192.168.1.128", key_p->data); - assert(bpf_map_update_elem(map_fd, key_p, &value, 0) == 0); - - memset(key_p, 0, key_size); - assert(bpf_map_get_next_key(map_fd, NULL, key_p) == 0); - assert(key_p->prefixlen == 24 && key_p->data[0] == 192 && - key_p->data[1] == 168 && key_p->data[2] == 0); - - memset(next_key_p, 0, key_size); - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); - assert(next_key_p->prefixlen == 28 && next_key_p->data[0] == 192 && - next_key_p->data[1] == 168 && next_key_p->data[2] == 1 && - next_key_p->data[3] == 128); - - memcpy(key_p, next_key_p, key_size); - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); - assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 && - next_key_p->data[1] == 168 && next_key_p->data[2] == 1); - - memcpy(key_p, next_key_p, key_size); - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); - assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 && - next_key_p->data[1] == 168 && next_key_p->data[2] == 128); - - memcpy(key_p, next_key_p, key_size); - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); - assert(next_key_p->prefixlen == 16 && next_key_p->data[0] == 192 && - next_key_p->data[1] == 168); - - memcpy(key_p, next_key_p, key_size); - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT); - - /* no exact matching key should return the first one in post order */ - key_p->prefixlen = 22; - inet_pton(AF_INET, "192.168.1.0", key_p->data); - assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == 0); - assert(next_key_p->prefixlen == 24 && next_key_p->data[0] == 192 && - next_key_p->data[1] == 168 && next_key_p->data[2] == 0); - - close(map_fd); -} - -#define MAX_TEST_KEYS 4 -struct lpm_mt_test_info { - int cmd; /* 0: update, 1: delete, 2: lookup, 3: get_next_key */ - int iter; - int map_fd; - struct { - __u32 prefixlen; - __u32 data; - } key[MAX_TEST_KEYS]; -}; - -static void *lpm_test_command(void *arg) -{ - int i, j, ret, iter, key_size; - struct lpm_mt_test_info *info = arg; - struct bpf_lpm_trie_key_u8 *key_p; - - key_size = sizeof(*key_p) + sizeof(__u32); - key_p = alloca(key_size); - for (iter = 0; iter < info->iter; iter++) - for (i = 0; i < MAX_TEST_KEYS; i++) { - /* first half of iterations in forward order, - * and second half in backward order. - */ - j = (iter < (info->iter / 2)) ? 
i : MAX_TEST_KEYS - i - 1; - key_p->prefixlen = info->key[j].prefixlen; - memcpy(key_p->data, &info->key[j].data, sizeof(__u32)); - if (info->cmd == 0) { - __u32 value = j; - /* update must succeed */ - assert(bpf_map_update_elem(info->map_fd, key_p, &value, 0) == 0); - } else if (info->cmd == 1) { - ret = bpf_map_delete_elem(info->map_fd, key_p); - assert(ret == 0 || errno == ENOENT); - } else if (info->cmd == 2) { - __u32 value; - ret = bpf_map_lookup_elem(info->map_fd, key_p, &value); - assert(ret == 0 || errno == ENOENT); - } else { - struct bpf_lpm_trie_key_u8 *next_key_p = alloca(key_size); - ret = bpf_map_get_next_key(info->map_fd, key_p, next_key_p); - assert(ret == 0 || errno == ENOENT || errno == ENOMEM); - } - } - - // Pass successful exit info back to the main thread - pthread_exit((void *)info); -} - -static void setup_lpm_mt_test_info(struct lpm_mt_test_info *info, int map_fd) -{ - info->iter = 2000; - info->map_fd = map_fd; - info->key[0].prefixlen = 16; - inet_pton(AF_INET, "192.168.0.0", &info->key[0].data); - info->key[1].prefixlen = 24; - inet_pton(AF_INET, "192.168.0.0", &info->key[1].data); - info->key[2].prefixlen = 24; - inet_pton(AF_INET, "192.168.128.0", &info->key[2].data); - info->key[3].prefixlen = 24; - inet_pton(AF_INET, "192.168.1.0", &info->key[3].data); -} - -static void test_lpm_multi_thread(void) -{ - LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); - struct lpm_mt_test_info info[4]; - size_t key_size, value_size; - pthread_t thread_id[4]; - int i, map_fd; - void *ret; - - /* create a trie */ - value_size = sizeof(__u32); - key_size = sizeof(struct bpf_lpm_trie_key_hdr) + value_size; - map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, value_size, 100, &opts); - - /* create 4 threads to test update, delete, lookup and get_next_key */ - setup_lpm_mt_test_info(&info[0], map_fd); - for (i = 0; i < 4; i++) { - if (i != 0) - memcpy(&info[i], &info[0], sizeof(info[i])); - info[i].cmd = i; - assert(pthread_create(&thread_id[i], NULL, &lpm_test_command, &info[i]) == 0); - } - - for (i = 0; i < 4; i++) - assert(pthread_join(thread_id[i], &ret) == 0 && ret == (void *)&info[i]); - - close(map_fd); -} - -int main(void) -{ - int i; - - /* we want predictable, pseudo random tests */ - srand(0xf00ba1); - - /* Use libbpf 1.0 API mode */ - libbpf_set_strict_mode(LIBBPF_STRICT_ALL); - - test_lpm_basic(); - test_lpm_order(); - - /* Test with 8, 16, 24, 32, ... 128 bit prefix length */ - for (i = 1; i <= 16; ++i) - test_lpm_map(i); - - test_lpm_ipaddr(); - test_lpm_delete(); - test_lpm_get_next_key(); - test_lpm_multi_thread(); - - printf("test_lpm: OK\n"); - return 0; -} -- cgit v1.2.3-70-g09d2 From 04d4ce91b0bed4120e0c5fadc5291cebaa9c2a06 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 6 Dec 2024 19:06:22 +0800 Subject: selftests/bpf: Add more test cases for LPM trie Add more test cases for LPM trie in test_maps: 1) test_lpm_trie_update_flags It constructs various use cases for BPF_EXIST and BPF_NOEXIST and checks whether the return value of the update operation is as expected. 2) test_lpm_trie_update_full_map It tests update operations on a full LPM trie map. Adding a new node will fail and overwriting the value of an existing node will succeed. 3) test_lpm_trie_iterate_strs and test_lpm_trie_iterate_ints These two test cases check whether iteration through get_next_key is sorted and returns the expected keys. They delete the minimal key after each iteration and check whether the next iteration returns the second minimal key.
The only difference between these two test cases is the former one saves strings in the LPM trie and the latter saves integers. Without the fix of get_next_key, these two cases will fail as shown below: test_lpm_trie_iterate_strs(1091):FAIL:iterate #2 got abc exp abS test_lpm_trie_iterate_ints(1142):FAIL:iterate #1 got 0x2 exp 0x1 Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20241206110622.1161752-10-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- .../bpf/map_tests/lpm_trie_map_basic_ops.c | 395 +++++++++++++++++++++ 1 file changed, 395 insertions(+) diff --git a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c index f375c89d78a4..d32e4edac930 100644 --- a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c +++ b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_basic_ops.c @@ -20,10 +20,12 @@ #include #include #include +#include #include #include #include +#include #include "bpf_util.h" @@ -33,6 +35,22 @@ struct tlpm_node { uint8_t key[]; }; +struct lpm_trie_bytes_key { + union { + struct bpf_lpm_trie_key_hdr hdr; + __u32 prefixlen; + }; + unsigned char data[8]; +}; + +struct lpm_trie_int_key { + union { + struct bpf_lpm_trie_key_hdr hdr; + __u32 prefixlen; + }; + unsigned int data; +}; + static struct tlpm_node *tlpm_match(struct tlpm_node *list, const uint8_t *key, size_t n_bits); @@ -770,6 +788,378 @@ static void test_lpm_multi_thread(void) close(map_fd); } +static int lpm_trie_create(unsigned int key_size, unsigned int value_size, unsigned int max_entries) +{ + LIBBPF_OPTS(bpf_map_create_opts, opts); + int fd; + + opts.map_flags = BPF_F_NO_PREALLOC; + fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_trie", key_size, value_size, max_entries, + &opts); + CHECK(fd < 0, "bpf_map_create", "error %d\n", errno); + + return fd; +} + +static void test_lpm_trie_update_flags(void) +{ + struct lpm_trie_int_key key; + unsigned int value, got; + int fd, err; + + fd = lpm_trie_create(sizeof(key), sizeof(value), 3); + + /* invalid flags (Error) */ + key.prefixlen = 32; + key.data = 0; + value = 0; + err = bpf_map_update_elem(fd, &key, &value, BPF_F_LOCK); + CHECK(err != -EINVAL, "invalid update flag", "error %d\n", err); + + /* invalid flags (Error) */ + key.prefixlen = 32; + key.data = 0; + value = 0; + err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST | BPF_EXIST); + CHECK(err != -EINVAL, "invalid update flag", "error %d\n", err); + + /* overwrite an empty qp-trie (Error) */ + key.prefixlen = 32; + key.data = 0; + value = 2; + err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST); + CHECK(err != -ENOENT, "overwrite empty qp-trie", "error %d\n", err); + + /* add a new node */ + key.prefixlen = 16; + key.data = 0; + value = 1; + err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST); + CHECK(err, "add new elem", "error %d\n", err); + got = 0; + err = bpf_map_lookup_elem(fd, &key, &got); + CHECK(err, "lookup elem", "error %d\n", err); + CHECK(got != value, "check value", "got %d exp %d\n", got, value); + + /* add the same node as new node (Error) */ + err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST); + CHECK(err != -EEXIST, "add new elem again", "error %d\n", err); + + /* overwrite the existed node */ + value = 4; + err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST); + CHECK(err, "overwrite elem", "error %d\n", err); + got = 0; + err = bpf_map_lookup_elem(fd, &key, &got); + CHECK(err, "lookup elem", "error %d\n", err); + CHECK(got != value, "check value", "got %d exp 
%d\n", got, value); + + /* overwrite the node */ + value = 1; + err = bpf_map_update_elem(fd, &key, &value, BPF_ANY); + CHECK(err, "update elem", "error %d\n", err); + got = 0; + err = bpf_map_lookup_elem(fd, &key, &got); + CHECK(err, "lookup elem", "error %d\n", err); + CHECK(got != value, "check value", "got %d exp %d\n", got, value); + + /* overwrite a non-existent node which is the prefix of the first + * node (Error). + */ + key.prefixlen = 8; + key.data = 0; + value = 2; + err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST); + CHECK(err != -ENOENT, "overwrite nonexistent elem", "error %d\n", err); + + /* add a new node which is the prefix of the first node */ + err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST); + CHECK(err, "add new elem", "error %d\n", err); + got = 0; + err = bpf_map_lookup_elem(fd, &key, &got); + CHECK(err, "lookup key", "error %d\n", err); + CHECK(got != value, "check value", "got %d exp %d\n", got, value); + + /* add another new node which will be the sibling of the first node */ + key.prefixlen = 9; + key.data = htobe32(1 << 23); + value = 5; + err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST); + CHECK(err, "add new elem", "error %d\n", err); + got = 0; + err = bpf_map_lookup_elem(fd, &key, &got); + CHECK(err, "lookup key", "error %d\n", err); + CHECK(got != value, "check value", "got %d exp %d\n", got, value); + + /* overwrite the third node */ + value = 3; + err = bpf_map_update_elem(fd, &key, &value, BPF_ANY); + CHECK(err, "overwrite elem", "error %d\n", err); + got = 0; + err = bpf_map_lookup_elem(fd, &key, &got); + CHECK(err, "lookup key", "error %d\n", err); + CHECK(got != value, "check value", "got %d exp %d\n", got, value); + + /* delete the second node to make it an intermediate node */ + key.prefixlen = 8; + key.data = 0; + err = bpf_map_delete_elem(fd, &key); + CHECK(err, "del elem", "error %d\n", err); + + /* overwrite the intermediate node (Error) */ + value = 2; + err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST); + CHECK(err != -ENOENT, "overwrite nonexistent elem", "error %d\n", err); + + close(fd); +} + +static void test_lpm_trie_update_full_map(void) +{ + struct lpm_trie_int_key key; + int value, got; + int fd, err; + + fd = lpm_trie_create(sizeof(key), sizeof(value), 3); + + /* add a new node */ + key.prefixlen = 16; + key.data = 0; + value = 0; + err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST); + CHECK(err, "add new elem", "error %d\n", err); + got = 0; + err = bpf_map_lookup_elem(fd, &key, &got); + CHECK(err, "lookup elem", "error %d\n", err); + CHECK(got != value, "check value", "got %d exp %d\n", got, value); + + /* add new node */ + key.prefixlen = 8; + key.data = 0; + value = 1; + err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST); + CHECK(err, "add new elem", "error %d\n", err); + got = 0; + err = bpf_map_lookup_elem(fd, &key, &got); + CHECK(err, "lookup elem", "error %d\n", err); + CHECK(got != value, "check value", "got %d exp %d\n", got, value); + + /* add new node */ + key.prefixlen = 9; + key.data = htobe32(1 << 23); + value = 2; + err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST); + CHECK(err, "add new elem", "error %d\n", err); + got = 0; + err = bpf_map_lookup_elem(fd, &key, &got); + CHECK(err, "lookup elem", "error %d\n", err); + CHECK(got != value, "check value", "got %d exp %d\n", got, value); + + /* try to add more node (Error) */ + key.prefixlen = 32; + key.data = 0; + value = 3; + err = bpf_map_update_elem(fd, &key, &value, BPF_ANY); + CHECK(err != -ENOSPC, "add to full trie", 
"error %d\n", err); + + /* update the value of an existed node with BPF_EXIST */ + key.prefixlen = 16; + key.data = 0; + value = 4; + err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST); + CHECK(err, "overwrite elem", "error %d\n", err); + got = 0; + err = bpf_map_lookup_elem(fd, &key, &got); + CHECK(err, "lookup elem", "error %d\n", err); + CHECK(got != value, "check value", "got %d exp %d\n", got, value); + + /* update the value of an existed node with BPF_ANY */ + key.prefixlen = 9; + key.data = htobe32(1 << 23); + value = 5; + err = bpf_map_update_elem(fd, &key, &value, BPF_ANY); + CHECK(err, "overwrite elem", "error %d\n", err); + got = 0; + err = bpf_map_lookup_elem(fd, &key, &got); + CHECK(err, "lookup elem", "error %d\n", err); + CHECK(got != value, "check value", "got %d exp %d\n", got, value); + + close(fd); +} + +static int cmp_str(const void *a, const void *b) +{ + const char *str_a = *(const char **)a, *str_b = *(const char **)b; + + return strcmp(str_a, str_b); +} + +/* Save strings in LPM trie. The trailing '\0' for each string will be + * accounted in the prefixlen. The strings returned during the iteration + * should be sorted as expected. + */ +static void test_lpm_trie_iterate_strs(void) +{ + static const char * const keys[] = { + "ab", "abO", "abc", "abo", "abS", "abcd", + }; + const char *sorted_keys[ARRAY_SIZE(keys)]; + struct lpm_trie_bytes_key key, next_key; + unsigned int value, got, i, j, len; + struct lpm_trie_bytes_key *cur; + int fd, err; + + fd = lpm_trie_create(sizeof(key), sizeof(value), ARRAY_SIZE(keys)); + + for (i = 0; i < ARRAY_SIZE(keys); i++) { + unsigned int flags; + + /* add i-th element */ + flags = i % 2 ? BPF_NOEXIST : 0; + len = strlen(keys[i]); + /* include the trailing '\0' */ + key.prefixlen = (len + 1) * 8; + memset(key.data, 0, sizeof(key.data)); + memcpy(key.data, keys[i], len); + value = i + 100; + err = bpf_map_update_elem(fd, &key, &value, flags); + CHECK(err, "add elem", "#%u error %d\n", i, err); + + err = bpf_map_lookup_elem(fd, &key, &got); + CHECK(err, "lookup elem", "#%u error %d\n", i, err); + CHECK(got != value, "lookup elem", "#%u expect %u got %u\n", i, value, got); + + /* re-add i-th element (Error) */ + err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST); + CHECK(err != -EEXIST, "re-add elem", "#%u error %d\n", i, err); + + /* Overwrite i-th element */ + flags = i % 2 ? 
0 : BPF_EXIST; + value = i; + err = bpf_map_update_elem(fd, &key, &value, flags); + CHECK(err, "update elem", "error %d\n", err); + + /* Lookup #[0~i] elements */ + for (j = 0; j <= i; j++) { + len = strlen(keys[j]); + key.prefixlen = (len + 1) * 8; + memset(key.data, 0, sizeof(key.data)); + memcpy(key.data, keys[j], len); + err = bpf_map_lookup_elem(fd, &key, &got); + CHECK(err, "lookup elem", "#%u/%u error %d\n", i, j, err); + CHECK(got != j, "lookup elem", "#%u/%u expect %u got %u\n", + i, j, value, got); + } + } + + /* Add element to a full qp-trie (Error) */ + key.prefixlen = sizeof(key.data) * 8; + memset(key.data, 0, sizeof(key.data)); + value = 0; + err = bpf_map_update_elem(fd, &key, &value, 0); + CHECK(err != -ENOSPC, "add to full qp-trie", "error %d\n", err); + + /* Iterate sorted elements: no deletion */ + memcpy(sorted_keys, keys, sizeof(keys)); + qsort(sorted_keys, ARRAY_SIZE(sorted_keys), sizeof(sorted_keys[0]), cmp_str); + cur = NULL; + for (i = 0; i < ARRAY_SIZE(sorted_keys); i++) { + len = strlen(sorted_keys[i]); + err = bpf_map_get_next_key(fd, cur, &next_key); + CHECK(err, "iterate", "#%u error %d\n", i, err); + CHECK(next_key.prefixlen != (len + 1) * 8, "iterate", + "#%u invalid len %u expect %u\n", + i, next_key.prefixlen, (len + 1) * 8); + CHECK(memcmp(sorted_keys[i], next_key.data, len + 1), "iterate", + "#%u got %.*s exp %.*s\n", i, len, next_key.data, len, sorted_keys[i]); + + cur = &next_key; + } + err = bpf_map_get_next_key(fd, cur, &next_key); + CHECK(err != -ENOENT, "more element", "error %d\n", err); + + /* Iterate sorted elements: delete the found key after each iteration */ + cur = NULL; + for (i = 0; i < ARRAY_SIZE(sorted_keys); i++) { + len = strlen(sorted_keys[i]); + err = bpf_map_get_next_key(fd, cur, &next_key); + CHECK(err, "iterate", "#%u error %d\n", i, err); + CHECK(next_key.prefixlen != (len + 1) * 8, "iterate", + "#%u invalid len %u expect %u\n", + i, next_key.prefixlen, (len + 1) * 8); + CHECK(memcmp(sorted_keys[i], next_key.data, len + 1), "iterate", + "#%u got %.*s exp %.*s\n", i, len, next_key.data, len, sorted_keys[i]); + + cur = &next_key; + + err = bpf_map_delete_elem(fd, cur); + CHECK(err, "delete", "#%u error %d\n", i, err); + } + err = bpf_map_get_next_key(fd, cur, &next_key); + CHECK(err != -ENOENT, "non-empty qp-trie", "error %d\n", err); + + close(fd); +} + +/* Use the fixed prefixlen (32) and save integers in LPM trie. The iteration of + * LPM trie will return these integers in big-endian order, therefore, convert + * these integers to big-endian before update. After each iteration, delete the + * found key (the smallest integer) and expect the next iteration will return + * the second smallest number. 
+ */ +static void test_lpm_trie_iterate_ints(void) +{ + struct lpm_trie_int_key key, next_key; + unsigned int i, max_entries; + struct lpm_trie_int_key *cur; + unsigned int *data_set; + int fd, err; + bool value; + + max_entries = 4096; + data_set = calloc(max_entries, sizeof(*data_set)); + CHECK(!data_set, "malloc", "no mem\n"); + for (i = 0; i < max_entries; i++) + data_set[i] = i; + + fd = lpm_trie_create(sizeof(key), sizeof(value), max_entries); + value = true; + for (i = 0; i < max_entries; i++) { + key.prefixlen = 32; + key.data = htobe32(data_set[i]); + + err = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST); + CHECK(err, "add elem", "#%u error %d\n", i, err); + } + + cur = NULL; + for (i = 0; i < max_entries; i++) { + err = bpf_map_get_next_key(fd, cur, &next_key); + CHECK(err, "iterate", "#%u error %d\n", i, err); + CHECK(next_key.prefixlen != 32, "iterate", "#%u invalid len %u\n", + i, next_key.prefixlen); + CHECK(be32toh(next_key.data) != data_set[i], "iterate", "#%u got 0x%x exp 0x%x\n", + i, be32toh(next_key.data), data_set[i]); + cur = &next_key; + + /* + * Delete the minimal key, the next call of bpf_get_next_key() + * will return the second minimal key. + */ + err = bpf_map_delete_elem(fd, &next_key); + CHECK(err, "del elem", "#%u elem error %d\n", i, err); + } + err = bpf_map_get_next_key(fd, cur, &next_key); + CHECK(err != -ENOENT, "more element", "error %d\n", err); + + err = bpf_map_get_next_key(fd, NULL, &next_key); + CHECK(err != -ENOENT, "no-empty qp-trie", "error %d\n", err); + + free(data_set); + + close(fd); +} + void test_lpm_trie_map_basic_ops(void) { int i; @@ -789,5 +1179,10 @@ void test_lpm_trie_map_basic_ops(void) test_lpm_get_next_key(); test_lpm_multi_thread(); + test_lpm_trie_update_flags(); + test_lpm_trie_update_full_map(); + test_lpm_trie_iterate_strs(); + test_lpm_trie_iterate_ints(); + printf("%s: PASS\n", __func__); } -- cgit v1.2.3-70-g09d2
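A minimal standalone sketch (not part of any patch above) of the get_next_key iteration pattern the new tests exercise, assuming the libbpf 1.0 API (bpf_map_create, LIBBPF_OPTS); the struct name int_key, the map name "lpm_demo", and the entry counts are illustrative only, not taken from the patches.

/* Sketch: create an LPM trie from userspace and walk it with
 * bpf_map_get_next_key. Assumes libbpf >= 1.0; build with -lbpf.
 * The key layout mirrors struct bpf_lpm_trie_key_hdr followed by
 * 4 bytes of data.
 */
#include <assert.h>
#include <endian.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

struct int_key {
	__u32 prefixlen;	/* number of valid bits in data */
	__u32 data;		/* stored big-endian, like an IPv4 address */
};

int main(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);
	struct int_key key = {}, next = {};
	__u32 value = 1;
	int fd, i;

	fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_demo",
			    sizeof(key), sizeof(value), 16, &opts);
	assert(fd >= 0);

	/* Insert 3, 2, 1 out of order; BPF_NOEXIST fails on duplicates. */
	for (i = 3; i >= 1; i--) {
		key.prefixlen = 32;
		key.data = htobe32(i);
		assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
	}

	/* With the get_next_key ordering fixed, keys come back smallest
	 * first: a NULL cursor returns the minimal key, and each following
	 * call returns the next one until -ENOENT.
	 */
	assert(bpf_map_get_next_key(fd, NULL, &next) == 0);
	for (;;) {
		printf("prefixlen=%u data=%u\n", next.prefixlen, be32toh(next.data));
		key = next;
		if (bpf_map_get_next_key(fd, &key, &next))
			break;	/* -ENOENT: no more elements */
	}

	close(fd);
	return 0;
}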