| author | Jakub Kicinski <kuba@kernel.org> | 2023-12-07 09:32:23 -0800 |
|---|---|---|
| committer | Jakub Kicinski <kuba@kernel.org> | 2023-12-07 09:32:24 -0800 |
| commit | c85e5594b7456d55103fa1f1bde47cd4e002e7fb (patch) | |
| tree | 1c0a09fc3833761d00c051008ebd92eedddab4b1 /tools/testing/selftests/bpf/prog_tests | |
| parent | 0ad722bd9ee3a9bdfca9613148645e4c9b7f26cf (diff) | |
| parent | ffed24eff9e0e52d8e74df1c18db8ed43b4666e6 (diff) | |
Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:
====================
pull-request: bpf 2023-12-06
We've added 4 non-merge commits during the last 6 days, which contain
a total of 7 files changed, 185 insertions(+), 55 deletions(-).
The main changes are:
1) Fix race found by syzkaller on prog_array_map_poke_run when
a BPF program's kallsym symbols were still missing, from Jiri Olsa.
2) Fix BPF verifier's branch offset comparison for BPF_JMP32 | BPF_JA,
from Yonghong Song (see the encoding sketch after this list).
3) Fix xsk's poll handling to only set the event mask on bound xsk sockets,
from Yewon Choi (see the userspace sketch after the commit message).
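For item 2, a minimal sketch of the two encodings involved, based on the documented BPF instruction set (the offset values are illustrative, not taken from the patch): a classic BPF_JMP | BPF_JA carries its 16-bit branch offset in the off field, while the cpu=v4 long jump BPF_JMP32 | BPF_JA ("gotol") carries a 32-bit offset in imm, so verifier code that compares or adjusts branch offsets must read imm, not off, for the latter.

#include <linux/bpf.h>

/* Classic unconditional jump: 16-bit branch offset lives in 'off'. */
struct bpf_insn ja = {
	.code = BPF_JMP | BPF_JA,
	.off  = 16,		/* illustrative target */
};

/* cpu=v4 long jump ("gotol"): 32-bit branch offset lives in 'imm';
 * 'off' is unused, so reading 'off' for this instruction is the
 * class of mistake the fix addresses.
 */
struct bpf_insn gotol = {
	.code = BPF_JMP32 | BPF_JA,
	.imm  = 70000,		/* illustrative target beyond 16-bit range */
};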
* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
selftests/bpf: Add test for early update in prog_array_map_poke_run
bpf: Fix prog_array_map_poke_run map poke update
xsk: Skip polling event check for unbound socket
bpf: Fix a verifier bug due to incorrect branch offset comparison with cpu=v4
====================
Link: https://lore.kernel.org/r/20231206220528.12093-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
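For item 3, a minimal userspace sketch of the behavior being fixed (illustrative, not part of the patch): poll() on an AF_XDP socket that was created but never bound should report no events, whereas previously the event mask could also be set for such unbound sockets.

#include <poll.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef AF_XDP
#define AF_XDP 44	/* from <linux/socket.h>, for older libc headers */
#endif

int main(void)
{
	/* Create an AF_XDP socket but deliberately skip bind(). */
	int fd = socket(AF_XDP, SOCK_RAW, 0);	/* needs CAP_NET_RAW */
	if (fd < 0) {
		perror("socket(AF_XDP)");
		return 1;
	}

	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
	int n = poll(&pfd, 1, 0);

	/* With the fix applied, the unbound socket should report no events. */
	printf("poll() = %d, revents = 0x%x\n", n, pfd.revents);
	return 0;
}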
Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
| Mode | Path | Lines |
|---|---|---|
| -rw-r--r-- | tools/testing/selftests/bpf/prog_tests/tailcalls.c | 84 |

1 file changed, 84 insertions(+), 0 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index fc6b2954e8f5..59993fc9c0d7 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -1,6 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <unistd.h>
 #include <test_progs.h>
 #include <network_helpers.h>
+#include "tailcall_poke.skel.h"
+
 
 /* test_tailcall_1 checks basic functionality by patching multiple locations
  * in a single program for a single tail call slot with nop->jmp, jmp->nop
@@ -1105,6 +1108,85 @@ out:
 	bpf_object__close(tgt_obj);
 }
 
+#define JMP_TABLE "/sys/fs/bpf/jmp_table"
+
+static int poke_thread_exit;
+
+static void *poke_update(void *arg)
+{
+	__u32 zero = 0, prog1_fd, prog2_fd, map_fd;
+	struct tailcall_poke *call = arg;
+
+	map_fd = bpf_map__fd(call->maps.jmp_table);
+	prog1_fd = bpf_program__fd(call->progs.call1);
+	prog2_fd = bpf_program__fd(call->progs.call2);
+
+	while (!poke_thread_exit) {
+		bpf_map_update_elem(map_fd, &zero, &prog1_fd, BPF_ANY);
+		bpf_map_update_elem(map_fd, &zero, &prog2_fd, BPF_ANY);
+	}
+
+	return NULL;
+}
+
+/*
+ * We are trying to hit prog array update during another program load
+ * that shares the same prog array map.
+ *
+ * For that we share the jmp_table map between two skeleton instances
+ * by pinning the jmp_table to same path. Then first skeleton instance
+ * periodically updates jmp_table in 'poke update' thread while we load
+ * the second skeleton instance in the main thread.
+ */
+static void test_tailcall_poke(void)
+{
+	struct tailcall_poke *call, *test;
+	int err, cnt = 10;
+	pthread_t thread;
+
+	unlink(JMP_TABLE);
+
+	call = tailcall_poke__open_and_load();
+	if (!ASSERT_OK_PTR(call, "tailcall_poke__open"))
+		return;
+
+	err = bpf_map__pin(call->maps.jmp_table, JMP_TABLE);
+	if (!ASSERT_OK(err, "bpf_map__pin"))
+		goto out;
+
+	err = pthread_create(&thread, NULL, poke_update, call);
+	if (!ASSERT_OK(err, "new toggler"))
+		goto out;
+
+	while (cnt--) {
+		test = tailcall_poke__open();
+		if (!ASSERT_OK_PTR(test, "tailcall_poke__open"))
+			break;
+
+		err = bpf_map__set_pin_path(test->maps.jmp_table, JMP_TABLE);
+		if (!ASSERT_OK(err, "bpf_map__pin")) {
+			tailcall_poke__destroy(test);
+			break;
+		}
+
+		bpf_program__set_autoload(test->progs.test, true);
+		bpf_program__set_autoload(test->progs.call1, false);
+		bpf_program__set_autoload(test->progs.call2, false);
+
+		err = tailcall_poke__load(test);
+		tailcall_poke__destroy(test);
+		if (!ASSERT_OK(err, "tailcall_poke__load"))
+			break;
+	}
+
+	poke_thread_exit = 1;
+	ASSERT_OK(pthread_join(thread, NULL), "pthread_join");
+
+out:
+	bpf_map__unpin(call->maps.jmp_table, JMP_TABLE);
+	tailcall_poke__destroy(call);
+}
+
 void test_tailcalls(void)
 {
 	if (test__start_subtest("tailcall_1"))
@@ -1139,4 +1221,6 @@ void test_tailcalls(void)
 		test_tailcall_bpf2bpf_fentry_fexit();
 	if (test__start_subtest("tailcall_bpf2bpf_fentry_entry"))
 		test_tailcall_bpf2bpf_fentry_entry();
+	if (test__start_subtest("tailcall_poke"))
+		test_tailcall_poke();
 }
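To exercise the new subtest, something like the following should work from a kernel tree (assumptions: the BPF selftests build against this kernel, the test runs as root, and bpffs is mounted at /sys/fs/bpf, since the test pins the shared jmp_table there):

cd tools/testing/selftests/bpf
make
sudo ./test_progs -t tailcalls/tailcall_poke

A pass means ten back-to-back loads of the second skeleton completed while the poke_update thread was flipping the pinned jmp_table entry, i.e. none of those load/update windows tripped the prog_array_map_poke_run issue fixed in this series.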