author    Alexei Starovoitov <ast@kernel.org>    2022-01-18 14:26:42 -0800
committer Alexei Starovoitov <ast@kernel.org>    2022-01-18 14:29:11 -0800
commit    2a1aff6035187d877d7b6f28f81b0a084c00e17a
tree      f49f1d93bf0b46c892f3ea1a47e397d7ca807e48
parent    e80f2a0d194605553315de68284fc41969f81f62
parent    4656569643409568fa7c162614c17277abdf84de
Merge branch 'Introduce unstable CT lookup helpers'
Kumar Kartikeya says:

====================

This series adds unstable conntrack lookup helpers using BPF kfunc support. The patch adding the lookup helper is based off of Maxim's recent patch to aid in rebasing their series on top of this, all adjusted to work with module kfuncs [0].

[0]: https://lore.kernel.org/bpf/20211019144655.3483197-8-maximmi@nvidia.com

To enable returning a reference to struct nf_conn, the verifier is extended to support reference tracking for PTR_TO_BTF_ID, and kfuncs are extended with support for working as acquire/release functions, similar to existing BPF helpers. A kfunc returning a pointer (limited to PTR_TO_BTF_ID in the kernel) can now also return a PTR_TO_BTF_ID_OR_NULL, typically needed when acquiring a resource can fail. kfuncs can also receive PTR_TO_CTX and PTR_TO_MEM (with some limitations) as arguments, and there is now support for passing a mem, len pair as arguments to a kfunc; in such cases, passing a pointer to an unsized type (void) is also permitted.

Please see individual commits for details.

Changelog:
----------

v7 -> v8:
v7: https://lore.kernel.org/bpf/20220111180428.931466-1-memxor@gmail.com

 * Move enum btf_kfunc_hook to btf.c (Alexei)
 * Drop verbose log for unlikely failure case in __find_kfunc_desc_btf (Alexei)
 * Remove unnecessary barrier in register_btf_kfunc_id_set (Alexei)
 * Switch macro in bpf_nf test to __always_inline function (Alexei)

v6 -> v7:
v6: https://lore.kernel.org/bpf/20220102162115.1506833-1-memxor@gmail.com

 * Drop try_module_get_live patch, use flag in btf_module struct (Alexei)
 * Add comments and expand commit message detailing why we have to concatenate and sort vmlinux kfunc BTF ID sets (Alexei)
 * Use bpf_testmod for testing btf_try_get_module race (Alexei)
 * Use bpf_prog_type for both btf_kfunc_id_set_contains and register_btf_kfunc_id_set calls (Alexei)
 * In case of module set registration, directly assign set (Alexei)
 * Add CONFIG_USERFAULTFD=y to selftest config
 * Fix other nits

v5 -> v6:
v5: https://lore.kernel.org/bpf/20211230023705.3860970-1-memxor@gmail.com

 * Fix for a bug in btf_try_get_module leading to use-after-free
 * Drop *kallsyms_on_each_symbol loop, reinstate register_btf_kfunc_id_set (Alexei)
 * btf_free_kfunc_set_tab now takes struct btf, and handles resetting tab to NULL
 * Check return value of btf_name_by_offset for param_name
 * Instead of using tmp_set, use btf->kfunc_set_tab directly, and simplify cleanup

v4 -> v5:
v4: https://lore.kernel.org/bpf/20211217015031.1278167-1-memxor@gmail.com

 * Move nf_conntrack helpers code to its own separate file (Toke, Pablo)
 * Remove verifier callbacks, put btf_id_sets in struct btf (Alexei)
 * Convert the in-kernel users away from the old API
 * Change len__ prefix convention to __sz suffix (Alexei)
 * Drop parent_ref_obj_id patch (Alexei)

v3 -> v4:
v3: https://lore.kernel.org/bpf/20211210130230.4128676-1-memxor@gmail.com

 * Guard unstable CT helpers with CONFIG_DEBUG_INFO_BTF_MODULES
 * Move addition of prog_test test kfuncs to selftest commit
 * Move negative kfunc tests to test_verifier suite
 * Limit struct nesting depth to 4, which should be enough for now

v2 -> v3:
v2: https://lore.kernel.org/bpf/20211209170929.3485242-1-memxor@gmail.com

 * Fix build error for !CONFIG_BPF_SYSCALL (Patchwork)

RFC v1 -> v2:
v1: https://lore.kernel.org/bpf/20211030144609.263572-1-memxor@gmail.com

 * Limit PTR_TO_MEM support to pointer to scalar, or struct with scalars (Alexei)
 * Use btf_id_set for checking acquire, release, ret type null (Alexei)
 * Introduce opts struct for CT helpers, move int err parameter to it
 * Add l4proto as parameter to CT helper's opts, remove separate tcp/udp helpers
 * Add support for mem, len argument pair to kfunc
 * Allow void * as pointer type for mem, len argument pair
 * Extend selftests to cover new additions to kfuncs
 * Copy ref_obj_id to PTR_TO_BTF_ID dst_reg on btf_struct_access, test it
 * Fix other misc nits, bugs, and expand commit messages

====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
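
For orientation, here is a minimal sketch of how a BPF program might call the unstable CT lookup helpers this series introduces. It is not taken from the series: the bpf_xdp_ct_lookup/bpf_ct_release signatures and the bpf_ct_opts layout below are assumptions based on this cover letter (see the individual commits for the authoritative definitions). The tuple__sz/opts__sz parameters illustrate the mem, len argument pairs with the __sz suffix convention (v4 -> v5), and the NULL check plus bpf_ct_release reflect the acquire/release kfunc and PTR_TO_BTF_ID_OR_NULL support described above.

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch; kfunc signatures and struct layout assumed, not authoritative. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* Local mirror of the opts struct added by this series; error and l4proto
 * live in opts per the v1 -> v2 changelog.
 */
struct bpf_ct_opts___local {
	s32 netns_id;
	s32 error;
	u8 l4proto;
	u8 reserved[3];
};

/* Module/vmlinux kfuncs, resolved through BTF via __ksym */
struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx,
				  struct bpf_sock_tuple *bpf_tuple,
				  u32 tuple__sz,
				  struct bpf_ct_opts___local *opts,
				  u32 opts__sz) __ksym;
void bpf_ct_release(struct nf_conn *ct) __ksym;

SEC("xdp")
int ct_lookup_sketch(struct xdp_md *ctx)
{
	struct bpf_ct_opts___local opts = { .netns_id = -1, .l4proto = 6 /* TCP */ };
	struct bpf_sock_tuple tup = {};
	struct nf_conn *ct;

	/* A real program would fill tup.ipv4 from the parsed packet */
	ct = bpf_xdp_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
	if (!ct)	/* acquire kfunc may return NULL (PTR_TO_BTF_ID_OR_NULL) */
		return XDP_PASS;
	/* ct carries a reference; the verifier rejects paths that leak it */
	bpf_ct_release(ct);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

On lookup failure, the reason is reported through opts->error rather than a separate int err out-parameter, per the v1 -> v2 changelog.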
Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c')
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c | 230
1 file changed, 230 insertions(+), 0 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
new file mode 100644
index 000000000000..d43f548c572c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <unistd.h>
+#include <pthread.h>
+#include <sys/mman.h>
+#include <stdatomic.h>
+#include <test_progs.h>
+#include <sys/syscall.h>
+#include <linux/module.h>
+#include <linux/userfaultfd.h>
+
+#include "ksym_race.skel.h"
+#include "bpf_mod_race.skel.h"
+#include "kfunc_call_race.skel.h"
+
+/* This test crafts a race between btf_try_get_module and do_init_module, and
+ * checks whether btf_try_get_module handles the invocation for a well-formed
+ * but uninitialized module correctly. Unless the module has completed its
+ * initcalls, the verifier should fail the program load and return ENXIO.
+ *
+ * userfaultfd is used to trigger a fault in an fmod_ret program and make it
+ * sleep; the BPF program under test is then loaded and the return value from
+ * the verifier is inspected. After this, the userfaultfd is closed so that
+ * the module loading thread makes forward progress, and fmod_ret injects an
+ * error so that the module load fails and the module is freed. (A sketch of
+ * the BPF-side fmod_ret program appears after the diff.)
+ *
+ * If the verifier succeeded in loading the supplied program, it would end up
+ * taking a reference to the freed module and trigger a crash when the program
+ * fd is closed later. This is true for both kfuncs and ksyms. In both cases,
+ * the crash is triggered inside bpf_prog_free_deferred, when the module
+ * reference is finally released.
+ */
+
+struct test_config {
+ const char *str_open;
+ void *(*bpf_open_and_load)();
+ void (*bpf_destroy)(void *);
+};
+
+enum test_state {
+ _TS_INVALID,
+ TS_MODULE_LOAD,
+ TS_MODULE_LOAD_FAIL,
+};
+
+static _Atomic enum test_state state = _TS_INVALID;
+
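+/* Raw syscall wrappers: libc does not provide finit_module/delete_module */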
+static int sys_finit_module(int fd, const char *param_values, int flags)
+{
+ return syscall(__NR_finit_module, fd, param_values, flags);
+}
+
+static int sys_delete_module(const char *name, unsigned int flags)
+{
+ return syscall(__NR_delete_module, name, flags);
+}
+
+static int load_module(const char *mod)
+{
+ int ret, fd;
+
+ fd = open(mod, O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+ ret = sys_finit_module(fd, "", 0);
+ close(fd);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static void *load_module_thread(void *p)
+{
+ if (!ASSERT_NEQ(load_module("bpf_testmod.ko"), 0, "load_module_thread must fail"))
+ atomic_store(&state, TS_MODULE_LOAD);
+ else
+ atomic_store(&state, TS_MODULE_LOAD_FAIL);
+ return p;
+}
+
+static int sys_userfaultfd(int flags)
+{
+ return syscall(__NR_userfaultfd, flags);
+}
+
+static int test_setup_uffd(void *fault_addr)
+{
+ struct uffdio_register uffd_register = {};
+ struct uffdio_api uffd_api = {};
+ int uffd;
+
+ uffd = sys_userfaultfd(O_CLOEXEC);
+ if (uffd < 0)
+ return -errno;
+
+ uffd_api.api = UFFD_API;
+ uffd_api.features = 0;
+ if (ioctl(uffd, UFFDIO_API, &uffd_api)) {
+ close(uffd);
+ return -1;
+ }
+
+ uffd_register.range.start = (unsigned long)fault_addr;
+ uffd_register.range.len = 4096;
+ uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+ if (ioctl(uffd, UFFDIO_REGISTER, &uffd_register)) {
+ close(uffd);
+ return -1;
+ }
+ return uffd;
+}
+
+static void test_bpf_mod_race_config(const struct test_config *config)
+{
+ void *fault_addr, *skel_fail;
+ struct bpf_mod_race *skel;
+ struct uffd_msg uffd_msg;
+ pthread_t load_mod_thrd;
+ _Atomic int *blockingp;
+ int uffd, ret;
+
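+ /* Map a page that userspace never touches; the first access from the
+ * sleepable fmod_ret program faults and blocks on the userfaultfd.
+ */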
+ fault_addr = mmap(0, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (!ASSERT_NEQ(fault_addr, MAP_FAILED, "mmap for uffd registration"))
+ return;
+
+ if (!ASSERT_OK(sys_delete_module("bpf_testmod", 0), "unload bpf_testmod"))
+ goto end_mmap;
+
+ skel = bpf_mod_race__open();
+ if (!ASSERT_OK_PTR(skel, "bpf_mod_race__open"))
+ goto end_module;
+
+ skel->rodata->bpf_mod_race_config.tgid = getpid();
+ skel->rodata->bpf_mod_race_config.inject_error = -4242;
+ skel->rodata->bpf_mod_race_config.fault_addr = fault_addr;
+ if (!ASSERT_OK(bpf_mod_race__load(skel), "bpf_mod_race__load"))
+ goto end_destroy;
+ blockingp = (_Atomic int *)&skel->bss->bpf_blocking;
+
+ if (!ASSERT_OK(bpf_mod_race__attach(skel), "bpf_mod_race__attach"))
+ goto end_destroy;
+
+ uffd = test_setup_uffd(fault_addr);
+ if (!ASSERT_GE(uffd, 0, "userfaultfd open + register address"))
+ goto end_destroy;
+
+ if (!ASSERT_OK(pthread_create(&load_mod_thrd, NULL, load_module_thread, NULL),
+ "load module thread"))
+ goto end_uffd;
+
+ /* Now we either fail loading the module or block in the bpf prog; spin to find out */
+ while (!atomic_load(&state) && !atomic_load(blockingp))
+ ;
+ if (!ASSERT_EQ(state, _TS_INVALID, "module load should block"))
+ goto end_join;
+ if (!ASSERT_EQ(*blockingp, 1, "module load blocked")) {
+ pthread_kill(load_mod_thrd, SIGKILL);
+ goto end_uffd;
+ }
+
+ /* We might have set bpf_blocking to 1, but may not have blocked in
+ * bpf_copy_from_user. Read from the userfaultfd descriptor to verify that.
+ */
+ if (!ASSERT_EQ(read(uffd, &uffd_msg, sizeof(uffd_msg)), sizeof(uffd_msg),
+ "read uffd block event"))
+ goto end_join;
+ if (!ASSERT_EQ(uffd_msg.event, UFFD_EVENT_PAGEFAULT, "read uffd event is pagefault"))
+ goto end_join;
+
+ /* We know that load_mod_thrd is blocked in the fmod_ret program, and the
+ * module state is still MODULE_STATE_COMING because mod->init hasn't
+ * returned. This is the moment we try to load a program calling the kfunc
+ * and check that we get ENXIO from the verifier.
+ */
+ skel_fail = config->bpf_open_and_load();
+ ret = errno;
+ if (!ASSERT_EQ(skel_fail, NULL, config->str_open)) {
+ /* Close uffd to unblock load_mod_thrd */
+ close(uffd);
+ uffd = -1;
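+ /* Wait for the fmod_ret program to resume: it sets bpf_blocking to 2
+ * after the faulting copy completes.
+ */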
+ while (atomic_load(blockingp) != 2)
+ ;
+ ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
+ config->bpf_destroy(skel_fail);
+ goto end_join;
+ }
+ ASSERT_EQ(ret, ENXIO, "verifier returns ENXIO");
+ ASSERT_EQ(skel->data->res_try_get_module, false, "btf_try_get_module == false");
+
+ close(uffd);
+ uffd = -1;
+end_join:
+ pthread_join(load_mod_thrd, NULL);
+ if (uffd < 0)
+ ASSERT_EQ(atomic_load(&state), TS_MODULE_LOAD_FAIL, "load_mod_thrd success");
+end_uffd:
+ if (uffd >= 0)
+ close(uffd);
+end_destroy:
+ bpf_mod_race__destroy(skel);
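+ /* Let an RCU grace period elapse so deferred frees complete before bpf_testmod is reloaded */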
+ ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
+end_module:
+ sys_delete_module("bpf_testmod", 0);
+ ASSERT_OK(load_module("bpf_testmod.ko"), "restore bpf_testmod");
+end_mmap:
+ munmap(fault_addr, 4096);
+ atomic_store(&state, _TS_INVALID);
+}
+
+static const struct test_config ksym_config = {
+ .str_open = "ksym_race__open_and_load",
+ .bpf_open_and_load = (void *)ksym_race__open_and_load,
+ .bpf_destroy = (void *)ksym_race__destroy,
+};
+
+static const struct test_config kfunc_config = {
+ .str_open = "kfunc_call_race__open_and_load",
+ .bpf_open_and_load = (void *)kfunc_call_race__open_and_load,
+ .bpf_destroy = (void *)kfunc_call_race__destroy,
+};
+
+void serial_test_bpf_mod_race(void)
+{
+ if (test__start_subtest("ksym (used_btfs UAF)"))
+ test_bpf_mod_race_config(&ksym_config);
+ if (test__start_subtest("kfunc (kfunc_btf_tab UAF)"))
+ test_bpf_mod_race_config(&kfunc_config);
+}
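
The fmod_ret program referenced in the test's opening comment lives in progs/bpf_mod_race.c, which is not part of this diff. Purely as a hedged sketch of its shape, inferred from how the test above consumes bpf_mod_race_config and bpf_blocking (the attach point name is illustrative, and the probe that records res_try_get_module is omitted):

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: inferred from the userspace test, not the actual
 * progs/bpf_mod_race.c; the fmod_ret hook point is illustrative.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

const volatile struct {
	pid_t tgid;		/* only act on the test process */
	int inject_error;	/* returned to make the module load fail */
	void *fault_addr;	/* uffd-armed page mapped by the test */
} bpf_mod_race_config = { -1 };

int bpf_blocking;		/* 1: about to fault, 2: resumed past the fault */

/* Sleepable fmod_ret on an error-injection-capable function in the module
 * init path (illustrative attach point).
 */
SEC("fmod_ret.s/do_init_module")
int BPF_PROG(block_module_init, struct module *mod, int ret)
{
	char dst;

	if ((bpf_get_current_pid_tgid() >> 32) != bpf_mod_race_config.tgid)
		return 0;
	bpf_blocking = 1;
	/* Faults on the uffd-armed page and sleeps until the test closes the
	 * userfaultfd; the module stays in MODULE_STATE_COMING meanwhile.
	 */
	bpf_copy_from_user(&dst, sizeof(dst), bpf_mod_race_config.fault_addr);
	bpf_blocking = 2;
	return bpf_mod_race_config.inject_error;
}

char _license[] SEC("license") = "GPL";

If the verifier correctly rejects the racing load with ENXIO, closing the userfaultfd lets this program resume, the injected error fails the module load, and bpf_testmod is freed with no BPF-side references left dangling.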