From a9d2fae89fa8eb638203d8a4da435c647c12dfa3 Mon Sep 17 00:00:00 2001
From: Alan Maguire
Date: Tue, 12 Jul 2022 13:31:45 +0100
Subject: selftests/bpf: add a ksym iter subtest

Add a subtest verifying BPF ksym iter behaviour. The BPF ksym iter
program shows an example of dumping a format different from
/proc/kallsyms. It adds KIND and MAX_SIZE fields, which represent the
kind of symbol (core kernel, module, ftrace, bpf, or kprobe) and the
maximum size the symbol can be. The latter is calculated from the
difference between the current symbol value and the next symbol value.

The key benefit of this iterator will likely be supporting in-kernel
data gathering rather than dumping symbol details to userspace and
parsing the results.

Signed-off-by: Alan Maguire
Acked-by: Yonghong Song
Link: https://lore.kernel.org/r/1657629105-7812-3-git-send-email-alan.maguire@oracle.com
Signed-off-by: Alexei Starovoitov
---
 tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 7ff5fa93d056..a33874b081b6 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -27,6 +27,7 @@
 #include "bpf_iter_test_kern5.skel.h"
 #include "bpf_iter_test_kern6.skel.h"
 #include "bpf_iter_bpf_link.skel.h"
+#include "bpf_iter_ksym.skel.h"

 static int duration;

@@ -1120,6 +1121,19 @@ static void test_link_iter(void)
 	bpf_iter_bpf_link__destroy(skel);
 }

+static void test_ksym_iter(void)
+{
+	struct bpf_iter_ksym *skel;
+
+	skel = bpf_iter_ksym__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
+		return;
+
+	do_dummy_read(skel->progs.dump_ksym);
+
+	bpf_iter_ksym__destroy(skel);
+}
+
 #define CMP_BUFFER_SIZE 1024
 static char task_vma_output[CMP_BUFFER_SIZE];
 static char proc_maps_output[CMP_BUFFER_SIZE];
@@ -1267,4 +1281,6 @@ void test_bpf_iter(void)
 		test_buf_neg_offset();
 	if (test__start_subtest("link-iter"))
 		test_link_iter();
+	if (test__start_subtest("ksym"))
+		test_ksym_iter();
 }
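The program the subtest loads, dump_ksym from progs/bpf_iter_ksym.c, is not
part of this diff. For orientation, a minimal iter/ksym program could look
roughly like the sketch below. It is not the in-tree dump_ksym; the header
("bpf_iter.h") and the kallsym_iter fields used (value, type, name) follow
the conventions of the other bpf_iter selftests, and the output format here
is purely illustrative.

/* Sketch of a minimal ksym iterator program (not the in-tree dump_ksym). */
#include "bpf_iter.h"		/* selftest header providing bpf_iter__ksym etc. */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>	/* BPF_SEQ_PRINTF */

char _license[] SEC("license") = "GPL";

SEC("iter/ksym")
int dump_ksym_sketch(struct bpf_iter__ksym *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct kallsym_iter *iter = ctx->ksym;

	/* A NULL ksym pointer marks the end of the iteration. */
	if (!iter)
		return 0;

	/* Print a header once, then one line per symbol. */
	if (ctx->meta->seq_num == 0)
		BPF_SEQ_PRINTF(seq, "ADDR TYPE NAME\n");

	BPF_SEQ_PRINTF(seq, "0x%llx %c %s\n", iter->value, iter->type, iter->name);
	return 0;
}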
From 5836d81e4b039429f409007ba7e0235391537397 Mon Sep 17 00:00:00 2001
From: Hou Tao
Date: Wed, 10 Aug 2022 16:05:36 +0800
Subject: selftests/bpf: Add tests for reading a dangling map iter fd

After closing both the related link fd and the map fd, read the map
iterator fd to ensure it is still OK to do so.

Signed-off-by: Hou Tao
Acked-by: Yonghong Song
Link: https://lore.kernel.org/r/20220810080538.1845898-8-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov
---
 tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 92 +++++++++++++++++++++++
 1 file changed, 92 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index a33874b081b6..b690c9e9d346 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -28,6 +28,7 @@
 #include "bpf_iter_test_kern6.skel.h"
 #include "bpf_iter_bpf_link.skel.h"
 #include "bpf_iter_ksym.skel.h"
+#include "bpf_iter_sockmap.skel.h"

 static int duration;

@@ -67,6 +68,50 @@ free_link:
 	bpf_link__destroy(link);
 }

+static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
+				struct bpf_map *map)
+{
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+	struct bpf_link *link;
+	char buf[16] = {};
+	int iter_fd, len;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.map.map_fd = bpf_map__fd(map);
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+	link = bpf_program__attach_iter(prog, &opts);
+	if (!ASSERT_OK_PTR(link, "attach_map_iter"))
+		return;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
+		bpf_link__destroy(link);
+		return;
+	}
+
+	/* Close link and map fd prematurely */
+	bpf_link__destroy(link);
+	bpf_object__destroy_skeleton(*skel);
+	*skel = NULL;
+
+	/* Try to let map free work to run first if map is freed */
+	usleep(100);
+	/* Memory used by both sock map and sock local storage map are
+	 * freed after two synchronize_rcu() calls, so wait for it
+	 */
+	kern_sync_rcu();
+	kern_sync_rcu();
+
+	/* Read after both map fd and link fd are closed */
+	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+		;
+	ASSERT_GE(len, 0, "read_iterator");
+
+	close(iter_fd);
+}
+
 static int read_fd_into_buffer(int fd, char *buf, int size)
 {
 	int bufleft = size;
@@ -827,6 +872,20 @@ out:
 	bpf_iter_bpf_array_map__destroy(skel);
 }

+static void test_bpf_array_map_iter_fd(void)
+{
+	struct bpf_iter_bpf_array_map *skel;
+
+	skel = bpf_iter_bpf_array_map__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
+		return;
+
+	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
+			    skel->maps.arraymap1);
+
+	bpf_iter_bpf_array_map__destroy(skel);
+}
+
 static void test_bpf_percpu_array_map(void)
 {
 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -1009,6 +1068,20 @@ out:
 	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
 }

+static void test_bpf_sk_stoarge_map_iter_fd(void)
+{
+	struct bpf_iter_bpf_sk_storage_map *skel;
+
+	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
+		return;
+
+	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_sk_storage_map,
+			    skel->maps.sk_stg_map);
+
+	bpf_iter_bpf_sk_storage_map__destroy(skel);
+}
+
 static void test_bpf_sk_storage_map(void)
 {
 	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
@@ -1217,6 +1290,19 @@ out:
 	bpf_iter_task_vma__destroy(skel);
 }

+void test_bpf_sockmap_map_iter_fd(void)
+{
+	struct bpf_iter_sockmap *skel;
+
+	skel = bpf_iter_sockmap__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
+		return;
+
+	do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);
+
+	bpf_iter_sockmap__destroy(skel);
+}
+
 void test_bpf_iter(void)
 {
 	if (test__start_subtest("btf_id_or_null"))
@@ -1267,10 +1353,14 @@ void test_bpf_iter(void)
 		test_bpf_percpu_hash_map();
 	if (test__start_subtest("bpf_array_map"))
 		test_bpf_array_map();
+	if (test__start_subtest("bpf_array_map_iter_fd"))
+		test_bpf_array_map_iter_fd();
 	if (test__start_subtest("bpf_percpu_array_map"))
 		test_bpf_percpu_array_map();
 	if (test__start_subtest("bpf_sk_storage_map"))
 		test_bpf_sk_storage_map();
+	if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
+		test_bpf_sk_stoarge_map_iter_fd();
 	if (test__start_subtest("bpf_sk_storage_delete"))
 		test_bpf_sk_storage_delete();
 	if (test__start_subtest("bpf_sk_storage_get"))
@@ -1283,4 +1373,6 @@ void test_bpf_iter(void)
 		test_link_iter();
 	if (test__start_subtest("ksym"))
 		test_ksym_iter();
+	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
+		test_bpf_sockmap_map_iter_fd();
 }
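The iterator programs that do_read_map_iter_fd() attaches here
(dump_bpf_array_map, dump_bpf_sk_storage_map and the sockmap copy program)
live in separate progs/*.c files outside this diff. For orientation, a
map-element iterator program generally has the shape sketched below; the
global sum variable and the __u64 value type are illustrative assumptions,
not the actual selftest programs, and the key/value types must match the
map the iterator is attached to.

/* Illustrative sketch of a bpf_map_elem iterator program; not one of the
 * selftest programs referenced above.
 */
#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

__u64 sum = 0;

SEC("iter/bpf_map_elem")
int sum_map_values(struct bpf_iter__bpf_map_elem *ctx)
{
	__u32 *key = ctx->key;
	__u64 *val = ctx->value;

	/* NULL key/value marks the end of the iteration. */
	if (!key || !val)
		return 0;

	/* Aggregate in the kernel; userspace only reads the final sum. */
	sum += *val;
	return 0;
}

Attaching such a program follows the same userspace pattern as
do_read_map_iter_fd() above: fill bpf_iter_link_info.map.map_fd, call
bpf_program__attach_iter(), then bpf_iter_create() on the link fd and read()
the returned iterator fd.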
From 939a1a946d755c1565e18694e278e9ba7ba19ccc Mon Sep 17 00:00:00 2001
From: Hou Tao
Date: Wed, 10 Aug 2022 16:05:37 +0800
Subject: selftests/bpf: Add write tests for sk local storage map iterator

Add a test to validate overwriting the sock local storage map value
from the map iterator, and another to ensure an out-of-bound value
write is rejected.

Signed-off-by: Hou Tao
Acked-by: Yonghong Song
Acked-by: Martin KaFai Lau
Link: https://lore.kernel.org/r/20220810080538.1845898-9-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov
---
 tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 20 ++++++++++++++++++--
 .../bpf/progs/bpf_iter_bpf_sk_storage_map.c       | 22 ++++++++++++++++++++--
 2 files changed, 38 insertions(+), 4 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index b690c9e9d346..1571a6586b3b 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -1076,7 +1076,7 @@ static void test_bpf_sk_stoarge_map_iter_fd(void)
 	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
 		return;

-	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_sk_storage_map,
+	do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
 			    skel->maps.sk_stg_map);

 	bpf_iter_bpf_sk_storage_map__destroy(skel);
@@ -1117,7 +1117,15 @@ static void test_bpf_sk_storage_map(void)
 	linfo.map.map_fd = map_fd;
 	opts.link_info = &linfo;
 	opts.link_info_len = sizeof(linfo);
-	link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
+	link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
+	err = libbpf_get_error(link);
+	if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
+		if (!err)
+			bpf_link__destroy(link);
+		goto out;
+	}
+
+	link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
 	if (!ASSERT_OK_PTR(link, "attach_iter"))
 		goto out;

@@ -1125,6 +1133,7 @@ static void test_bpf_sk_storage_map(void)
 	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
 		goto free_link;

+	skel->bss->to_add_val = time(NULL);
 	/* do some tests */
 	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
 		;
@@ -1138,6 +1147,13 @@ static void test_bpf_sk_storage_map(void)
 	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
 		goto close_iter;

+	for (i = 0; i < num_sockets; i++) {
+		err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
+		if (!ASSERT_OK(err, "map_lookup") ||
+		    !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
+			break;
+	}
+
 close_iter:
 	close(iter_fd);
 free_link:
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
index 6b70ccaba301..c7b8e006b171 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c
@@ -16,19 +16,37 @@ struct {

 __u32 val_sum = 0;
 __u32 ipv6_sk_count = 0;
+__u32 to_add_val = 0;

 SEC("iter/bpf_sk_storage_map")
-int dump_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
+int rw_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
 {
 	struct sock *sk = ctx->sk;
 	__u32 *val = ctx->value;

-	if (sk == (void *)0 || val == (void *)0)
+	if (sk == NULL || val == NULL)
 		return 0;

 	if (sk->sk_family == AF_INET6)
 		ipv6_sk_count++;

 	val_sum += *val;
+
+	*val += to_add_val;
+
+	return 0;
+}
+
+SEC("iter/bpf_sk_storage_map")
+int oob_write_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx)
+{
+	struct sock *sk = ctx->sk;
+	__u32 *val = ctx->value;
+
+	if (sk == NULL || val == NULL)
+		return 0;
+
+	*(val + 1) = 0xdeadbeef;
+
 	return 0;
 }
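The sk_stg_map that both programs iterate is declared earlier in
bpf_iter_bpf_sk_storage_map.c, outside this hunk. Presumably it is a
BPF_MAP_TYPE_SK_STORAGE map with a 4-byte value along the lines of the
sketch below (an assumption for illustration; it reuses the same includes
as that file).

/* Assumed shape of the map under test; the real definition lives in
 * progs/bpf_iter_bpf_sk_storage_map.c outside this diff.
 */
struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u32);
} sk_stg_map SEC(".maps");

With a single __u32 value, the in-bounds '*val += to_add_val' in
rw_bpf_sk_storage_map is accepted, while '*(val + 1)' in
oob_write_bpf_sk_storage_map falls outside the value, so attaching that
program fails with -EACCES exactly as the userspace test above expects.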
"check_map_value")) + break; + } + close_iter: close(iter_fd); free_link: diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c index 6b70ccaba301..c7b8e006b171 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c @@ -16,19 +16,37 @@ struct { __u32 val_sum = 0; __u32 ipv6_sk_count = 0; +__u32 to_add_val = 0; SEC("iter/bpf_sk_storage_map") -int dump_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx) +int rw_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx) { struct sock *sk = ctx->sk; __u32 *val = ctx->value; - if (sk == (void *)0 || val == (void *)0) + if (sk == NULL || val == NULL) return 0; if (sk->sk_family == AF_INET6) ipv6_sk_count++; val_sum += *val; + + *val += to_add_val; + + return 0; +} + +SEC("iter/bpf_sk_storage_map") +int oob_write_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx) +{ + struct sock *sk = ctx->sk; + __u32 *val = ctx->value; + + if (sk == NULL || val == NULL) + return 0; + + *(val + 1) = 0xdeadbeef; + return 0; } -- cgit v1.2.3-70-g09d2 From c5c0981fd81d35233d625631f13000544c108c53 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Wed, 10 Aug 2022 16:05:38 +0800 Subject: selftests/bpf: Ensure sleepable program is rejected by hash map iter Add a test to ensure sleepable program is rejected by hash map iterator. Signed-off-by: Hou Tao Acked-by: Yonghong Song Link: https://lore.kernel.org/r/20220810080538.1845898-10-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 6 ++++++ tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c | 9 +++++++++ 2 files changed, 15 insertions(+) (limited to 'tools/testing/selftests/bpf/prog_tests/bpf_iter.c') diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 1571a6586b3b..e89685bd587c 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -679,6 +679,12 @@ static void test_bpf_hash_map(void) goto out; } + /* Sleepable program is prohibited for hash map iterator */ + linfo.map.map_fd = map_fd; + link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts); + if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter")) + goto out; + linfo.map.map_fd = map_fd; link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); if (!ASSERT_OK_PTR(link, "attach_iter")) diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c index 0aa3cd34cbe3..d7a69217fb68 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c @@ -112,3 +112,12 @@ int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx) return 0; } + +SEC("iter.s/bpf_map_elem") +int sleepable_dummy_dump(struct bpf_iter__bpf_map_elem *ctx) +{ + if (ctx->meta->seq_num == 0) + BPF_SEQ_PRINTF(ctx->meta->seq, "map dump starts\n"); + + return 0; +} -- cgit v1.2.3-70-g09d2