diff options
Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests/bpf_iter.c')
| -rw-r--r-- | tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 132 | 
1 file changed, 131 insertions, 1 deletion
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 7ff5fa93d056..e89685bd587c 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -27,6 +27,8 @@  #include "bpf_iter_test_kern5.skel.h"  #include "bpf_iter_test_kern6.skel.h"  #include "bpf_iter_bpf_link.skel.h" +#include "bpf_iter_ksym.skel.h" +#include "bpf_iter_sockmap.skel.h"  static int duration; @@ -66,6 +68,50 @@ free_link:  	bpf_link__destroy(link);  } +static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog, +				struct bpf_map *map) +{ +	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); +	union bpf_iter_link_info linfo; +	struct bpf_link *link; +	char buf[16] = {}; +	int iter_fd, len; + +	memset(&linfo, 0, sizeof(linfo)); +	linfo.map.map_fd = bpf_map__fd(map); +	opts.link_info = &linfo; +	opts.link_info_len = sizeof(linfo); +	link = bpf_program__attach_iter(prog, &opts); +	if (!ASSERT_OK_PTR(link, "attach_map_iter")) +		return; + +	iter_fd = bpf_iter_create(bpf_link__fd(link)); +	if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) { +		bpf_link__destroy(link); +		return; +	} + +	/* Close link and map fd prematurely */ +	bpf_link__destroy(link); +	bpf_object__destroy_skeleton(*skel); +	*skel = NULL; + +	/* Try to let map free work to run first if map is freed */ +	usleep(100); +	/* Memory used by both sock map and sock local storage map are +	 * freed after two synchronize_rcu() calls, so wait for it +	 */ +	kern_sync_rcu(); +	kern_sync_rcu(); + +	/* Read after both map fd and link fd are closed */ +	while ((len = read(iter_fd, buf, sizeof(buf))) > 0) +		; +	ASSERT_GE(len, 0, "read_iterator"); + +	close(iter_fd); +} +  static int read_fd_into_buffer(int fd, char *buf, int size)  {  	int bufleft = size; @@ -633,6 +679,12 @@ static void test_bpf_hash_map(void)  			goto out;  	} +	/* Sleepable program is prohibited for hash map iterator */ +	
linfo.map.map_fd = map_fd; +	link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts); +	if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter")) +		goto out; +  	linfo.map.map_fd = map_fd;  	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);  	if (!ASSERT_OK_PTR(link, "attach_iter")) @@ -826,6 +878,20 @@ out:  	bpf_iter_bpf_array_map__destroy(skel);  } +static void test_bpf_array_map_iter_fd(void) +{ +	struct bpf_iter_bpf_array_map *skel; + +	skel = bpf_iter_bpf_array_map__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load")) +		return; + +	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map, +			    skel->maps.arraymap1); + +	bpf_iter_bpf_array_map__destroy(skel); +} +  static void test_bpf_percpu_array_map(void)  {  	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); @@ -1008,6 +1074,20 @@ out:  	bpf_iter_bpf_sk_storage_helpers__destroy(skel);  } +static void test_bpf_sk_stoarge_map_iter_fd(void) +{ +	struct bpf_iter_bpf_sk_storage_map *skel; + +	skel = bpf_iter_bpf_sk_storage_map__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load")) +		return; + +	do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map, +			    skel->maps.sk_stg_map); + +	bpf_iter_bpf_sk_storage_map__destroy(skel); +} +  static void test_bpf_sk_storage_map(void)  {  	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); @@ -1043,7 +1123,15 @@ static void test_bpf_sk_storage_map(void)  	linfo.map.map_fd = map_fd;  	opts.link_info = &linfo;  	opts.link_info_len = sizeof(linfo); -	link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts); +	link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts); +	err = libbpf_get_error(link); +	if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) { +		if (!err) +			bpf_link__destroy(link); +		goto out; +	} + +	link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);  	
if (!ASSERT_OK_PTR(link, "attach_iter"))  		goto out; @@ -1051,6 +1139,7 @@ static void test_bpf_sk_storage_map(void)  	if (!ASSERT_GE(iter_fd, 0, "create_iter"))  		goto free_link; +	skel->bss->to_add_val = time(NULL);  	/* do some tests */  	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)  		; @@ -1064,6 +1153,13 @@ static void test_bpf_sk_storage_map(void)  	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))  		goto close_iter; +	for (i = 0; i < num_sockets; i++) { +		err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val); +		if (!ASSERT_OK(err, "map_lookup") || +		    !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value")) +			break; +	} +  close_iter:  	close(iter_fd);  free_link: @@ -1120,6 +1216,19 @@ static void test_link_iter(void)  	bpf_iter_bpf_link__destroy(skel);  } +static void test_ksym_iter(void) +{ +	struct bpf_iter_ksym *skel; + +	skel = bpf_iter_ksym__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load")) +		return; + +	do_dummy_read(skel->progs.dump_ksym); + +	bpf_iter_ksym__destroy(skel); +} +  #define CMP_BUFFER_SIZE 1024  static char task_vma_output[CMP_BUFFER_SIZE];  static char proc_maps_output[CMP_BUFFER_SIZE]; @@ -1203,6 +1312,19 @@ out:  	bpf_iter_task_vma__destroy(skel);  } +void test_bpf_sockmap_map_iter_fd(void) +{ +	struct bpf_iter_sockmap *skel; + +	skel = bpf_iter_sockmap__open_and_load(); +	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load")) +		return; + +	do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap); + +	bpf_iter_sockmap__destroy(skel); +} +  void test_bpf_iter(void)  {  	if (test__start_subtest("btf_id_or_null")) @@ -1253,10 +1375,14 @@ void test_bpf_iter(void)  		test_bpf_percpu_hash_map();  	if (test__start_subtest("bpf_array_map"))  		test_bpf_array_map(); +	if (test__start_subtest("bpf_array_map_iter_fd")) +		test_bpf_array_map_iter_fd();  	if (test__start_subtest("bpf_percpu_array_map"))  		test_bpf_percpu_array_map();  	if 
(test__start_subtest("bpf_sk_storage_map"))  		test_bpf_sk_storage_map(); +	if (test__start_subtest("bpf_sk_storage_map_iter_fd")) +		test_bpf_sk_stoarge_map_iter_fd();  	if (test__start_subtest("bpf_sk_storage_delete"))  		test_bpf_sk_storage_delete();  	if (test__start_subtest("bpf_sk_storage_get")) @@ -1267,4 +1393,8 @@ void test_bpf_iter(void)  		test_buf_neg_offset();  	if (test__start_subtest("link-iter"))  		test_link_iter(); +	if (test__start_subtest("ksym")) +		test_ksym_iter(); +	if (test__start_subtest("bpf_sockmap_map_iter_fd")) +		test_bpf_sockmap_map_iter_fd();  }  | 
