Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
8 files changed, 764 insertions, 99 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
index cb827383db4d..fb5840a62548 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
@@ -106,8 +106,8 @@ void test_bpf_obj_id(void)
 		if (CHECK(err ||
 			  prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
 			  info_len != sizeof(struct bpf_prog_info) ||
-			  (jit_enabled && !prog_infos[i].jited_prog_len) ||
-			  (jit_enabled &&
+			  (env.jit_enabled && !prog_infos[i].jited_prog_len) ||
+			  (env.jit_enabled &&
 			   !memcmp(jited_insns, zeros, sizeof(zeros))) ||
 			  !prog_infos[i].xlated_prog_len ||
 			  !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
@@ -121,7 +121,7 @@ void test_bpf_obj_id(void)
 			  err, errno, i,
 			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
 			  info_len, sizeof(struct bpf_prog_info),
-			  jit_enabled,
+			  env.jit_enabled,
 			  prog_infos[i].jited_prog_len,
 			  prog_infos[i].xlated_prog_len,
 			  !!memcmp(jited_insns, zeros, sizeof(zeros)),
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
index e1b55261526f..1a1eae356f81 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
@@ -4,12 +4,15 @@
 static int libbpf_debug_print(enum libbpf_print_level level,
 			      const char *format, va_list args)
 {
-	if (level != LIBBPF_DEBUG)
-		return vfprintf(stderr, format, args);
+	if (level != LIBBPF_DEBUG) {
+		vprintf(format, args);
+		return 0;
+	}
 
 	if (!strstr(format, "verifier log"))
 		return 0;
-	return vfprintf(stderr, "%s", args);
+	vprintf("%s", args);
+	return 0;
 }
 
 static int check_load(const char *file, enum bpf_prog_type type)
@@ -30,14 +33,25 @@ static int check_load(const char *file, enum bpf_prog_type type)
 	return err;
 }
 
+struct scale_test_def {
+	const char *file;
+	enum bpf_prog_type attach_type;
+	bool fails;
+};
+
 void test_bpf_verif_scale(void)
 {
-	const char *sched_cls[] = {
-		"./test_verif_scale1.o", "./test_verif_scale2.o", "./test_verif_scale3.o",
-	};
-	const char *raw_tp[] = {
+	struct scale_test_def tests[] = {
+		{ "loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT, true /* fails */ },
+
+		{ "test_verif_scale1.o", BPF_PROG_TYPE_SCHED_CLS },
+		{ "test_verif_scale2.o", BPF_PROG_TYPE_SCHED_CLS },
+		{ "test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS },
+
 		/* full unroll by llvm */
-		"./pyperf50.o", "./pyperf100.o", "./pyperf180.o",
+		{ "pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
+		{ "pyperf100.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
+		{ "pyperf180.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
 
 		/* partial unroll. llvm will unroll loop ~150 times.
 		 * C loop count -> 600.
@@ -45,7 +59,7 @@ void test_bpf_verif_scale(void)
 		 * 16k insns in loop body.
 		 * Total of 5 such loops. Total program size ~82k insns.
 		 */
-		"./pyperf600.o",
+		{ "pyperf600.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
 
 		/* no unroll at all.
 		 * C loop count -> 600.
@@ -53,48 +67,52 @@ void test_bpf_verif_scale(void)
 		 * ~110 insns in loop body.
 		 * Total of 5 such loops. Total program size ~1500 insns.
 		 */
-		"./pyperf600_nounroll.o",
+		{ "pyperf600_nounroll.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
 
-		"./loop1.o", "./loop2.o",
+		{ "loop1.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
+		{ "loop2.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
+		{ "loop4.o", BPF_PROG_TYPE_SCHED_CLS },
+		{ "loop5.o", BPF_PROG_TYPE_SCHED_CLS },
 
 		/* partial unroll. 19k insn in a loop.
 		 * Total program size 20.8k insn.
 		 * ~350k processed_insns
 		 */
-		"./strobemeta.o",
+		{ "strobemeta.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
 
 		/* no unroll, tiny loops */
-		"./strobemeta_nounroll1.o",
-		"./strobemeta_nounroll2.o",
-	};
-	const char *cg_sysctl[] = {
-		"./test_sysctl_loop1.o", "./test_sysctl_loop2.o",
-	};
-	int err, i;
+		{ "strobemeta_nounroll1.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
+		{ "strobemeta_nounroll2.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
 
-	if (verifier_stats)
-		libbpf_set_print(libbpf_debug_print);
+		{ "test_sysctl_loop1.o", BPF_PROG_TYPE_CGROUP_SYSCTL },
+		{ "test_sysctl_loop2.o", BPF_PROG_TYPE_CGROUP_SYSCTL },
 
-	err = check_load("./loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT);
-	printf("test_scale:loop3:%s\n", err ? (error_cnt--, "OK") : "FAIL");
+		{ "test_xdp_loop.o", BPF_PROG_TYPE_XDP },
+		{ "test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL },
+	};
+	libbpf_print_fn_t old_print_fn = NULL;
+	int err, i;
 
-	for (i = 0; i < ARRAY_SIZE(sched_cls); i++) {
-		err = check_load(sched_cls[i], BPF_PROG_TYPE_SCHED_CLS);
-		printf("test_scale:%s:%s\n", sched_cls[i], err ? "FAIL" : "OK");
+	if (env.verifier_stats) {
+		test__force_log();
+		old_print_fn = libbpf_set_print(libbpf_debug_print);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(raw_tp); i++) {
-		err = check_load(raw_tp[i], BPF_PROG_TYPE_RAW_TRACEPOINT);
-		printf("test_scale:%s:%s\n", raw_tp[i], err ? "FAIL" : "OK");
-	}
+	for (i = 0; i < ARRAY_SIZE(tests); i++) {
+		const struct scale_test_def *test = &tests[i];
+
+		if (!test__start_subtest(test->file))
+			continue;
 
-	for (i = 0; i < ARRAY_SIZE(cg_sysctl); i++) {
-		err = check_load(cg_sysctl[i], BPF_PROG_TYPE_CGROUP_SYSCTL);
-		printf("test_scale:%s:%s\n", cg_sysctl[i], err ? "FAIL" : "OK");
+		err = check_load(test->file, test->attach_type);
+		if (test->fails) {	/* expected to fail */
+			if (err)
+				error_cnt--;
+			else
+				error_cnt++;
+		}
 	}
 
-	err = check_load("./test_xdp_loop.o", BPF_PROG_TYPE_XDP);
-	printf("test_scale:test_xdp_loop:%s\n", err ? "FAIL" : "OK");
-	err = check_load("./test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL);
-	printf("test_scale:test_seg6_loop:%s\n", err ? "FAIL" : "OK");
+	if (env.verifier_stats)
+		libbpf_set_print(old_print_fn);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
new file mode 100644
index 000000000000..f3863f976a48
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "progs/core_reloc_types.h"
+
+#define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name)
+
+#define FLAVORS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
+	.a = 42, \
+	.b = 0xc001, \
+	.c = 0xbeef, \
+}
+
+#define FLAVORS_CASE_COMMON(name) \
+	.case_name = #name, \
+	.bpf_obj_file = "test_core_reloc_flavors.o", \
+	.btf_src_file = "btf__core_reloc_" #name ".o" \
+
+#define FLAVORS_CASE(name) { \
+	FLAVORS_CASE_COMMON(name), \
+	.input = FLAVORS_DATA(core_reloc_##name), \
+	.input_len = sizeof(struct core_reloc_##name), \
+	.output = FLAVORS_DATA(core_reloc_flavors), \
+	.output_len = sizeof(struct core_reloc_flavors), \
+}
+
+#define FLAVORS_ERR_CASE(name) { \
+	FLAVORS_CASE_COMMON(name), \
+	.fails = true, \
+}
+
+#define NESTING_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
+	.a = { .a = { .a = 42 } }, \
+	.b = { .b = { .b = 0xc001 } }, \
+}
+
+#define NESTING_CASE_COMMON(name) \
+	.case_name = #name, \
+	.bpf_obj_file = "test_core_reloc_nesting.o", \
+	.btf_src_file = "btf__core_reloc_" #name ".o"
+
+#define NESTING_CASE(name) { \
+	NESTING_CASE_COMMON(name), \
+	.input = NESTING_DATA(core_reloc_##name), \
+	.input_len = sizeof(struct core_reloc_##name), \
+	.output = NESTING_DATA(core_reloc_nesting), \
+	.output_len = sizeof(struct core_reloc_nesting) \
+}
+
+#define NESTING_ERR_CASE(name) { \
+	NESTING_CASE_COMMON(name), \
+	.fails = true, \
+}
+
+#define ARRAYS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
+	.a = { [2] = 1 }, \
+	.b = { [1] = { [2] = { [3] = 2 } } }, \
+	.c = { [1] = { .c = 3 } }, \
+	.d = { [0] = { [0] = { .d = 4 } } }, \
+}
+
+#define ARRAYS_CASE_COMMON(name) \
+	.case_name = #name, \
+	.bpf_obj_file = "test_core_reloc_arrays.o", \
+	.btf_src_file = "btf__core_reloc_" #name ".o"
+
+#define ARRAYS_CASE(name) { \
+	ARRAYS_CASE_COMMON(name), \
+	.input = ARRAYS_DATA(core_reloc_##name), \
+	.input_len = sizeof(struct core_reloc_##name), \
+	.output = STRUCT_TO_CHAR_PTR(core_reloc_arrays_output) { \
+		.a2 = 1, \
+		.b123 = 2, \
+		.c1c = 3, \
+		.d00d = 4, \
+	}, \
+	.output_len = sizeof(struct core_reloc_arrays_output) \
+}
+
+#define ARRAYS_ERR_CASE(name) { \
+	ARRAYS_CASE_COMMON(name), \
+	.fails = true, \
+}
+
+#define PRIMITIVES_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
+	.a = 1, \
+	.b = 2, \
+	.c = 3, \
+	.d = (void *)4, \
+	.f = (void *)5, \
+}
+
+#define PRIMITIVES_CASE_COMMON(name) \
+	.case_name = #name, \
+	.bpf_obj_file = "test_core_reloc_primitives.o", \
+	.btf_src_file = "btf__core_reloc_" #name ".o"
+
+#define PRIMITIVES_CASE(name) { \
+	PRIMITIVES_CASE_COMMON(name), \
+	.input = PRIMITIVES_DATA(core_reloc_##name), \
+	.input_len = sizeof(struct core_reloc_##name), \
+	.output = PRIMITIVES_DATA(core_reloc_primitives), \
+	.output_len = sizeof(struct core_reloc_primitives), \
+}
+
+#define PRIMITIVES_ERR_CASE(name) { \
+	PRIMITIVES_CASE_COMMON(name), \
+	.fails = true, \
+}
+
+#define MODS_CASE(name) { \
+	.case_name = #name, \
+	.bpf_obj_file = "test_core_reloc_mods.o", \
+	.btf_src_file = "btf__core_reloc_" #name ".o", \
+	.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) { \
+		.a = 1, \
+		.b = 2, \
+		.c = (void *)3, \
+		.d = (void *)4, \
+		.e = { [2] = 5 }, \
+		.f = { [1] = 6 }, \
+		.g = { .x = 7 }, \
+		.h = { .y = 8 }, \
+	}, \
+	.input_len = sizeof(struct core_reloc_##name), \
+	.output = STRUCT_TO_CHAR_PTR(core_reloc_mods_output) { \
+		.a = 1, .b = 2, .c = 3, .d = 4, \
+		.e = 5, .f = 6, .g = 7, .h = 8, \
+	}, \
+	.output_len = sizeof(struct core_reloc_mods_output), \
+}
+
+#define PTR_AS_ARR_CASE(name) { \
+	.case_name = #name, \
+	.bpf_obj_file = "test_core_reloc_ptr_as_arr.o", \
+	.btf_src_file = "btf__core_reloc_" #name ".o", \
+	.input = (const char *)&(struct core_reloc_##name []){ \
+		{ .a = 1 }, \
+		{ .a = 2 }, \
+		{ .a = 3 }, \
+	}, \
+	.input_len = 3 * sizeof(struct core_reloc_##name), \
+	.output = STRUCT_TO_CHAR_PTR(core_reloc_ptr_as_arr) { \
+		.a = 3, \
+	}, \
+	.output_len = sizeof(struct core_reloc_ptr_as_arr), \
+}
+
+#define INTS_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
+	.u8_field = 1, \
+	.s8_field = 2, \
+	.u16_field = 3, \
+	.s16_field = 4, \
+	.u32_field = 5, \
+	.s32_field = 6, \
+	.u64_field = 7, \
+	.s64_field = 8, \
+}
+
+#define INTS_CASE_COMMON(name) \
+	.case_name = #name, \
+	.bpf_obj_file = "test_core_reloc_ints.o", \
+	.btf_src_file = "btf__core_reloc_" #name ".o"
+
+#define INTS_CASE(name) { \
+	INTS_CASE_COMMON(name), \
+	.input = INTS_DATA(core_reloc_##name), \
+	.input_len = sizeof(struct core_reloc_##name), \
+	.output = INTS_DATA(core_reloc_ints), \
+	.output_len = sizeof(struct core_reloc_ints), \
+}
+
+#define INTS_ERR_CASE(name) { \
+	INTS_CASE_COMMON(name), \
+	.fails = true, \
+}
+
+struct core_reloc_test_case {
+	const char *case_name;
+	const char *bpf_obj_file;
+	const char *btf_src_file;
+	const char *input;
+	int input_len;
+	const char *output;
+	int output_len;
+	bool fails;
+};
+
+static struct core_reloc_test_case test_cases[] = {
+	/* validate we can find kernel image and use its BTF for relocs */
+	{
+		.case_name = "kernel",
+		.bpf_obj_file = "test_core_reloc_kernel.o",
+		.btf_src_file = NULL, /* load from /lib/modules/$(uname -r) */
+		.input = "",
+		.input_len = 0,
+		.output = "\1", /* true */
+		.output_len = 1,
+	},
+
+	/* validate BPF program can use multiple flavors to match against
+	 * single target BTF type
+	 */
+	FLAVORS_CASE(flavors),
+
+	FLAVORS_ERR_CASE(flavors__err_wrong_name),
+
+	/* various struct/enum nesting and resolution scenarios */
+	NESTING_CASE(nesting),
+	NESTING_CASE(nesting___anon_embed),
+	NESTING_CASE(nesting___struct_union_mixup),
+	NESTING_CASE(nesting___extra_nesting),
+	NESTING_CASE(nesting___dup_compat_types),
+
+	NESTING_ERR_CASE(nesting___err_missing_field),
+	NESTING_ERR_CASE(nesting___err_array_field),
+	NESTING_ERR_CASE(nesting___err_missing_container),
+	NESTING_ERR_CASE(nesting___err_nonstruct_container),
+	NESTING_ERR_CASE(nesting___err_array_container),
+	NESTING_ERR_CASE(nesting___err_dup_incompat_types),
+	NESTING_ERR_CASE(nesting___err_partial_match_dups),
+	NESTING_ERR_CASE(nesting___err_too_deep),
+
+	/* various array access relocation scenarios */
+	ARRAYS_CASE(arrays),
+	ARRAYS_CASE(arrays___diff_arr_dim),
+	ARRAYS_CASE(arrays___diff_arr_val_sz),
+
+	ARRAYS_ERR_CASE(arrays___err_too_small),
+	ARRAYS_ERR_CASE(arrays___err_too_shallow),
+	ARRAYS_ERR_CASE(arrays___err_non_array),
+	ARRAYS_ERR_CASE(arrays___err_wrong_val_type1),
+	ARRAYS_ERR_CASE(arrays___err_wrong_val_type2),
+
+	/* enum/ptr/int handling scenarios */
+	PRIMITIVES_CASE(primitives),
+	PRIMITIVES_CASE(primitives___diff_enum_def),
+	PRIMITIVES_CASE(primitives___diff_func_proto),
+	PRIMITIVES_CASE(primitives___diff_ptr_type),
+
+	PRIMITIVES_ERR_CASE(primitives___err_non_enum),
+	PRIMITIVES_ERR_CASE(primitives___err_non_int),
+	PRIMITIVES_ERR_CASE(primitives___err_non_ptr),
+
+	/* const/volatile/restrict and typedefs scenarios */
+	MODS_CASE(mods),
+	MODS_CASE(mods___mod_swap),
+	MODS_CASE(mods___typedefs),
+
+	/* handling "ptr is an array" semantics */
+	PTR_AS_ARR_CASE(ptr_as_arr),
+	PTR_AS_ARR_CASE(ptr_as_arr___diff_sz),
+
+	/* int signedness/sizing/bitfield handling */
+	INTS_CASE(ints),
+	INTS_CASE(ints___bool),
+	INTS_CASE(ints___reverse_sign),
+
+	INTS_ERR_CASE(ints___err_bitfield),
+	INTS_ERR_CASE(ints___err_wrong_sz_8),
+	INTS_ERR_CASE(ints___err_wrong_sz_16),
+	INTS_ERR_CASE(ints___err_wrong_sz_32),
+	INTS_ERR_CASE(ints___err_wrong_sz_64),
+
+	/* validate edge cases of capturing relocations */
+	{
+		.case_name = "misc",
+		.bpf_obj_file = "test_core_reloc_misc.o",
+		.btf_src_file = "btf__core_reloc_misc.o",
+		.input = (const char *)&(struct core_reloc_misc_extensible[]){
+			{ .a = 1 },
+			{ .a = 2 }, /* not read */
+			{ .a = 3 },
+		},
+		.input_len = 4 * sizeof(int),
+		.output = STRUCT_TO_CHAR_PTR(core_reloc_misc_output) {
+			.a = 1,
+			.b = 1,
+			.c = 0, /* BUG in clang, should be 3 */
+		},
+		.output_len = sizeof(struct core_reloc_misc_output),
+	},
+};
+
+struct data {
+	char in[256];
+	char out[256];
+};
+
+void test_core_reloc(void)
+{
+	const char *probe_name = "raw_tracepoint/sys_enter";
+	struct bpf_object_load_attr load_attr = {};
+	struct core_reloc_test_case *test_case;
+	int err, duration = 0, i, equal;
+	struct bpf_link *link = NULL;
+	struct bpf_map *data_map;
+	struct bpf_program *prog;
+	struct bpf_object *obj;
+	const int zero = 0;
+	struct data data;
+
+	for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+		test_case = &test_cases[i];
+
+		if (!test__start_subtest(test_case->case_name))
+			continue;
+
+		obj = bpf_object__open(test_case->bpf_obj_file);
+		if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
+			  "failed to open '%s': %ld\n",
+			  test_case->bpf_obj_file, PTR_ERR(obj)))
+			continue;
+
+		prog = bpf_object__find_program_by_title(obj, probe_name);
+		if (CHECK(!prog, "find_probe",
+			  "prog '%s' not found\n", probe_name))
+			goto cleanup;
+		bpf_program__set_type(prog, BPF_PROG_TYPE_RAW_TRACEPOINT);
+
+		load_attr.obj = obj;
+		load_attr.log_level = 0;
+		load_attr.target_btf_path = test_case->btf_src_file;
+		err = bpf_object__load_xattr(&load_attr);
+		if (test_case->fails) {
+			CHECK(!err, "obj_load_fail",
+			      "should fail to load prog '%s'\n", probe_name);
+			goto cleanup;
+		} else {
+			if (CHECK(err, "obj_load",
+				  "failed to load prog '%s': %d\n",
+				  probe_name, err))
+				goto cleanup;
+		}
+
+		link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
+		if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
+			  PTR_ERR(link)))
+			goto cleanup;
+
+		data_map = bpf_object__find_map_by_name(obj, "test_cor.bss");
+		if (CHECK(!data_map, "find_data_map", "data map not found\n"))
+			goto cleanup;
+
+		memset(&data, 0, sizeof(data));
+		memcpy(data.in, test_case->input, test_case->input_len);
+
+		err = bpf_map_update_elem(bpf_map__fd(data_map),
+					  &zero, &data, 0);
+		if (CHECK(err, "update_data_map",
+			  "failed to update .data map: %d\n", err))
+			goto cleanup;
+
+		/* trigger test run */
+		usleep(1);
+
+		err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &data);
+		if (CHECK(err, "get_result",
+			  "failed to get output data: %d\n", err))
+			goto cleanup;
+
+		equal = memcmp(data.out, test_case->output,
+			       test_case->output_len) == 0;
+		if (CHECK(!equal, "check_result",
+			  "input/output data don't match\n")) {
+			int j;
+
+			for (j = 0; j < test_case->input_len; j++) {
+				printf("input byte #%d: 0x%02hhx\n",
+				       j, test_case->input[j]);
+			}
+			for (j = 0; j < test_case->output_len; j++) {
+				printf("output byte #%d: EXP 0x%02hhx GOT 0x%02hhx\n",
+				       j, test_case->output[j], data.out[j]);
+			}
+			goto cleanup;
+		}
+
+cleanup:
+		if (!IS_ERR_OR_NULL(link)) {
+			bpf_link__destroy(link);
+			link = NULL;
+		}
+		bpf_object__close(obj);
+	}
+}
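
The heart of each CO-RE case above is loading the same BPF object against a different BTF source: bpf_object__load_xattr() takes a target_btf_path that overrides the kernel BTF the relocations would otherwise resolve against (NULL means "use the running kernel's BTF", as the "kernel" case does). A minimal sketch of just that step, using the same libbpf API the new test calls; the helper name is illustrative and the file path is only an example:

#include <bpf/libbpf.h>

/* Load @obj, resolving CO-RE relocations against a custom BTF blob
 * (e.g. "btf__core_reloc_flavors.o") instead of vmlinux BTF.
 */
int load_against_btf(struct bpf_object *obj, const char *btf_path)
{
	struct bpf_object_load_attr load_attr = {
		.obj = obj,
		.target_btf_path = btf_path,	/* NULL -> kernel BTF */
	};

	return bpf_object__load_xattr(&load_attr);
}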
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index c938283ac232..6892b88ae065 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -5,6 +5,10 @@
 #include <linux/if_tun.h>
 #include <sys/uio.h>
 
+#ifndef IP_MF
+#define IP_MF 0x2000
+#endif
+
 #define CHECK_FLOW_KEYS(desc, got, expected) \
 	CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \
 		   desc, \
@@ -16,6 +20,7 @@
 		   "is_encap=%u/%u " \
 		   "ip_proto=0x%x/0x%x " \
 		   "n_proto=0x%x/0x%x " \
+		   "flow_label=0x%x/0x%x " \
 		   "sport=%u/%u " \
 		   "dport=%u/%u\n", \
 		   got.nhoff, expected.nhoff, \
@@ -26,6 +31,7 @@
 		   got.is_encap, expected.is_encap, \
 		   got.ip_proto, expected.ip_proto, \
 		   got.n_proto, expected.n_proto, \
+		   got.flow_label, expected.flow_label, \
 		   got.sport, expected.sport, \
 		   got.dport, expected.dport)
 
@@ -35,6 +41,13 @@ struct ipv4_pkt {
 	struct tcphdr tcp;
 } __packed;
 
+struct ipip_pkt {
+	struct ethhdr eth;
+	struct iphdr iph;
+	struct iphdr iph_inner;
+	struct tcphdr tcp;
+} __packed;
+
 struct svlan_ipv4_pkt {
 	struct ethhdr eth;
 	__u16 vlan_tci;
@@ -49,6 +62,18 @@ struct ipv6_pkt {
 	struct tcphdr tcp;
 } __packed;
 
+struct ipv6_frag_pkt {
+	struct ethhdr eth;
+	struct ipv6hdr iph;
+	struct frag_hdr {
+		__u8 nexthdr;
+		__u8 reserved;
+		__be16 frag_off;
+		__be32 identification;
+	} ipf;
+	struct tcphdr tcp;
+} __packed;
+
 struct dvlan_ipv6_pkt {
 	struct ethhdr eth;
 	__u16 vlan_tci;
@@ -64,10 +89,13 @@ struct test {
 	union {
 		struct ipv4_pkt ipv4;
 		struct svlan_ipv4_pkt svlan_ipv4;
+		struct ipip_pkt ipip;
 		struct ipv6_pkt ipv6;
+		struct ipv6_frag_pkt ipv6_frag;
 		struct dvlan_ipv6_pkt dvlan_ipv6;
 	} pkt;
 	struct bpf_flow_keys keys;
+	__u32 flags;
 };
 
 #define VLAN_HLEN	4
@@ -81,6 +109,8 @@ struct test tests[] = {
 			.iph.protocol = IPPROTO_TCP,
 			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
 			.tcp.doff = 5,
+			.tcp.source = 80,
+			.tcp.dest = 8080,
 		},
 		.keys = {
 			.nhoff = ETH_HLEN,
@@ -88,6 +118,8 @@ struct test tests[] = {
 			.addr_proto = ETH_P_IP,
 			.ip_proto = IPPROTO_TCP,
 			.n_proto = __bpf_constant_htons(ETH_P_IP),
+			.sport = 80,
+			.dport = 8080,
 		},
 	},
 	{
@@ -97,6 +129,8 @@ struct test tests[] = {
 			.iph.nexthdr = IPPROTO_TCP,
 			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
 			.tcp.doff = 5,
+			.tcp.source = 80,
+			.tcp.dest = 8080,
 		},
 		.keys = {
 			.nhoff = ETH_HLEN,
@@ -104,6 +138,8 @@ struct test tests[] = {
 			.addr_proto = ETH_P_IPV6,
 			.ip_proto = IPPROTO_TCP,
 			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
+			.sport = 80,
+			.dport = 8080,
 		},
 	},
 	{
@@ -115,6 +151,8 @@ struct test tests[] = {
 			.iph.protocol = IPPROTO_TCP,
 			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
 			.tcp.doff = 5,
+			.tcp.source = 80,
+			.tcp.dest = 8080,
 		},
 		.keys = {
 			.nhoff = ETH_HLEN + VLAN_HLEN,
@@ -122,6 +160,8 @@ struct test tests[] = {
 			.addr_proto = ETH_P_IP,
 			.ip_proto = IPPROTO_TCP,
 			.n_proto = __bpf_constant_htons(ETH_P_IP),
+			.sport = 80,
+			.dport = 8080,
 		},
 	},
 	{
@@ -133,6 +173,8 @@ struct test tests[] = {
 			.iph.nexthdr = IPPROTO_TCP,
 			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
 			.tcp.doff = 5,
+			.tcp.source = 80,
+			.tcp.dest = 8080,
 		},
 		.keys = {
 			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
@@ -141,8 +183,206 @@ struct test tests[] = {
 			.addr_proto = ETH_P_IPV6,
 			.ip_proto = IPPROTO_TCP,
 			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
+			.sport = 80,
+			.dport = 8080,
 		},
 	},
+	{
+		.name = "ipv4-frag",
+		.pkt.ipv4 = {
+			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
+			.iph.ihl = 5,
+			.iph.protocol = IPPROTO_TCP,
+			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+			.iph.frag_off = __bpf_constant_htons(IP_MF),
+			.tcp.doff = 5,
+			.tcp.source = 80,
+			.tcp.dest = 8080,
+		},
+		.keys = {
+			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
+			.nhoff = ETH_HLEN,
+			.thoff = ETH_HLEN + sizeof(struct iphdr),
+			.addr_proto = ETH_P_IP,
+			.ip_proto = IPPROTO_TCP,
+			.n_proto = __bpf_constant_htons(ETH_P_IP),
+			.is_frag = true,
+			.is_first_frag = true,
+			.sport = 80,
+			.dport = 8080,
+		},
+		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
+	},
+	{
+		.name = "ipv4-no-frag",
+		.pkt.ipv4 = {
+			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
+			.iph.ihl = 5,
+			.iph.protocol = IPPROTO_TCP,
+			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+			.iph.frag_off = __bpf_constant_htons(IP_MF),
+			.tcp.doff = 5,
+			.tcp.source = 80,
+			.tcp.dest = 8080,
+		},
+		.keys = {
+			.nhoff = ETH_HLEN,
+			.thoff = ETH_HLEN + sizeof(struct iphdr),
+			.addr_proto = ETH_P_IP,
+			.ip_proto = IPPROTO_TCP,
+			.n_proto = __bpf_constant_htons(ETH_P_IP),
+			.is_frag = true,
+			.is_first_frag = true,
+		},
+	},
+	{
+		.name = "ipv6-frag",
+		.pkt.ipv6_frag = {
+			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
+			.iph.nexthdr = IPPROTO_FRAGMENT,
+			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
+			.ipf.nexthdr = IPPROTO_TCP,
+			.tcp.doff = 5,
+			.tcp.source = 80,
+			.tcp.dest = 8080,
+		},
+		.keys = {
+			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
+			.nhoff = ETH_HLEN,
+			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
+				 sizeof(struct frag_hdr),
+			.addr_proto = ETH_P_IPV6,
+			.ip_proto = IPPROTO_TCP,
+			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
+			.is_frag = true,
+			.is_first_frag = true,
+			.sport = 80,
+			.dport = 8080,
+		},
+		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
+	},
+	{
+		.name = "ipv6-no-frag",
+		.pkt.ipv6_frag = {
+			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
+			.iph.nexthdr = IPPROTO_FRAGMENT,
+			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
+			.ipf.nexthdr = IPPROTO_TCP,
+			.tcp.doff = 5,
+			.tcp.source = 80,
+			.tcp.dest = 8080,
+		},
+		.keys = {
+			.nhoff = ETH_HLEN,
+			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
+				 sizeof(struct frag_hdr),
+			.addr_proto = ETH_P_IPV6,
+			.ip_proto = IPPROTO_TCP,
+			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
+			.is_frag = true,
+			.is_first_frag = true,
+		},
+	},
+	{
+		.name = "ipv6-flow-label",
+		.pkt.ipv6 = {
+			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
+			.iph.nexthdr = IPPROTO_TCP,
+			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
+			.iph.flow_lbl = { 0xb, 0xee, 0xef },
+			.tcp.doff = 5,
+			.tcp.source = 80,
+			.tcp.dest = 8080,
+		},
+		.keys = {
+			.nhoff = ETH_HLEN,
+			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
+			.addr_proto = ETH_P_IPV6,
+			.ip_proto = IPPROTO_TCP,
+			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
+			.sport = 80,
+			.dport = 8080,
+			.flow_label = __bpf_constant_htonl(0xbeeef),
+		},
+	},
+	{
+		.name = "ipv6-no-flow-label",
+		.pkt.ipv6 = {
+			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
+			.iph.nexthdr = IPPROTO_TCP,
+			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
+			.iph.flow_lbl = { 0xb, 0xee, 0xef },
+			.tcp.doff = 5,
+			.tcp.source = 80,
+			.tcp.dest = 8080,
+		},
+		.keys = {
+			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
+			.nhoff = ETH_HLEN,
+			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
+			.addr_proto = ETH_P_IPV6,
+			.ip_proto = IPPROTO_TCP,
+			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
+			.flow_label = __bpf_constant_htonl(0xbeeef),
+		},
+		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
+	},
+	{
+		.name = "ipip-encap",
+		.pkt.ipip = {
+			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
+			.iph.ihl = 5,
+			.iph.protocol = IPPROTO_IPIP,
+			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+			.iph_inner.ihl = 5,
+			.iph_inner.protocol = IPPROTO_TCP,
+			.iph_inner.tot_len =
+				__bpf_constant_htons(MAGIC_BYTES) -
+				sizeof(struct iphdr),
+			.tcp.doff = 5,
+			.tcp.source = 80,
+			.tcp.dest = 8080,
+		},
+		.keys = {
+			.nhoff = ETH_HLEN,
+			.thoff = ETH_HLEN + sizeof(struct iphdr) +
+				 sizeof(struct iphdr),
+			.addr_proto = ETH_P_IP,
+			.ip_proto = IPPROTO_TCP,
+			.n_proto = __bpf_constant_htons(ETH_P_IP),
+			.is_encap = true,
+			.sport = 80,
+			.dport = 8080,
+		},
+	},
+	{
+		.name = "ipip-no-encap",
+		.pkt.ipip = {
+			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
+			.iph.ihl = 5,
+			.iph.protocol = IPPROTO_IPIP,
+			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
+			.iph_inner.ihl = 5,
+			.iph_inner.protocol = IPPROTO_TCP,
+			.iph_inner.tot_len =
+				__bpf_constant_htons(MAGIC_BYTES) -
+				sizeof(struct iphdr),
+			.tcp.doff = 5,
+			.tcp.source = 80,
+			.tcp.dest = 8080,
+		},
+		.keys = {
+			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
+			.nhoff = ETH_HLEN,
+			.thoff = ETH_HLEN + sizeof(struct iphdr),
+			.addr_proto = ETH_P_IP,
+			.ip_proto = IPPROTO_IPIP,
+			.n_proto = __bpf_constant_htons(ETH_P_IP),
+			.is_encap = true,
+		},
+		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
+	},
 };
 
 static int create_tap(const char *ifname)
@@ -225,6 +465,13 @@ void test_flow_dissector(void)
 			.data_size_in = sizeof(tests[i].pkt),
 			.data_out = &flow_keys,
 		};
+		static struct bpf_flow_keys ctx = {};
+
+		if (tests[i].flags) {
+			tattr.ctx_in = &ctx;
+			tattr.ctx_size_in = sizeof(ctx);
+			ctx.flags = tests[i].flags;
+		}
 
 		err = bpf_prog_test_run_xattr(&tattr);
 		CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
@@ -251,9 +498,20 @@ void test_flow_dissector(void)
 	CHECK(err, "ifup", "err %d errno %d\n", err, errno);
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
-		struct bpf_flow_keys flow_keys = {};
+		/* Keep in sync with 'flags' from eth_get_headlen. */
+		__u32 eth_get_headlen_flags =
+			BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
 		struct bpf_prog_test_run_attr tattr = {};
-		__u32 key = 0;
+		struct bpf_flow_keys flow_keys = {};
+		__u32 key = (__u32)(tests[i].keys.sport) << 16 |
+			    tests[i].keys.dport;
+
+		/* For skb-less case we can't pass input flags; run
+		 * only the tests that have a matching set of flags.
+		 */
+		if (tests[i].flags != eth_get_headlen_flags)
+			continue;
 
 		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
 		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);
@@ -263,6 +521,9 @@ void test_flow_dissector(void)
 		CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);
 
 		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
+
+		err = bpf_map_delete_elem(keys_fd, &key);
+		CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err);
 	}
 
 	bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
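
The new cases above drive the dissector's flag handling through BPF_PROG_TEST_RUN: input flags go in via the ctx_in side of the attr (a struct bpf_flow_keys with only .flags set), and the dissected keys come back through data_out. A condensed sketch of that calling convention, assuming a kernel/libbpf of this patch's era with bpf_flow_keys.flags; the helper name is illustrative and error handling is elided:

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Run a flow dissector program over @pkt with @flags, leaving the
 * dissected keys in @keys. Returns what bpf_prog_test_run_xattr()
 * returns (0 on success, negative on error).
 */
static int run_dissector(int prog_fd, void *pkt, __u32 pkt_len,
			 __u32 flags, struct bpf_flow_keys *keys)
{
	struct bpf_flow_keys ctx = { .flags = flags };
	struct bpf_prog_test_run_attr tattr = {
		.prog_fd = prog_fd,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.data_out = keys,
		.ctx_in = &ctx,		/* flags travel in the context */
		.ctx_size_in = sizeof(ctx),
	};

	return bpf_prog_test_run_xattr(&tattr);
}

The skb-less path through the TAP device has no such context, which is why the second loop above can only run the cases whose flags match what eth_get_headlen() passes.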
diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
index c2a0a9d5591b..3d59b3c841fe 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
@@ -1,8 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <pthread.h>
+#include <sched.h>
+#include <sys/socket.h>
 #include <test_progs.h>
 
 #define MAX_CNT_RAWTP	10ull
 #define MAX_STACK_RAWTP	100
+
+static int duration = 0;
+
 struct get_stack_trace_t {
 	int pid;
 	int kern_stack_size;
@@ -13,7 +20,7 @@ struct get_stack_trace_t {
 	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
 };
 
-static int get_stack_print_output(void *data, int size)
+static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
 {
 	bool good_kern_stack = false, good_user_stack = false;
 	const char *nonjit_func = "___bpf_prog_run";
@@ -34,7 +41,7 @@
 		 * just assume it is good if the stack is not empty.
 		 * This could be improved in the future.
 		 */
-		if (jit_enabled) {
+		if (env.jit_enabled) {
 			found = num_stack > 0;
 		} else {
 			for (i = 0; i < num_stack; i++) {
@@ -51,7 +58,7 @@
 		}
 	} else {
 		num_stack = e->kern_stack_size / sizeof(__u64);
-		if (jit_enabled) {
+		if (env.jit_enabled) {
 			good_kern_stack = num_stack > 0;
 		} else {
 			for (i = 0; i < num_stack; i++) {
@@ -65,75 +72,76 @@
 		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
 			good_user_stack = true;
 	}
-	if (!good_kern_stack || !good_user_stack)
-		return LIBBPF_PERF_EVENT_ERROR;
-	if (cnt == MAX_CNT_RAWTP)
-		return LIBBPF_PERF_EVENT_DONE;
-
-	return LIBBPF_PERF_EVENT_CONT;
+	if (!good_kern_stack)
+		CHECK(!good_kern_stack, "kern_stack", "corrupted kernel stack\n");
+	if (!good_user_stack)
+		CHECK(!good_user_stack, "user_stack", "corrupted user stack\n");
 }
 
 void test_get_stack_raw_tp(void)
 {
 	const char *file = "./test_get_stack_rawtp.o";
-	int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
-	struct perf_event_attr attr = {};
+	const char *prog_name = "raw_tracepoint/sys_enter";
+	int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
+	struct perf_buffer_opts pb_opts = {};
+	struct perf_buffer *pb = NULL;
+	struct bpf_link *link = NULL;
 	struct timespec tv = {0, 10};
-	__u32 key = 0, duration = 0;
+	struct bpf_program *prog;
 	struct bpf_object *obj;
+	struct bpf_map *map;
+	cpu_set_t cpu_set;
 
 	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
 	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
 		return;
 
-	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
-	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
+	prog = bpf_object__find_program_by_title(obj, prog_name);
+	if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
 		goto close_prog;
 
-	perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
-	if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
-		  perfmap_fd, errno))
+	map = bpf_object__find_map_by_name(obj, "perfmap");
+	if (CHECK(!map, "bpf_find_map", "not found\n"))
 		goto close_prog;
 
 	err = load_kallsyms();
 	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
 		goto close_prog;
 
-	attr.sample_type = PERF_SAMPLE_RAW;
-	attr.type = PERF_TYPE_SOFTWARE;
-	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
-	pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
-			 -1/*group_fd*/, 0);
-	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
-		  errno))
+	CPU_ZERO(&cpu_set);
+	CPU_SET(0, &cpu_set);
+	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
+	if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno))
 		goto close_prog;
 
-	err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
-	if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
-		  errno))
+	link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
+	if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
 		goto close_prog;
 
-	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
-	if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
-		  err, errno))
-		goto close_prog;
-
-	err = perf_event_mmap(pmu_fd);
-	if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
+	pb_opts.sample_cb = get_stack_print_output;
+	pb = perf_buffer__new(bpf_map__fd(map), 8, &pb_opts);
+	if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
 		goto close_prog;
 
 	/* trigger some syscall action */
 	for (i = 0; i < MAX_CNT_RAWTP; i++)
 		nanosleep(&tv, NULL);
 
-	err = perf_event_poller(pmu_fd, get_stack_print_output);
-	if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
-		goto close_prog;
+	while (exp_cnt > 0) {
+		err = perf_buffer__poll(pb, 100);
+		if (err < 0 && CHECK(err < 0, "pb__poll", "err %d\n", err))
+			goto close_prog;
+		exp_cnt -= err;
+	}
 
 	goto close_prog_noerr;
 close_prog:
 	error_cnt++;
close_prog_noerr:
+	if (!IS_ERR_OR_NULL(link))
+		bpf_link__destroy(link);
+	if (!IS_ERR_OR_NULL(pb))
+		perf_buffer__free(pb);
 	bpf_object__close(obj);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
index 5633be43828f..4a4f428d1a78 100644
--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -1,15 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
 
-static int libbpf_debug_print(enum libbpf_print_level level,
-			      const char *format, va_list args)
-{
-	if (level == LIBBPF_DEBUG)
-		return 0;
-
-	return vfprintf(stderr, format, args);
-}
-
 void test_reference_tracking(void)
 {
 	const char *file = "./test_sk_lookup_kern.o";
@@ -36,9 +27,11 @@ void test_reference_tracking(void)
 
 		/* Expect verifier failure if test name has 'fail' */
 		if (strstr(title, "fail") != NULL) {
-			libbpf_set_print(NULL);
+			libbpf_print_fn_t old_print_fn;
+
+			old_print_fn = libbpf_set_print(NULL);
 			err = !bpf_program__load(prog, "GPL", 0);
-			libbpf_set_print(libbpf_debug_print);
+			libbpf_set_print(old_print_fn);
 		} else {
 			err = bpf_program__load(prog, "GPL", 0);
 		}
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 54218ee3c004..1575f0a1f586 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -203,7 +203,7 @@ static int test_send_signal_nmi(void)
 	if (pmu_fd == -1) {
 		if (errno == ENOENT) {
 			printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n",
-				__func__);
+			       __func__);
 			return 0;
 		}
 		/* Let the test fail with a more informative message */
@@ -219,11 +219,10 @@ void test_send_signal(void)
 {
 	int ret = 0;
 
-	ret |= test_send_signal_tracepoint();
-	ret |= test_send_signal_perf();
-	ret |= test_send_signal_nmi();
-	if (!ret)
-		printf("test_send_signal:OK\n");
-	else
-		printf("test_send_signal:FAIL\n");
+	if (test__start_subtest("send_signal_tracepoint"))
+		ret |= test_send_signal_tracepoint();
+	if (test__start_subtest("send_signal_perf"))
+		ret |= test_send_signal_perf();
+	if (test__start_subtest("send_signal_nmi"))
+		ret |= test_send_signal_nmi();
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
index 09e6b46f5515..15f7c272edb0 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
@@ -75,7 +75,8 @@ void test_xdp_noinline(void)
 	}
 	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
 		error_cnt++;
-		printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
+		printf("test_xdp_noinline:FAIL:stats %lld %lld\n",
+		       bytes, pkts);
 	}
 out:
 	bpf_object__close(obj);
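
For reference, the get_stack_raw_tp.c conversion above replaces a hand-rolled perf_event_open()/mmap/poll sequence with libbpf's perf_buffer, which owns the per-CPU rings and the PERF_EVENT_ARRAY map plumbing. A minimal consumer sketch, assuming the same libbpf generation as this patch (perf_buffer_opts with a sample_cb field); the callback body and map fd are placeholders:

#include <bpf/libbpf.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* handle one record copied out of the per-CPU ring */
}

int drain_perf_map(int perf_map_fd)
{
	struct perf_buffer_opts pb_opts = { .sample_cb = on_sample };
	struct perf_buffer *pb;
	int err;

	pb = perf_buffer__new(perf_map_fd, 8 /* pages per CPU ring */, &pb_opts);
	err = libbpf_get_error(pb);
	if (err)
		return err;

	/* perf_buffer__poll() returns how many records it consumed,
	 * which is what lets the test above count down exp_cnt.
	 */
	err = perf_buffer__poll(pb, 100 /* timeout, ms */);
	perf_buffer__free(pb);
	return err < 0 ? err : 0;
}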