Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
122 files changed, 7596 insertions, 1706 deletions
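The first file below, align.c, tracks a verifier output change: register-state strings in the log dropped the default "id=0" field and renamed "inv" to "scalar" (with "umax_value"/"umin_value" shortened to "umax"/"umin"), so every expected-match string in the test table is updated accordingly. Each ".matches" entry pairs an instruction line with a substring expected in the verifier log; a minimal sketch of that check follows, with the struct and helper names (reg_match, match_state_line) assumed for illustration since the real loop lives in do_test_single(), visible at the end of the align.c hunk:

    #include <string.h>

    struct reg_match {
            int line;               /* insn number the state line belongs to */
            const char *match;      /* substring expected in the verifier log */
    };

    /* hypothetical helper mirroring the strstr() check in do_test_single() */
    static int match_state_line(const char *line_ptr, const struct reg_match *m)
    {
            /* new format, e.g. "R3_w=scalar(umax=255,var_off=(0x0; 0xff))" */
            return strstr(line_ptr, m->match) ? 0 : -1;
    }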
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c index 0ee29e11eaee..970f09156eb4 100644 --- a/tools/testing/selftests/bpf/prog_tests/align.c +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -39,13 +39,13 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {0, "R1=ctx(id=0,off=0,imm=0)"}, + {0, "R1=ctx(off=0,imm=0)"}, {0, "R10=fp0"}, - {0, "R3_w=inv2"}, - {1, "R3_w=inv4"}, - {2, "R3_w=inv8"}, - {3, "R3_w=inv16"}, - {4, "R3_w=inv32"}, + {0, "R3_w=2"}, + {1, "R3_w=4"}, + {2, "R3_w=8"}, + {3, "R3_w=16"}, + {4, "R3_w=32"}, }, }, { @@ -67,19 +67,19 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {0, "R1=ctx(id=0,off=0,imm=0)"}, + {0, "R1=ctx(off=0,imm=0)"}, {0, "R10=fp0"}, - {0, "R3_w=inv1"}, - {1, "R3_w=inv2"}, - {2, "R3_w=inv4"}, - {3, "R3_w=inv8"}, - {4, "R3_w=inv16"}, - {5, "R3_w=inv1"}, - {6, "R4_w=inv32"}, - {7, "R4_w=inv16"}, - {8, "R4_w=inv8"}, - {9, "R4_w=inv4"}, - {10, "R4_w=inv2"}, + {0, "R3_w=1"}, + {1, "R3_w=2"}, + {2, "R3_w=4"}, + {3, "R3_w=8"}, + {4, "R3_w=16"}, + {5, "R3_w=1"}, + {6, "R4_w=32"}, + {7, "R4_w=16"}, + {8, "R4_w=8"}, + {9, "R4_w=4"}, + {10, "R4_w=2"}, }, }, { @@ -96,14 +96,14 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {0, "R1=ctx(id=0,off=0,imm=0)"}, + {0, "R1=ctx(off=0,imm=0)"}, {0, "R10=fp0"}, - {0, "R3_w=inv4"}, - {1, "R3_w=inv8"}, - {2, "R3_w=inv10"}, - {3, "R4_w=inv8"}, - {4, "R4_w=inv12"}, - {5, "R4_w=inv14"}, + {0, "R3_w=4"}, + {1, "R3_w=8"}, + {2, "R3_w=10"}, + {3, "R4_w=8"}, + {4, "R4_w=12"}, + {5, "R4_w=14"}, }, }, { @@ -118,12 +118,12 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {0, "R1=ctx(id=0,off=0,imm=0)"}, + {0, "R1=ctx(off=0,imm=0)"}, {0, "R10=fp0"}, - {0, "R3_w=inv7"}, - {1, "R3_w=inv7"}, - {2, "R3_w=inv14"}, - {3, "R3_w=inv56"}, + {0, "R3_w=7"}, + {1, "R3_w=7"}, + {2, "R3_w=14"}, + {3, "R3_w=56"}, }, }, @@ -161,19 +161,19 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {6, "R0_w=pkt(id=0,off=8,r=8,imm=0)"}, - {6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, - {7, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, - {8, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {9, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, - {10, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, - {12, "R3_w=pkt_end(id=0,off=0,imm=0)"}, - {17, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, - {18, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"}, - {19, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, - {20, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, - {21, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {22, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, + {6, "R0_w=pkt(off=8,r=8,imm=0)"}, + {6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {7, "R3_w=scalar(umax=510,var_off=(0x0; 0x1fe))"}, + {8, "R3_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, + {9, "R3_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"}, + {10, "R3_w=scalar(umax=4080,var_off=(0x0; 0xff0))"}, + {12, "R3_w=pkt_end(off=0,imm=0)"}, + {17, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {18, "R4_w=scalar(umax=8160,var_off=(0x0; 0x1fe0))"}, + {19, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"}, + {20, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"}, + {21, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, 
+ {22, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"}, }, }, { @@ -194,16 +194,16 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, - {7, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, - {8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, - {9, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, - {10, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, - {11, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, - {12, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {13, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, - {14, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, - {15, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, + {6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {7, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, + {8, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {9, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, + {10, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"}, + {11, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, + {12, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, + {13, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, + {14, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"}, + {15, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"}, }, }, { @@ -234,14 +234,14 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {2, "R5_w=pkt(id=0,off=0,r=0,imm=0)"}, - {4, "R5_w=pkt(id=0,off=14,r=0,imm=0)"}, - {5, "R4_w=pkt(id=0,off=14,r=0,imm=0)"}, - {9, "R2=pkt(id=0,off=0,r=18,imm=0)"}, - {10, "R5=pkt(id=0,off=14,r=18,imm=0)"}, - {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, - {13, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"}, - {14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"}, + {2, "R5_w=pkt(off=0,r=0,imm=0)"}, + {4, "R5_w=pkt(off=14,r=0,imm=0)"}, + {5, "R4_w=pkt(off=14,r=0,imm=0)"}, + {9, "R2=pkt(off=0,r=18,imm=0)"}, + {10, "R5=pkt(off=14,r=18,imm=0)"}, + {10, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {13, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"}, + {14, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"}, }, }, { @@ -296,59 +296,59 @@ static struct bpf_align_test tests[] = { /* Calculated offset in R6 has unknown value, but known * alignment of 4. */ - {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, - {7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, + {7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, /* Offset is added to packet pointer R5, resulting in * known fixed offset, and variable offset from R6. */ - {11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {11, "R5_w=pkt(id=1,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, /* At the time the word size load is performed from R5, * it's total offset is NET_IP_ALIGN + reg->off (0) + * reg->aux_off (14) which is 16. Then the variable * offset is considered using reg->aux_off_align which * is 4 and meets the load's requirements. */ - {15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, + {15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, /* Variable offset is added to R5 packet pointer, * resulting in auxiliary alignment of 4. 
*/ - {17, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {17, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5, resulting in * reg->off of 14. */ - {18, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {18, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off * (14) which is 16. Then the variable offset is 4-byte * aligned, so the total offset is 4-byte aligned and * meets the load's requirements. */ - {23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {23, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, + {23, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5 packet pointer, * resulting in reg->off value of 14. */ - {25, "R5_w=pkt(id=0,off=14,r=8"}, + {25, "R5_w=pkt(off=14,r=8"}, /* Variable offset is added to R5, resulting in a * variable offset of (4n). */ - {26, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {26, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, /* Constant is added to R5 again, setting reg->off to 18. */ - {27, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {27, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, /* And once more we add a variable; resulting var_off * is still (4n), fixed offset is not changed. * Also, we create a new reg->id. */ - {28, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {28, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (18) * which is 20. Then the variable offset is (4n), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, - {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {33, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"}, + {33, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"}, }, }, { @@ -386,36 +386,36 @@ static struct bpf_align_test tests[] = { /* Calculated offset in R6 has unknown value, but known * alignment of 4. */ - {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, - {7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, + {7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, /* Adding 14 makes R6 be (4n+2) */ - {8, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, + {8, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"}, /* Packet pointer has (4n+2) offset */ - {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"}, - {12, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"}, + {11, "R5_w=pkt(id=1,off=0,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"}, + {12, "R4=pkt(id=1,off=4,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (0) * which is 2. Then the variable offset is (4n+2), so * the total offset is 4-byte aligned and meets the * load's requirements. 
*/ - {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"}, + {15, "R5=pkt(id=1,off=0,r=4,umin=14,umax=1034,var_off=(0x2; 0x7fc)"}, /* Newly read value in R6 was shifted left by 2, so has * known alignment of 4. */ - {17, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {17, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, /* Added (4n) to packet pointer's (4n+2) var_off, giving * another (4n+2). */ - {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"}, - {20, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"}, + {19, "R5_w=pkt(id=2,off=0,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"}, + {20, "R4=pkt(id=2,off=4,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (0) * which is 2. Then the variable offset is (4n+2), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"}, + {23, "R5=pkt(id=2,off=0,r=4,umin=14,umax=2054,var_off=(0x2; 0xffc)"}, }, }, { @@ -448,18 +448,18 @@ static struct bpf_align_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = REJECT, .matches = { - {3, "R5_w=pkt_end(id=0,off=0,imm=0)"}, + {3, "R5_w=pkt_end(off=0,imm=0)"}, /* (ptr - ptr) << 2 == unknown, (4n) */ - {5, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"}, + {5, "R5_w=scalar(smax=9223372036854775804,umax=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"}, /* (4n) + 14 == (4n+2). We blow our bounds, because * the add could overflow. */ - {6, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, + {6, "R5_w=scalar(smin=-9223372036854775806,smax=9223372036854775806,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, /* Checked s>=0 */ - {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, + {9, "R5=scalar(umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, /* packet pointer + nonnegative (4n+2) */ - {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, - {12, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, + {11, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, + {12, "R4_w=pkt(id=1,off=4,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine. * We checked the bounds, but it might have been able * to overflow if the packet pointer started in the @@ -467,7 +467,7 @@ static struct bpf_align_test tests[] = { * So we did not get a 'range' on R6, and the access * attempt will fail. */ - {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, + {15, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, } }, { @@ -502,23 +502,23 @@ static struct bpf_align_test tests[] = { /* Calculated offset in R6 has unknown value, but known * alignment of 4. 
*/ - {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, - {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, + {8, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, /* Adding 14 makes R6 be (4n+2) */ - {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, + {9, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"}, /* New unknown value in R7 is (4n) */ - {10, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {10, "R7_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, /* Subtracting it from R6 blows our unsigned bounds */ - {11, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, + {11, "R6=scalar(smin=-1006,smax=1034,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, /* Checked s>= 0 */ - {14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"}, + {14, "R6=scalar(umin=2,umax=1034,var_off=(0x2; 0x7fc))"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (0) * which is 2. Then the variable offset is (4n+2), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"}, + {20, "R5=pkt(id=2,off=0,r=4,umin=2,umax=1034,var_off=(0x2; 0x7fc)"}, }, }, @@ -556,23 +556,23 @@ static struct bpf_align_test tests[] = { /* Calculated offset in R6 has unknown value, but known * alignment of 4. */ - {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, - {9, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"}, + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, + {9, "R6_w=scalar(umax=60,var_off=(0x0; 0x3c))"}, /* Adding 14 makes R6 be (4n+2) */ - {10, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"}, + {10, "R6_w=scalar(umin=14,umax=74,var_off=(0x2; 0x7c))"}, /* Subtracting from packet pointer overflows ubounds */ - {13, "R5_w=pkt(id=2,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"}, + {13, "R5_w=pkt(id=2,off=0,r=8,umin=18446744073709551542,umax=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"}, /* New unknown value in R7 is (4n), >= 76 */ - {14, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"}, + {14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"}, /* Adding it to packet pointer gives nice bounds again */ - {16, "R5_w=pkt(id=3,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"}, + {16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (0) * which is 2. Then the variable offset is (4n+2), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {20, "R5=pkt(id=3,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"}, + {20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"}, }, }, }; @@ -648,8 +648,8 @@ static int do_test_single(struct bpf_align_test *test) /* Check the next line as well in case the previous line * did not have a corresponding bpf insn. 
Example: * func#0 @0 - * 0: R1=ctx(id=0,off=0,imm=0) R10=fp0 - * 0: (b7) r3 = 2 ; R3_w=inv2 + * 0: R1=ctx(off=0,imm=0) R10=fp0 + * 0: (b7) r3 = 2 ; R3_w=2 */ if (!strstr(line_ptr, m.match)) { cur_line = -1; diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c new file mode 100644 index 000000000000..b17bfa0e0aac --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +#include "test_progs.h" +#include "testing_helpers.h" + +static void init_test_filter_set(struct test_filter_set *set) +{ + set->cnt = 0; + set->tests = NULL; +} + +static void free_test_filter_set(struct test_filter_set *set) +{ + int i, j; + + for (i = 0; i < set->cnt; i++) { + for (j = 0; j < set->tests[i].subtest_cnt; j++) + free((void *)set->tests[i].subtests[j]); + free(set->tests[i].subtests); + free(set->tests[i].name); + } + + free(set->tests); + init_test_filter_set(set); +} + +static void test_parse_test_list(void) +{ + struct test_filter_set set; + + init_test_filter_set(&set); + + ASSERT_OK(parse_test_list("arg_parsing", &set, true), "parsing"); + if (!ASSERT_EQ(set.cnt, 1, "test filters count")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "subtest name"); + free_test_filter_set(&set); + + ASSERT_OK(parse_test_list("arg_parsing,bpf_cookie", &set, true), + "parsing"); + if (!ASSERT_EQ(set.cnt, 2, "count of test filters")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count"); + ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name"); + ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name"); + free_test_filter_set(&set); + + ASSERT_OK(parse_test_list("arg_parsing/arg_parsing,bpf_cookie", + &set, + true), + "parsing"); + if (!ASSERT_EQ(set.cnt, 2, "count of test filters")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count")) + goto error; + ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]), + "subtest name"); + ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name"); + free_test_filter_set(&set); + + ASSERT_OK(parse_test_list("arg_parsing/arg_parsing", &set, true), + "parsing"); + ASSERT_OK(parse_test_list("bpf_cookie", &set, true), "parsing"); + ASSERT_OK(parse_test_list("send_signal", &set, true), "parsing"); + if (!ASSERT_EQ(set.cnt, 3, "count of test filters")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count")) + goto error; + ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count"); + ASSERT_EQ(set.tests[2].subtest_cnt, 0, "subtest filters count"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name"); + ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]), + "subtest name"); + ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name"); + ASSERT_OK(strcmp("send_signal", set.tests[2].name), "test name"); + 
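The assertions above pin down parse_test_list()'s filter grammar: commas separate test filters, and '/' attaches a subtest filter to the preceding test name; as the final case further below shows, passing false for the third argument wraps names in '*' globs instead of matching them exactly. A usage sketch, assuming the signature and zero-on-success convention these calls imply:

    struct test_filter_set set = {};

    /* two test filters; the first carries one subtest filter */
    if (!parse_test_list("arg_parsing/arg_parsing,bpf_cookie", &set, true)) {
            /* set.cnt == 2
             * set.tests[0].name == "arg_parsing"
             * set.tests[0].subtests[0] == "arg_parsing"
             * set.tests[1].name == "bpf_cookie"
             */
    }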
free_test_filter_set(&set); + + ASSERT_OK(parse_test_list("bpf_cookie/trace", &set, false), "parsing"); + if (!ASSERT_EQ(set.cnt, 1, "count of test filters")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count")) + goto error; + ASSERT_OK(strcmp("*bpf_cookie*", set.tests[0].name), "test name"); + ASSERT_OK(strcmp("*trace*", set.tests[0].subtests[0]), "subtest name"); +error: + free_test_filter_set(&set); +} + +void test_arg_parsing(void) +{ + if (test__start_subtest("test_parse_test_list")) + test_parse_test_list(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c index 86b7d5d84eec..13e101f370a1 100644 --- a/tools/testing/selftests/bpf/prog_tests/atomics.c +++ b/tools/testing/selftests/bpf/prog_tests/atomics.c @@ -7,19 +7,15 @@ static void test_add(struct atomics_lskel *skel) { int err, prog_fd; - __u32 duration = 0, retval; - int link_fd; - - link_fd = atomics_lskel__add__attach(skel); - if (!ASSERT_GT(link_fd, 0, "attach(add)")) - return; + LIBBPF_OPTS(bpf_test_run_opts, topts); + /* No need to attach it, just run it directly */ prog_fd = skel->progs.add.prog_fd; - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); - if (CHECK(err || retval, "test_run add", - "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) - goto cleanup; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; ASSERT_EQ(skel->data->add64_value, 3, "add64_value"); ASSERT_EQ(skel->bss->add64_result, 1, "add64_result"); @@ -31,28 +27,20 @@ static void test_add(struct atomics_lskel *skel) ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result"); ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value"); - -cleanup: - close(link_fd); } static void test_sub(struct atomics_lskel *skel) { int err, prog_fd; - __u32 duration = 0, retval; - int link_fd; - - link_fd = atomics_lskel__sub__attach(skel); - if (!ASSERT_GT(link_fd, 0, "attach(sub)")) - return; + LIBBPF_OPTS(bpf_test_run_opts, topts); + /* No need to attach it, just run it directly */ prog_fd = skel->progs.sub.prog_fd; - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); - if (CHECK(err || retval, "test_run sub", - "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration)) - goto cleanup; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value"); ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result"); @@ -64,27 +52,20 @@ static void test_sub(struct atomics_lskel *skel) ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result"); ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value"); - -cleanup: - close(link_fd); } static void test_and(struct atomics_lskel *skel) { int err, prog_fd; - __u32 duration = 0, retval; - int link_fd; - - link_fd = atomics_lskel__and__attach(skel); - if (!ASSERT_GT(link_fd, 0, "attach(and)")) - return; + LIBBPF_OPTS(bpf_test_run_opts, topts); + /* No need to attach it, just run it directly */ prog_fd = skel->progs.and.prog_fd; - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); - if (CHECK(err || retval, "test_run and", - "err %d errno %d retval 
%d duration %d\n", err, errno, retval, duration)) - goto cleanup; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value"); ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result"); @@ -93,27 +74,20 @@ static void test_and(struct atomics_lskel *skel) ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result"); ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value"); -cleanup: - close(link_fd); } static void test_or(struct atomics_lskel *skel) { int err, prog_fd; - __u32 duration = 0, retval; - int link_fd; - - link_fd = atomics_lskel__or__attach(skel); - if (!ASSERT_GT(link_fd, 0, "attach(or)")) - return; + LIBBPF_OPTS(bpf_test_run_opts, topts); + /* No need to attach it, just run it directly */ prog_fd = skel->progs.or.prog_fd; - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); - if (CHECK(err || retval, "test_run or", - "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration)) - goto cleanup; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value"); ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result"); @@ -122,26 +96,20 @@ static void test_or(struct atomics_lskel *skel) ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result"); ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value"); -cleanup: - close(link_fd); } static void test_xor(struct atomics_lskel *skel) { int err, prog_fd; - __u32 duration = 0, retval; - int link_fd; - - link_fd = atomics_lskel__xor__attach(skel); - if (!ASSERT_GT(link_fd, 0, "attach(xor)")) - return; + LIBBPF_OPTS(bpf_test_run_opts, topts); + /* No need to attach it, just run it directly */ prog_fd = skel->progs.xor.prog_fd; - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); - if (CHECK(err || retval, "test_run xor", - "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) - goto cleanup; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value"); ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result"); @@ -150,26 +118,20 @@ static void test_xor(struct atomics_lskel *skel) ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result"); ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_nxoreturn_value"); -cleanup: - close(link_fd); } static void test_cmpxchg(struct atomics_lskel *skel) { int err, prog_fd; - __u32 duration = 0, retval; - int link_fd; - - link_fd = atomics_lskel__cmpxchg__attach(skel); - if (!ASSERT_GT(link_fd, 0, "attach(cmpxchg)")) - return; + LIBBPF_OPTS(bpf_test_run_opts, topts); + /* No need to attach it, just run it directly */ prog_fd = skel->progs.cmpxchg.prog_fd; - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); - if (CHECK(err || retval, "test_run cmpxchg", - "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) - goto cleanup; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + 
return; ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value"); ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg_result_fail"); @@ -178,45 +140,34 @@ static void test_cmpxchg(struct atomics_lskel *skel) ASSERT_EQ(skel->data->cmpxchg32_value, 2, "lcmpxchg32_value"); ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg_result_fail"); ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed"); - -cleanup: - close(link_fd); } static void test_xchg(struct atomics_lskel *skel) { int err, prog_fd; - __u32 duration = 0, retval; - int link_fd; - - link_fd = atomics_lskel__xchg__attach(skel); - if (!ASSERT_GT(link_fd, 0, "attach(xchg)")) - return; + LIBBPF_OPTS(bpf_test_run_opts, topts); + /* No need to attach it, just run it directly */ prog_fd = skel->progs.xchg.prog_fd; - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); - if (CHECK(err || retval, "test_run xchg", - "err %d errno %d retval %d duration %d\n", err, errno, retval, duration)) - goto cleanup; + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value"); ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result"); ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value"); ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result"); - -cleanup: - close(link_fd); } void test_atomics(void) { struct atomics_lskel *skel; - __u32 duration = 0; skel = atomics_lskel__open_and_load(); - if (CHECK(!skel, "skel_load", "atomics skeleton failed\n")) + if (!ASSERT_OK_PTR(skel, "atomics skeleton load")) return; if (skel->data->skip_tests) { diff --git a/tools/testing/selftests/bpf/prog_tests/attach_probe.c b/tools/testing/selftests/bpf/prog_tests/attach_probe.c index d0bd51eb23c8..08c0601b3e84 100644 --- a/tools/testing/selftests/bpf/prog_tests/attach_probe.c +++ b/tools/testing/selftests/bpf/prog_tests/attach_probe.c @@ -5,21 +5,28 @@ /* this is how USDT semaphore is actually defined, except volatile modifier */ volatile unsigned short uprobe_ref_ctr __attribute__((unused)) __attribute((section(".probes"))); -/* attach point */ -static void method(void) { - return ; +/* uprobe attach point */ +static void trigger_func(void) +{ + asm volatile (""); +} + +/* attach point for byname uprobe */ +static void trigger_func2(void) +{ + asm volatile (""); } void test_attach_probe(void) { DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, uprobe_opts); - int duration = 0; struct bpf_link *kprobe_link, *kretprobe_link; struct bpf_link *uprobe_link, *uretprobe_link; struct test_attach_probe* skel; - size_t uprobe_offset; - ssize_t base_addr, ref_ctr_offset; + ssize_t uprobe_offset, ref_ctr_offset; + struct bpf_link *uprobe_err_link; bool legacy; + char *mem; /* Check if new-style kprobe/uprobe API is supported. 
* Kernels that support new FD-based kprobe and uprobe BPF attachment @@ -34,22 +41,21 @@ void test_attach_probe(void) */ legacy = access("/sys/bus/event_source/devices/kprobe/type", F_OK) != 0; - base_addr = get_base_addr(); - if (CHECK(base_addr < 0, "get_base_addr", - "failed to find base addr: %zd", base_addr)) + uprobe_offset = get_uprobe_offset(&trigger_func); + if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset")) return; - uprobe_offset = get_uprobe_offset(&method, base_addr); ref_ctr_offset = get_rel_offset((uintptr_t)&uprobe_ref_ctr); if (!ASSERT_GE(ref_ctr_offset, 0, "ref_ctr_offset")) return; skel = test_attach_probe__open_and_load(); - if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + if (!ASSERT_OK_PTR(skel, "skel_open")) return; - if (CHECK(!skel->bss, "check_bss", ".bss wasn't mmap()-ed\n")) + if (!ASSERT_OK_PTR(skel->bss, "check_bss")) goto cleanup; + /* manual-attach kprobe/kretprobe */ kprobe_link = bpf_program__attach_kprobe(skel->progs.handle_kprobe, false /* retprobe */, SYS_NANOSLEEP_KPROBE_NAME); @@ -64,6 +70,13 @@ void test_attach_probe(void) goto cleanup; skel->links.handle_kretprobe = kretprobe_link; + /* auto-attachable kprobe and kretprobe */ + skel->links.handle_kprobe_auto = bpf_program__attach(skel->progs.handle_kprobe_auto); + ASSERT_OK_PTR(skel->links.handle_kprobe_auto, "attach_kprobe_auto"); + + skel->links.handle_kretprobe_auto = bpf_program__attach(skel->progs.handle_kretprobe_auto); + ASSERT_OK_PTR(skel->links.handle_kretprobe_auto, "attach_kretprobe_auto"); + if (!legacy) ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_before"); @@ -92,26 +105,76 @@ void test_attach_probe(void) goto cleanup; skel->links.handle_uretprobe = uretprobe_link; - /* trigger & validate kprobe && kretprobe */ - usleep(1); - - if (CHECK(skel->bss->kprobe_res != 1, "check_kprobe_res", - "wrong kprobe res: %d\n", skel->bss->kprobe_res)) + /* verify auto-attach fails for old-style uprobe definition */ + uprobe_err_link = bpf_program__attach(skel->progs.handle_uprobe_byname); + if (!ASSERT_EQ(libbpf_get_error(uprobe_err_link), -EOPNOTSUPP, + "auto-attach should fail for old-style name")) goto cleanup; - if (CHECK(skel->bss->kretprobe_res != 2, "check_kretprobe_res", - "wrong kretprobe res: %d\n", skel->bss->kretprobe_res)) + + uprobe_opts.func_name = "trigger_func2"; + uprobe_opts.retprobe = false; + uprobe_opts.ref_ctr_offset = 0; + skel->links.handle_uprobe_byname = + bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname, + 0 /* this pid */, + "/proc/self/exe", + 0, &uprobe_opts); + if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname, "attach_uprobe_byname")) goto cleanup; - /* trigger & validate uprobe & uretprobe */ - method(); + /* verify auto-attach works */ + skel->links.handle_uretprobe_byname = + bpf_program__attach(skel->progs.handle_uretprobe_byname); + if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname, "attach_uretprobe_byname")) + goto cleanup; - if (CHECK(skel->bss->uprobe_res != 3, "check_uprobe_res", - "wrong uprobe res: %d\n", skel->bss->uprobe_res)) + /* test attach by name for a library function, using the library + * as the binary argument. libc.so.6 will be resolved via dlopen()/dlinfo(). 
+ */ + uprobe_opts.func_name = "malloc"; + uprobe_opts.retprobe = false; + skel->links.handle_uprobe_byname2 = + bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe_byname2, + 0 /* this pid */, + "libc.so.6", + 0, &uprobe_opts); + if (!ASSERT_OK_PTR(skel->links.handle_uprobe_byname2, "attach_uprobe_byname2")) goto cleanup; - if (CHECK(skel->bss->uretprobe_res != 4, "check_uretprobe_res", - "wrong uretprobe res: %d\n", skel->bss->uretprobe_res)) + + uprobe_opts.func_name = "free"; + uprobe_opts.retprobe = true; + skel->links.handle_uretprobe_byname2 = + bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe_byname2, + -1 /* any pid */, + "libc.so.6", + 0, &uprobe_opts); + if (!ASSERT_OK_PTR(skel->links.handle_uretprobe_byname2, "attach_uretprobe_byname2")) goto cleanup; + /* trigger & validate kprobe && kretprobe */ + usleep(1); + + /* trigger & validate shared library u[ret]probes attached by name */ + mem = malloc(1); + free(mem); + + /* trigger & validate uprobe & uretprobe */ + trigger_func(); + + /* trigger & validate uprobe attached by name */ + trigger_func2(); + + ASSERT_EQ(skel->bss->kprobe_res, 1, "check_kprobe_res"); + ASSERT_EQ(skel->bss->kprobe2_res, 11, "check_kprobe_auto_res"); + ASSERT_EQ(skel->bss->kretprobe_res, 2, "check_kretprobe_res"); + ASSERT_EQ(skel->bss->kretprobe2_res, 22, "check_kretprobe_auto_res"); + ASSERT_EQ(skel->bss->uprobe_res, 3, "check_uprobe_res"); + ASSERT_EQ(skel->bss->uretprobe_res, 4, "check_uretprobe_res"); + ASSERT_EQ(skel->bss->uprobe_byname_res, 5, "check_uprobe_byname_res"); + ASSERT_EQ(skel->bss->uretprobe_byname_res, 6, "check_uretprobe_byname_res"); + ASSERT_EQ(skel->bss->uprobe_byname2_res, 7, "check_uprobe_byname2_res"); + ASSERT_EQ(skel->bss->uretprobe_byname2_res, 8, "check_uretprobe_byname2_res"); + cleanup: test_attach_probe__destroy(skel); ASSERT_EQ(uprobe_ref_ctr, 0, "uprobe_ref_ctr_cleanup"); diff --git a/tools/testing/selftests/bpf/prog_tests/bind_perm.c b/tools/testing/selftests/bpf/prog_tests/bind_perm.c index d0f06e40c16d..a1766a298bb7 100644 --- a/tools/testing/selftests/bpf/prog_tests/bind_perm.c +++ b/tools/testing/selftests/bpf/prog_tests/bind_perm.c @@ -1,13 +1,24 @@ // SPDX-License-Identifier: GPL-2.0 -#include <test_progs.h> -#include "bind_perm.skel.h" - +#define _GNU_SOURCE +#include <sched.h> +#include <stdlib.h> #include <sys/types.h> #include <sys/socket.h> -#include <sys/capability.h> + +#include "test_progs.h" +#include "cap_helpers.h" +#include "bind_perm.skel.h" static int duration; +static int create_netns(void) +{ + if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) + return -1; + + return 0; +} + void try_bind(int family, int port, int expected_errno) { struct sockaddr_storage addr = {}; @@ -38,43 +49,16 @@ close_socket: close(fd); } -bool cap_net_bind_service(cap_flag_value_t flag) -{ - const cap_value_t cap_net_bind_service = CAP_NET_BIND_SERVICE; - cap_flag_value_t original_value; - bool was_effective = false; - cap_t caps; - - caps = cap_get_proc(); - if (CHECK(!caps, "cap_get_proc", "errno %d", errno)) - goto free_caps; - - if (CHECK(cap_get_flag(caps, CAP_NET_BIND_SERVICE, CAP_EFFECTIVE, - &original_value), - "cap_get_flag", "errno %d", errno)) - goto free_caps; - - was_effective = (original_value == CAP_SET); - - if (CHECK(cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_bind_service, - flag), - "cap_set_flag", "errno %d", errno)) - goto free_caps; - - if (CHECK(cap_set_proc(caps), "cap_set_proc", "errno %d", errno)) - goto free_caps; - -free_caps: - CHECK(cap_free(caps), "cap_free", "errno 
%d", errno); - return was_effective; -} - void test_bind_perm(void) { - bool cap_was_effective; + const __u64 net_bind_svc_cap = 1ULL << CAP_NET_BIND_SERVICE; struct bind_perm *skel; + __u64 old_caps = 0; int cgroup_fd; + if (create_netns()) + return; + cgroup_fd = test__join_cgroup("/bind_perm"); if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno)) return; @@ -91,7 +75,8 @@ void test_bind_perm(void) if (!ASSERT_OK_PTR(skel, "bind_v6_prog")) goto close_skeleton; - cap_was_effective = cap_net_bind_service(CAP_CLEAR); + ASSERT_OK(cap_disable_effective(net_bind_svc_cap, &old_caps), + "cap_disable_effective"); try_bind(AF_INET, 110, EACCES); try_bind(AF_INET6, 110, EACCES); @@ -99,8 +84,9 @@ void test_bind_perm(void) try_bind(AF_INET, 111, 0); try_bind(AF_INET6, 111, 0); - if (cap_was_effective) - cap_net_bind_service(CAP_SET); + if (old_caps & net_bind_svc_cap) + ASSERT_OK(cap_enable_effective(net_bind_svc_cap, NULL), + "cap_enable_effective"); close_skeleton: bind_perm__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index 5eea3c3a40fe..2974b44f80fa 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -4,9 +4,19 @@ #include <pthread.h> #include <sched.h> #include <sys/syscall.h> +#include <sys/mman.h> #include <unistd.h> #include <test_progs.h> +#include <network_helpers.h> +#include <bpf/btf.h> #include "test_bpf_cookie.skel.h" +#include "kprobe_multi.skel.h" + +/* uprobe attach point */ +static void trigger_func(void) +{ + asm volatile (""); +} static void kprobe_subtest(struct test_bpf_cookie *skel) { @@ -57,16 +67,188 @@ cleanup: bpf_link__destroy(retlink2); } +static void kprobe_multi_test_run(struct kprobe_multi *skel) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts); + int err, prog_fd; + + prog_fd = bpf_program__fd(skel->progs.trigger); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); + + ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result"); + ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result"); + ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result"); + ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result"); + ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result"); + ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result"); + ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result"); + ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result"); + + ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result"); + ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result"); + ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result"); + ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result"); + ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result"); + ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result"); + ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result"); + ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result"); +} + +static void kprobe_multi_link_api_subtest(void) +{ + int prog_fd, link1_fd = -1, link2_fd = -1; + struct kprobe_multi *skel = NULL; + LIBBPF_OPTS(bpf_link_create_opts, opts); + unsigned long long addrs[8]; + __u64 cookies[8]; + + if (!ASSERT_OK(load_kallsyms(), "load_kallsyms")) + goto cleanup; + + skel = 
kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + skel->bss->test_cookie = true; + +#define GET_ADDR(__sym, __addr) ({ \ + __addr = ksym_get_addr(__sym); \ + if (!ASSERT_NEQ(__addr, 0, "ksym_get_addr " #__sym)) \ + goto cleanup; \ +}) + + GET_ADDR("bpf_fentry_test1", addrs[0]); + GET_ADDR("bpf_fentry_test3", addrs[1]); + GET_ADDR("bpf_fentry_test4", addrs[2]); + GET_ADDR("bpf_fentry_test5", addrs[3]); + GET_ADDR("bpf_fentry_test6", addrs[4]); + GET_ADDR("bpf_fentry_test7", addrs[5]); + GET_ADDR("bpf_fentry_test2", addrs[6]); + GET_ADDR("bpf_fentry_test8", addrs[7]); + +#undef GET_ADDR + + cookies[0] = 1; /* bpf_fentry_test1 */ + cookies[1] = 2; /* bpf_fentry_test3 */ + cookies[2] = 3; /* bpf_fentry_test4 */ + cookies[3] = 4; /* bpf_fentry_test5 */ + cookies[4] = 5; /* bpf_fentry_test6 */ + cookies[5] = 6; /* bpf_fentry_test7 */ + cookies[6] = 7; /* bpf_fentry_test2 */ + cookies[7] = 8; /* bpf_fentry_test8 */ + + opts.kprobe_multi.addrs = (const unsigned long *) &addrs; + opts.kprobe_multi.cnt = ARRAY_SIZE(addrs); + opts.kprobe_multi.cookies = (const __u64 *) &cookies; + prog_fd = bpf_program__fd(skel->progs.test_kprobe); + + link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts); + if (!ASSERT_GE(link1_fd, 0, "link1_fd")) + goto cleanup; + + cookies[0] = 8; /* bpf_fentry_test1 */ + cookies[1] = 7; /* bpf_fentry_test3 */ + cookies[2] = 6; /* bpf_fentry_test4 */ + cookies[3] = 5; /* bpf_fentry_test5 */ + cookies[4] = 4; /* bpf_fentry_test6 */ + cookies[5] = 3; /* bpf_fentry_test7 */ + cookies[6] = 2; /* bpf_fentry_test2 */ + cookies[7] = 1; /* bpf_fentry_test8 */ + + opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN; + prog_fd = bpf_program__fd(skel->progs.test_kretprobe); + + link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &opts); + if (!ASSERT_GE(link2_fd, 0, "link2_fd")) + goto cleanup; + + kprobe_multi_test_run(skel); + +cleanup: + close(link1_fd); + close(link2_fd); + kprobe_multi__destroy(skel); +} + +static void kprobe_multi_attach_api_subtest(void) +{ + struct bpf_link *link1 = NULL, *link2 = NULL; + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + LIBBPF_OPTS(bpf_test_run_opts, topts); + struct kprobe_multi *skel = NULL; + const char *syms[8] = { + "bpf_fentry_test1", + "bpf_fentry_test3", + "bpf_fentry_test4", + "bpf_fentry_test5", + "bpf_fentry_test6", + "bpf_fentry_test7", + "bpf_fentry_test2", + "bpf_fentry_test8", + }; + __u64 cookies[8]; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + skel->bss->test_cookie = true; + + cookies[0] = 1; /* bpf_fentry_test1 */ + cookies[1] = 2; /* bpf_fentry_test3 */ + cookies[2] = 3; /* bpf_fentry_test4 */ + cookies[3] = 4; /* bpf_fentry_test5 */ + cookies[4] = 5; /* bpf_fentry_test6 */ + cookies[5] = 6; /* bpf_fentry_test7 */ + cookies[6] = 7; /* bpf_fentry_test2 */ + cookies[7] = 8; /* bpf_fentry_test8 */ + + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = cookies; + + link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe, + NULL, &opts); + if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts")) + goto cleanup; + + cookies[0] = 8; /* bpf_fentry_test1 */ + cookies[1] = 7; /* bpf_fentry_test3 */ + cookies[2] = 6; /* bpf_fentry_test4 */ + cookies[3] = 5; /* bpf_fentry_test5 */ + cookies[4] = 4; /* bpf_fentry_test6 */ + cookies[5] = 3; /* bpf_fentry_test7 */ + cookies[6] = 2; /* bpf_fentry_test2 */ + 
cookies[7] = 1; /* bpf_fentry_test8 */ + + opts.retprobe = true; + + link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe, + NULL, &opts); + if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts")) + goto cleanup; + + kprobe_multi_test_run(skel); + +cleanup: + bpf_link__destroy(link2); + bpf_link__destroy(link1); + kprobe_multi__destroy(skel); +} static void uprobe_subtest(struct test_bpf_cookie *skel) { DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts); struct bpf_link *link1 = NULL, *link2 = NULL; struct bpf_link *retlink1 = NULL, *retlink2 = NULL; - size_t uprobe_offset; - ssize_t base_addr; + ssize_t uprobe_offset; - base_addr = get_base_addr(); - uprobe_offset = get_uprobe_offset(&get_base_addr, base_addr); + uprobe_offset = get_uprobe_offset(&trigger_func); + if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset")) + goto cleanup; /* attach two uprobes */ opts.bpf_cookie = 0x100; @@ -99,7 +281,7 @@ static void uprobe_subtest(struct test_bpf_cookie *skel) goto cleanup; /* trigger uprobe && uretprobe */ - get_base_addr(); + trigger_func(); ASSERT_EQ(skel->bss->uprobe_res, 0x100 | 0x200, "uprobe_res"); ASSERT_EQ(skel->bss->uretprobe_res, 0x1000 | 0x2000, "uretprobe_res"); @@ -193,7 +375,7 @@ static void pe_subtest(struct test_bpf_cookie *skel) attr.type = PERF_TYPE_SOFTWARE; attr.config = PERF_COUNT_SW_CPU_CLOCK; attr.freq = 1; - attr.sample_freq = 4000; + attr.sample_freq = 1000; pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); if (!ASSERT_GE(pfd, 0, "perf_fd")) goto cleanup; @@ -231,6 +413,88 @@ cleanup: bpf_link__destroy(link); } +static void tracing_subtest(struct test_bpf_cookie *skel) +{ + __u64 cookie; + int prog_fd; + int fentry_fd = -1, fexit_fd = -1, fmod_ret_fd = -1; + LIBBPF_OPTS(bpf_test_run_opts, opts); + LIBBPF_OPTS(bpf_link_create_opts, link_opts); + + skel->bss->fentry_res = 0; + skel->bss->fexit_res = 0; + + cookie = 0x10000000000000L; + prog_fd = bpf_program__fd(skel->progs.fentry_test1); + link_opts.tracing.cookie = cookie; + fentry_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, &link_opts); + if (!ASSERT_GE(fentry_fd, 0, "fentry.link_create")) + goto cleanup; + + cookie = 0x20000000000000L; + prog_fd = bpf_program__fd(skel->progs.fexit_test1); + link_opts.tracing.cookie = cookie; + fexit_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FEXIT, &link_opts); + if (!ASSERT_GE(fexit_fd, 0, "fexit.link_create")) + goto cleanup; + + cookie = 0x30000000000000L; + prog_fd = bpf_program__fd(skel->progs.fmod_ret_test); + link_opts.tracing.cookie = cookie; + fmod_ret_fd = bpf_link_create(prog_fd, 0, BPF_MODIFY_RETURN, &link_opts); + if (!ASSERT_GE(fmod_ret_fd, 0, "fmod_ret.link_create")) + goto cleanup; + + prog_fd = bpf_program__fd(skel->progs.fentry_test1); + bpf_prog_test_run_opts(prog_fd, &opts); + + prog_fd = bpf_program__fd(skel->progs.fmod_ret_test); + bpf_prog_test_run_opts(prog_fd, &opts); + + ASSERT_EQ(skel->bss->fentry_res, 0x10000000000000L, "fentry_res"); + ASSERT_EQ(skel->bss->fexit_res, 0x20000000000000L, "fexit_res"); + ASSERT_EQ(skel->bss->fmod_ret_res, 0x30000000000000L, "fmod_ret_res"); + +cleanup: + if (fentry_fd >= 0) + close(fentry_fd); + if (fexit_fd >= 0) + close(fexit_fd); + if (fmod_ret_fd >= 0) + close(fmod_ret_fd); +} + +int stack_mprotect(void); + +static void lsm_subtest(struct test_bpf_cookie *skel) +{ + __u64 cookie; + int prog_fd; + int lsm_fd = -1; + LIBBPF_OPTS(bpf_link_create_opts, link_opts); + + skel->bss->lsm_res = 0; + + cookie = 0x90000000000090L; + prog_fd = 
bpf_program__fd(skel->progs.test_int_hook); + link_opts.tracing.cookie = cookie; + lsm_fd = bpf_link_create(prog_fd, 0, BPF_LSM_MAC, &link_opts); + if (!ASSERT_GE(lsm_fd, 0, "lsm.link_create")) + goto cleanup; + + stack_mprotect(); + if (!ASSERT_EQ(errno, EPERM, "stack_mprotect")) + goto cleanup; + + usleep(1); + + ASSERT_EQ(skel->bss->lsm_res, 0x90000000000090L, "fentry_res"); + +cleanup: + if (lsm_fd >= 0) + close(lsm_fd); +} + void test_bpf_cookie(void) { struct test_bpf_cookie *skel; @@ -243,12 +507,20 @@ void test_bpf_cookie(void) if (test__start_subtest("kprobe")) kprobe_subtest(skel); + if (test__start_subtest("multi_kprobe_link_api")) + kprobe_multi_link_api_subtest(); + if (test__start_subtest("multi_kprobe_attach_api")) + kprobe_multi_attach_api_subtest(); if (test__start_subtest("uprobe")) uprobe_subtest(skel); if (test__start_subtest("tracepoint")) tp_subtest(skel); if (test__start_subtest("perf_event")) pe_subtest(skel); + if (test__start_subtest("trampoline")) + tracing_subtest(skel); + if (test__start_subtest("lsm")) + lsm_subtest(skel); test_bpf_cookie__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index b84f859b1267..7ff5fa93d056 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -26,6 +26,7 @@ #include "bpf_iter_bpf_sk_storage_map.skel.h" #include "bpf_iter_test_kern5.skel.h" #include "bpf_iter_test_kern6.skel.h" +#include "bpf_iter_bpf_link.skel.h" static int duration; @@ -34,8 +35,7 @@ static void test_btf_id_or_null(void) struct bpf_iter_test_kern3 *skel; skel = bpf_iter_test_kern3__open_and_load(); - if (CHECK(skel, "bpf_iter_test_kern3__open_and_load", - "skeleton open_and_load unexpectedly succeeded\n")) { + if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) { bpf_iter_test_kern3__destroy(skel); return; } @@ -52,7 +52,7 @@ static void do_dummy_read(struct bpf_program *prog) return; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* not check contents, but ensure read() ends without error */ @@ -87,8 +87,7 @@ static void test_ipv6_route(void) struct bpf_iter_ipv6_route *skel; skel = bpf_iter_ipv6_route__open_and_load(); - if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load")) return; do_dummy_read(skel->progs.dump_ipv6_route); @@ -101,8 +100,7 @@ static void test_netlink(void) struct bpf_iter_netlink *skel; skel = bpf_iter_netlink__open_and_load(); - if (CHECK(!skel, "bpf_iter_netlink__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load")) return; do_dummy_read(skel->progs.dump_netlink); @@ -115,8 +113,7 @@ static void test_bpf_map(void) struct bpf_iter_bpf_map *skel; skel = bpf_iter_bpf_map__open_and_load(); - if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load")) return; do_dummy_read(skel->progs.dump_bpf_map); @@ -129,8 +126,7 @@ static void test_task(void) struct bpf_iter_task *skel; skel = bpf_iter_task__open_and_load(); - if (CHECK(!skel, "bpf_iter_task__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load")) return; do_dummy_read(skel->progs.dump_task); 
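The bpf_iter.c hunks that follow mostly swap verbose CHECK() calls for the terser ASSERT_*() macros around the same do_dummy_read() helper. Its attach/create/read-to-EOF shape, visible piecemeal across the hunks, is sketched below; the function name dummy_read_sketch is hypothetical, and the ASSERT macros and unistd.h come in via the selftest headers:

    /* sketch of the do_dummy_read() pattern used by every dump test here:
     * attach the iter program, create an iterator fd, and read to EOF so
     * the kernel invokes the program over each object; contents are not
     * checked, only that read() finishes without error.
     */
    static void dummy_read_sketch(struct bpf_program *prog)
    {
            struct bpf_link *link;
            char buf[16];
            int iter_fd, len;

            link = bpf_program__attach_iter(prog, NULL);
            if (!ASSERT_OK_PTR(link, "attach_iter"))
                    return;

            iter_fd = bpf_iter_create(bpf_link__fd(link));
            if (!ASSERT_GE(iter_fd, 0, "create_iter"))
                    goto free_link;

            while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
                    ;
            ASSERT_GE(len, 0, "read");

            close(iter_fd);
    free_link:
            bpf_link__destroy(link);
    }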
@@ -138,13 +134,30 @@ static void test_task(void) bpf_iter_task__destroy(skel); } +static void test_task_sleepable(void) +{ + struct bpf_iter_task *skel; + + skel = bpf_iter_task__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_task_sleepable); + + ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0, + "num_expected_failure_copy_from_user_task"); + ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0, + "num_success_copy_from_user_task"); + + bpf_iter_task__destroy(skel); +} + static void test_task_stack(void) { struct bpf_iter_task_stack *skel; skel = bpf_iter_task_stack__open_and_load(); - if (CHECK(!skel, "bpf_iter_task_stack__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load")) return; do_dummy_read(skel->progs.dump_task_stack); @@ -165,24 +178,22 @@ static void test_task_file(void) void *ret; skel = bpf_iter_task_file__open_and_load(); - if (CHECK(!skel, "bpf_iter_task_file__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load")) return; skel->bss->tgid = getpid(); - if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL), - "pthread_create", "pthread_create failed\n")) + if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing, NULL), + "pthread_create")) goto done; do_dummy_read(skel->progs.dump_task_file); - if (CHECK(pthread_join(thread_id, &ret) || ret != NULL, - "pthread_join", "pthread_join failed\n")) + if (!ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL, + "pthread_join")) goto done; - CHECK(skel->bss->count != 0, "check_count", - "invalid non pthread file visit count %d\n", skel->bss->count); + ASSERT_EQ(skel->bss->count, 0, "check_count"); done: bpf_iter_task_file__destroy(skel); @@ -206,7 +217,7 @@ static int do_btf_read(struct bpf_iter_task_btf *skel) return ret; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ); @@ -220,9 +231,8 @@ static int do_btf_read(struct bpf_iter_task_btf *skel) if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno))) goto free_link; - CHECK(strstr(taskbuf, "(struct task_struct)") == NULL, - "check for btf representation of task_struct in iter data", - "struct task_struct not found"); + ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)", + "check for btf representation of task_struct in iter data"); free_link: if (iter_fd > 0) close(iter_fd); @@ -237,8 +247,7 @@ static void test_task_btf(void) int ret; skel = bpf_iter_task_btf__open_and_load(); - if (CHECK(!skel, "bpf_iter_task_btf__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load")) return; bss = skel->bss; @@ -247,12 +256,10 @@ static void test_task_btf(void) if (ret) goto cleanup; - if (CHECK(bss->tasks == 0, "check if iterated over tasks", - "no task iteration, did BPF program run?\n")) + if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?")) goto cleanup; - CHECK(bss->seq_err != 0, "check for unexpected err", - "bpf_seq_printf_btf returned %ld", bss->seq_err); + ASSERT_EQ(bss->seq_err, 0, "check for unexpected err"); cleanup: bpf_iter_task_btf__destroy(skel); @@ -263,8 +270,7 @@ static void test_tcp4(void) struct bpf_iter_tcp4 *skel; skel = bpf_iter_tcp4__open_and_load(); - if (CHECK(!skel, 
"bpf_iter_tcp4__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load")) return; do_dummy_read(skel->progs.dump_tcp4); @@ -277,8 +283,7 @@ static void test_tcp6(void) struct bpf_iter_tcp6 *skel; skel = bpf_iter_tcp6__open_and_load(); - if (CHECK(!skel, "bpf_iter_tcp6__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load")) return; do_dummy_read(skel->progs.dump_tcp6); @@ -291,8 +296,7 @@ static void test_udp4(void) struct bpf_iter_udp4 *skel; skel = bpf_iter_udp4__open_and_load(); - if (CHECK(!skel, "bpf_iter_udp4__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load")) return; do_dummy_read(skel->progs.dump_udp4); @@ -305,8 +309,7 @@ static void test_udp6(void) struct bpf_iter_udp6 *skel; skel = bpf_iter_udp6__open_and_load(); - if (CHECK(!skel, "bpf_iter_udp6__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load")) return; do_dummy_read(skel->progs.dump_udp6); @@ -331,7 +334,7 @@ static void test_unix(void) static int do_read_with_fd(int iter_fd, const char *expected, bool read_one_char) { - int err = -1, len, read_buf_len, start; + int len, read_buf_len, start; char buf[16] = {}; read_buf_len = read_one_char ? 1 : 16; @@ -345,9 +348,7 @@ static int do_read_with_fd(int iter_fd, const char *expected, if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) return -1; - err = strcmp(buf, expected); - if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n", - buf, expected)) + if (!ASSERT_STREQ(buf, expected, "read")) return -1; return 0; @@ -360,19 +361,17 @@ static void test_anon_iter(bool read_one_char) int iter_fd, err; skel = bpf_iter_test_kern1__open_and_load(); - if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load")) return; err = bpf_iter_test_kern1__attach(skel); - if (CHECK(err, "bpf_iter_test_kern1__attach", - "skeleton attach failed\n")) { + if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach")) { goto out; } link = skel->links.dump_task; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto out; do_read_with_fd(iter_fd, "abcd", read_one_char); @@ -405,8 +404,7 @@ static void test_file_iter(void) int err; skel1 = bpf_iter_test_kern1__open_and_load(); - if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load")) return; link = bpf_program__attach_iter(skel1->progs.dump_task, NULL); @@ -429,12 +427,11 @@ static void test_file_iter(void) * should change. 
*/ skel2 = bpf_iter_test_kern2__open_and_load(); - if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load")) goto unlink_path; err = bpf_link__update_program(link, skel2->progs.dump_task); - if (CHECK(err, "update_prog", "update_prog failed\n")) + if (!ASSERT_OK(err, "update_prog")) goto destroy_skel2; do_read(path, "ABCD"); @@ -460,8 +457,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) char *buf; skel = bpf_iter_test_kern4__open(); - if (CHECK(!skel, "bpf_iter_test_kern4__open", - "skeleton open failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open")) return; /* create two maps: bpf program will only do bpf_seq_write @@ -497,8 +493,8 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) } skel->rodata->ret1 = ret1; - if (CHECK(bpf_iter_test_kern4__load(skel), - "bpf_iter_test_kern4__load", "skeleton load failed\n")) + if (!ASSERT_OK(bpf_iter_test_kern4__load(skel), + "bpf_iter_test_kern4__load")) goto free_map2; /* setup filtering map_id in bpf program */ @@ -520,7 +516,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) goto free_map2; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; buf = malloc(expected_read_len); @@ -556,22 +552,16 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) goto free_buf; } - if (CHECK(total_read_len != expected_read_len, "read", - "total len %u, expected len %u\n", total_read_len, - expected_read_len)) + if (!ASSERT_EQ(total_read_len, expected_read_len, "read")) goto free_buf; - if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed", - "expected 1 actual %d\n", skel->bss->map1_accessed)) + if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed")) goto free_buf; - if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed", - "expected 2 actual %d\n", skel->bss->map2_accessed)) + if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed")) goto free_buf; - CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2, - "map2_seqnum", "two different seqnum %lld %lld\n", - skel->bss->map2_seqnum1, skel->bss->map2_seqnum2); + ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum"); free_buf: free(buf); @@ -604,15 +594,13 @@ static void test_bpf_hash_map(void) char buf[64]; skel = bpf_iter_bpf_hash_map__open(); - if (CHECK(!skel, "bpf_iter_bpf_hash_map__open", - "skeleton open failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open")) return; skel->bss->in_test_mode = true; err = bpf_iter_bpf_hash_map__load(skel); - if (CHECK(!skel, "bpf_iter_bpf_hash_map__load", - "skeleton load failed\n")) + if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load")) goto out; /* iterator with hashmap2 and hashmap3 should fail */ @@ -641,7 +629,7 @@ static void test_bpf_hash_map(void) expected_val += val; err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY); - if (CHECK(err, "map_update", "map_update failed\n")) + if (!ASSERT_OK(err, "map_update")) goto out; } @@ -651,7 +639,7 @@ static void test_bpf_hash_map(void) goto out; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* do some tests */ @@ -661,17 +649,11 @@ static void test_bpf_hash_map(void) goto close_iter; /* test results */ - if (CHECK(skel->bss->key_sum_a != expected_key_a, 
- "key_sum_a", "got %u expected %u\n", - skel->bss->key_sum_a, expected_key_a)) + if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a")) goto close_iter; - if (CHECK(skel->bss->key_sum_b != expected_key_b, - "key_sum_b", "got %u expected %u\n", - skel->bss->key_sum_b, expected_key_b)) + if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b")) goto close_iter; - if (CHECK(skel->bss->val_sum != expected_val, - "val_sum", "got %llu expected %llu\n", - skel->bss->val_sum, expected_val)) + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) goto close_iter; close_iter: @@ -700,16 +682,14 @@ static void test_bpf_percpu_hash_map(void) void *val; skel = bpf_iter_bpf_percpu_hash_map__open(); - if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open", - "skeleton open failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open")) return; skel->rodata->num_cpus = bpf_num_possible_cpus(); val = malloc(8 * bpf_num_possible_cpus()); err = bpf_iter_bpf_percpu_hash_map__load(skel); - if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load", - "skeleton load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__load")) goto out; /* update map values here */ @@ -727,7 +707,7 @@ static void test_bpf_percpu_hash_map(void) } err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY); - if (CHECK(err, "map_update", "map_update failed\n")) + if (!ASSERT_OK(err, "map_update")) goto out; } @@ -740,7 +720,7 @@ static void test_bpf_percpu_hash_map(void) goto out; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* do some tests */ @@ -750,17 +730,11 @@ static void test_bpf_percpu_hash_map(void) goto close_iter; /* test results */ - if (CHECK(skel->bss->key_sum_a != expected_key_a, - "key_sum_a", "got %u expected %u\n", - skel->bss->key_sum_a, expected_key_a)) + if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a")) goto close_iter; - if (CHECK(skel->bss->key_sum_b != expected_key_b, - "key_sum_b", "got %u expected %u\n", - skel->bss->key_sum_b, expected_key_b)) + if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b")) goto close_iter; - if (CHECK(skel->bss->val_sum != expected_val, - "val_sum", "got %u expected %u\n", - skel->bss->val_sum, expected_val)) + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) goto close_iter; close_iter: @@ -785,8 +759,7 @@ static void test_bpf_array_map(void) int len, start; skel = bpf_iter_bpf_array_map__open_and_load(); - if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load")) return; map_fd = bpf_map__fd(skel->maps.arraymap1); @@ -799,7 +772,7 @@ static void test_bpf_array_map(void) first_val = val; err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY); - if (CHECK(err, "map_update", "map_update failed\n")) + if (!ASSERT_OK(err, "map_update")) goto out; } @@ -812,7 +785,7 @@ static void test_bpf_array_map(void) goto out; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* do some tests */ @@ -832,21 +805,16 @@ static void test_bpf_array_map(void) res_first_key, res_first_val, first_val)) goto close_iter; - if (CHECK(skel->bss->key_sum != expected_key, - "key_sum", "got %u expected %u\n", - skel->bss->key_sum, expected_key)) + if 
(!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum")) goto close_iter; - if (CHECK(skel->bss->val_sum != expected_val, - "val_sum", "got %llu expected %llu\n", - skel->bss->val_sum, expected_val)) + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) goto close_iter; for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { err = bpf_map_lookup_elem(map_fd, &i, &val); - if (CHECK(err, "map_lookup", "map_lookup failed\n")) + if (!ASSERT_OK(err, "map_lookup")) goto out; - if (CHECK(i != val, "invalid_val", - "got value %llu expected %u\n", val, i)) + if (!ASSERT_EQ(i, val, "invalid_val")) goto out; } @@ -871,16 +839,14 @@ static void test_bpf_percpu_array_map(void) int len; skel = bpf_iter_bpf_percpu_array_map__open(); - if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open", - "skeleton open failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open")) return; skel->rodata->num_cpus = bpf_num_possible_cpus(); val = malloc(8 * bpf_num_possible_cpus()); err = bpf_iter_bpf_percpu_array_map__load(skel); - if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load", - "skeleton load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__load")) goto out; /* update map values here */ @@ -894,7 +860,7 @@ static void test_bpf_percpu_array_map(void) } err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY); - if (CHECK(err, "map_update", "map_update failed\n")) + if (!ASSERT_OK(err, "map_update")) goto out; } @@ -907,7 +873,7 @@ static void test_bpf_percpu_array_map(void) goto out; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* do some tests */ @@ -917,13 +883,9 @@ static void test_bpf_percpu_array_map(void) goto close_iter; /* test results */ - if (CHECK(skel->bss->key_sum != expected_key, - "key_sum", "got %u expected %u\n", - skel->bss->key_sum, expected_key)) + if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum")) goto close_iter; - if (CHECK(skel->bss->val_sum != expected_val, - "val_sum", "got %u expected %u\n", - skel->bss->val_sum, expected_val)) + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) goto close_iter; close_iter: @@ -948,17 +910,16 @@ static void test_bpf_sk_storage_delete(void) char buf[64]; skel = bpf_iter_bpf_sk_storage_helpers__open_and_load(); - if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load")) return; map_fd = bpf_map__fd(skel->maps.sk_stg_map); sock_fd = socket(AF_INET6, SOCK_STREAM, 0); - if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno)) + if (!ASSERT_GE(sock_fd, 0, "socket")) goto out; err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST); - if (CHECK(err, "map_update", "map_update failed\n")) + if (!ASSERT_OK(err, "map_update")) goto out; memset(&linfo, 0, sizeof(linfo)); @@ -971,7 +932,7 @@ static void test_bpf_sk_storage_delete(void) goto out; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* do some tests */ @@ -1009,22 +970,21 @@ static void test_bpf_sk_storage_get(void) int sock_fd = -1; skel = bpf_iter_bpf_sk_storage_helpers__open_and_load(); - if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, 
"bpf_iter_bpf_sk_storage_helpers__open_and_load")) return; sock_fd = socket(AF_INET6, SOCK_STREAM, 0); - if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno)) + if (!ASSERT_GE(sock_fd, 0, "socket")) goto out; err = listen(sock_fd, 1); - if (CHECK(err != 0, "listen", "errno: %d\n", errno)) + if (!ASSERT_OK(err, "listen")) goto close_socket; map_fd = bpf_map__fd(skel->maps.sk_stg_map); err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST); - if (CHECK(err, "bpf_map_update_elem", "map_update_failed\n")) + if (!ASSERT_OK(err, "bpf_map_update_elem")) goto close_socket; do_dummy_read(skel->progs.fill_socket_owner); @@ -1060,15 +1020,14 @@ static void test_bpf_sk_storage_map(void) char buf[64]; skel = bpf_iter_bpf_sk_storage_map__open_and_load(); - if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load")) return; map_fd = bpf_map__fd(skel->maps.sk_stg_map); num_sockets = ARRAY_SIZE(sock_fd); for (i = 0; i < num_sockets; i++) { sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0); - if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno)) + if (!ASSERT_GE(sock_fd[i], 0, "socket")) goto out; val = i + 1; @@ -1076,7 +1035,7 @@ static void test_bpf_sk_storage_map(void) err = bpf_map_update_elem(map_fd, &sock_fd[i], &val, BPF_NOEXIST); - if (CHECK(err, "map_update", "map_update failed\n")) + if (!ASSERT_OK(err, "map_update")) goto out; } @@ -1089,7 +1048,7 @@ static void test_bpf_sk_storage_map(void) goto out; iter_fd = bpf_iter_create(bpf_link__fd(link)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto free_link; /* do some tests */ @@ -1099,14 +1058,10 @@ static void test_bpf_sk_storage_map(void) goto close_iter; /* test results */ - if (CHECK(skel->bss->ipv6_sk_count != num_sockets, - "ipv6_sk_count", "got %u expected %u\n", - skel->bss->ipv6_sk_count, num_sockets)) + if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count")) goto close_iter; - if (CHECK(skel->bss->val_sum != expected_val, - "val_sum", "got %u expected %u\n", - skel->bss->val_sum, expected_val)) + if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum")) goto close_iter; close_iter: @@ -1129,8 +1084,7 @@ static void test_rdonly_buf_out_of_bound(void) struct bpf_link *link; skel = bpf_iter_test_kern5__open_and_load(); - if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load", - "skeleton open_and_load failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load")) return; memset(&linfo, 0, sizeof(linfo)); @@ -1149,11 +1103,23 @@ static void test_buf_neg_offset(void) struct bpf_iter_test_kern6 *skel; skel = bpf_iter_test_kern6__open_and_load(); - if (CHECK(skel, "bpf_iter_test_kern6__open_and_load", - "skeleton open_and_load unexpected success\n")) + if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load")) bpf_iter_test_kern6__destroy(skel); } +static void test_link_iter(void) +{ + struct bpf_iter_bpf_link *skel; + + skel = bpf_iter_bpf_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load")) + return; + + do_dummy_read(skel->progs.dump_bpf_link); + + bpf_iter_bpf_link__destroy(skel); +} + #define CMP_BUFFER_SIZE 1024 static char task_vma_output[CMP_BUFFER_SIZE]; static char proc_maps_output[CMP_BUFFER_SIZE]; @@ -1174,8 +1140,6 @@ static void str_strip_first_line(char *str) *dst = '\0'; } -#define min(a, b) ((a) < (b) ? 
(a) : (b)) - static void test_task_vma(void) { int err, iter_fd = -1, proc_maps_fd = -1; @@ -1184,13 +1148,13 @@ static void test_task_vma(void) char maps_path[64]; skel = bpf_iter_task_vma__open(); - if (CHECK(!skel, "bpf_iter_task_vma__open", "skeleton open failed\n")) + if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open")) return; skel->bss->pid = getpid(); err = bpf_iter_task_vma__load(skel); - if (CHECK(err, "bpf_iter_task_vma__load", "skeleton load failed\n")) + if (!ASSERT_OK(err, "bpf_iter_task_vma__load")) goto out; skel->links.proc_maps = bpf_program__attach_iter( @@ -1202,7 +1166,7 @@ static void test_task_vma(void) } iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps)); - if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + if (!ASSERT_GE(iter_fd, 0, "create_iter")) goto out; /* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks @@ -1211,10 +1175,10 @@ static void test_task_vma(void) len = 0; while (len < CMP_BUFFER_SIZE) { err = read_fd_into_buffer(iter_fd, task_vma_output + len, - min(read_size, CMP_BUFFER_SIZE - len)); + MIN(read_size, CMP_BUFFER_SIZE - len)); if (!err) break; - if (CHECK(err < 0, "read_iter_fd", "read_iter_fd failed\n")) + if (!ASSERT_GE(err, 0, "read_iter_fd")) goto out; len += err; } @@ -1222,18 +1186,17 @@ static void test_task_vma(void) /* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */ snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid); proc_maps_fd = open(maps_path, O_RDONLY); - if (CHECK(proc_maps_fd < 0, "open_proc_maps", "open_proc_maps failed\n")) + if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps")) goto out; err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE); - if (CHECK(err < 0, "read_prog_maps_fd", "read_prog_maps_fd failed\n")) + if (!ASSERT_GE(err, 0, "read_prog_maps_fd")) goto out; /* strip and compare the first line of the two files */ str_strip_first_line(task_vma_output); str_strip_first_line(proc_maps_output); - CHECK(strcmp(task_vma_output, proc_maps_output), "compare_output", - "found mismatch\n"); + ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output"); out: close(proc_maps_fd); close(iter_fd); @@ -1252,6 +1215,8 @@ void test_bpf_iter(void) test_bpf_map(); if (test__start_subtest("task")) test_task(); + if (test__start_subtest("task_sleepable")) + test_task_sleepable(); if (test__start_subtest("task_stack")) test_task_stack(); if (test__start_subtest("task_file")) @@ -1300,4 +1265,6 @@ void test_bpf_iter(void) test_rdonly_buf_out_of_bound(); if (test__start_subtest("buf-neg-offset")) test_buf_neg_offset(); + if (test__start_subtest("link-iter")) + test_link_iter(); } diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c new file mode 100644 index 000000000000..ee725d4d98a5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Amazon.com Inc. or its affiliates. 
*/ +#include <sys/socket.h> +#include <sys/un.h> +#include <test_progs.h> +#include "bpf_iter_setsockopt_unix.skel.h" + +#define NR_CASES 5 + +static int create_unix_socket(struct bpf_iter_setsockopt_unix *skel) +{ + struct sockaddr_un addr = { + .sun_family = AF_UNIX, + .sun_path = "", + }; + socklen_t len; + int fd, err; + + fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (!ASSERT_NEQ(fd, -1, "socket")) + return -1; + + len = offsetof(struct sockaddr_un, sun_path); + err = bind(fd, (struct sockaddr *)&addr, len); + if (!ASSERT_OK(err, "bind")) + return -1; + + len = sizeof(addr); + err = getsockname(fd, (struct sockaddr *)&addr, &len); + if (!ASSERT_OK(err, "getsockname")) + return -1; + + memcpy(&skel->bss->sun_path, &addr.sun_path, + len - offsetof(struct sockaddr_un, sun_path)); + + return fd; +} + +static void test_sndbuf(struct bpf_iter_setsockopt_unix *skel, int fd) +{ + socklen_t optlen; + int i, err; + + for (i = 0; i < NR_CASES; i++) { + if (!ASSERT_NEQ(skel->data->sndbuf_getsockopt[i], -1, + "bpf_(get|set)sockopt")) + return; + + err = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, + &(skel->data->sndbuf_setsockopt[i]), + sizeof(skel->data->sndbuf_setsockopt[i])); + if (!ASSERT_OK(err, "setsockopt")) + return; + + optlen = sizeof(skel->bss->sndbuf_getsockopt_expected[i]); + err = getsockopt(fd, SOL_SOCKET, SO_SNDBUF, + &(skel->bss->sndbuf_getsockopt_expected[i]), + &optlen); + if (!ASSERT_OK(err, "getsockopt")) + return; + + if (!ASSERT_EQ(skel->data->sndbuf_getsockopt[i], + skel->bss->sndbuf_getsockopt_expected[i], + "bpf_(get|set)sockopt")) + return; + } +} + +void test_bpf_iter_setsockopt_unix(void) +{ + struct bpf_iter_setsockopt_unix *skel; + int err, unix_fd, iter_fd; + char buf; + + skel = bpf_iter_setsockopt_unix__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + + unix_fd = create_unix_socket(skel); + if (!ASSERT_NEQ(unix_fd, -1, "create_unix_server")) + goto destroy; + + skel->links.change_sndbuf = bpf_program__attach_iter(skel->progs.change_sndbuf, NULL); + if (!ASSERT_OK_PTR(skel->links.change_sndbuf, "bpf_program__attach_iter")) + goto destroy; + + iter_fd = bpf_iter_create(bpf_link__fd(skel->links.change_sndbuf)); + if (!ASSERT_GE(iter_fd, 0, "bpf_iter_create")) + goto destroy; + + while ((err = read(iter_fd, &buf, sizeof(buf))) == -1 && + errno == EAGAIN) + ; + if (!ASSERT_OK(err, "read iter error")) + goto destroy; + + test_sndbuf(skel, unix_fd); +destroy: + bpf_iter_setsockopt_unix__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c new file mode 100644 index 000000000000..a4d0cc9d3367 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <unistd.h> +#include <pthread.h> +#include <sys/mman.h> +#include <stdatomic.h> +#include <test_progs.h> +#include <sys/syscall.h> +#include <linux/module.h> +#include <linux/userfaultfd.h> + +#include "ksym_race.skel.h" +#include "bpf_mod_race.skel.h" +#include "kfunc_call_race.skel.h" + +/* This test crafts a race between btf_try_get_module and do_init_module, and + * checks whether btf_try_get_module handles the invocation for a well-formed + * but uninitialized module correctly. Unless the module has completed its + * initcalls, the verifier should fail the program load and return ENXIO. 
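The userfaultfd machinery the comment above relies on can be seen in isolation below. This is a standalone sketch, not part of the patch; depending on the kernel it may need vm.unprivileged_userfaultfd=1 or suitable capabilities, and error handling is reduced to bare exits.

#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/userfaultfd.h>

static void *toucher(void *page)
{
	/* first access to the armed page sleeps in the fault handler */
	return (void *)(long)*(volatile char *)page;
}

int main(void)
{
	struct uffdio_register reg = {};
	struct uffdio_api api = { .api = UFFD_API };
	struct uffd_msg msg;
	pthread_t t;
	void *page;
	int uffd;

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
	page = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (uffd < 0 || page == MAP_FAILED || ioctl(uffd, UFFDIO_API, &api))
		return 1;

	reg.range.start = (unsigned long)page;
	reg.range.len = 4096;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		return 1;

	pthread_create(&t, NULL, toucher, page);

	/* the pending fault shows up as a message on the descriptor */
	if (read(uffd, &msg, sizeof(msg)) == sizeof(msg) &&
	    msg.event == UFFD_EVENT_PAGEFAULT)
		printf("fault at %llx\n",
		       (unsigned long long)msg.arg.pagefault.address);

	close(uffd);		/* releases the thread, page reads as zeroes */
	pthread_join(t, NULL);
	return 0;
}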
+ *
+ * userfaultfd is used to trigger a fault in an fmod_ret program, and make it
+ * sleep, then the BPF program is loaded and the return value from the
+ * verifier is inspected. After this, the userfaultfd is closed so that the
+ * module loading thread makes forward progress, and fmod_ret injects an
+ * error so that the module load fails and it is freed.
+ *
+ * If the verifier succeeded in loading the supplied program, it will end up
+ * taking a reference to the freed module, and trigger a crash when the
+ * program fd is closed later. This is true for both kfuncs and ksyms. In
+ * both cases, the crash is triggered inside bpf_prog_free_deferred, when the
+ * module reference is finally released.
+ */
+
+struct test_config {
+	const char *str_open;
+	void *(*bpf_open_and_load)();
+	void (*bpf_destroy)(void *);
+};
+
+enum bpf_test_state {
+	_TS_INVALID,
+	TS_MODULE_LOAD,
+	TS_MODULE_LOAD_FAIL,
+};
+
+static _Atomic enum bpf_test_state state = _TS_INVALID;
+
+static int sys_finit_module(int fd, const char *param_values, int flags)
+{
+	return syscall(__NR_finit_module, fd, param_values, flags);
+}
+
+static int sys_delete_module(const char *name, unsigned int flags)
+{
+	return syscall(__NR_delete_module, name, flags);
+}
+
+static int load_module(const char *mod)
+{
+	int ret, fd;
+
+	fd = open("bpf_testmod.ko", O_RDONLY);
+	if (fd < 0)
+		return fd;
+
+	ret = sys_finit_module(fd, "", 0);
+	close(fd);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static void *load_module_thread(void *p)
+{
+	if (!ASSERT_NEQ(load_module("bpf_testmod.ko"), 0, "load_module_thread must fail"))
+		atomic_store(&state, TS_MODULE_LOAD);
+	else
+		atomic_store(&state, TS_MODULE_LOAD_FAIL);
+	return p;
+}
+
+static int sys_userfaultfd(int flags)
+{
+	return syscall(__NR_userfaultfd, flags);
+}
+
+static int test_setup_uffd(void *fault_addr)
+{
+	struct uffdio_register uffd_register = {};
+	struct uffdio_api uffd_api = {};
+	int uffd;
+
+	uffd = sys_userfaultfd(O_CLOEXEC);
+	if (uffd < 0)
+		return -errno;
+
+	uffd_api.api = UFFD_API;
+	uffd_api.features = 0;
+	if (ioctl(uffd, UFFDIO_API, &uffd_api)) {
+		close(uffd);
+		return -1;
+	}
+
+	uffd_register.range.start = (unsigned long)fault_addr;
+	uffd_register.range.len = 4096;
+	uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING;
+	if (ioctl(uffd, UFFDIO_REGISTER, &uffd_register)) {
+		close(uffd);
+		return -1;
+	}
+	return uffd;
+}
+
+static void test_bpf_mod_race_config(const struct test_config *config)
+{
+	void *fault_addr, *skel_fail;
+	struct bpf_mod_race *skel;
+	struct uffd_msg uffd_msg;
+	pthread_t load_mod_thrd;
+	_Atomic int *blockingp;
+	int uffd, ret;
+
+	fault_addr = mmap(0, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+	if (!ASSERT_NEQ(fault_addr, MAP_FAILED, "mmap for uffd registration"))
+		return;
+
+	if (!ASSERT_OK(sys_delete_module("bpf_testmod", 0), "unload bpf_testmod"))
+		goto end_mmap;
+
+	skel = bpf_mod_race__open();
+	if (!ASSERT_OK_PTR(skel, "bpf_mod_kfunc_race__open"))
+		goto end_module;
+
+	skel->rodata->bpf_mod_race_config.tgid = getpid();
+	skel->rodata->bpf_mod_race_config.inject_error = -4242;
+	skel->rodata->bpf_mod_race_config.fault_addr = fault_addr;
+	if (!ASSERT_OK(bpf_mod_race__load(skel), "bpf_mod___load"))
+		goto end_destroy;
+	blockingp = (_Atomic int *)&skel->bss->bpf_blocking;
+
+	if (!ASSERT_OK(bpf_mod_race__attach(skel), "bpf_mod_kfunc_race__attach"))
+		goto end_destroy;
+
+	uffd = test_setup_uffd(fault_addr);
+	if (!ASSERT_GE(uffd, 0, "userfaultfd open + register address"))
+		goto end_destroy;
+
+	if
(!ASSERT_OK(pthread_create(&load_mod_thrd, NULL, load_module_thread, NULL), + "load module thread")) + goto end_uffd; + + /* Now, we either fail loading module, or block in bpf prog, spin to find out */ + while (!atomic_load(&state) && !atomic_load(blockingp)) + ; + if (!ASSERT_EQ(state, _TS_INVALID, "module load should block")) + goto end_join; + if (!ASSERT_EQ(*blockingp, 1, "module load blocked")) { + pthread_kill(load_mod_thrd, SIGKILL); + goto end_uffd; + } + + /* We might have set bpf_blocking to 1, but may have not blocked in + * bpf_copy_from_user. Read userfaultfd descriptor to verify that. + */ + if (!ASSERT_EQ(read(uffd, &uffd_msg, sizeof(uffd_msg)), sizeof(uffd_msg), + "read uffd block event")) + goto end_join; + if (!ASSERT_EQ(uffd_msg.event, UFFD_EVENT_PAGEFAULT, "read uffd event is pagefault")) + goto end_join; + + /* We know that load_mod_thrd is blocked in the fmod_ret program, the + * module state is still MODULE_STATE_COMING because mod->init hasn't + * returned. This is the time we try to load a program calling kfunc and + * check if we get ENXIO from verifier. + */ + skel_fail = config->bpf_open_and_load(); + ret = errno; + if (!ASSERT_EQ(skel_fail, NULL, config->str_open)) { + /* Close uffd to unblock load_mod_thrd */ + close(uffd); + uffd = -1; + while (atomic_load(blockingp) != 2) + ; + ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu"); + config->bpf_destroy(skel_fail); + goto end_join; + + } + ASSERT_EQ(ret, ENXIO, "verifier returns ENXIO"); + ASSERT_EQ(skel->data->res_try_get_module, false, "btf_try_get_module == false"); + + close(uffd); + uffd = -1; +end_join: + pthread_join(load_mod_thrd, NULL); + if (uffd < 0) + ASSERT_EQ(atomic_load(&state), TS_MODULE_LOAD_FAIL, "load_mod_thrd success"); +end_uffd: + if (uffd >= 0) + close(uffd); +end_destroy: + bpf_mod_race__destroy(skel); + ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu"); +end_module: + sys_delete_module("bpf_testmod", 0); + ASSERT_OK(load_module("bpf_testmod.ko"), "restore bpf_testmod"); +end_mmap: + munmap(fault_addr, 4096); + atomic_store(&state, _TS_INVALID); +} + +static const struct test_config ksym_config = { + .str_open = "ksym_race__open_and_load", + .bpf_open_and_load = (void *)ksym_race__open_and_load, + .bpf_destroy = (void *)ksym_race__destroy, +}; + +static const struct test_config kfunc_config = { + .str_open = "kfunc_call_race__open_and_load", + .bpf_open_and_load = (void *)kfunc_call_race__open_and_load, + .bpf_destroy = (void *)kfunc_call_race__destroy, +}; + +void serial_test_bpf_mod_race(void) +{ + if (test__start_subtest("ksym (used_btfs UAF)")) + test_bpf_mod_race_config(&ksym_config); + if (test__start_subtest("kfunc (kfunc_btf_tab UAF)")) + test_bpf_mod_race_config(&kfunc_config); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c new file mode 100644 index 000000000000..dd30b1e3a67c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> +#include "test_bpf_nf.skel.h" + +enum { + TEST_XDP, + TEST_TC_BPF, +}; + +void test_bpf_nf_ct(int mode) +{ + struct test_bpf_nf *skel; + int prog_fd, err; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + skel = test_bpf_nf__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_bpf_nf__open_and_load")) + return; + + if (mode == TEST_XDP) + prog_fd = bpf_program__fd(skel->progs.nf_xdp_ct_test); + else + prog_fd = 
bpf_program__fd(skel->progs.nf_skb_ct_test); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "bpf_prog_test_run")) + goto end; + + ASSERT_EQ(skel->bss->test_einval_bpf_tuple, -EINVAL, "Test EINVAL for NULL bpf_tuple"); + ASSERT_EQ(skel->bss->test_einval_reserved, -EINVAL, "Test EINVAL for reserved not set to 0"); + ASSERT_EQ(skel->bss->test_einval_netns_id, -EINVAL, "Test EINVAL for netns_id < -1"); + ASSERT_EQ(skel->bss->test_einval_len_opts, -EINVAL, "Test EINVAL for len__opts != NF_BPF_CT_OPTS_SZ"); + ASSERT_EQ(skel->bss->test_eproto_l4proto, -EPROTO, "Test EPROTO for l4proto != TCP or UDP"); + ASSERT_EQ(skel->bss->test_enonet_netns_id, -ENONET, "Test ENONET for bad but valid netns_id"); + ASSERT_EQ(skel->bss->test_enoent_lookup, -ENOENT, "Test ENOENT for failed lookup"); + ASSERT_EQ(skel->bss->test_eafnosupport, -EAFNOSUPPORT, "Test EAFNOSUPPORT for invalid len__tuple"); +end: + test_bpf_nf__destroy(skel); +} + +void test_bpf_nf(void) +{ + if (test__start_subtest("xdp-ct")) + test_bpf_nf_ct(TEST_XDP); + if (test__start_subtest("tc-bpf-ct")) + test_bpf_nf_ct(TEST_TC_BPF); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index 8f7a1cef7d87..e9a9a31b2ffe 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -10,8 +10,6 @@ #include "bpf_tcp_nogpl.skel.h" #include "bpf_dctcp_release.skel.h" -#define min(a, b) ((a) < (b) ? (a) : (b)) - #ifndef ENOTSUPP #define ENOTSUPP 524 #endif @@ -53,7 +51,7 @@ static void *server(void *arg) while (bytes < total_bytes && !READ_ONCE(stop)) { nr_sent = send(fd, &batch, - min(total_bytes - bytes, sizeof(batch)), 0); + MIN(total_bytes - bytes, sizeof(batch)), 0); if (nr_sent == -1 && errno == EINTR) continue; if (nr_sent == -1) { @@ -146,7 +144,7 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map) /* recv total_bytes */ while (bytes < total_bytes && !READ_ONCE(stop)) { nr_recv = recv(fd, &batch, - min(total_bytes - bytes, sizeof(batch)), 0); + MIN(total_bytes - bytes, sizeof(batch)), 0); if (nr_recv == -1 && errno == EINTR) continue; if (nr_recv == -1) diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 8ba53acf9eb4..ba5bde53d418 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -8,7 +8,6 @@ #include <linux/filter.h> #include <linux/unistd.h> #include <bpf/bpf.h> -#include <sys/resource.h> #include <libelf.h> #include <gelf.h> #include <string.h> @@ -3939,6 +3938,25 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Invalid component_idx", }, { + .descr = "decl_tag test #15, func, invalid func proto", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_DECL_TAG_ENC(NAME_TBD, 3, 0), /* [2] */ + BTF_FUNC_ENC(NAME_TBD, 8), /* [3] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag\0func"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Invalid type_id", +}, +{ .descr = "type_tag test #1", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ @@ -3955,6 +3973,105 @@ static struct btf_raw_test raw_tests[] = { .value_type_id = 1, .max_entries = 1, }, +{ + .descr = "type_tag test #2, type tag order", + .raw_types = { + BTF_TYPE_INT_ENC(0, 
BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_CONST_ENC(3), /* [2] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [3] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Type tags don't precede modifiers", +}, +{ + .descr = "type_tag test #3, type tag order", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */ + BTF_CONST_ENC(4), /* [3] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Type tags don't precede modifiers", +}, +{ + .descr = "type_tag test #4, type tag order", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [2] */ + BTF_CONST_ENC(4), /* [3] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Type tags don't precede modifiers", +}, +{ + .descr = "type_tag test #5, type tag order", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */ + BTF_CONST_ENC(1), /* [3] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 2), /* [4] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, +}, +{ + .descr = "type_tag test #6, type tag order", + .raw_types = { + BTF_PTR_ENC(2), /* [1] */ + BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */ + BTF_CONST_ENC(4), /* [3] */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [4] */ + BTF_PTR_ENC(6), /* [5] */ + BTF_CONST_ENC(2), /* [6] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0tag"), + .map_type = BPF_MAP_TYPE_ARRAY, + .map_name = "tag_type_check_btf", + .key_size = sizeof(int), + .value_size = 4, + .key_type_id = 1, + .value_type_id = 1, + .max_entries = 1, + .btf_load_err = true, + .err_str = "Type tags don't precede modifiers", +}, }; /* struct btf_raw_test raw_tests[] */ @@ -4560,6 +4677,8 @@ static void do_test_file(unsigned int test_num) has_btf_ext = btf_ext != NULL; btf_ext__free(btf_ext); + /* temporary disable LIBBPF_STRICT_MAP_DEFINITIONS to test legacy maps */ + libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS); obj = bpf_object__open(test->file); err = libbpf_get_error(obj); if (CHECK(err, "obj: %d", err)) @@ -4684,6 +4803,8 @@ skip: fprintf(stderr, "OK"); done: + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + btf__free(btf); free(func_info); bpf_object__close(obj); @@ -6533,7 +6654,7 @@ done: static void do_test_info_raw(unsigned int test_num) { const struct prog_info_raw_test *test = &info_raw_tests[test_num - 1]; - unsigned int raw_btf_size, linfo_str_off, linfo_size; + unsigned int raw_btf_size, linfo_str_off, linfo_size = 0; int btf_fd = -1, prog_fd = -1, err = 0; void *raw_btf, *patched_linfo = NULL; const char *ret_next_str; diff --git 
a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index 9e26903f9170..5fce7008d1ff 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -148,22 +148,38 @@ static void test_btf_dump_incremental(void) /* First, generate BTF corresponding to the following C code: * - * enum { VAL = 1 }; + * enum x; + * + * enum x { X = 1 }; + * + * enum { Y = 1 }; + * + * struct s; * * struct s { int x; }; * */ + id = btf__add_enum(btf, "x", 4); + ASSERT_EQ(id, 1, "enum_declaration_id"); + id = btf__add_enum(btf, "x", 4); + ASSERT_EQ(id, 2, "named_enum_id"); + err = btf__add_enum_value(btf, "X", 1); + ASSERT_OK(err, "named_enum_val_ok"); + id = btf__add_enum(btf, NULL, 4); - ASSERT_EQ(id, 1, "enum_id"); - err = btf__add_enum_value(btf, "VAL", 1); - ASSERT_OK(err, "enum_val_ok"); + ASSERT_EQ(id, 3, "anon_enum_id"); + err = btf__add_enum_value(btf, "Y", 1); + ASSERT_OK(err, "anon_enum_val_ok"); id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED); - ASSERT_EQ(id, 2, "int_id"); + ASSERT_EQ(id, 4, "int_id"); + + id = btf__add_fwd(btf, "s", BTF_FWD_STRUCT); + ASSERT_EQ(id, 5, "fwd_id"); id = btf__add_struct(btf, "s", 4); - ASSERT_EQ(id, 3, "struct_id"); - err = btf__add_field(btf, "x", 2, 0, 0); + ASSERT_EQ(id, 6, "struct_id"); + err = btf__add_field(btf, "x", 4, 0, 0); ASSERT_OK(err, "field_ok"); for (i = 1; i < btf__type_cnt(btf); i++) { @@ -173,11 +189,20 @@ static void test_btf_dump_incremental(void) fflush(dump_buf_file); dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */ + ASSERT_STREQ(dump_buf, +"enum x;\n" +"\n" +"enum x {\n" +" X = 1,\n" +"};\n" +"\n" "enum {\n" -" VAL = 1,\n" +" Y = 1,\n" "};\n" "\n" +"struct s;\n" +"\n" "struct s {\n" " int x;\n" "};\n\n", "c_dump1"); @@ -199,10 +224,12 @@ static void test_btf_dump_incremental(void) fseek(dump_buf_file, 0, SEEK_SET); id = btf__add_struct(btf, "s", 4); - ASSERT_EQ(id, 4, "struct_id"); - err = btf__add_field(btf, "x", 1, 0, 0); + ASSERT_EQ(id, 7, "struct_id"); + err = btf__add_field(btf, "x", 2, 0, 0); + ASSERT_OK(err, "field_ok"); + err = btf__add_field(btf, "y", 3, 32, 0); ASSERT_OK(err, "field_ok"); - err = btf__add_field(btf, "s", 3, 32, 0); + err = btf__add_field(btf, "s", 6, 64, 0); ASSERT_OK(err, "field_ok"); for (i = 1; i < btf__type_cnt(btf); i++) { @@ -214,9 +241,10 @@ static void test_btf_dump_incremental(void) dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */ ASSERT_STREQ(dump_buf, "struct s___2 {\n" +" enum x x;\n" " enum {\n" -" VAL___2 = 1,\n" -" } x;\n" +" Y___2 = 1,\n" +" } y;\n" " struct s s;\n" "};\n\n" , "c_dump1"); diff --git a/tools/testing/selftests/bpf/prog_tests/btf_tag.c b/tools/testing/selftests/bpf/prog_tests/btf_tag.c index 88d63e23e35f..071430cd54de 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_tag.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_tag.c @@ -1,19 +1,22 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2021 Facebook */ #include <test_progs.h> -#include "btf_decl_tag.skel.h" +#include <bpf/btf.h> +#include "test_btf_decl_tag.skel.h" /* struct btf_type_tag_test is referenced in btf_type_tag.skel.h */ struct btf_type_tag_test { int **p; }; #include "btf_type_tag.skel.h" +#include "btf_type_tag_user.skel.h" +#include "btf_type_tag_percpu.skel.h" static void test_btf_decl_tag(void) { - struct btf_decl_tag *skel; + struct test_btf_decl_tag *skel; - skel = btf_decl_tag__open_and_load(); + skel = test_btf_decl_tag__open_and_load(); if 
(!ASSERT_OK_PTR(skel, "btf_decl_tag")) return; @@ -22,7 +25,7 @@ static void test_btf_decl_tag(void) test__skip(); } - btf_decl_tag__destroy(skel); + test_btf_decl_tag__destroy(skel); } static void test_btf_type_tag(void) @@ -41,10 +44,206 @@ static void test_btf_type_tag(void) btf_type_tag__destroy(skel); } +/* loads vmlinux_btf as well as module_btf. If the caller passes NULL as + * module_btf, it will not load module btf. + * + * Returns 0 on success. + * Return -1 On error. In case of error, the loaded btf will be freed and the + * input parameters will be set to pointing to NULL. + */ +static int load_btfs(struct btf **vmlinux_btf, struct btf **module_btf, + bool needs_vmlinux_tag) +{ + const char *module_name = "bpf_testmod"; + __s32 type_id; + + if (!env.has_testmod) { + test__skip(); + return -1; + } + + *vmlinux_btf = btf__load_vmlinux_btf(); + if (!ASSERT_OK_PTR(*vmlinux_btf, "could not load vmlinux BTF")) + return -1; + + if (!needs_vmlinux_tag) + goto load_module_btf; + + /* skip the test if the vmlinux does not have __user tags */ + type_id = btf__find_by_name_kind(*vmlinux_btf, "user", BTF_KIND_TYPE_TAG); + if (type_id <= 0) { + printf("%s:SKIP: btf_type_tag attribute not in vmlinux btf", __func__); + test__skip(); + goto free_vmlinux_btf; + } + +load_module_btf: + /* skip loading module_btf, if not requested by caller */ + if (!module_btf) + return 0; + + *module_btf = btf__load_module_btf(module_name, *vmlinux_btf); + if (!ASSERT_OK_PTR(*module_btf, "could not load module BTF")) + goto free_vmlinux_btf; + + /* skip the test if the module does not have __user tags */ + type_id = btf__find_by_name_kind(*module_btf, "user", BTF_KIND_TYPE_TAG); + if (type_id <= 0) { + printf("%s:SKIP: btf_type_tag attribute not in %s", __func__, module_name); + test__skip(); + goto free_module_btf; + } + + return 0; + +free_module_btf: + btf__free(*module_btf); +free_vmlinux_btf: + btf__free(*vmlinux_btf); + + *vmlinux_btf = NULL; + if (module_btf) + *module_btf = NULL; + return -1; +} + +static void test_btf_type_tag_mod_user(bool load_test_user1) +{ + struct btf *vmlinux_btf = NULL, *module_btf = NULL; + struct btf_type_tag_user *skel; + int err; + + if (load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false)) + return; + + skel = btf_type_tag_user__open(); + if (!ASSERT_OK_PTR(skel, "btf_type_tag_user")) + goto cleanup; + + bpf_program__set_autoload(skel->progs.test_sys_getsockname, false); + if (load_test_user1) + bpf_program__set_autoload(skel->progs.test_user2, false); + else + bpf_program__set_autoload(skel->progs.test_user1, false); + + err = btf_type_tag_user__load(skel); + ASSERT_ERR(err, "btf_type_tag_user"); + + btf_type_tag_user__destroy(skel); + +cleanup: + btf__free(module_btf); + btf__free(vmlinux_btf); +} + +static void test_btf_type_tag_vmlinux_user(void) +{ + struct btf_type_tag_user *skel; + struct btf *vmlinux_btf = NULL; + int err; + + if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true)) + return; + + skel = btf_type_tag_user__open(); + if (!ASSERT_OK_PTR(skel, "btf_type_tag_user")) + goto cleanup; + + bpf_program__set_autoload(skel->progs.test_user2, false); + bpf_program__set_autoload(skel->progs.test_user1, false); + + err = btf_type_tag_user__load(skel); + ASSERT_ERR(err, "btf_type_tag_user"); + + btf_type_tag_user__destroy(skel); + +cleanup: + btf__free(vmlinux_btf); +} + +static void test_btf_type_tag_mod_percpu(bool load_test_percpu1) +{ + struct btf *vmlinux_btf, *module_btf; + struct btf_type_tag_percpu *skel; + int err; + + if 
(load_btfs(&vmlinux_btf, &module_btf, /*needs_vmlinux_tag=*/false)) + return; + + skel = btf_type_tag_percpu__open(); + if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu")) + goto cleanup; + + bpf_program__set_autoload(skel->progs.test_percpu_load, false); + bpf_program__set_autoload(skel->progs.test_percpu_helper, false); + if (load_test_percpu1) + bpf_program__set_autoload(skel->progs.test_percpu2, false); + else + bpf_program__set_autoload(skel->progs.test_percpu1, false); + + err = btf_type_tag_percpu__load(skel); + ASSERT_ERR(err, "btf_type_tag_percpu"); + + btf_type_tag_percpu__destroy(skel); + +cleanup: + btf__free(module_btf); + btf__free(vmlinux_btf); +} + +static void test_btf_type_tag_vmlinux_percpu(bool load_test) +{ + struct btf_type_tag_percpu *skel; + struct btf *vmlinux_btf = NULL; + int err; + + if (load_btfs(&vmlinux_btf, NULL, /*needs_vmlinux_tag=*/true)) + return; + + skel = btf_type_tag_percpu__open(); + if (!ASSERT_OK_PTR(skel, "btf_type_tag_percpu")) + goto cleanup; + + bpf_program__set_autoload(skel->progs.test_percpu2, false); + bpf_program__set_autoload(skel->progs.test_percpu1, false); + if (load_test) { + bpf_program__set_autoload(skel->progs.test_percpu_helper, false); + + err = btf_type_tag_percpu__load(skel); + ASSERT_ERR(err, "btf_type_tag_percpu_load"); + } else { + bpf_program__set_autoload(skel->progs.test_percpu_load, false); + + err = btf_type_tag_percpu__load(skel); + ASSERT_OK(err, "btf_type_tag_percpu_helper"); + } + + btf_type_tag_percpu__destroy(skel); + +cleanup: + btf__free(vmlinux_btf); +} + void test_btf_tag(void) { if (test__start_subtest("btf_decl_tag")) test_btf_decl_tag(); if (test__start_subtest("btf_type_tag")) test_btf_type_tag(); + + if (test__start_subtest("btf_type_tag_user_mod1")) + test_btf_type_tag_mod_user(true); + if (test__start_subtest("btf_type_tag_user_mod2")) + test_btf_type_tag_mod_user(false); + if (test__start_subtest("btf_type_tag_sys_user_vmlinux")) + test_btf_type_tag_vmlinux_user(); + + if (test__start_subtest("btf_type_tag_percpu_mod1")) + test_btf_type_tag_mod_percpu(true); + if (test__start_subtest("btf_type_tag_percpu_mod2")) + test_btf_type_tag_mod_percpu(false); + if (test__start_subtest("btf_type_tag_percpu_vmlinux_load")) + test_btf_type_tag_vmlinux_percpu(true); + if (test__start_subtest("btf_type_tag_percpu_vmlinux_helper")) + test_btf_type_tag_vmlinux_percpu(false); } diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c index 858916d11e2e..9367bd2f0ae1 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c @@ -14,7 +14,7 @@ static int prog_load(void) BPF_MOV64_IMM(BPF_REG_0, 1), /* r0 = 1 */ BPF_EXIT_INSN(), }; - size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + size_t insns_cnt = ARRAY_SIZE(prog); return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, prog, insns_cnt, "GPL", 0, diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c index d3e8f729c623..db0b7bac78d1 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c @@ -63,7 +63,7 @@ static int prog_load_cnt(int verdict, int val) BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ BPF_EXIT_INSN(), }; - size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + size_t insns_cnt = 
ARRAY_SIZE(prog); int ret; ret = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, @@ -194,14 +194,14 @@ void serial_test_cgroup_attach_multi(void) attach_opts.flags = BPF_F_ALLOW_OVERRIDE | BPF_F_REPLACE; attach_opts.replace_prog_fd = allow_prog[0]; - if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, + if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1, BPF_CGROUP_INET_EGRESS, &attach_opts), "fail_prog_replace_override", "unexpected success\n")) goto err; CHECK_FAIL(errno != EINVAL); attach_opts.flags = BPF_F_REPLACE; - if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, + if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1, BPF_CGROUP_INET_EGRESS, &attach_opts), "fail_prog_replace_no_multi", "unexpected success\n")) goto err; @@ -209,7 +209,7 @@ void serial_test_cgroup_attach_multi(void) attach_opts.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE; attach_opts.replace_prog_fd = -1; - if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, + if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1, BPF_CGROUP_INET_EGRESS, &attach_opts), "fail_prog_replace_bad_fd", "unexpected success\n")) goto err; @@ -217,7 +217,7 @@ void serial_test_cgroup_attach_multi(void) /* replacing a program that is not attached to cgroup should fail */ attach_opts.replace_prog_fd = allow_prog[3]; - if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1, + if (CHECK(!bpf_prog_attach_opts(allow_prog[6], cg1, BPF_CGROUP_INET_EGRESS, &attach_opts), "fail_prog_replace_no_ent", "unexpected success\n")) goto err; @@ -225,14 +225,14 @@ void serial_test_cgroup_attach_multi(void) /* replace 1st from the top program */ attach_opts.replace_prog_fd = allow_prog[0]; - if (CHECK(bpf_prog_attach_xattr(allow_prog[6], cg1, + if (CHECK(bpf_prog_attach_opts(allow_prog[6], cg1, BPF_CGROUP_INET_EGRESS, &attach_opts), "prog_replace", "errno=%d\n", errno)) goto err; /* replace program with itself */ attach_opts.replace_prog_fd = allow_prog[6]; - if (CHECK(bpf_prog_attach_xattr(allow_prog[6], cg1, + if (CHECK(bpf_prog_attach_opts(allow_prog[6], cg1, BPF_CGROUP_INET_EGRESS, &attach_opts), "prog_replace", "errno=%d\n", errno)) goto err; diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c index 356547e849e2..9421a5b7f4e1 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c @@ -16,7 +16,7 @@ static int prog_load(int verdict) BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */ BPF_EXIT_INSN(), }; - size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn); + size_t insns_cnt = ARRAY_SIZE(prog); return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB, prog, insns_cnt, "GPL", 0, diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c new file mode 100644 index 000000000000..0b47c3c000c7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c @@ -0,0 +1,481 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright 2021 Google LLC. 
+ */ + +#include <test_progs.h> +#include <cgroup_helpers.h> +#include <network_helpers.h> + +#include "cgroup_getset_retval_setsockopt.skel.h" +#include "cgroup_getset_retval_getsockopt.skel.h" + +#define SOL_CUSTOM 0xdeadbeef + +static int zero; + +static void test_setsockopt_set(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_setsockopt *obj; + struct bpf_link *link_set_eunatch = NULL; + + obj = cgroup_getset_retval_setsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach setsockopt that sets EUNATCH, assert that + * we actually get that error when we run setsockopt() + */ + link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch, + cgroup_fd); + if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch")) + goto close_bpf_object; + + if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, + &zero, sizeof(int)), "setsockopt")) + goto close_bpf_object; + if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_set_eunatch); + + cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_setsockopt_set_and_get(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_setsockopt *obj; + struct bpf_link *link_set_eunatch = NULL, *link_get_retval = NULL; + + obj = cgroup_getset_retval_setsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach setsockopt that sets EUNATCH, and one that gets the + * previously set errno. Assert that we get the same errno back. + */ + link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch, + cgroup_fd); + if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch")) + goto close_bpf_object; + link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, + cgroup_fd); + if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) + goto close_bpf_object; + + if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, + &zero, sizeof(int)), "setsockopt")) + goto close_bpf_object; + if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->retval_value, -EUNATCH, "retval_value")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_set_eunatch); + bpf_link__destroy(link_get_retval); + + cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_setsockopt_default_zero(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_setsockopt *obj; + struct bpf_link *link_get_retval = NULL; + + obj = cgroup_getset_retval_setsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach setsockopt that gets the previously set errno. + * Assert that, without anything setting one, we get 0. 
+	 */
+	link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
+						     cgroup_fd);
+	if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
+		goto close_bpf_object;
+
+	if (!ASSERT_OK(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
+				  &zero, sizeof(int)), "setsockopt"))
+		goto close_bpf_object;
+
+	if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations"))
+		goto close_bpf_object;
+	if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
+		goto close_bpf_object;
+	if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value"))
+		goto close_bpf_object;
+
+close_bpf_object:
+	bpf_link__destroy(link_get_retval);
+
+	cgroup_getset_retval_setsockopt__destroy(obj);
+}
+
+static void test_setsockopt_default_zero_and_set(int cgroup_fd, int sock_fd)
+{
+	struct cgroup_getset_retval_setsockopt *obj;
+	struct bpf_link *link_get_retval = NULL, *link_set_eunatch = NULL;
+
+	obj = cgroup_getset_retval_setsockopt__open_and_load();
+	if (!ASSERT_OK_PTR(obj, "skel-load"))
+		return;
+
+	/* Attach setsockopt that gets the previously set errno, and then
+	 * one that sets the errno to EUNATCH. Assert that the get does not
+	 * see EUNATCH set later, and does not prevent EUNATCH from being set.
+	 */
+	link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
+						     cgroup_fd);
+	if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
+		goto close_bpf_object;
+	link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch,
+						      cgroup_fd);
+	if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch"))
+		goto close_bpf_object;
+
+	if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
+				   &zero, sizeof(int)), "setsockopt"))
+		goto close_bpf_object;
+	if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno"))
+		goto close_bpf_object;
+
+	if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations"))
+		goto close_bpf_object;
+	if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
+		goto close_bpf_object;
+	if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value"))
+		goto close_bpf_object;
+
+close_bpf_object:
+	bpf_link__destroy(link_get_retval);
+	bpf_link__destroy(link_set_eunatch);
+
+	cgroup_getset_retval_setsockopt__destroy(obj);
+}
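The BPF programs these tests attach live in a separate object that this hunk does not include. As a rough sketch, and not the actual progs/cgroup_getset_retval_setsockopt.c source, set_eunatch and get_retval style programs built on the bpf_set_retval()/bpf_get_retval() helpers this series exercises might look like this:

#include <linux/bpf.h>
#include <linux/errno.h>
#include <bpf/bpf_helpers.h>

__u32 invocations;
int retval_value;

SEC("cgroup/setsockopt")
int set_eunatch(struct bpf_sockopt *ctx)
{
	__sync_fetch_and_add(&invocations, 1);
	bpf_set_retval(-EUNATCH);	/* exported errno for the syscall */
	return 0;			/* 0 == reject the setsockopt() */
}

SEC("cgroup/setsockopt")
int get_retval(struct bpf_sockopt *ctx)
{
	__sync_fetch_and_add(&invocations, 1);
	retval_value = bpf_get_retval();	/* sees the last value set */
	return 1;			/* 1 == let the syscall proceed */
}

char _license[] SEC("license") = "GPL";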
+
+static void test_setsockopt_override(int cgroup_fd, int sock_fd)
+{
+	struct cgroup_getset_retval_setsockopt *obj;
+	struct bpf_link *link_set_eunatch = NULL, *link_set_eisconn = NULL;
+	struct bpf_link *link_get_retval = NULL;
+
+	obj = cgroup_getset_retval_setsockopt__open_and_load();
+	if (!ASSERT_OK_PTR(obj, "skel-load"))
+		return;
+
+	/* Attach setsockopt that sets EUNATCH, then one that sets EISCONN,
+	 * and then one that gets the exported errno. Assert that both the
+	 * syscall and the helper see the last set errno.
+	 */
+	link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch,
+						      cgroup_fd);
+	if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch"))
+		goto close_bpf_object;
+	link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn,
+						      cgroup_fd);
+	if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn"))
+		goto close_bpf_object;
+	link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
+						     cgroup_fd);
+	if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
+		goto close_bpf_object;
+
+	if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
+				   &zero, sizeof(int)), "setsockopt"))
+		goto close_bpf_object;
+	if (!ASSERT_EQ(errno, EISCONN, "setsockopt-errno"))
+		goto close_bpf_object;
+
+	if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations"))
+		goto close_bpf_object;
+	if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
+		goto close_bpf_object;
+	if (!ASSERT_EQ(obj->bss->retval_value, -EISCONN, "retval_value"))
+		goto close_bpf_object;
+
+close_bpf_object:
+	bpf_link__destroy(link_set_eunatch);
+	bpf_link__destroy(link_set_eisconn);
+	bpf_link__destroy(link_get_retval);
+
+	cgroup_getset_retval_setsockopt__destroy(obj);
+}
+
+static void test_setsockopt_legacy_eperm(int cgroup_fd, int sock_fd)
+{
+	struct cgroup_getset_retval_setsockopt *obj;
+	struct bpf_link *link_legacy_eperm = NULL, *link_get_retval = NULL;
+
+	obj = cgroup_getset_retval_setsockopt__open_and_load();
+	if (!ASSERT_OK_PTR(obj, "skel-load"))
+		return;
+
+	/* Attach setsockopt that returns a reject without setting errno
+	 * (legacy reject), and one that gets the errno. Assert that for
+	 * backward compatibility the syscall results in EPERM, and this
+	 * is also visible to the helper.
+	 */
+	link_legacy_eperm = bpf_program__attach_cgroup(obj->progs.legacy_eperm,
+						       cgroup_fd);
+	if (!ASSERT_OK_PTR(link_legacy_eperm, "cg-attach-legacy_eperm"))
+		goto close_bpf_object;
+	link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval,
+						     cgroup_fd);
+	if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval"))
+		goto close_bpf_object;
+
+	if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
+				   &zero, sizeof(int)), "setsockopt"))
+		goto close_bpf_object;
+	if (!ASSERT_EQ(errno, EPERM, "setsockopt-errno"))
+		goto close_bpf_object;
+
+	if (!ASSERT_EQ(obj->bss->invocations, 2, "invocations"))
+		goto close_bpf_object;
+	if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error"))
+		goto close_bpf_object;
+	if (!ASSERT_EQ(obj->bss->retval_value, -EPERM, "retval_value"))
+		goto close_bpf_object;
+
+close_bpf_object:
+	bpf_link__destroy(link_legacy_eperm);
+	bpf_link__destroy(link_get_retval);
+
+	cgroup_getset_retval_setsockopt__destroy(obj);
+}
+
+static void test_setsockopt_legacy_no_override(int cgroup_fd, int sock_fd)
+{
+	struct cgroup_getset_retval_setsockopt *obj;
+	struct bpf_link *link_set_eunatch = NULL, *link_legacy_eperm = NULL;
+	struct bpf_link *link_get_retval = NULL;
+
+	obj = cgroup_getset_retval_setsockopt__open_and_load();
+	if (!ASSERT_OK_PTR(obj, "skel-load"))
+		return;
+
+	/* Attach setsockopt that sets EUNATCH, then one that returns a
+	 * reject without setting errno, and then one that gets the exported
+	 * errno. Assert that both the syscall's and the helper's errno are
+	 * unaffected by the second prog (i.e. a legacy reject does not
+	 * override the errno to EPERM).
+	 */
+ link_set_eunatch = bpf_program__attach_cgroup(obj->progs.set_eunatch, + cgroup_fd); + if (!ASSERT_OK_PTR(link_set_eunatch, "cg-attach-set_eunatch")) + goto close_bpf_object; + link_legacy_eperm = bpf_program__attach_cgroup(obj->progs.legacy_eperm, + cgroup_fd); + if (!ASSERT_OK_PTR(link_legacy_eperm, "cg-attach-legacy_eperm")) + goto close_bpf_object; + link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, + cgroup_fd); + if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) + goto close_bpf_object; + + if (!ASSERT_ERR(setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR, + &zero, sizeof(int)), "setsockopt")) + goto close_bpf_object; + if (!ASSERT_EQ(errno, EUNATCH, "setsockopt-errno")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->retval_value, -EUNATCH, "retval_value")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_set_eunatch); + bpf_link__destroy(link_legacy_eperm); + bpf_link__destroy(link_get_retval); + + cgroup_getset_retval_setsockopt__destroy(obj); +} + +static void test_getsockopt_get(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_getsockopt *obj; + struct bpf_link *link_get_retval = NULL; + int buf; + socklen_t optlen = sizeof(buf); + + obj = cgroup_getset_retval_getsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach a getsockopt program that gets the previously set errno. Assert + * that the error from the kernel is in both ctx_retval_value and + * retval_value. + */ + link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, + cgroup_fd); + if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) + goto close_bpf_object; + + if (!ASSERT_ERR(getsockopt(sock_fd, SOL_CUSTOM, 0, + &buf, &optlen), "getsockopt")) + goto close_bpf_object; + if (!ASSERT_EQ(errno, EOPNOTSUPP, "getsockopt-errno")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->retval_value, -EOPNOTSUPP, "retval_value")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->ctx_retval_value, -EOPNOTSUPP, "ctx_retval_value")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_get_retval); + + cgroup_getset_retval_getsockopt__destroy(obj); +} + +static void test_getsockopt_override(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_getsockopt *obj; + struct bpf_link *link_set_eisconn = NULL; + int buf; + socklen_t optlen = sizeof(buf); + + obj = cgroup_getset_retval_getsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach a getsockopt program that sets retval to -EISCONN. Assert that + * this overrides the value from the kernel. + */
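On the getsockopt side the tests compare two values: what bpf_get_retval() reports and the ctx->retval field of struct bpf_sockopt, which carries the kernel's own getsockopt() result and can be overridden through bpf_set_retval(). A sketch of the corresponding programs, continuing the assumptions and declarations of the earlier sketch:

int ctx_retval_value;

SEC("cgroup/getsockopt")
int get_retval(struct bpf_sockopt *ctx)
{
	retval_value = bpf_get_retval();
	ctx_retval_value = ctx->retval; /* kernel's getsockopt() result */
	__sync_fetch_and_add(&invocations, 1);
	return 1;
}

SEC("cgroup/getsockopt")
int set_eisconn(struct bpf_sockopt *ctx)
{
	__sync_fetch_and_add(&invocations, 1);
	if (bpf_set_retval(-EISCONN))
		assertion_error = 1;
	return 1;
}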
+ link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn, + cgroup_fd); + if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn")) + goto close_bpf_object; + + if (!ASSERT_ERR(getsockopt(sock_fd, SOL_CUSTOM, 0, + &buf, &optlen), "getsockopt")) + goto close_bpf_object; + if (!ASSERT_EQ(errno, EISCONN, "getsockopt-errno")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 1, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_set_eisconn); + + cgroup_getset_retval_getsockopt__destroy(obj); +} + +static void test_getsockopt_retval_sync(int cgroup_fd, int sock_fd) +{ + struct cgroup_getset_retval_getsockopt *obj; + struct bpf_link *link_set_eisconn = NULL, *link_clear_retval = NULL; + struct bpf_link *link_get_retval = NULL; + int buf; + socklen_t optlen = sizeof(buf); + + obj = cgroup_getset_retval_getsockopt__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) + return; + + /* Attach a getsockopt program that sets retval to -EISCONN, and one that + * clears the ctx retval. Assert that clearing the ctx retval is synced to + * the helper and clears any errors from both the kernel and BPF. + */ + link_set_eisconn = bpf_program__attach_cgroup(obj->progs.set_eisconn, + cgroup_fd); + if (!ASSERT_OK_PTR(link_set_eisconn, "cg-attach-set_eisconn")) + goto close_bpf_object; + link_clear_retval = bpf_program__attach_cgroup(obj->progs.clear_retval, + cgroup_fd); + if (!ASSERT_OK_PTR(link_clear_retval, "cg-attach-clear_retval")) + goto close_bpf_object; + link_get_retval = bpf_program__attach_cgroup(obj->progs.get_retval, + cgroup_fd); + if (!ASSERT_OK_PTR(link_get_retval, "cg-attach-get_retval")) + goto close_bpf_object; + + if (!ASSERT_OK(getsockopt(sock_fd, SOL_CUSTOM, 0, + &buf, &optlen), "getsockopt")) + goto close_bpf_object; + + if (!ASSERT_EQ(obj->bss->invocations, 3, "invocations")) + goto close_bpf_object; + if (!ASSERT_FALSE(obj->bss->assertion_error, "assertion_error")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->retval_value, 0, "retval_value")) + goto close_bpf_object; + if (!ASSERT_EQ(obj->bss->ctx_retval_value, 0, "ctx_retval_value")) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(link_set_eisconn); + bpf_link__destroy(link_clear_retval); + bpf_link__destroy(link_get_retval); + + cgroup_getset_retval_getsockopt__destroy(obj); +} + +void test_cgroup_getset_retval(void) +{ + int cgroup_fd = -1; + int sock_fd = -1; + + cgroup_fd = test__join_cgroup("/cgroup_getset_retval"); + if (!ASSERT_GE(cgroup_fd, 0, "cg-create")) + goto close_fd; + + sock_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 0, 0); + if (!ASSERT_GE(sock_fd, 0, "start-server")) + goto close_fd; + + if (test__start_subtest("setsockopt-set")) + test_setsockopt_set(cgroup_fd, sock_fd); + + if (test__start_subtest("setsockopt-set_and_get")) + test_setsockopt_set_and_get(cgroup_fd, sock_fd); + + if (test__start_subtest("setsockopt-default_zero")) + test_setsockopt_default_zero(cgroup_fd, sock_fd); + + if (test__start_subtest("setsockopt-default_zero_and_set")) + test_setsockopt_default_zero_and_set(cgroup_fd, sock_fd); + + if (test__start_subtest("setsockopt-override")) + test_setsockopt_override(cgroup_fd, sock_fd); + + if (test__start_subtest("setsockopt-legacy_eperm")) + test_setsockopt_legacy_eperm(cgroup_fd, sock_fd); + + if (test__start_subtest("setsockopt-legacy_no_override")) + test_setsockopt_legacy_no_override(cgroup_fd, sock_fd); + + if
(test__start_subtest("getsockopt-get")) + test_getsockopt_get(cgroup_fd, sock_fd); + + if (test__start_subtest("getsockopt-override")) + test_getsockopt_override(cgroup_fd, sock_fd); + + if (test__start_subtest("getsockopt-retval_sync")) + test_getsockopt_retval_sync(cgroup_fd, sock_fd); + +close_fd: + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c index f73e6e36b74d..12f4395f18b3 100644 --- a/tools/testing/selftests/bpf/prog_tests/check_mtu.c +++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c @@ -79,28 +79,21 @@ static void test_check_mtu_run_xdp(struct test_check_mtu *skel, struct bpf_program *prog, __u32 mtu_expect) { - const char *prog_name = bpf_program__name(prog); int retval_expect = XDP_PASS; __u32 mtu_result = 0; char buf[256] = {}; - int err; - struct bpf_prog_test_run_attr tattr = { + int err, prog_fd = bpf_program__fd(prog); + LIBBPF_OPTS(bpf_test_run_opts, topts, .repeat = 1, .data_in = &pkt_v4, .data_size_in = sizeof(pkt_v4), .data_out = buf, .data_size_out = sizeof(buf), - .prog_fd = bpf_program__fd(prog), - }; - - err = bpf_prog_test_run_xattr(&tattr); - CHECK_ATTR(err != 0, "bpf_prog_test_run", - "prog_name:%s (err %d errno %d retval %d)\n", - prog_name, err, errno, tattr.retval); + ); - CHECK(tattr.retval != retval_expect, "retval", - "progname:%s unexpected retval=%d expected=%d\n", - prog_name, tattr.retval, retval_expect); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, retval_expect, "retval"); /* Extract MTU that BPF-prog got */ mtu_result = skel->bss->global_bpf_mtu_xdp; @@ -139,28 +132,21 @@ static void test_check_mtu_run_tc(struct test_check_mtu *skel, struct bpf_program *prog, __u32 mtu_expect) { - const char *prog_name = bpf_program__name(prog); int retval_expect = BPF_OK; __u32 mtu_result = 0; char buf[256] = {}; - int err; - struct bpf_prog_test_run_attr tattr = { - .repeat = 1, + int err, prog_fd = bpf_program__fd(prog); + LIBBPF_OPTS(bpf_test_run_opts, topts, .data_in = &pkt_v4, .data_size_in = sizeof(pkt_v4), .data_out = buf, .data_size_out = sizeof(buf), - .prog_fd = bpf_program__fd(prog), - }; - - err = bpf_prog_test_run_xattr(&tattr); - CHECK_ATTR(err != 0, "bpf_prog_test_run", - "prog_name:%s (err %d errno %d retval %d)\n", - prog_name, err, errno, tattr.retval); + .repeat = 1, + ); - CHECK(tattr.retval != retval_expect, "retval", - "progname:%s unexpected retval=%d expected=%d\n", - prog_name, tattr.retval, retval_expect); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, retval_expect, "retval"); /* Extract MTU that BPF-prog got */ mtu_result = skel->bss->global_bpf_mtu_tc; diff --git a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c index e075d03ab630..224f016b0a53 100644 --- a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c @@ -161,7 +161,7 @@ static socklen_t prepare_addr(struct sockaddr_storage *addr, int family) } } -static bool was_decapsulated(struct bpf_prog_test_run_attr *tattr) +static bool was_decapsulated(struct bpf_test_run_opts *tattr) { return tattr->data_size_out < tattr->data_size_in; } @@ -367,12 +367,12 @@ static void close_fds(int *fds, int n) static void test_cls_redirect_common(struct bpf_program *prog) { - struct bpf_prog_test_run_attr tattr = {}; + LIBBPF_OPTS(bpf_test_run_opts, tattr); int 
families[] = { AF_INET, AF_INET6 }; struct sockaddr_storage ss; struct sockaddr *addr; socklen_t slen; - int i, j, err; + int i, j, err, prog_fd; int servers[__NR_KIND][ARRAY_SIZE(families)] = {}; int conns[__NR_KIND][ARRAY_SIZE(families)] = {}; struct tuple tuples[__NR_KIND][ARRAY_SIZE(families)]; @@ -394,7 +394,7 @@ static void test_cls_redirect_common(struct bpf_program *prog) goto cleanup; } - tattr.prog_fd = bpf_program__fd(prog); + prog_fd = bpf_program__fd(prog); for (i = 0; i < ARRAY_SIZE(tests); i++) { struct test_cfg *test = &tests[i]; @@ -415,7 +415,7 @@ static void test_cls_redirect_common(struct bpf_program *prog) if (CHECK_FAIL(!tattr.data_size_in)) continue; - err = bpf_prog_test_run_xattr(&tattr); + err = bpf_prog_test_run_opts(prog_fd, &tattr); if (CHECK_FAIL(err)) continue; diff --git a/tools/testing/selftests/bpf/prog_tests/core_autosize.c b/tools/testing/selftests/bpf/prog_tests/core_autosize.c index 1dfe14ff6aa4..f2ce4fd1cdae 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_autosize.c +++ b/tools/testing/selftests/bpf/prog_tests/core_autosize.c @@ -167,7 +167,7 @@ void test_core_autosize(void) if (!ASSERT_OK_PTR(bss_map, "bss_map_find")) goto cleanup; - err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, (void *)&out); + err = bpf_map__lookup_elem(bss_map, &zero, sizeof(zero), &out, sizeof(out), 0); if (!ASSERT_OK(err, "bss_lookup")) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/core_kern.c b/tools/testing/selftests/bpf/prog_tests/core_kern.c index 561c5185d886..6a5a1c019a5d 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_kern.c +++ b/tools/testing/selftests/bpf/prog_tests/core_kern.c @@ -7,8 +7,22 @@ void test_core_kern_lskel(void) { struct core_kern_lskel *skel; + int link_fd; skel = core_kern_lskel__open_and_load(); - ASSERT_OK_PTR(skel, "open_and_load"); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + + link_fd = core_kern_lskel__core_relo_proto__attach(skel); + if (!ASSERT_GT(link_fd, 0, "attach(core_relo_proto)")) + goto cleanup; + + /* trigger tracepoints */ + usleep(1); + ASSERT_TRUE(skel->bss->proto_out[0], "bpf_core_type_exists"); + ASSERT_FALSE(skel->bss->proto_out[1], "!bpf_core_type_exists"); + ASSERT_TRUE(skel->bss->proto_out[2], "bpf_core_type_exists. 
nested"); + +cleanup: core_kern_lskel__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c b/tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c new file mode 100644 index 000000000000..04cc145bc26a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "test_progs.h" +#include "core_kern_overflow.lskel.h" + +void test_core_kern_overflow_lskel(void) +{ + struct core_kern_overflow_lskel *skel; + + skel = core_kern_overflow_lskel__open_and_load(); + if (!ASSERT_NULL(skel, "open_and_load")) + core_kern_overflow_lskel__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index b8bdd1c3efca..3712dfe1be59 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -2,6 +2,7 @@ #include <test_progs.h> #include "progs/core_reloc_types.h" #include "bpf_testmod/bpf_testmod.h" +#include <linux/limits.h> #include <sys/mman.h> #include <sys/syscall.h> #include <bpf/btf.h> @@ -276,13 +277,21 @@ static int duration = 0; #define SIZE_OUTPUT_DATA(type) \ STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \ .int_sz = sizeof(((type *)0)->int_field), \ + .int_off = offsetof(type, int_field), \ .struct_sz = sizeof(((type *)0)->struct_field), \ + .struct_off = offsetof(type, struct_field), \ .union_sz = sizeof(((type *)0)->union_field), \ + .union_off = offsetof(type, union_field), \ .arr_sz = sizeof(((type *)0)->arr_field), \ - .arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \ + .arr_off = offsetof(type, arr_field), \ + .arr_elem_sz = sizeof(((type *)0)->arr_field[1]), \ + .arr_elem_off = offsetof(type, arr_field[1]), \ .ptr_sz = 8, /* always 8-byte pointer for BPF */ \ + .ptr_off = offsetof(type, ptr_field), \ .enum_sz = sizeof(((type *)0)->enum_field), \ + .enum_off = offsetof(type, enum_field), \ .float_sz = sizeof(((type *)0)->float_field), \ + .float_off = offsetof(type, float_field), \ } #define SIZE_CASE(name) { \ @@ -511,7 +520,7 @@ static int __trigger_module_test_read(const struct core_reloc_test_case *test) } -static struct core_reloc_test_case test_cases[] = { +static const struct core_reloc_test_case test_cases[] = { /* validate we can find kernel image and use its BTF for relocs */ { .case_name = "kernel", @@ -713,9 +722,10 @@ static struct core_reloc_test_case test_cases[] = { }), BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield), - /* size relocation checks */ + /* field size and offset relocation checks */ SIZE_CASE(size), SIZE_CASE(size___diff_sz), + SIZE_CASE(size___diff_offs), SIZE_ERR_CASE(size___err_ambiguous), /* validate type existence and size relocations */ @@ -836,13 +846,27 @@ static size_t roundup_page(size_t sz) return (sz + page_size - 1) / page_size * page_size; } -void test_core_reloc(void) +static int run_btfgen(const char *src_btf, const char *dst_btf, const char *objpath) +{ + char command[4096]; + int n; + + n = snprintf(command, sizeof(command), + "./bpftool gen min_core_btf %s %s %s", + src_btf, dst_btf, objpath); + if (n < 0 || n >= sizeof(command)) + return -1; + + return system(command); +} + +static void run_core_reloc_tests(bool use_btfgen) { const size_t mmap_sz = roundup_page(sizeof(struct data)); DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts); - struct core_reloc_test_case *test_case; + struct core_reloc_test_case *test_case, test_case_copy; const char *tp_name, *probe_name; - 
int err, i, equal; + int err, i, equal, fd; struct bpf_link *link = NULL; struct bpf_map *data_map; struct bpf_program *prog; @@ -854,7 +878,11 @@ void test_core_reloc(void) my_pid_tgid = getpid() | ((uint64_t)syscall(SYS_gettid) << 32); for (i = 0; i < ARRAY_SIZE(test_cases); i++) { - test_case = &test_cases[i]; + char btf_file[] = "/tmp/core_reloc.btf.XXXXXX"; + + test_case_copy = test_cases[i]; + test_case = &test_case_copy; + if (!test__start_subtest(test_case->case_name)) continue; @@ -863,6 +891,26 @@ void test_core_reloc(void) continue; } + /* generate a "minimal" BTF file and use it as source */ + if (use_btfgen) { + + if (!test_case->btf_src_file || test_case->fails) { + test__skip(); + continue; + } + + fd = mkstemp(btf_file); + if (!ASSERT_GE(fd, 0, "btf_tmp")) + continue; + close(fd); /* we only need the path */ + err = run_btfgen(test_case->btf_src_file, btf_file, + test_case->bpf_obj_file); + if (!ASSERT_OK(err, "run_btfgen")) + continue; + + test_case->btf_src_file = btf_file; + } + if (test_case->setup) { err = test_case->setup(test_case); if (CHECK(err, "test_setup", "test #%d setup failed: %d\n", i, err)) @@ -872,7 +920,7 @@ void test_core_reloc(void) if (test_case->btf_src_file) { err = access(test_case->btf_src_file, R_OK); if (!ASSERT_OK(err, "btf_src_file")) - goto cleanup; + continue; } open_opts.btf_custom_path = test_case->btf_src_file; @@ -954,8 +1002,20 @@ cleanup: CHECK_FAIL(munmap(mmap_data, mmap_sz)); mmap_data = NULL; } + if (use_btfgen) + remove(test_case->btf_src_file); bpf_link__destroy(link); link = NULL; bpf_object__close(obj); } } + +void test_core_reloc(void) +{ + run_core_reloc_tests(false); +} + +void test_core_reloc_btfgen(void) +{ + run_core_reloc_tests(true); +} diff --git a/tools/testing/selftests/bpf/prog_tests/core_retro.c b/tools/testing/selftests/bpf/prog_tests/core_retro.c index 6acb0e94d4d7..4a2c256c8db6 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_retro.c +++ b/tools/testing/selftests/bpf/prog_tests/core_retro.c @@ -6,31 +6,32 @@ void test_core_retro(void) { - int err, zero = 0, res, duration = 0, my_pid = getpid(); + int err, zero = 0, res, my_pid = getpid(); struct test_core_retro *skel; /* load program */ skel = test_core_retro__open_and_load(); - if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) + if (!ASSERT_OK_PTR(skel, "skel_load")) goto out_close; - err = bpf_map_update_elem(bpf_map__fd(skel->maps.exp_tgid_map), &zero, &my_pid, 0); - if (CHECK(err, "map_update", "failed to set expected PID: %d\n", errno)) + err = bpf_map__update_elem(skel->maps.exp_tgid_map, &zero, sizeof(zero), + &my_pid, sizeof(my_pid), 0); + if (!ASSERT_OK(err, "map_update")) goto out_close; /* attach probe */ err = test_core_retro__attach(skel); - if (CHECK(err, "attach_kprobe", "err %d\n", err)) + if (!ASSERT_OK(err, "attach_kprobe")) goto out_close; /* trigger */ usleep(1); - err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res); - if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno)) + err = bpf_map__lookup_elem(skel->maps.results, &zero, sizeof(zero), &res, sizeof(res), 0); + if (!ASSERT_OK(err, "map_lookup")) goto out_close; - CHECK(res != my_pid, "pid_check", "got %d != exp %d\n", res, my_pid); + ASSERT_EQ(res, my_pid, "pid_check"); out_close: test_core_retro__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c b/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c new file mode 100644 index 000000000000..b2dfc5954aea --- /dev/null +++ 
b/tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Facebook */ + +#include <test_progs.h> +#include "test_custom_sec_handlers.skel.h" + +#define COOKIE_ABC1 1 +#define COOKIE_ABC2 2 +#define COOKIE_CUSTOM 3 +#define COOKIE_FALLBACK 4 +#define COOKIE_KPROBE 5 + +static int custom_setup_prog(struct bpf_program *prog, long cookie) +{ + if (cookie == COOKIE_ABC1) + bpf_program__set_autoload(prog, false); + + return 0; +} + +static int custom_prepare_load_prog(struct bpf_program *prog, + struct bpf_prog_load_opts *opts, long cookie) +{ + if (cookie == COOKIE_FALLBACK) + opts->prog_flags |= BPF_F_SLEEPABLE; + else if (cookie == COOKIE_ABC1) + ASSERT_FALSE(true, "unexpected preload for abc"); + + return 0; +} + +static int custom_attach_prog(const struct bpf_program *prog, long cookie, + struct bpf_link **link) +{ + switch (cookie) { + case COOKIE_ABC2: + *link = bpf_program__attach_raw_tracepoint(prog, "sys_enter"); + return libbpf_get_error(*link); + case COOKIE_CUSTOM: + *link = bpf_program__attach_tracepoint(prog, "syscalls", "sys_enter_nanosleep"); + return libbpf_get_error(*link); + case COOKIE_KPROBE: + case COOKIE_FALLBACK: + /* no auto-attach for SEC("xyz") and SEC("kprobe") */ + *link = NULL; + return 0; + default: + ASSERT_FALSE(true, "unexpected cookie"); + return -EINVAL; + } +} + +static int abc1_id; +static int abc2_id; +static int custom_id; +static int fallback_id; +static int kprobe_id; + +__attribute__((constructor)) +static void register_sec_handlers(void) +{ + LIBBPF_OPTS(libbpf_prog_handler_opts, abc1_opts, + .cookie = COOKIE_ABC1, + .prog_setup_fn = custom_setup_prog, + .prog_prepare_load_fn = custom_prepare_load_prog, + .prog_attach_fn = NULL, + ); + LIBBPF_OPTS(libbpf_prog_handler_opts, abc2_opts, + .cookie = COOKIE_ABC2, + .prog_setup_fn = custom_setup_prog, + .prog_prepare_load_fn = custom_prepare_load_prog, + .prog_attach_fn = custom_attach_prog, + ); + LIBBPF_OPTS(libbpf_prog_handler_opts, custom_opts, + .cookie = COOKIE_CUSTOM, + .prog_setup_fn = NULL, + .prog_prepare_load_fn = NULL, + .prog_attach_fn = custom_attach_prog, + ); + + abc1_id = libbpf_register_prog_handler("abc", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc1_opts); + abc2_id = libbpf_register_prog_handler("abc/", BPF_PROG_TYPE_RAW_TRACEPOINT, 0, &abc2_opts); + custom_id = libbpf_register_prog_handler("custom+", BPF_PROG_TYPE_TRACEPOINT, 0, &custom_opts); +} + +__attribute__((destructor)) +static void unregister_sec_handlers(void) +{ + libbpf_unregister_prog_handler(abc1_id); + libbpf_unregister_prog_handler(abc2_id); + libbpf_unregister_prog_handler(custom_id); +} + +void test_custom_sec_handlers(void) +{ + LIBBPF_OPTS(libbpf_prog_handler_opts, opts, + .prog_setup_fn = custom_setup_prog, + .prog_prepare_load_fn = custom_prepare_load_prog, + .prog_attach_fn = custom_attach_prog, + ); + struct test_custom_sec_handlers *skel; + int err; + + ASSERT_GT(abc1_id, 0, "abc1_id"); + ASSERT_GT(abc2_id, 0, "abc2_id"); + ASSERT_GT(custom_id, 0, "custom_id"); + + /* override libbpf's handling of SEC("kprobe/...") but also allow pure + * SEC("kprobe") due to the "kprobe+" specifier. Register it as + * TRACEPOINT, just for fun. + */
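The skeleton's programs live in sections that only the handlers registered here understand. The file test_custom_sec_handlers.bpf.c is not part of this hunk; the program bodies below are invented, but the SEC() names follow from the registrations ("abc" exact match, "abc/" prefix, "custom+" with optional suffix, plus the "kprobe+" and NULL fallback handlers registered in the test body):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

int abc1_called, abc2_called, custom1_called, kprobe1_called, xyz_called;

SEC("abc")              /* COOKIE_ABC1: autoload turned off in setup */
int abc1(void *ctx) { abc1_called = 1; return 0; }

SEC("abc/whatever")     /* COOKIE_ABC2: auto-attached as raw_tp sys_enter */
int abc2(void *ctx) { abc2_called = 1; return 0; }

SEC("custom/something") /* COOKIE_CUSTOM: attached as a tracepoint */
int custom1(void *ctx) { custom1_called = 1; return 0; }

SEC("kprobe/whatever")  /* COOKIE_KPROBE: loaded as TRACEPOINT, no auto-attach */
int kprobe1(void *ctx) { kprobe1_called = 1; return 0; }

SEC("xyz")              /* COOKIE_FALLBACK: sleepable SYSCALL prog, no auto-attach */
int xyz(void *ctx) { xyz_called = 1; return 0; }

char _license[] SEC("license") = "GPL";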
+ opts.cookie = COOKIE_KPROBE; + kprobe_id = libbpf_register_prog_handler("kprobe+", BPF_PROG_TYPE_TRACEPOINT, 0, &opts); + /* the fallback treats everything as a BPF_PROG_TYPE_SYSCALL program to test + * setting the custom BPF_F_SLEEPABLE bit in the preload handler + */ + opts.cookie = COOKIE_FALLBACK; + fallback_id = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_SYSCALL, 0, &opts); + + if (!ASSERT_GT(fallback_id, 0, "fallback_id") /* || !ASSERT_GT(kprobe_id, 0, "kprobe_id")*/) { + if (fallback_id > 0) + libbpf_unregister_prog_handler(fallback_id); + if (kprobe_id > 0) + libbpf_unregister_prog_handler(kprobe_id); + return; + } + + /* open skeleton and validate assumptions */ + skel = test_custom_sec_handlers__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__type(skel->progs.abc1), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc1_type"); + ASSERT_FALSE(bpf_program__autoload(skel->progs.abc1), "abc1_autoload"); + + ASSERT_EQ(bpf_program__type(skel->progs.abc2), BPF_PROG_TYPE_RAW_TRACEPOINT, "abc2_type"); + ASSERT_EQ(bpf_program__type(skel->progs.custom1), BPF_PROG_TYPE_TRACEPOINT, "custom1_type"); + ASSERT_EQ(bpf_program__type(skel->progs.custom2), BPF_PROG_TYPE_TRACEPOINT, "custom2_type"); + ASSERT_EQ(bpf_program__type(skel->progs.kprobe1), BPF_PROG_TYPE_TRACEPOINT, "kprobe1_type"); + ASSERT_EQ(bpf_program__type(skel->progs.xyz), BPF_PROG_TYPE_SYSCALL, "xyz_type"); + + skel->rodata->my_pid = getpid(); + + /* now attempt to load everything */ + err = test_custom_sec_handlers__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + /* now try to auto-attach everything */ + err = test_custom_sec_handlers__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + skel->links.xyz = bpf_program__attach(skel->progs.kprobe1); + ASSERT_EQ(errno, EOPNOTSUPP, "xyz_attach_err"); + ASSERT_ERR_PTR(skel->links.xyz, "xyz_attach"); + + /* trigger programs */ + usleep(1); + + /* SEC("abc") is set to not auto-load */ + ASSERT_FALSE(skel->bss->abc1_called, "abc1_called"); + ASSERT_TRUE(skel->bss->abc2_called, "abc2_called"); + ASSERT_TRUE(skel->bss->custom1_called, "custom1_called"); + ASSERT_TRUE(skel->bss->custom2_called, "custom2_called"); + /* SEC("kprobe") shouldn't be auto-attached */ + ASSERT_FALSE(skel->bss->kprobe1_called, "kprobe1_called"); + /* SEC("xyz") shouldn't be auto-attached */ + ASSERT_FALSE(skel->bss->xyz_called, "xyz_called"); + +cleanup: + test_custom_sec_handlers__destroy(skel); + + ASSERT_OK(libbpf_unregister_prog_handler(fallback_id), "unregister_fallback"); + ASSERT_OK(libbpf_unregister_prog_handler(kprobe_id), "unregister_kprobe"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c index cbaa44ffb8c6..c11832657d2b 100644 --- a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c +++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c @@ -2,6 +2,7 @@ /* Copyright (C) 2021.
Huawei Technologies Co., Ltd */ #include <test_progs.h> #include "dummy_st_ops.skel.h" +#include "trace_dummy_st_ops.skel.h" /* Need to keep consistent with definition in include/linux/bpf.h */ struct bpf_dummy_ops_state { @@ -26,10 +27,10 @@ static void test_dummy_st_ops_attach(void) static void test_dummy_init_ret_value(void) { __u64 args[1] = {0}; - struct bpf_prog_test_run_attr attr = { - .ctx_size_in = sizeof(args), + LIBBPF_OPTS(bpf_test_run_opts, attr, .ctx_in = args, - }; + .ctx_size_in = sizeof(args), + ); struct dummy_st_ops *skel; int fd, err; @@ -38,8 +39,7 @@ static void test_dummy_init_ret_value(void) return; fd = bpf_program__fd(skel->progs.test_1); - attr.prog_fd = fd; - err = bpf_prog_test_run_xattr(&attr); + err = bpf_prog_test_run_opts(fd, &attr); ASSERT_OK(err, "test_run"); ASSERT_EQ(attr.retval, 0xf2f3f4f5, "test_ret"); @@ -53,10 +53,11 @@ static void test_dummy_init_ptr_arg(void) .val = exp_retval, }; __u64 args[1] = {(unsigned long)&in_state}; - struct bpf_prog_test_run_attr attr = { - .ctx_size_in = sizeof(args), + LIBBPF_OPTS(bpf_test_run_opts, attr, .ctx_in = args, - }; + .ctx_size_in = sizeof(args), + ); + struct trace_dummy_st_ops *trace_skel; struct dummy_st_ops *skel; int fd, err; @@ -65,22 +66,42 @@ static void test_dummy_init_ptr_arg(void) return; fd = bpf_program__fd(skel->progs.test_1); - attr.prog_fd = fd; - err = bpf_prog_test_run_xattr(&attr); + + trace_skel = trace_dummy_st_ops__open(); + if (!ASSERT_OK_PTR(trace_skel, "trace_dummy_st_ops__open")) + goto done; + + err = bpf_program__set_attach_target(trace_skel->progs.fentry_test_1, + fd, "test_1"); + if (!ASSERT_OK(err, "set_attach_target(fentry_test_1)")) + goto done; + + err = trace_dummy_st_ops__load(trace_skel); + if (!ASSERT_OK(err, "load(trace_skel)")) + goto done; + + err = trace_dummy_st_ops__attach(trace_skel); + if (!ASSERT_OK(err, "attach(trace_skel)")) + goto done; + + err = bpf_prog_test_run_opts(fd, &attr); ASSERT_OK(err, "test_run"); ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret"); ASSERT_EQ(attr.retval, exp_retval, "test_ret"); + ASSERT_EQ(trace_skel->bss->val, exp_retval, "fentry_val"); +done: dummy_st_ops__destroy(skel); + trace_dummy_st_ops__destroy(trace_skel); } static void test_dummy_multiple_args(void) { __u64 args[5] = {0, -100, 0x8a5f, 'c', 0x1234567887654321ULL}; - struct bpf_prog_test_run_attr attr = { - .ctx_size_in = sizeof(args), + LIBBPF_OPTS(bpf_test_run_opts, attr, .ctx_in = args, - }; + .ctx_size_in = sizeof(args), + ); struct dummy_st_ops *skel; int fd, err; size_t i; @@ -91,8 +112,7 @@ static void test_dummy_multiple_args(void) return; fd = bpf_program__fd(skel->progs.test_2); - attr.prog_fd = fd; - err = bpf_prog_test_run_xattr(&attr); + err = bpf_prog_test_run_opts(fd, &attr); ASSERT_OK(err, "test_run"); for (i = 0; i < ARRAY_SIZE(args); i++) { snprintf(name, sizeof(name), "arg %zu", i); diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c new file mode 100644 index 000000000000..3c7aa82b98e2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Facebook */ + +#include <test_progs.h> +#include "dynptr_fail.skel.h" +#include "dynptr_success.skel.h" + +static size_t log_buf_sz = 1048576; /* 1 MB */ +static char obj_log_buf[1048576]; + +static struct { + const char *prog_name; + const char *expected_err_msg; +} dynptr_tests[] = { + /* failure cases */ + {"ringbuf_missing_release1", "Unreleased reference id=1"}, + 
{"ringbuf_missing_release2", "Unreleased reference id=2"}, + {"ringbuf_missing_release_callback", "Unreleased reference id"}, + {"use_after_invalid", "Expected an initialized dynptr as arg #3"}, + {"ringbuf_invalid_api", "type=mem expected=alloc_mem"}, + {"add_dynptr_to_map1", "invalid indirect read from stack"}, + {"add_dynptr_to_map2", "invalid indirect read from stack"}, + {"data_slice_out_of_bounds_ringbuf", "value is outside of the allowed memory range"}, + {"data_slice_out_of_bounds_map_value", "value is outside of the allowed memory range"}, + {"data_slice_use_after_release", "invalid mem access 'scalar'"}, + {"data_slice_missing_null_check1", "invalid mem access 'mem_or_null'"}, + {"data_slice_missing_null_check2", "invalid mem access 'mem_or_null'"}, + {"invalid_helper1", "invalid indirect read from stack"}, + {"invalid_helper2", "Expected an initialized dynptr as arg #3"}, + {"invalid_write1", "Expected an initialized dynptr as arg #1"}, + {"invalid_write2", "Expected an initialized dynptr as arg #3"}, + {"invalid_write3", "Expected an initialized ringbuf dynptr as arg #1"}, + {"invalid_write4", "arg 1 is an unacquired reference"}, + {"invalid_read1", "invalid read from stack"}, + {"invalid_read2", "cannot pass in dynptr at an offset"}, + {"invalid_read3", "invalid read from stack"}, + {"invalid_read4", "invalid read from stack"}, + {"invalid_offset", "invalid write to stack"}, + {"global", "type=map_value expected=fp"}, + {"release_twice", "arg 1 is an unacquired reference"}, + {"release_twice_callback", "arg 1 is an unacquired reference"}, + {"dynptr_from_mem_invalid_api", + "Unsupported reg type fp for bpf_dynptr_from_mem data"}, + + /* success cases */ + {"test_read_write", NULL}, + {"test_data_slice", NULL}, + {"test_ringbuf", NULL}, +}; + +static void verify_fail(const char *prog_name, const char *expected_err_msg) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + struct bpf_program *prog; + struct dynptr_fail *skel; + int err; + + opts.kernel_log_buf = obj_log_buf; + opts.kernel_log_size = log_buf_sz; + opts.kernel_log_level = 1; + + skel = dynptr_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "dynptr_fail__open_opts")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + bpf_program__set_autoload(prog, true); + + bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize()); + + err = dynptr_fail__load(skel); + if (!ASSERT_ERR(err, "unexpected load success")) + goto cleanup; + + if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) { + fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg); + fprintf(stderr, "Verifier output: %s\n", obj_log_buf); + } + +cleanup: + dynptr_fail__destroy(skel); +} + +static void verify_success(const char *prog_name) +{ + struct dynptr_success *skel; + struct bpf_program *prog; + struct bpf_link *link; + + skel = dynptr_success__open(); + if (!ASSERT_OK_PTR(skel, "dynptr_success__open")) + return; + + skel->bss->pid = getpid(); + + bpf_map__set_max_entries(skel->maps.ringbuf, getpagesize()); + + dynptr_success__load(skel); + if (!ASSERT_OK_PTR(skel, "dynptr_success__load")) + goto cleanup; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto cleanup; + + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "bpf_program__attach")) + goto cleanup; + + usleep(1); + + ASSERT_EQ(skel->bss->err, 0, "err"); + + 
bpf_link__destroy(link); + +cleanup: + dynptr_success__destroy(skel); +} + +void test_dynptr(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dynptr_tests); i++) { + if (!test__start_subtest(dynptr_tests[i].prog_name)) + continue; + + if (dynptr_tests[i].expected_err_msg) + verify_fail(dynptr_tests[i].prog_name, + dynptr_tests[i].expected_err_msg); + else + verify_success(dynptr_tests[i].prog_name); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c index 4374ac8a8a91..130f5b82d2e6 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c @@ -9,38 +9,34 @@ void test_fentry_fexit(void) struct fentry_test_lskel *fentry_skel = NULL; struct fexit_test_lskel *fexit_skel = NULL; __u64 *fentry_res, *fexit_res; - __u32 duration = 0, retval; int err, prog_fd, i; + LIBBPF_OPTS(bpf_test_run_opts, topts); fentry_skel = fentry_test_lskel__open_and_load(); - if (CHECK(!fentry_skel, "fentry_skel_load", "fentry skeleton failed\n")) + if (!ASSERT_OK_PTR(fentry_skel, "fentry_skel_load")) goto close_prog; fexit_skel = fexit_test_lskel__open_and_load(); - if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n")) + if (!ASSERT_OK_PTR(fexit_skel, "fexit_skel_load")) goto close_prog; err = fentry_test_lskel__attach(fentry_skel); - if (CHECK(err, "fentry_attach", "fentry attach failed: %d\n", err)) + if (!ASSERT_OK(err, "fentry_attach")) goto close_prog; err = fexit_test_lskel__attach(fexit_skel); - if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err)) + if (!ASSERT_OK(err, "fexit_attach")) goto close_prog; prog_fd = fexit_skel->progs.test1.prog_fd; - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); - CHECK(err || retval, "ipv6", - "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "ipv6 test_run"); + ASSERT_OK(topts.retval, "ipv6 test retval"); fentry_res = (__u64 *)fentry_skel->bss; fexit_res = (__u64 *)fexit_skel->bss; printf("%lld\n", fentry_skel->bss->test1_result); for (i = 0; i < 8; i++) { - CHECK(fentry_res[i] != 1, "result", - "fentry_test%d failed err %lld\n", i + 1, fentry_res[i]); - CHECK(fexit_res[i] != 1, "result", - "fexit_test%d failed err %lld\n", i + 1, fexit_res[i]); + ASSERT_EQ(fentry_res[i], 1, "fentry result"); + ASSERT_EQ(fexit_res[i], 1, "fexit result"); } close_prog: diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c index 12921b3850d2..c0d1d61d5f66 100644 --- a/tools/testing/selftests/bpf/prog_tests/fentry_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c @@ -6,9 +6,9 @@ static int fentry_test(struct fentry_test_lskel *fentry_skel) { int err, prog_fd, i; - __u32 duration = 0, retval; int link_fd; __u64 *result; + LIBBPF_OPTS(bpf_test_run_opts, topts); err = fentry_test_lskel__attach(fentry_skel); if (!ASSERT_OK(err, "fentry_attach")) @@ -20,10 +20,9 @@ static int fentry_test(struct fentry_test_lskel *fentry_skel) return -1; prog_fd = fentry_skel->progs.test1.prog_fd; - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); + err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "test_run"); - ASSERT_EQ(retval, 0, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); result = (__u64 *)fentry_skel->bss; for (i = 0; i < sizeof(*fentry_skel->bss) / sizeof(__u64); i++) { diff --git 
a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c index c52f99f6a909..02bb8cbf9194 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -58,12 +58,17 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, test_cb cb) { struct bpf_object *obj = NULL, *tgt_obj; - __u32 retval, tgt_prog_id, info_len; + __u32 tgt_prog_id, info_len; struct bpf_prog_info prog_info = {}; struct bpf_program **prog = NULL, *p; struct bpf_link **link = NULL; int err, tgt_fd, i; struct btf *btf; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v6, + .data_size_in = sizeof(pkt_v6), + .repeat = 1, + ); err = bpf_prog_test_load(target_obj_file, BPF_PROG_TYPE_UNSPEC, &tgt_obj, &tgt_fd); @@ -132,7 +137,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, &link_info, &info_len); ASSERT_OK(err, "link_fd_get_info"); ASSERT_EQ(link_info.tracing.attach_type, - bpf_program__get_expected_attach_type(prog[i]), + bpf_program__expected_attach_type(prog[i]), "link_attach_type"); ASSERT_EQ(link_info.tracing.target_obj_id, tgt_prog_id, "link_tgt_obj_id"); ASSERT_EQ(link_info.tracing.target_btf_id, btf_id, "link_tgt_btf_id"); @@ -147,10 +152,9 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, if (!run_prog) goto close_prog; - err = bpf_prog_test_run(tgt_fd, 1, &pkt_v6, sizeof(pkt_v6), - NULL, NULL, &retval, NULL); + err = bpf_prog_test_run_opts(tgt_fd, &topts); ASSERT_OK(err, "prog_run"); - ASSERT_EQ(retval, 0, "prog_run_ret"); + ASSERT_EQ(topts.retval, 0, "prog_run_ret"); if (check_data_map(obj, prog_cnt, false)) goto close_prog; @@ -225,29 +229,31 @@ static int test_second_attach(struct bpf_object *obj) const char *tgt_obj_file = "./test_pkt_access.o"; struct bpf_program *prog = NULL; struct bpf_object *tgt_obj; - __u32 duration = 0, retval; struct bpf_link *link; int err = 0, tgt_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v6, + .data_size_in = sizeof(pkt_v6), + .repeat = 1, + ); prog = bpf_object__find_program_by_name(obj, prog_name); - if (CHECK(!prog, "find_prog", "prog %s not found\n", prog_name)) + if (!ASSERT_OK_PTR(prog, "find_prog")) return -ENOENT; err = bpf_prog_test_load(tgt_obj_file, BPF_PROG_TYPE_UNSPEC, &tgt_obj, &tgt_fd); - if (CHECK(err, "second_prog_load", "file %s err %d errno %d\n", - tgt_obj_file, err, errno)) + if (!ASSERT_OK(err, "second_prog_load")) return err; link = bpf_program__attach_freplace(prog, tgt_fd, tgt_name); if (!ASSERT_OK_PTR(link, "second_link")) goto out; - err = bpf_prog_test_run(tgt_fd, 1, &pkt_v6, sizeof(pkt_v6), - NULL, NULL, &retval, &duration); - if (CHECK(err || retval, "ipv6", - "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration)) + err = bpf_prog_test_run_opts(tgt_fd, &topts); + if (!ASSERT_OK(err, "ipv6 test_run")) + goto out; + if (!ASSERT_OK(topts.retval, "ipv6 retval")) goto out; err = check_data_map(obj, 1, true); @@ -389,6 +395,18 @@ static void test_func_map_prog_compatibility(void) "./test_attach_probe.o"); } +static void test_func_replace_global_func(void) +{ + const char *prog_name[] = { + "freplace/test_pkt_access", + }; + + test_fexit_bpf2bpf_common("./freplace_global_func.o", + "./test_pkt_access.o", + ARRAY_SIZE(prog_name), + prog_name, false, NULL); +} + /* NOTE: affect other tests, must run in serial mode */ void serial_test_fexit_bpf2bpf(void) { @@ -410,4 +428,6 @@ void serial_test_fexit_bpf2bpf(void) test_func_replace_multi(); if 
(test__start_subtest("fmod_ret_freplace")) test_fmod_ret_freplace(); + if (test__start_subtest("func_replace_global_func")) + test_func_replace_global_func(); } diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c index e4cede6b4b2d..a7e74297f15f 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c @@ -5,14 +5,12 @@ /* that's kernel internal BPF_MAX_TRAMP_PROGS define */ #define CNT 38 -void test_fexit_stress(void) +void serial_test_fexit_stress(void) { char test_skb[128] = {}; int fexit_fd[CNT] = {}; int link_fd[CNT] = {}; - __u32 duration = 0; char error[4096]; - __u32 prog_ret; int err, i, filter_fd; const struct bpf_insn trace_program[] = { @@ -36,9 +34,15 @@ void test_fexit_stress(void) .log_size = sizeof(error), ); + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = test_skb, + .data_size_in = sizeof(test_skb), + .repeat = 1, + ); + err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1", trace_opts.expected_attach_type); - if (CHECK(err <= 0, "find_vmlinux_btf_id", "failed: %d\n", err)) + if (!ASSERT_GT(err, 0, "find_vmlinux_btf_id")) goto out; trace_opts.attach_btf_id = err; @@ -47,24 +51,20 @@ void test_fexit_stress(void) trace_program, sizeof(trace_program) / sizeof(struct bpf_insn), &trace_opts); - if (CHECK(fexit_fd[i] < 0, "fexit loaded", - "failed: %d errno %d\n", fexit_fd[i], errno)) + if (!ASSERT_GE(fexit_fd[i], 0, "fexit load")) goto out; - link_fd[i] = bpf_raw_tracepoint_open(NULL, fexit_fd[i]); - if (CHECK(link_fd[i] < 0, "fexit attach failed", - "prog %d failed: %d err %d\n", i, link_fd[i], errno)) + link_fd[i] = bpf_link_create(fexit_fd[i], 0, BPF_TRACE_FEXIT, NULL); + if (!ASSERT_GE(link_fd[i], 0, "fexit attach")) goto out; } filter_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", skb_program, sizeof(skb_program) / sizeof(struct bpf_insn), &skb_opts); - if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n", - filter_fd, errno)) + if (!ASSERT_GE(filter_fd, 0, "test_program_loaded")) goto out; - err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0, - 0, &prog_ret, 0); + err = bpf_prog_test_run_opts(filter_fd, &topts); close(filter_fd); CHECK_FAIL(err); out: diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c index d4887d8bb396..101b7343036b 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_test.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c @@ -6,9 +6,9 @@ static int fexit_test(struct fexit_test_lskel *fexit_skel) { int err, prog_fd, i; - __u32 duration = 0, retval; int link_fd; __u64 *result; + LIBBPF_OPTS(bpf_test_run_opts, topts); err = fexit_test_lskel__attach(fexit_skel); if (!ASSERT_OK(err, "fexit_attach")) @@ -20,10 +20,9 @@ static int fexit_test(struct fexit_test_lskel *fexit_skel) return -1; prog_fd = fexit_skel->progs.test1.prog_fd; - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); + err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "test_run"); - ASSERT_EQ(retval, 0, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); result = (__u64 *)fexit_skel->bss; for (i = 0; i < sizeof(*fexit_skel->bss) / sizeof(__u64); i++) { diff --git a/tools/testing/selftests/bpf/prog_tests/find_vma.c b/tools/testing/selftests/bpf/prog_tests/find_vma.c index b74b3c0c555a..5165b38f0e59 100644 --- a/tools/testing/selftests/bpf/prog_tests/find_vma.c +++ 
b/tools/testing/selftests/bpf/prog_tests/find_vma.c @@ -7,12 +7,14 @@ #include "find_vma_fail1.skel.h" #include "find_vma_fail2.skel.h" -static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret) +static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret, bool need_test) { - ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec"); - ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret"); - ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret"); - ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs"); + if (need_test) { + ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec"); + ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret"); + ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret"); + ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs"); + } skel->bss->found_vm_exec = 0; skel->data->find_addr_ret = -1; @@ -30,17 +32,26 @@ static int open_pe(void) attr.type = PERF_TYPE_HARDWARE; attr.config = PERF_COUNT_HW_CPU_CYCLES; attr.freq = 1; - attr.sample_freq = 4000; + attr.sample_freq = 1000; pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC); return pfd >= 0 ? pfd : -errno; } +static bool find_vma_pe_condition(struct find_vma *skel) +{ + return skel->bss->found_vm_exec == 0 || + skel->data->find_addr_ret != 0 || + skel->data->find_zero_ret == -1 || + strcmp(skel->bss->d_iname, "test_progs") != 0; +} + static void test_find_vma_pe(struct find_vma *skel) { struct bpf_link *link = NULL; volatile int j = 0; int pfd, i; + const int one_bn = 1000000000; pfd = open_pe(); if (pfd < 0) { @@ -57,10 +68,10 @@ static void test_find_vma_pe(struct find_vma *skel) if (!ASSERT_OK_PTR(link, "attach_perf_event")) goto cleanup; - for (i = 0; i < 1000000; ++i) + for (i = 0; i < one_bn && find_vma_pe_condition(skel); ++i) ++j; - test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */); + test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */, i == one_bn); cleanup: bpf_link__destroy(link); close(pfd); @@ -75,7 +86,7 @@ static void test_find_vma_kprobe(struct find_vma *skel) return; getpgid(skel->bss->target_pid); - test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */); + test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */, true); } static void test_illegal_write_vma(void) @@ -108,7 +119,6 @@ void serial_test_find_vma(void) skel->bss->addr = (__u64)(uintptr_t)test_find_vma_pe; test_find_vma_pe(skel); - usleep(100000); /* allow the irq_work to finish */ test_find_vma_kprobe(skel); find_vma__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index ac54e3f91d42..0c1661ea996e 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -13,8 +13,9 @@ #endif #define CHECK_FLOW_KEYS(desc, got, expected) \ - CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \ + _CHECK(memcmp(&got, &expected, sizeof(got)) != 0, \ desc, \ + topts.duration, \ "nhoff=%u/%u " \ "thoff=%u/%u " \ "addr_proto=0x%x/0x%x " \ @@ -457,7 +458,7 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array) if (map_fd < 0) return -1; - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i); prog = 
bpf_object__find_program_by_name(obj, prog_name); @@ -487,7 +488,7 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys) /* Keep in sync with 'flags' from eth_get_headlen. */ __u32 eth_get_headlen_flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG; - struct bpf_prog_test_run_attr tattr = {}; + LIBBPF_OPTS(bpf_test_run_opts, topts); struct bpf_flow_keys flow_keys = {}; __u32 key = (__u32)(tests[i].keys.sport) << 16 | tests[i].keys.dport; @@ -503,13 +504,12 @@ static void run_tests_skb_less(int tap_fd, struct bpf_map *keys) CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno); err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys); - CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err); + ASSERT_OK(err, "bpf_map_lookup_elem"); - CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err); CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); err = bpf_map_delete_elem(keys_fd, &key); - CHECK_ATTR(err, tests[i].name, "bpf_map_delete_elem %d\n", err); + ASSERT_OK(err, "bpf_map_delete_elem"); } } @@ -573,27 +573,24 @@ void test_flow_dissector(void) for (i = 0; i < ARRAY_SIZE(tests); i++) { struct bpf_flow_keys flow_keys; - struct bpf_prog_test_run_attr tattr = { - .prog_fd = prog_fd, + LIBBPF_OPTS(bpf_test_run_opts, topts, .data_in = &tests[i].pkt, .data_size_in = sizeof(tests[i].pkt), .data_out = &flow_keys, - }; + ); static struct bpf_flow_keys ctx = {}; if (tests[i].flags) { - tattr.ctx_in = &ctx; - tattr.ctx_size_in = sizeof(ctx); + topts.ctx_in = &ctx; + topts.ctx_size_in = sizeof(ctx); ctx.flags = tests[i].flags; } - err = bpf_prog_test_run_xattr(&tattr); - CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) || - err || tattr.retval != 1, - tests[i].name, - "err %d errno %d retval %d duration %d size %u/%zu\n", - err, errno, tattr.retval, tattr.duration, - tattr.data_size_out, sizeof(flow_keys)); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 1, "test_run retval"); + ASSERT_EQ(topts.data_size_out, sizeof(flow_keys), + "test_run data_size_out"); CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); } diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c index 93ac3f28226c..36afb409c25f 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c @@ -5,7 +5,6 @@ void serial_test_flow_dissector_load_bytes(void) { struct bpf_flow_keys flow_keys; - __u32 duration = 0, retval, size; struct bpf_insn prog[] = { // BPF_REG_1 - 1st argument: context // BPF_REG_2 - 2nd argument: offset, start at first byte @@ -27,22 +26,25 @@ void serial_test_flow_dissector_load_bytes(void) BPF_EXIT_INSN(), }; int fd, err; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = &flow_keys, + .data_size_out = sizeof(flow_keys), + .repeat = 1, + ); /* make sure bpf_skb_load_bytes is not allowed from skb-less context */ fd = bpf_test_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0); - CHECK(fd < 0, - "flow_dissector-bpf_skb_load_bytes-load", - "fd %d errno %d\n", - fd, errno); + ASSERT_GE(fd, 0, "bpf_test_load_program good fd"); - err = bpf_prog_test_run(fd, 1, &pkt_v4, sizeof(pkt_v4), - &flow_keys, &size, &retval, &duration); - CHECK(size != sizeof(flow_keys) || err || retval != 1, - "flow_dissector-bpf_skb_load_bytes", - "err %d errno %d retval %d duration %d 
size %u/%zu\n", - err, errno, retval, duration, size, sizeof(flow_keys)); + err = bpf_prog_test_run_opts(fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.data_size_out, sizeof(flow_keys), + "test_run data_size_out"); + ASSERT_EQ(topts.retval, 1, "test_run retval"); if (fd >= -1) close(fd); diff --git a/tools/testing/selftests/bpf/prog_tests/for_each.c b/tools/testing/selftests/bpf/prog_tests/for_each.c index 68eb12a287d4..8963f8a549f2 100644 --- a/tools/testing/selftests/bpf/prog_tests/for_each.c +++ b/tools/testing/selftests/bpf/prog_tests/for_each.c @@ -4,56 +4,63 @@ #include <network_helpers.h> #include "for_each_hash_map_elem.skel.h" #include "for_each_array_map_elem.skel.h" +#include "for_each_map_elem_write_key.skel.h" static unsigned int duration; static void test_hash_map(void) { - int i, err, hashmap_fd, max_entries, percpu_map_fd; + int i, err, max_entries; struct for_each_hash_map_elem *skel; __u64 *percpu_valbuf = NULL; - __u32 key, num_cpus, retval; + size_t percpu_val_sz; + __u32 key, num_cpus; __u64 val; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); skel = for_each_hash_map_elem__open_and_load(); if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load")) return; - hashmap_fd = bpf_map__fd(skel->maps.hashmap); max_entries = bpf_map__max_entries(skel->maps.hashmap); for (i = 0; i < max_entries; i++) { key = i; val = i + 1; - err = bpf_map_update_elem(hashmap_fd, &key, &val, BPF_ANY); + err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key), + &val, sizeof(val), BPF_ANY); if (!ASSERT_OK(err, "map_update")) goto out; } num_cpus = bpf_num_possible_cpus(); - percpu_map_fd = bpf_map__fd(skel->maps.percpu_map); - percpu_valbuf = malloc(sizeof(__u64) * num_cpus); + percpu_val_sz = sizeof(__u64) * num_cpus; + percpu_valbuf = malloc(percpu_val_sz); if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf")) goto out; key = 1; for (i = 0; i < num_cpus; i++) percpu_valbuf[i] = i + 1; - err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY); + err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key), + percpu_valbuf, percpu_val_sz, BPF_ANY); if (!ASSERT_OK(err, "percpu_map_update")) goto out; - err = bpf_prog_test_run(bpf_program__fd(skel->progs.test_pkt_access), - 1, &pkt_v4, sizeof(pkt_v4), NULL, NULL, - &retval, &duration); - if (CHECK(err || retval, "ipv4", "err %d errno %d retval %d\n", - err, errno, retval)) + err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts); + duration = topts.duration; + if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n", + err, errno, topts.retval)) goto out; ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output"); ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems"); key = 1; - err = bpf_map_lookup_elem(hashmap_fd, &key, &val); + err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0); ASSERT_ERR(err, "hashmap_lookup"); ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called"); @@ -69,17 +76,22 @@ out: static void test_array_map(void) { - __u32 key, num_cpus, max_entries, retval; - int i, arraymap_fd, percpu_map_fd, err; + __u32 key, num_cpus, max_entries; + int i, err; struct for_each_array_map_elem *skel; __u64 *percpu_valbuf = NULL; + size_t percpu_val_sz; __u64 val, expected_total; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); skel = for_each_array_map_elem__open_and_load(); if 
(!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load")) return; - arraymap_fd = bpf_map__fd(skel->maps.arraymap); expected_total = 0; max_entries = bpf_map__max_entries(skel->maps.arraymap); for (i = 0; i < max_entries; i++) { @@ -88,29 +100,30 @@ static void test_array_map(void) /* skip the last iteration for expected total */ if (i != max_entries - 1) expected_total += val; - err = bpf_map_update_elem(arraymap_fd, &key, &val, BPF_ANY); + err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key), + &val, sizeof(val), BPF_ANY); if (!ASSERT_OK(err, "map_update")) goto out; } num_cpus = bpf_num_possible_cpus(); - percpu_map_fd = bpf_map__fd(skel->maps.percpu_map); - percpu_valbuf = malloc(sizeof(__u64) * num_cpus); + percpu_val_sz = sizeof(__u64) * num_cpus; + percpu_valbuf = malloc(percpu_val_sz); if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf")) goto out; key = 0; for (i = 0; i < num_cpus; i++) percpu_valbuf[i] = i + 1; - err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY); + err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key), + percpu_valbuf, percpu_val_sz, BPF_ANY); if (!ASSERT_OK(err, "percpu_map_update")) goto out; - err = bpf_prog_test_run(bpf_program__fd(skel->progs.test_pkt_access), - 1, &pkt_v4, sizeof(pkt_v4), NULL, NULL, - &retval, &duration); - if (CHECK(err || retval, "ipv4", "err %d errno %d retval %d\n", - err, errno, retval)) + err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts); + duration = topts.duration; + if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n", + err, errno, topts.retval)) goto out; ASSERT_EQ(skel->bss->arraymap_output, expected_total, "array_output"); @@ -121,10 +134,21 @@ out: for_each_array_map_elem__destroy(skel); } +static void test_write_map_key(void) +{ + struct for_each_map_elem_write_key *skel; + + skel = for_each_map_elem_write_key__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "for_each_map_elem_write_key__open_and_load")) + for_each_map_elem_write_key__destroy(skel); +} + void test_for_each(void) { if (test__start_subtest("hash_map")) test_hash_map(); if (test__start_subtest("array_map")) test_array_map(); + if (test__start_subtest("write_map_key")) + test_write_map_key(); } diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c index 85c427119fe9..28cf63963cb7 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c +++ b/tools/testing/selftests/bpf/prog_tests/get_func_args_test.c @@ -5,8 +5,8 @@ void test_get_func_args_test(void) { struct get_func_args_test *skel = NULL; - __u32 duration = 0, retval; int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts); skel = get_func_args_test__open_and_load(); if (!ASSERT_OK_PTR(skel, "get_func_args_test__open_and_load")) @@ -20,19 +20,17 @@ void test_get_func_args_test(void) * fentry/fexit programs. */ prog_fd = bpf_program__fd(skel->progs.test1); - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); + err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "test_run"); - ASSERT_EQ(retval, 0, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); /* This runs bpf_modify_return_test function and triggers * fmod_ret_test and fexit_test programs. 
*/ prog_fd = bpf_program__fd(skel->progs.fmod_ret_test); - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); + err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "test_run"); - ASSERT_EQ(retval, 1234, "test_run"); + ASSERT_EQ(topts.retval, 1234, "test_run"); ASSERT_EQ(skel->bss->test1_result, 1, "test1_result"); ASSERT_EQ(skel->bss->test2_result, 1, "test2_result"); diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c index 02a465f36d59..938dbd4d7c2f 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c +++ b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c @@ -5,8 +5,8 @@ void test_get_func_ip_test(void) { struct get_func_ip_test *skel = NULL; - __u32 duration = 0, retval; int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts); skel = get_func_ip_test__open(); if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open")) @@ -29,14 +29,12 @@ void test_get_func_ip_test(void) goto cleanup; prog_fd = bpf_program__fd(skel->progs.test1); - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); + err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "test_run"); - ASSERT_EQ(retval, 0, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); prog_fd = bpf_program__fd(skel->progs.test5); - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); + err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "test_run"); diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c index e834a01de16a..16048978a1ef 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c +++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c @@ -29,11 +29,8 @@ static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size) */ struct get_stack_trace_t e; int i, num_stack; - static __u64 cnt; struct ksym *ks; - cnt++; - memset(&e, 0, sizeof(e)); memcpy(&e, data, size <= sizeof(e) ? 
size : sizeof(e)); diff --git a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c index 8d5a6023a1bb..5308de1ed478 100644 --- a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c @@ -27,7 +27,7 @@ void test_get_stackid_cannot_attach(void) return; /* override program type */ - bpf_program__set_perf_event(skel->progs.oncpu); + bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT); err = test_stacktrace_build_id__load(skel); if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err)) diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c index 9da131b32e13..027685858925 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data.c @@ -29,7 +29,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration) { "relocate .rodata reference", 10, ~0 }, }; - for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + for (i = 0; i < ARRAY_SIZE(tests); i++) { err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num); CHECK(err || num != tests[i].num, tests[i].name, "err %d result %llx expected %llx\n", @@ -58,7 +58,7 @@ static void test_global_data_string(struct bpf_object *obj, __u32 duration) { "relocate .bss reference", 4, "\0\0hello" }, }; - for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + for (i = 0; i < ARRAY_SIZE(tests); i++) { err = bpf_map_lookup_elem(map_fd, &tests[i].key, str); CHECK(err || memcmp(str, tests[i].str, sizeof(str)), tests[i].name, "err %d result \'%s\' expected \'%s\'\n", @@ -92,7 +92,7 @@ static void test_global_data_struct(struct bpf_object *obj, __u32 duration) { "relocate .data reference", 3, { 41, 0xeeeeefef, 0x2111111111111111ULL, } }, }; - for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + for (i = 0; i < ARRAY_SIZE(tests); i++) { err = bpf_map_lookup_elem(map_fd, &tests[i].key, &val); CHECK(err || memcmp(&val, &tests[i].val, sizeof(val)), tests[i].name, "err %d result { %u, %u, %llu } expected { %u, %u, %llu }\n", @@ -121,7 +121,7 @@ static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration) if (CHECK_FAIL(map_fd < 0)) return; - buff = malloc(bpf_map__def(map)->value_size); + buff = malloc(bpf_map__value_size(map)); if (buff) err = bpf_map_update_elem(map_fd, &zero, buff, 0); free(buff); @@ -132,24 +132,26 @@ static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration) void test_global_data(void) { const char *file = "./test_global_data.o"; - __u32 duration = 0, retval; struct bpf_object *obj; int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); - if (CHECK(err, "load program", "error %d loading %s\n", err, file)) + if (!ASSERT_OK(err, "load program")) return; - err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), - NULL, NULL, &retval, &duration); - CHECK(err || retval, "pass global data run", - "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "pass global data run err"); + ASSERT_OK(topts.retval, "pass global data run retval"); - test_global_data_number(obj, duration); - test_global_data_string(obj, duration); - 
test_global_data_struct(obj, duration); - test_global_data_rdonly(obj, duration); + test_global_data_number(obj, topts.duration); + test_global_data_string(obj, topts.duration); + test_global_data_struct(obj, topts.duration); + test_global_data_rdonly(obj, topts.duration); bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/global_data_init.c b/tools/testing/selftests/bpf/prog_tests/global_data_init.c index 1db86eab101b..57331c606964 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data_init.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data_init.c @@ -20,7 +20,7 @@ void test_global_data_init(void) if (CHECK_FAIL(!map || !bpf_map__is_internal(map))) goto out; - sz = bpf_map__def(map)->value_size; + sz = bpf_map__value_size(map); newval = malloc(sz); if (CHECK_FAIL(!newval)) goto out; diff --git a/tools/testing/selftests/bpf/prog_tests/global_func_args.c b/tools/testing/selftests/bpf/prog_tests/global_func_args.c index 93a2439237b0..29039a36cce5 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_func_args.c +++ b/tools/testing/selftests/bpf/prog_tests/global_func_args.c @@ -40,19 +40,21 @@ static void test_global_func_args0(struct bpf_object *obj) void test_global_func_args(void) { const char *file = "./test_global_func_args.o"; - __u32 retval; struct bpf_object *obj; int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); if (CHECK(err, "load program", "error %d loading %s\n", err, file)) return; - err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), - NULL, NULL, &retval, &duration); - CHECK(err || retval, "pass global func args run", - "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_OK(topts.retval, "test_run retval"); test_global_func_args0(obj); diff --git a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c index e1de5f80c3b2..0354f9b82c65 100644 --- a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c +++ b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c @@ -6,11 +6,10 @@ void test_helper_restricted(void) { int prog_i = 0, prog_cnt; - int duration = 0; do { struct test_helper_restricted *test; - int maybeOK; + int err; test = test_helper_restricted__open(); if (!ASSERT_OK_PTR(test, "open")) @@ -21,12 +20,11 @@ void test_helper_restricted(void) for (int j = 0; j < prog_cnt; ++j) { struct bpf_program *prog = *test->skeleton->progs[j].prog; - maybeOK = bpf_program__set_autoload(prog, prog_i == j); - ASSERT_OK(maybeOK, "set autoload"); + bpf_program__set_autoload(prog, true); } - maybeOK = test_helper_restricted__load(test); - CHECK(!maybeOK, test->skeleton->progs[prog_i].name, "helper isn't restricted"); + err = test_helper_restricted__load(test); + ASSERT_ERR(err, "load_should_fail"); test_helper_restricted__destroy(test); } while (++prog_i < prog_cnt); diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index ce10d2fc3a6c..1cee6957285e 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -53,24 +53,24 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size) void serial_test_kfree_skb(void) { struct __sk_buff skb = {}; - struct 
bpf_prog_test_run_attr tattr = { + LIBBPF_OPTS(bpf_test_run_opts, topts, .data_in = &pkt_v6, .data_size_in = sizeof(pkt_v6), .ctx_in = &skb, .ctx_size_in = sizeof(skb), - }; + ); struct kfree_skb *skel = NULL; struct bpf_link *link; struct bpf_object *obj; struct perf_buffer *pb = NULL; - int err; + int err, prog_fd; bool passed = false; __u32 duration = 0; const int zero = 0; bool test_ok[2]; err = bpf_prog_test_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, - &obj, &tattr.prog_fd); + &obj, &prog_fd); if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) return; @@ -100,11 +100,9 @@ void serial_test_kfree_skb(void) goto close_prog; memcpy(skb.cb, &cb, sizeof(cb)); - err = bpf_prog_test_run_xattr(&tattr); - duration = tattr.duration; - CHECK(err || tattr.retval, "ipv6", - "err %d errno %d retval %d duration %d\n", - err, errno, tattr.retval, duration); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "ipv6 test_run"); + ASSERT_OK(topts.retval, "ipv6 test_run retval"); /* read perf buffer */ err = perf_buffer__poll(pb, 100); diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index 7d7445ccc141..c00eb974eb85 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -9,23 +9,31 @@ static void test_main(void) { struct kfunc_call_test_lskel *skel; - int prog_fd, retval, err; + int prog_fd, err; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); skel = kfunc_call_test_lskel__open_and_load(); if (!ASSERT_OK_PTR(skel, "skel")) return; prog_fd = skel->progs.kfunc_call_test1.prog_fd; - err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), - NULL, NULL, (__u32 *)&retval, NULL); + err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "bpf_prog_test_run(test1)"); - ASSERT_EQ(retval, 12, "test1-retval"); + ASSERT_EQ(topts.retval, 12, "test1-retval"); prog_fd = skel->progs.kfunc_call_test2.prog_fd; - err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), - NULL, NULL, (__u32 *)&retval, NULL); + err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "bpf_prog_test_run(test2)"); - ASSERT_EQ(retval, 3, "test2-retval"); + ASSERT_EQ(topts.retval, 3, "test2-retval"); + + prog_fd = skel->progs.kfunc_call_test_ref_btf_id.prog_fd; + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "bpf_prog_test_run(test_ref_btf_id)"); + ASSERT_EQ(topts.retval, 0, "test_ref_btf_id-retval"); kfunc_call_test_lskel__destroy(skel); } @@ -33,17 +41,21 @@ static void test_main(void) static void test_subprog(void) { struct kfunc_call_test_subprog *skel; - int prog_fd, retval, err; + int prog_fd, err; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); skel = kfunc_call_test_subprog__open_and_load(); if (!ASSERT_OK_PTR(skel, "skel")) return; prog_fd = bpf_program__fd(skel->progs.kfunc_call_test1); - err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), - NULL, NULL, (__u32 *)&retval, NULL); + err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "bpf_prog_test_run(test1)"); - ASSERT_EQ(retval, 10, "test1-retval"); + ASSERT_EQ(topts.retval, 10, "test1-retval"); ASSERT_NEQ(skel->data->active_res, -1, "active_res"); ASSERT_EQ(skel->data->sk_state_res, BPF_TCP_CLOSE, "sk_state_res"); @@ -53,17 +65,21 @@ static void test_subprog(void) static void test_subprog_lskel(void) { struct 
kfunc_call_test_subprog_lskel *skel; - int prog_fd, retval, err; + int prog_fd, err; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); skel = kfunc_call_test_subprog_lskel__open_and_load(); if (!ASSERT_OK_PTR(skel, "skel")) return; prog_fd = skel->progs.kfunc_call_test1.prog_fd; - err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), - NULL, NULL, (__u32 *)&retval, NULL); + err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "bpf_prog_test_run(test1)"); - ASSERT_EQ(retval, 10, "test1-retval"); + ASSERT_EQ(topts.retval, 10, "test1-retval"); ASSERT_NEQ(skel->data->active_res, -1, "active_res"); ASSERT_EQ(skel->data->sk_state_res, BPF_TCP_CLOSE, "sk_state_res"); diff --git a/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c new file mode 100644 index 000000000000..5b93d5d0bd93 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include "kprobe_multi.skel.h" +#include "trace_helpers.h" +#include "kprobe_multi_empty.skel.h" +#include "bpf/libbpf_internal.h" +#include "bpf/hashmap.h" + +static void kprobe_multi_test_run(struct kprobe_multi *skel, bool test_return) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts); + int err, prog_fd; + + prog_fd = bpf_program__fd(skel->progs.trigger); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); + + ASSERT_EQ(skel->bss->kprobe_test1_result, 1, "kprobe_test1_result"); + ASSERT_EQ(skel->bss->kprobe_test2_result, 1, "kprobe_test2_result"); + ASSERT_EQ(skel->bss->kprobe_test3_result, 1, "kprobe_test3_result"); + ASSERT_EQ(skel->bss->kprobe_test4_result, 1, "kprobe_test4_result"); + ASSERT_EQ(skel->bss->kprobe_test5_result, 1, "kprobe_test5_result"); + ASSERT_EQ(skel->bss->kprobe_test6_result, 1, "kprobe_test6_result"); + ASSERT_EQ(skel->bss->kprobe_test7_result, 1, "kprobe_test7_result"); + ASSERT_EQ(skel->bss->kprobe_test8_result, 1, "kprobe_test8_result"); + + if (test_return) { + ASSERT_EQ(skel->bss->kretprobe_test1_result, 1, "kretprobe_test1_result"); + ASSERT_EQ(skel->bss->kretprobe_test2_result, 1, "kretprobe_test2_result"); + ASSERT_EQ(skel->bss->kretprobe_test3_result, 1, "kretprobe_test3_result"); + ASSERT_EQ(skel->bss->kretprobe_test4_result, 1, "kretprobe_test4_result"); + ASSERT_EQ(skel->bss->kretprobe_test5_result, 1, "kretprobe_test5_result"); + ASSERT_EQ(skel->bss->kretprobe_test6_result, 1, "kretprobe_test6_result"); + ASSERT_EQ(skel->bss->kretprobe_test7_result, 1, "kretprobe_test7_result"); + ASSERT_EQ(skel->bss->kretprobe_test8_result, 1, "kretprobe_test8_result"); + } +} + +static void test_skel_api(void) +{ + struct kprobe_multi *skel = NULL; + int err; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "kprobe_multi__open_and_load")) + goto cleanup; + + skel->bss->pid = getpid(); + err = kprobe_multi__attach(skel); + if (!ASSERT_OK(err, "kprobe_multi__attach")) + goto cleanup; + + kprobe_multi_test_run(skel, true); + +cleanup: + kprobe_multi__destroy(skel); +} + +static void test_link_api(struct bpf_link_create_opts *opts) +{ + int prog_fd, link1_fd = -1, link2_fd = -1; + struct kprobe_multi *skel = NULL; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + prog_fd = bpf_program__fd(skel->progs.test_kprobe); + 
link1_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts); + if (!ASSERT_GE(link1_fd, 0, "link_fd")) + goto cleanup; + + opts->kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN; + prog_fd = bpf_program__fd(skel->progs.test_kretprobe); + link2_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, opts); + if (!ASSERT_GE(link2_fd, 0, "link_fd")) + goto cleanup; + + kprobe_multi_test_run(skel, true); + +cleanup: + if (link1_fd != -1) + close(link1_fd); + if (link2_fd != -1) + close(link2_fd); + kprobe_multi__destroy(skel); +} + +#define GET_ADDR(__sym, __addr) ({ \ + __addr = ksym_get_addr(__sym); \ + if (!ASSERT_NEQ(__addr, 0, "kallsyms load failed for " #__sym)) \ + return; \ +}) + +static void test_link_api_addrs(void) +{ + LIBBPF_OPTS(bpf_link_create_opts, opts); + unsigned long long addrs[8]; + + GET_ADDR("bpf_fentry_test1", addrs[0]); + GET_ADDR("bpf_fentry_test2", addrs[1]); + GET_ADDR("bpf_fentry_test3", addrs[2]); + GET_ADDR("bpf_fentry_test4", addrs[3]); + GET_ADDR("bpf_fentry_test5", addrs[4]); + GET_ADDR("bpf_fentry_test6", addrs[5]); + GET_ADDR("bpf_fentry_test7", addrs[6]); + GET_ADDR("bpf_fentry_test8", addrs[7]); + + opts.kprobe_multi.addrs = (const unsigned long*) addrs; + opts.kprobe_multi.cnt = ARRAY_SIZE(addrs); + test_link_api(&opts); +} + +static void test_link_api_syms(void) +{ + LIBBPF_OPTS(bpf_link_create_opts, opts); + const char *syms[8] = { + "bpf_fentry_test1", + "bpf_fentry_test2", + "bpf_fentry_test3", + "bpf_fentry_test4", + "bpf_fentry_test5", + "bpf_fentry_test6", + "bpf_fentry_test7", + "bpf_fentry_test8", + }; + + opts.kprobe_multi.syms = syms; + opts.kprobe_multi.cnt = ARRAY_SIZE(syms); + test_link_api(&opts); +} + +static void +test_attach_api(const char *pattern, struct bpf_kprobe_multi_opts *opts) +{ + struct bpf_link *link1 = NULL, *link2 = NULL; + struct kprobe_multi *skel = NULL; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + link1 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + pattern, opts); + if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts")) + goto cleanup; + + if (opts) { + opts->retprobe = true; + link2 = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kretprobe_manual, + pattern, opts); + if (!ASSERT_OK_PTR(link2, "bpf_program__attach_kprobe_multi_opts")) + goto cleanup; + } + + kprobe_multi_test_run(skel, !!opts); + +cleanup: + bpf_link__destroy(link2); + bpf_link__destroy(link1); + kprobe_multi__destroy(skel); +} + +static void test_attach_api_pattern(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + + test_attach_api("bpf_fentry_test*", &opts); + test_attach_api("bpf_fentry_test?", NULL); +} + +static void test_attach_api_addrs(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + unsigned long long addrs[8]; + + GET_ADDR("bpf_fentry_test1", addrs[0]); + GET_ADDR("bpf_fentry_test2", addrs[1]); + GET_ADDR("bpf_fentry_test3", addrs[2]); + GET_ADDR("bpf_fentry_test4", addrs[3]); + GET_ADDR("bpf_fentry_test5", addrs[4]); + GET_ADDR("bpf_fentry_test6", addrs[5]); + GET_ADDR("bpf_fentry_test7", addrs[6]); + GET_ADDR("bpf_fentry_test8", addrs[7]); + + opts.addrs = (const unsigned long *) addrs; + opts.cnt = ARRAY_SIZE(addrs); + test_attach_api(NULL, &opts); +} + +static void test_attach_api_syms(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + const char *syms[8] = { + "bpf_fentry_test1", + "bpf_fentry_test2", + "bpf_fentry_test3", + "bpf_fentry_test4", + "bpf_fentry_test5", + 
"bpf_fentry_test6", + "bpf_fentry_test7", + "bpf_fentry_test8", + }; + + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + test_attach_api(NULL, &opts); +} + +static void test_attach_api_fails(void) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + struct kprobe_multi *skel = NULL; + struct bpf_link *link = NULL; + unsigned long long addrs[2]; + const char *syms[2] = { + "bpf_fentry_test1", + "bpf_fentry_test2", + }; + __u64 cookies[2]; + + addrs[0] = ksym_get_addr("bpf_fentry_test1"); + addrs[1] = ksym_get_addr("bpf_fentry_test2"); + + if (!ASSERT_FALSE(!addrs[0] || !addrs[1], "ksym_get_addr")) + goto cleanup; + + skel = kprobe_multi__open_and_load(); + if (!ASSERT_OK_PTR(skel, "fentry_raw_skel_load")) + goto cleanup; + + skel->bss->pid = getpid(); + + /* fail_1 - pattern and opts NULL */ + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + NULL, NULL); + if (!ASSERT_ERR_PTR(link, "fail_1")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_1_error")) + goto cleanup; + + /* fail_2 - both addrs and syms set */ + opts.addrs = (const unsigned long *) addrs; + opts.syms = syms; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = NULL; + + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + NULL, &opts); + if (!ASSERT_ERR_PTR(link, "fail_2")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_2_error")) + goto cleanup; + + /* fail_3 - pattern and addrs set */ + opts.addrs = (const unsigned long *) addrs; + opts.syms = NULL; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = NULL; + + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + "ksys_*", &opts); + if (!ASSERT_ERR_PTR(link, "fail_3")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_3_error")) + goto cleanup; + + /* fail_4 - pattern and cnt set */ + opts.addrs = NULL; + opts.syms = NULL; + opts.cnt = ARRAY_SIZE(syms); + opts.cookies = NULL; + + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + "ksys_*", &opts); + if (!ASSERT_ERR_PTR(link, "fail_4")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_4_error")) + goto cleanup; + + /* fail_5 - pattern and cookies */ + opts.addrs = NULL; + opts.syms = NULL; + opts.cnt = 0; + opts.cookies = cookies; + + link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_manual, + "ksys_*", &opts); + if (!ASSERT_ERR_PTR(link, "fail_5")) + goto cleanup; + + if (!ASSERT_EQ(libbpf_get_error(link), -EINVAL, "fail_5_error")) + goto cleanup; + +cleanup: + bpf_link__destroy(link); + kprobe_multi__destroy(skel); +} + +static inline __u64 get_time_ns(void) +{ + struct timespec t; + + clock_gettime(CLOCK_MONOTONIC, &t); + return (__u64) t.tv_sec * 1000000000 + t.tv_nsec; +} + +static size_t symbol_hash(const void *key, void *ctx __maybe_unused) +{ + return str_hash((const char *) key); +} + +static bool symbol_equal(const void *key1, const void *key2, void *ctx __maybe_unused) +{ + return strcmp((const char *) key1, (const char *) key2) == 0; +} + +static int get_syms(char ***symsp, size_t *cntp) +{ + size_t cap = 0, cnt = 0, i; + char *name, **syms = NULL; + struct hashmap *map; + char buf[256]; + FILE *f; + int err; + + /* + * The available_filter_functions contains many duplicates, + * but other than that all symbols are usable in kprobe multi + * interface. + * Filtering out duplicates by using hashmap__add, which won't + * add existing entry. 
+ */
+	f = fopen("/sys/kernel/debug/tracing/available_filter_functions", "r");
+	if (!f)
+		return -EINVAL;
+
+	map = hashmap__new(symbol_hash, symbol_equal, NULL);
+	if (IS_ERR(map)) {
+		err = libbpf_get_error(map);
+		goto error;
+	}
+
+	while (fgets(buf, sizeof(buf), f)) {
+		/* skip modules */
+		if (strchr(buf, '['))
+			continue;
+		if (sscanf(buf, "%ms$*[^\n]\n", &name) != 1)
+			continue;
+		/*
+		 * We attach to almost all kernel functions and some of them
+		 * will cause 'suspicious RCU usage' when fprobe is attached
+		 * to them. Filter out the current culprits - arch_cpu_idle
+		 * and rcu_* functions.
+		 */
+		if (!strcmp(name, "arch_cpu_idle"))
+			continue;
+		if (!strncmp(name, "rcu_", 4))
+			continue;
+		if (!strncmp(name, "__ftrace_invalid_address__",
+			     sizeof("__ftrace_invalid_address__") - 1))
+			continue;
+		err = hashmap__add(map, name, NULL);
+		if (err) {
+			free(name);
+			if (err == -EEXIST)
+				continue;
+			goto error;
+		}
+		err = libbpf_ensure_mem((void **) &syms, &cap,
+					sizeof(*syms), cnt + 1);
+		if (err) {
+			free(name);
+			goto error;
+		}
+		syms[cnt] = name;
+		cnt++;
+	}
+
+	*symsp = syms;
+	*cntp = cnt;
+
+error:
+	fclose(f);
+	hashmap__free(map);
+	if (err) {
+		/* free each collected name, not syms[cnt] (one past the end) */
+		for (i = 0; i < cnt; i++)
+			free(syms[i]);
+		free(syms);
+	}
+	return err;
+}
+
+static void test_bench_attach(void)
+{
+	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+	struct kprobe_multi_empty *skel = NULL;
+	long attach_start_ns, attach_end_ns;
+	long detach_start_ns, detach_end_ns;
+	double attach_delta, detach_delta;
+	struct bpf_link *link = NULL;
+	char **syms = NULL;
+	size_t cnt, i;
+
+	if (!ASSERT_OK(get_syms(&syms, &cnt), "get_syms"))
+		return;
+
+	skel = kprobe_multi_empty__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "kprobe_multi_empty__open_and_load"))
+		goto cleanup;
+
+	opts.syms = (const char **) syms;
+	opts.cnt = cnt;
+
+	attach_start_ns = get_time_ns();
+	link = bpf_program__attach_kprobe_multi_opts(skel->progs.test_kprobe_empty,
+						     NULL, &opts);
+	attach_end_ns = get_time_ns();
+
+	if (!ASSERT_OK_PTR(link, "bpf_program__attach_kprobe_multi_opts"))
+		goto cleanup;
+
+	detach_start_ns = get_time_ns();
+	bpf_link__destroy(link);
+	detach_end_ns = get_time_ns();
+
+	attach_delta = (attach_end_ns - attach_start_ns) / 1000000000.0;
+	detach_delta = (detach_end_ns - detach_start_ns) / 1000000000.0;
+
+	printf("%s: found %lu functions\n", __func__, cnt);
+	printf("%s: attached in %7.3lfs\n", __func__, attach_delta);
+	printf("%s: detached in %7.3lfs\n", __func__, detach_delta);
+
+cleanup:
+	kprobe_multi_empty__destroy(skel);
+	if (syms) {
+		for (i = 0; i < cnt; i++)
+			free(syms[i]);
+		free(syms);
+	}
+}
+
+void test_kprobe_multi_test(void)
+{
+	if (!ASSERT_OK(load_kallsyms(), "load_kallsyms"))
+		return;
+
+	if (test__start_subtest("skel_api"))
+		test_skel_api();
+	if (test__start_subtest("link_api_addrs"))
+		test_link_api_addrs();
+	if (test__start_subtest("link_api_syms"))
+		test_link_api_syms();
+	if (test__start_subtest("attach_api_pattern"))
+		test_attach_api_pattern();
+	if (test__start_subtest("attach_api_addrs"))
+		test_attach_api_addrs();
+	if (test__start_subtest("attach_api_syms"))
+		test_attach_api_syms();
+	if (test__start_subtest("attach_api_fails"))
+		test_attach_api_fails();
+	if (test__start_subtest("bench_attach"))
+		test_bench_attach();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
index f6933b06daf8..1d7a2f1e0731 100644
--- a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
+++ 
b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c @@ -138,12 +138,16 @@ cleanup: test_ksyms_weak_lskel__destroy(skel); } -static void test_write_check(void) +static void test_write_check(bool test_handler1) { struct test_ksyms_btf_write_check *skel; - skel = test_ksyms_btf_write_check__open_and_load(); - ASSERT_ERR_PTR(skel, "unexpected load of a prog writing to ksym memory\n"); + skel = test_ksyms_btf_write_check__open(); + if (!ASSERT_OK_PTR(skel, "test_ksyms_btf_write_check__open")) + return; + bpf_program__set_autoload(test_handler1 ? skel->progs.handler2 : skel->progs.handler1, false); + ASSERT_ERR(test_ksyms_btf_write_check__load(skel), + "unexpected load of a prog writing to ksym memory\n"); test_ksyms_btf_write_check__destroy(skel); } @@ -179,6 +183,9 @@ void test_ksyms_btf(void) if (test__start_subtest("weak_ksyms_lskel")) test_weak_syms_lskel(); - if (test__start_subtest("write_check")) - test_write_check(); + if (test__start_subtest("write_check1")) + test_write_check(true); + + if (test__start_subtest("write_check2")) + test_write_check(false); } diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c index d490ad80eccb..a1ebac70ec29 100644 --- a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c @@ -6,11 +6,15 @@ #include "test_ksyms_module.lskel.h" #include "test_ksyms_module.skel.h" -void test_ksyms_module_lskel(void) +static void test_ksyms_module_lskel(void) { struct test_ksyms_module_lskel *skel; - int retval; int err; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); if (!env.has_testmod) { test__skip(); @@ -20,20 +24,24 @@ void test_ksyms_module_lskel(void) skel = test_ksyms_module_lskel__open_and_load(); if (!ASSERT_OK_PTR(skel, "test_ksyms_module_lskel__open_and_load")) return; - err = bpf_prog_test_run(skel->progs.load.prog_fd, 1, &pkt_v4, sizeof(pkt_v4), - NULL, NULL, (__u32 *)&retval, NULL); + err = bpf_prog_test_run_opts(skel->progs.load.prog_fd, &topts); if (!ASSERT_OK(err, "bpf_prog_test_run")) goto cleanup; - ASSERT_EQ(retval, 0, "retval"); + ASSERT_EQ(topts.retval, 0, "retval"); ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym"); cleanup: test_ksyms_module_lskel__destroy(skel); } -void test_ksyms_module_libbpf(void) +static void test_ksyms_module_libbpf(void) { struct test_ksyms_module *skel; - int retval, err; + int err; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); if (!env.has_testmod) { test__skip(); @@ -43,11 +51,10 @@ void test_ksyms_module_libbpf(void) skel = test_ksyms_module__open_and_load(); if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open")) return; - err = bpf_prog_test_run(bpf_program__fd(skel->progs.load), 1, &pkt_v4, - sizeof(pkt_v4), NULL, NULL, (__u32 *)&retval, NULL); + err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.load), &topts); if (!ASSERT_OK(err, "bpf_prog_test_run")) goto cleanup; - ASSERT_EQ(retval, 0, "retval"); + ASSERT_EQ(topts.retval, 0, "retval"); ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym"); cleanup: test_ksyms_module__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c index 540ef28fabff..55f733ff4109 100644 --- a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c +++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c @@ -23,12 +23,16 @@ 
static void test_l4lb(const char *file) __u8 flags; } real_def = {.dst = MAGIC_VAL}; __u32 ch_key = 11, real_num = 3; - __u32 duration, retval, size; int err, i, prog_fd, map_fd; __u64 bytes = 0, pkts = 0; struct bpf_object *obj; char buf[128]; u32 *magic = (u32 *)buf; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = NUM_ITER, + ); err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); if (CHECK_FAIL(err)) @@ -49,19 +53,24 @@ static void test_l4lb(const char *file) goto out; bpf_map_update_elem(map_fd, &real_num, &real_def, 0); - err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4), - buf, &size, &retval, &duration); - CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 || - *magic != MAGIC_VAL, "ipv4", - "err %d errno %d retval %d size %d magic %x\n", - err, errno, retval, size, *magic); + topts.data_in = &pkt_v4; + topts.data_size_in = sizeof(pkt_v4); - err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6), - buf, &size, &retval, &duration); - CHECK(err || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 || - *magic != MAGIC_VAL, "ipv6", - "err %d errno %d retval %d size %d magic %x\n", - err, errno, retval, size, *magic); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 7 /*TC_ACT_REDIRECT*/, "ipv4 test_run retval"); + ASSERT_EQ(topts.data_size_out, 54, "ipv4 test_run data_size_out"); + ASSERT_EQ(*magic, MAGIC_VAL, "ipv4 magic"); + + topts.data_in = &pkt_v6; + topts.data_size_in = sizeof(pkt_v6); + topts.data_size_out = sizeof(buf); /* reset out size */ + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, 7 /*TC_ACT_REDIRECT*/, "ipv6 test_run retval"); + ASSERT_EQ(topts.data_size_out, 74, "ipv6 test_run data_size_out"); + ASSERT_EQ(*magic, MAGIC_VAL, "ipv6 magic"); map_fd = bpf_find_map(__func__, obj, "stats"); if (map_fd < 0) diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c index e9916f2817ec..cad664546912 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c @@ -14,6 +14,12 @@ void test_linked_funcs(void) if (!ASSERT_OK_PTR(skel, "skel_open")) return; + /* handler1 and handler2 are marked as SEC("?raw_tp/sys_enter") and + * are set to not autoload by default + */ + bpf_program__set_autoload(skel->progs.handler1, true); + bpf_program__set_autoload(skel->progs.handler2, true); + skel->rodata->my_tid = syscall(SYS_gettid); skel->bss->syscall_id = SYS_getpgid; diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c index e469b023962b..fe9a23e65ef4 100644 --- a/tools/testing/selftests/bpf/prog_tests/log_buf.c +++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c @@ -78,7 +78,7 @@ static void obj_load_log_buf(void) ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"), "libbpf_log_not_empty"); ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty"); - ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), + ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), "good_log_verbose"); ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"), "bad_log_not_empty"); @@ -175,7 +175,7 @@ static void bpf_prog_load_log_buf(void) opts.log_level = 2; fd = 
bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL", good_prog_insns, good_prog_insn_cnt, &opts); - ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), "good_log_2"); + ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), "good_log_2"); ASSERT_GE(fd, 0, "good_fd2"); if (fd >= 0) close(fd); @@ -202,7 +202,7 @@ static void bpf_btf_load_log_buf(void) const void *raw_btf_data; __u32 raw_btf_size; struct btf *btf; - char *log_buf; + char *log_buf = NULL; int fd = -1; btf = btf__new_empty(); diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c new file mode 100644 index 000000000000..f4ffdcabf4e4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ +#include <test_progs.h> +#include <bpf/btf.h> + +#include "test_log_fixup.skel.h" + +enum trunc_type { + TRUNC_NONE, + TRUNC_PARTIAL, + TRUNC_FULL, +}; + +static void bad_core_relo(size_t log_buf_size, enum trunc_type trunc_type) +{ + char log_buf[8 * 1024]; + struct test_log_fixup* skel; + int err; + + skel = test_log_fixup__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bpf_program__set_autoload(skel->progs.bad_relo, true); + memset(log_buf, 0, sizeof(log_buf)); + bpf_program__set_log_buf(skel->progs.bad_relo, log_buf, log_buf_size ?: sizeof(log_buf)); + + err = test_log_fixup__load(skel); + if (!ASSERT_ERR(err, "load_fail")) + goto cleanup; + + ASSERT_HAS_SUBSTR(log_buf, + "0: <invalid CO-RE relocation>\n" + "failed to resolve CO-RE relocation <byte_sz> ", + "log_buf_part1"); + + switch (trunc_type) { + case TRUNC_NONE: + ASSERT_HAS_SUBSTR(log_buf, + "struct task_struct___bad.fake_field (0:1 @ offset 4)\n", + "log_buf_part2"); + ASSERT_HAS_SUBSTR(log_buf, + "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n", + "log_buf_end"); + break; + case TRUNC_PARTIAL: + /* we should get full libbpf message patch */ + ASSERT_HAS_SUBSTR(log_buf, + "struct task_struct___bad.fake_field (0:1 @ offset 4)\n", + "log_buf_part2"); + /* we shouldn't get full end of BPF verifier log */ + ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"), + "log_buf_end"); + break; + case TRUNC_FULL: + /* we shouldn't get second part of libbpf message patch */ + ASSERT_NULL(strstr(log_buf, "struct task_struct___bad.fake_field (0:1 @ offset 4)\n"), + "log_buf_part2"); + /* we shouldn't get full end of BPF verifier log */ + ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"), + "log_buf_end"); + break; + } + + if (env.verbosity > VERBOSE_NONE) + printf("LOG: \n=================\n%s=================\n", log_buf); +cleanup: + test_log_fixup__destroy(skel); +} + +static void bad_core_relo_subprog(void) +{ + char log_buf[8 * 1024]; + struct test_log_fixup* skel; + int err; + + skel = test_log_fixup__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bpf_program__set_autoload(skel->progs.bad_relo_subprog, true); + bpf_program__set_log_buf(skel->progs.bad_relo_subprog, log_buf, sizeof(log_buf)); + + err = test_log_fixup__load(skel); + if (!ASSERT_ERR(err, "load_fail")) + goto cleanup; + + ASSERT_HAS_SUBSTR(log_buf, + ": <invalid CO-RE relocation>\n" + "failed to resolve CO-RE relocation <byte_off> ", + "log_buf"); + ASSERT_HAS_SUBSTR(log_buf, + "struct task_struct___bad.fake_field_subprog (0:2 @ offset 8)\n", + "log_buf"); + + 
if (env.verbosity > VERBOSE_NONE) + printf("LOG: \n=================\n%s=================\n", log_buf); + +cleanup: + test_log_fixup__destroy(skel); +} + +static void missing_map(void) +{ + char log_buf[8 * 1024]; + struct test_log_fixup* skel; + int err; + + skel = test_log_fixup__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bpf_map__set_autocreate(skel->maps.missing_map, false); + + bpf_program__set_autoload(skel->progs.use_missing_map, true); + bpf_program__set_log_buf(skel->progs.use_missing_map, log_buf, sizeof(log_buf)); + + err = test_log_fixup__load(skel); + if (!ASSERT_ERR(err, "load_fail")) + goto cleanup; + + ASSERT_TRUE(bpf_map__autocreate(skel->maps.existing_map), "existing_map_autocreate"); + ASSERT_FALSE(bpf_map__autocreate(skel->maps.missing_map), "missing_map_autocreate"); + + ASSERT_HAS_SUBSTR(log_buf, + "8: <invalid BPF map reference>\n" + "BPF map 'missing_map' is referenced but wasn't created\n", + "log_buf"); + + if (env.verbosity > VERBOSE_NONE) + printf("LOG: \n=================\n%s=================\n", log_buf); + +cleanup: + test_log_fixup__destroy(skel); +} + +void test_log_fixup(void) +{ + if (test__start_subtest("bad_core_relo_trunc_none")) + bad_core_relo(0, TRUNC_NONE /* full buf */); + if (test__start_subtest("bad_core_relo_trunc_partial")) + bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */); + if (test__start_subtest("bad_core_relo_trunc_full")) + bad_core_relo(250, TRUNC_FULL /* truncate also libbpf's message patch */); + if (test__start_subtest("bad_core_relo_subprog")) + bad_core_relo_subprog(); + if (test__start_subtest("missing_map")) + missing_map(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c b/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c index beebfa9730e1..a767bb4a271c 100644 --- a/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c +++ b/tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c @@ -112,7 +112,8 @@ static void test_lookup_and_delete_hash(void) /* Lookup and delete element. */ key = 1; - err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value); + err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map, + &key, sizeof(key), &value, sizeof(value), 0); if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) goto cleanup; @@ -147,7 +148,8 @@ static void test_lookup_and_delete_percpu_hash(void) /* Lookup and delete element. */ key = 1; - err = bpf_map_lookup_and_delete_elem(map_fd, &key, value); + err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map, + &key, sizeof(key), value, sizeof(value), 0); if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) goto cleanup; @@ -191,7 +193,8 @@ static void test_lookup_and_delete_lru_hash(void) goto cleanup; /* Lookup and delete element 3. */ - err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value); + err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map, + &key, sizeof(key), &value, sizeof(value), 0); if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) goto cleanup; @@ -240,10 +243,10 @@ static void test_lookup_and_delete_lru_percpu_hash(void) value[i] = 0; /* Lookup and delete element 3. */ - err = bpf_map_lookup_and_delete_elem(map_fd, &key, value); - if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) { + err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map, + &key, sizeof(key), value, sizeof(value), 0); + if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) goto cleanup; - } /* Check if only one CPU has set the value. 
*/ for (i = 0; i < nr_cpus; i++) { diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c new file mode 100644 index 000000000000..fdcea7a61491 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +#include "map_kptr.skel.h" +#include "map_kptr_fail.skel.h" + +static char log_buf[1024 * 1024]; + +struct { + const char *prog_name; + const char *err_msg; +} map_kptr_fail_tests[] = { + { "size_not_bpf_dw", "kptr access size must be BPF_DW" }, + { "non_const_var_off", "kptr access cannot have variable offset" }, + { "non_const_var_off_kptr_xchg", "R1 doesn't have constant offset. kptr has to be" }, + { "misaligned_access_write", "kptr access misaligned expected=8 off=7" }, + { "misaligned_access_read", "kptr access misaligned expected=8 off=1" }, + { "reject_var_off_store", "variable untrusted_ptr_ access var_off=(0x0; 0x1e0)" }, + { "reject_bad_type_match", "invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc" }, + { "marked_as_untrusted_or_null", "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" }, + { "correct_btf_id_check_size", "access beyond struct prog_test_ref_kfunc at off 32 size 4" }, + { "inherit_untrusted_on_walk", "R1 type=untrusted_ptr_ expected=percpu_ptr_" }, + { "reject_kptr_xchg_on_unref", "off=8 kptr isn't referenced kptr" }, + { "reject_kptr_get_no_map_val", "arg#0 expected pointer to map value" }, + { "reject_kptr_get_no_null_map_val", "arg#0 expected pointer to map value" }, + { "reject_kptr_get_no_kptr", "arg#0 no referenced kptr at map value offset=0" }, + { "reject_kptr_get_on_unref", "arg#0 no referenced kptr at map value offset=8" }, + { "reject_kptr_get_bad_type_match", "kernel function bpf_kfunc_call_test_kptr_get args#0" }, + { "mark_ref_as_untrusted_or_null", "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_" }, + { "reject_untrusted_store_to_ref", "store to referenced kptr disallowed" }, + { "reject_bad_type_xchg", "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member" }, + { "reject_untrusted_xchg", "R2 type=untrusted_ptr_ expected=ptr_" }, + { "reject_member_of_ref_xchg", "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc" }, + { "reject_indirect_helper_access", "kptr cannot be accessed indirectly by helper" }, + { "reject_indirect_global_func_access", "kptr cannot be accessed indirectly by helper" }, + { "kptr_xchg_ref_state", "Unreleased reference id=5 alloc_insn=" }, + { "kptr_get_ref_state", "Unreleased reference id=3 alloc_insn=" }, +}; + +static void test_map_kptr_fail_prog(const char *prog_name, const char *err_msg) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf, + .kernel_log_size = sizeof(log_buf), + .kernel_log_level = 1); + struct map_kptr_fail *skel; + struct bpf_program *prog; + int ret; + + skel = map_kptr_fail__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "map_kptr_fail__open_opts")) + return; + + prog = bpf_object__find_program_by_name(skel->obj, prog_name); + if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) + goto end; + + bpf_program__set_autoload(prog, true); + + ret = map_kptr_fail__load(skel); + if (!ASSERT_ERR(ret, "map_kptr__load must fail")) + goto end; + + if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) { + fprintf(stderr, "Expected: %s\n", err_msg); + fprintf(stderr, "Verifier: %s\n", log_buf); + } + +end: + map_kptr_fail__destroy(skel); 
+}
+
+static void test_map_kptr_fail(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(map_kptr_fail_tests); i++) {
+		if (!test__start_subtest(map_kptr_fail_tests[i].prog_name))
+			continue;
+		test_map_kptr_fail_prog(map_kptr_fail_tests[i].prog_name,
+					map_kptr_fail_tests[i].err_msg);
+	}
+}
+
+static void test_map_kptr_success(bool test_run)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 1,
+	);
+	struct map_kptr *skel;
+	int key = 0, ret;
+	char buf[16];
+
+	skel = map_kptr__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
+		return;
+
+	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref), &opts);
+	ASSERT_OK(ret, "test_map_kptr_ref refcount");
+	ASSERT_OK(opts.retval, "test_map_kptr_ref retval");
+	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_map_kptr_ref2), &opts);
+	ASSERT_OK(ret, "test_map_kptr_ref2 refcount");
+	ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval");
+
+	/* skip the map updates in the second, test_run-only pass, but still
+	 * destroy the skeleton so it isn't leaked
+	 */
+	if (test_run)
+		goto exit;
+
+	ret = bpf_map__update_elem(skel->maps.array_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
+	ASSERT_OK(ret, "array_map update");
+	ret = bpf_map__update_elem(skel->maps.array_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
+	ASSERT_OK(ret, "array_map update2");
+
+	ret = bpf_map__update_elem(skel->maps.hash_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
+	ASSERT_OK(ret, "hash_map update");
+	ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0);
+	ASSERT_OK(ret, "hash_map delete");
+
+	ret = bpf_map__update_elem(skel->maps.hash_malloc_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
+	ASSERT_OK(ret, "hash_malloc_map update");
+	ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0);
+	ASSERT_OK(ret, "hash_malloc_map delete");
+
+	ret = bpf_map__update_elem(skel->maps.lru_hash_map,
+				   &key, sizeof(key), buf, sizeof(buf), 0);
+	ASSERT_OK(ret, "lru_hash_map update");
+	ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
+	ASSERT_OK(ret, "lru_hash_map delete");
+
+exit:
+	map_kptr__destroy(skel);
+}
+
+void test_map_kptr(void)
+{
+	if (test__start_subtest("success")) {
+		test_map_kptr_success(false);
+		/* Do test_run twice, so that we see the refcount going back
+		 * to 1 after we leave it in the map in the first iteration.
+ */ + test_map_kptr_success(true); + } + test_map_kptr_fail(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/map_lock.c b/tools/testing/selftests/bpf/prog_tests/map_lock.c index 23d19e9cf26a..e4e99b37df64 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/map_lock.c @@ -4,14 +4,17 @@ static void *spin_lock_thread(void *arg) { - __u32 duration, retval; int err, prog_fd = *(u32 *) arg; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 10000, + ); + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run_opts err"); + ASSERT_OK(topts.retval, "test_run_opts retval"); - err = bpf_prog_test_run(prog_fd, 10000, &pkt_v4, sizeof(pkt_v4), - NULL, NULL, &retval, &duration); - CHECK(err || retval, "", - "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration); pthread_exit(arg); } diff --git a/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c new file mode 100644 index 000000000000..bfb1bf3fd427 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Bytedance */ + +#include <test_progs.h> +#include "test_map_lookup_percpu_elem.skel.h" + +void test_map_lookup_percpu_elem(void) +{ + struct test_map_lookup_percpu_elem *skel; + __u64 key = 0, sum; + int ret, i, nr_cpus = libbpf_num_possible_cpus(); + __u64 *buf; + + buf = malloc(nr_cpus*sizeof(__u64)); + if (!ASSERT_OK_PTR(buf, "malloc")) + return; + + for (i = 0; i < nr_cpus; i++) + buf[i] = i; + sum = (nr_cpus - 1) * nr_cpus / 2; + + skel = test_map_lookup_percpu_elem__open(); + if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open")) + goto exit; + + skel->rodata->my_pid = getpid(); + skel->rodata->nr_cpus = nr_cpus; + + ret = test_map_lookup_percpu_elem__load(skel); + if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__load")) + goto cleanup; + + ret = test_map_lookup_percpu_elem__attach(skel); + if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach")) + goto cleanup; + + ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf, 0); + ASSERT_OK(ret, "percpu_array_map update"); + + ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_hash_map), &key, buf, 0); + ASSERT_OK(ret, "percpu_hash_map update"); + + ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_lru_hash_map), &key, buf, 0); + ASSERT_OK(ret, "percpu_lru_hash_map update"); + + syscall(__NR_getuid); + + test_map_lookup_percpu_elem__detach(skel); + + ASSERT_EQ(skel->bss->percpu_array_elem_sum, sum, "percpu_array lookup percpu elem"); + ASSERT_EQ(skel->bss->percpu_hash_elem_sum, sum, "percpu_hash lookup percpu elem"); + ASSERT_EQ(skel->bss->percpu_lru_hash_elem_sum, sum, "percpu_lru_hash lookup percpu elem"); + +cleanup: + test_map_lookup_percpu_elem__destroy(skel); +exit: + free(buf); +} diff --git a/tools/testing/selftests/bpf/prog_tests/map_ptr.c b/tools/testing/selftests/bpf/prog_tests/map_ptr.c index 273725504f11..43e502acf050 100644 --- a/tools/testing/selftests/bpf/prog_tests/map_ptr.c +++ b/tools/testing/selftests/bpf/prog_tests/map_ptr.c @@ -9,10 +9,16 @@ void test_map_ptr(void) { struct map_ptr_kern_lskel *skel; - __u32 duration = 0, retval; char buf[128]; int err; int page_size = getpagesize(); + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, 
+ .data_size_out = sizeof(buf), + .repeat = 1, + ); skel = map_ptr_kern_lskel__open(); if (!ASSERT_OK_PTR(skel, "skel_open")) @@ -26,14 +32,12 @@ void test_map_ptr(void) skel->bss->page_size = page_size; - err = bpf_prog_test_run(skel->progs.cg_skb.prog_fd, 1, &pkt_v4, - sizeof(pkt_v4), buf, NULL, &retval, NULL); + err = bpf_prog_test_run_opts(skel->progs.cg_skb.prog_fd, &topts); - if (CHECK(err, "test_run", "err=%d errno=%d\n", err, errno)) + if (!ASSERT_OK(err, "test_run")) goto cleanup; - if (CHECK(!retval, "retval", "retval=%d map_type=%u line=%u\n", retval, - skel->bss->g_map_type, skel->bss->g_line)) + if (!ASSERT_NEQ(topts.retval, 0, "test_run retval")) goto cleanup; cleanup: diff --git a/tools/testing/selftests/bpf/prog_tests/modify_return.c b/tools/testing/selftests/bpf/prog_tests/modify_return.c index b772fe30ce9b..5d9955af6247 100644 --- a/tools/testing/selftests/bpf/prog_tests/modify_return.c +++ b/tools/testing/selftests/bpf/prog_tests/modify_return.c @@ -15,39 +15,31 @@ static void run_test(__u32 input_retval, __u16 want_side_effect, __s16 want_ret) { struct modify_return *skel = NULL; int err, prog_fd; - __u32 duration = 0, retval; __u16 side_effect; __s16 ret; + LIBBPF_OPTS(bpf_test_run_opts, topts); skel = modify_return__open_and_load(); - if (CHECK(!skel, "skel_load", "modify_return skeleton failed\n")) + if (!ASSERT_OK_PTR(skel, "skel_load")) goto cleanup; err = modify_return__attach(skel); - if (CHECK(err, "modify_return", "attach failed: %d\n", err)) + if (!ASSERT_OK(err, "modify_return__attach failed")) goto cleanup; skel->bss->input_retval = input_retval; prog_fd = bpf_program__fd(skel->progs.fmod_ret_test); - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, NULL, 0, - &retval, &duration); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "test_run"); - CHECK(err, "test_run", "err %d errno %d\n", err, errno); + side_effect = UPPER(topts.retval); + ret = LOWER(topts.retval); - side_effect = UPPER(retval); - ret = LOWER(retval); - - CHECK(ret != want_ret, "test_run", - "unexpected ret: %d, expected: %d\n", ret, want_ret); - CHECK(side_effect != want_side_effect, "modify_return", - "unexpected side_effect: %d\n", side_effect); - - CHECK(skel->bss->fentry_result != 1, "modify_return", - "fentry failed\n"); - CHECK(skel->bss->fexit_result != 1, "modify_return", - "fexit failed\n"); - CHECK(skel->bss->fmod_ret_result != 1, "modify_return", - "fmod_ret failed\n"); + ASSERT_EQ(ret, want_ret, "test_run ret"); + ASSERT_EQ(side_effect, want_side_effect, "modify_return side_effect"); + ASSERT_EQ(skel->bss->fentry_result, 1, "modify_return fentry_result"); + ASSERT_EQ(skel->bss->fexit_result, 1, "modify_return fexit_result"); + ASSERT_EQ(skel->bss->fmod_ret_result, 1, "modify_return fmod_ret_result"); cleanup: modify_return__destroy(skel); @@ -63,4 +55,3 @@ void serial_test_modify_return(void) 0 /* want_side_effect */, -EINVAL /* want_ret */); } - diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c new file mode 100644 index 000000000000..59f08d6d1d53 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Tessares SA. */ +/* Copyright (c) 2022, SUSE. 
 */
+
+#include <test_progs.h>
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+#include "mptcp_sock.skel.h"
+
+#ifndef TCP_CA_NAME_MAX
+#define TCP_CA_NAME_MAX	16
+#endif
+
+struct mptcp_storage {
+	__u32 invoked;
+	__u32 is_mptcp;
+	struct sock *sk;
+	__u32 token;
+	struct sock *first;
+	char ca_name[TCP_CA_NAME_MAX];
+};
+
+static int verify_tsk(int map_fd, int client_fd)
+{
+	int err, cfd = client_fd;
+	struct mptcp_storage val;
+
+	err = bpf_map_lookup_elem(map_fd, &cfd, &val);
+	if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+		return err;
+
+	if (!ASSERT_EQ(val.invoked, 1, "unexpected invoked count"))
+		err++;
+
+	if (!ASSERT_EQ(val.is_mptcp, 0, "unexpected is_mptcp"))
+		err++;
+
+	return err;
+}
+
+static void get_msk_ca_name(char ca_name[])
+{
+	ssize_t len;	/* read() returns ssize_t; size_t would hide a -1 error */
+	int fd;
+
+	fd = open("/proc/sys/net/ipv4/tcp_congestion_control", O_RDONLY);
+	if (!ASSERT_GE(fd, 0, "failed to open tcp_congestion_control"))
+		return;
+
+	len = read(fd, ca_name, TCP_CA_NAME_MAX);
+	if (!ASSERT_GT(len, 0, "failed to read ca_name"))
+		goto err;
+
+	if (len > 0 && ca_name[len - 1] == '\n')
+		ca_name[len - 1] = '\0';
+
+err:
+	close(fd);
+}
+
+static int verify_msk(int map_fd, int client_fd, __u32 token)
+{
+	char ca_name[TCP_CA_NAME_MAX];
+	int err, cfd = client_fd;
+	struct mptcp_storage val;
+
+	if (!ASSERT_GT(token, 0, "invalid token"))
+		return -1;
+
+	get_msk_ca_name(ca_name);
+
+	err = bpf_map_lookup_elem(map_fd, &cfd, &val);
+	if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+		return err;
+
+	if (!ASSERT_EQ(val.invoked, 1, "unexpected invoked count"))
+		err++;
+
+	if (!ASSERT_EQ(val.is_mptcp, 1, "unexpected is_mptcp"))
+		err++;
+
+	if (!ASSERT_EQ(val.token, token, "unexpected token"))
+		err++;
+
+	if (!ASSERT_EQ(val.first, val.sk, "unexpected first"))
+		err++;
+
+	if (!ASSERT_STRNEQ(val.ca_name, ca_name, TCP_CA_NAME_MAX, "unexpected ca_name"))
+		err++;
+
+	return err;
+}
+
+static int run_test(int cgroup_fd, int server_fd, bool is_mptcp)
+{
+	int client_fd, prog_fd, map_fd, err;
+	struct mptcp_sock *sock_skel;
+
+	sock_skel = mptcp_sock__open_and_load();
+	if (!ASSERT_OK_PTR(sock_skel, "skel_open_load"))
+		return -EIO;
+
+	err = mptcp_sock__attach(sock_skel);
+	if (!ASSERT_OK(err, "skel_attach"))
+		goto out;
+
+	prog_fd = bpf_program__fd(sock_skel->progs._sockops);
+	if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd")) {
+		err = -EIO;
+		goto out;
+	}
+
+	map_fd = bpf_map__fd(sock_skel->maps.socket_storage_map);
+	if (!ASSERT_GE(map_fd, 0, "bpf_map__fd")) {
+		err = -EIO;
+		goto out;
+	}
+
+	err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0);
+	if (!ASSERT_OK(err, "bpf_prog_attach"))
+		goto out;
+
+	client_fd = connect_to_fd(server_fd, 0);
+	if (!ASSERT_GE(client_fd, 0, "connect to fd")) {
+		err = -EIO;
+		goto out;
+	}
+
+	err += is_mptcp ?
verify_msk(map_fd, client_fd, sock_skel->bss->token) : + verify_tsk(map_fd, client_fd); + + close(client_fd); + +out: + mptcp_sock__destroy(sock_skel); + return err; +} + +static void test_base(void) +{ + int server_fd, cgroup_fd; + + cgroup_fd = test__join_cgroup("/mptcp"); + if (!ASSERT_GE(cgroup_fd, 0, "test__join_cgroup")) + return; + + /* without MPTCP */ + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); + if (!ASSERT_GE(server_fd, 0, "start_server")) + goto with_mptcp; + + ASSERT_OK(run_test(cgroup_fd, server_fd, false), "run_test tcp"); + + close(server_fd); + +with_mptcp: + /* with MPTCP */ + server_fd = start_mptcp_server(AF_INET, NULL, 0, 0); + if (!ASSERT_GE(server_fd, 0, "start_mptcp_server")) + goto close_cgroup_fd; + + ASSERT_OK(run_test(cgroup_fd, server_fd, true), "run_test mptcp"); + + close(server_fd); + +close_cgroup_fd: + close(cgroup_fd); +} + +void test_mptcp(void) +{ + if (test__start_subtest("base")) + test_base(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/netcnt.c b/tools/testing/selftests/bpf/prog_tests/netcnt.c index 954964f0ac3d..d3915c58d0e1 100644 --- a/tools/testing/selftests/bpf/prog_tests/netcnt.c +++ b/tools/testing/selftests/bpf/prog_tests/netcnt.c @@ -25,7 +25,7 @@ void serial_test_netcnt(void) if (!ASSERT_OK_PTR(skel, "netcnt_prog__open_and_load")) return; - nproc = get_nprocs_conf(); + nproc = bpf_num_possible_cpus(); percpu_netcnt = malloc(sizeof(*percpu_netcnt) * nproc); if (!ASSERT_OK_PTR(percpu_netcnt, "malloc(percpu_netcnt)")) goto err; diff --git a/tools/testing/selftests/bpf/prog_tests/obj_name.c b/tools/testing/selftests/bpf/prog_tests/obj_name.c index 6194b776a28b..7093edca6e08 100644 --- a/tools/testing/selftests/bpf/prog_tests/obj_name.c +++ b/tools/testing/selftests/bpf/prog_tests/obj_name.c @@ -20,7 +20,7 @@ void test_obj_name(void) __u32 duration = 0; int i; - for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { + for (i = 0; i < ARRAY_SIZE(tests); i++) { size_t name_len = strlen(tests[i].name) + 1; union bpf_attr attr; size_t ncopy; diff --git a/tools/testing/selftests/bpf/prog_tests/perf_branches.c b/tools/testing/selftests/bpf/prog_tests/perf_branches.c index 12c4f45cee1a..bc24f83339d6 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_branches.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_branches.c @@ -110,7 +110,7 @@ static void test_perf_branches_hw(void) attr.type = PERF_TYPE_HARDWARE; attr.config = PERF_COUNT_HW_CPU_CYCLES; attr.freq = 1; - attr.sample_freq = 4000; + attr.sample_freq = 1000; attr.sample_type = PERF_SAMPLE_BRANCH_STACK; attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY; pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); @@ -151,7 +151,7 @@ static void test_perf_branches_no_hw(void) attr.type = PERF_TYPE_SOFTWARE; attr.config = PERF_COUNT_SW_CPU_CLOCK; attr.freq = 1; - attr.sample_freq = 4000; + attr.sample_freq = 1000; pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); if (CHECK(pfd < 0, "perf_event_open", "err %d\n", pfd)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/perf_link.c b/tools/testing/selftests/bpf/prog_tests/perf_link.c index ede07344f264..224eba6fef2e 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_link.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_link.c @@ -39,7 +39,7 @@ void serial_test_perf_link(void) attr.type = PERF_TYPE_SOFTWARE; attr.config = PERF_COUNT_SW_CPU_CLOCK; attr.freq = 1; - attr.sample_freq = 4000; + attr.sample_freq = 1000; pfd = 
 	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
 	if (!ASSERT_GE(pfd, 0, "perf_fd"))
 		goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_access.c
index 6628710ec3c6..0bcccdc34fbc 100644
--- a/tools/testing/selftests/bpf/prog_tests/pkt_access.c
+++ b/tools/testing/selftests/bpf/prog_tests/pkt_access.c
@@ -6,23 +6,27 @@ void test_pkt_access(void)
 {
 	const char *file = "./test_pkt_access.o";
 	struct bpf_object *obj;
-	__u32 duration, retval;
 	int err, prog_fd;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 100000,
+	);
 
 	err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
 	if (CHECK_FAIL(err))
 		return;
 
-	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
-				NULL, NULL, &retval, &duration);
-	CHECK(err || retval, "ipv4",
-	      "err %d errno %d retval %d duration %d\n",
-	      err, errno, retval, duration);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "ipv4 test_run_opts err");
+	ASSERT_OK(topts.retval, "ipv4 test_run_opts retval");
+
+	topts.data_in = &pkt_v6;
+	topts.data_size_in = sizeof(pkt_v6);
+	topts.data_size_out = 0; /* reset from last call */
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "ipv6 test_run_opts err");
+	ASSERT_OK(topts.retval, "ipv6 test_run_opts retval");
 
-	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
-				NULL, NULL, &retval, &duration);
-	CHECK(err || retval, "ipv6",
-	      "err %d errno %d retval %d duration %d\n",
-	      err, errno, retval, duration);
 	bpf_object__close(obj);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
index c9d2d6a1bfcc..00ee1dd792aa 100644
--- a/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
+++ b/tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
@@ -6,18 +6,20 @@ void test_pkt_md_access(void)
 {
 	const char *file = "./test_pkt_md_access.o";
 	struct bpf_object *obj;
-	__u32 duration, retval;
 	int err, prog_fd;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 10,
+	);
 
 	err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
 	if (CHECK_FAIL(err))
 		return;
 
-	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
-				NULL, NULL, &retval, &duration);
-	CHECK(err || retval, "",
-	      "err %d errno %d retval %d duration %d\n",
-	      err, errno, retval, duration);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run_opts err");
+	ASSERT_OK(topts.retval, "test_run_opts retval");
 
 	bpf_object__close(obj);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c b/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c
new file mode 100644
index 000000000000..1ccd2bdf8fa8
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/prog_run_opts.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "test_pkt_access.skel.h"
+
+static const __u32 duration;
+
+static void check_run_cnt(int prog_fd, __u64 run_cnt)
+{
+	struct bpf_prog_info info = {};
+	__u32 info_len = sizeof(info);
+	int err;
+
+	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+	if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd))
+		return;
+
+	CHECK(run_cnt != info.run_cnt, "run_cnt",
+	      "incorrect number of repetitions, want %llu have %llu\n", run_cnt, info.run_cnt);
+}
+
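+/* Covers bpf_prog_test_run_opts() output handling: an undersized data_out
+ * must fail the run with ENOSPC while data_size_out still reports the full
+ * packet size, and the program's run_cnt stats must advance by .repeat.
+ */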
+void test_prog_run_opts(void)
+{
+	struct test_pkt_access *skel;
+	int err, stats_fd = -1, prog_fd;
+	char buf[10] = {};
+	__u64 run_cnt = 0;
+
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.repeat = 1,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.data_out = buf,
+		.data_size_out = 5,
+	);
+
+	stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
+	if (!ASSERT_GE(stats_fd, 0, "enable_stats good fd"))
+		return;
+
+	skel = test_pkt_access__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "open_and_load"))
+		goto cleanup;
+
+	prog_fd = bpf_program__fd(skel->progs.test_pkt_access);
+
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_EQ(errno, ENOSPC, "test_run errno");
+	ASSERT_ERR(err, "test_run");
+	ASSERT_OK(topts.retval, "test_run retval");
+
+	ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4), "test_run data_size_out");
+	ASSERT_EQ(buf[5], 0, "overflow, BPF_PROG_TEST_RUN ignored size hint");
+
+	run_cnt += topts.repeat;
+	check_run_cnt(prog_fd, run_cnt);
+
+	topts.data_out = NULL;
+	topts.data_size_out = 0;
+	topts.repeat = 2;
+	errno = 0;
+
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(errno, "run_no_output errno");
+	ASSERT_OK(err, "run_no_output err");
+	ASSERT_OK(topts.retval, "run_no_output retval");
+
+	run_cnt += topts.repeat;
+	check_run_cnt(prog_fd, run_cnt);
+
+cleanup:
+	if (skel)
+		test_pkt_access__destroy(skel);
+	if (stats_fd >= 0)
+		close(stats_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
deleted file mode 100644
index 89fc98faf19e..000000000000
--- a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c
+++ /dev/null
@@ -1,83 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <test_progs.h>
-#include <network_helpers.h>
-
-#include "test_pkt_access.skel.h"
-
-static const __u32 duration;
-
-static void check_run_cnt(int prog_fd, __u64 run_cnt)
-{
-	struct bpf_prog_info info = {};
-	__u32 info_len = sizeof(info);
-	int err;
-
-	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
-	if (CHECK(err, "get_prog_info", "failed to get bpf_prog_info for fd %d\n", prog_fd))
-		return;
-
-	CHECK(run_cnt != info.run_cnt, "run_cnt",
-	      "incorrect number of repetitions, want %llu have %llu\n", run_cnt, info.run_cnt);
-}
-
-void test_prog_run_xattr(void)
-{
-	struct test_pkt_access *skel;
-	int err, stats_fd = -1;
-	char buf[10] = {};
-	__u64 run_cnt = 0;
-
-	struct bpf_prog_test_run_attr tattr = {
-		.repeat = 1,
-		.data_in = &pkt_v4,
-		.data_size_in = sizeof(pkt_v4),
-		.data_out = buf,
-		.data_size_out = 5,
-	};
-
-	stats_fd = bpf_enable_stats(BPF_STATS_RUN_TIME);
-	if (CHECK_ATTR(stats_fd < 0, "enable_stats", "failed %d\n", errno))
-		return;
-
-	skel = test_pkt_access__open_and_load();
-	if (CHECK_ATTR(!skel, "open_and_load", "failed\n"))
-		goto cleanup;
-
-	tattr.prog_fd = bpf_program__fd(skel->progs.test_pkt_access);
-
-	err = bpf_prog_test_run_xattr(&tattr);
-	CHECK_ATTR(err >= 0 || errno != ENOSPC || tattr.retval, "run",
-		   "err %d errno %d retval %d\n", err, errno, tattr.retval);
-
-	CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
-		   "incorrect output size, want %zu have %u\n",
-		   sizeof(pkt_v4), tattr.data_size_out);
-
-	CHECK_ATTR(buf[5] != 0, "overflow",
-		   "BPF_PROG_TEST_RUN ignored size hint\n");
-
-	run_cnt += tattr.repeat;
-	check_run_cnt(tattr.prog_fd, run_cnt);
-
-	tattr.data_out = NULL;
-	tattr.data_size_out = 0;
-	tattr.repeat = 2;
-	errno = 0;
-
-	err = bpf_prog_test_run_xattr(&tattr);
-	CHECK_ATTR(err || errno || tattr.retval, "run_no_output",
- "err %d errno %d retval %d\n", err, errno, tattr.retval); - - tattr.data_size_out = 1; - err = bpf_prog_test_run_xattr(&tattr); - CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err); - - run_cnt += tattr.repeat; - check_run_cnt(tattr.prog_fd, run_cnt); - -cleanup: - if (skel) - test_pkt_access__destroy(skel); - if (stats_fd >= 0) - close(stats_fd); -} diff --git a/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c new file mode 100644 index 000000000000..14f2796076e0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +#include "test_progs.h" +#include "testing_helpers.h" + +static void clear_test_state(struct test_state *state) +{ + state->error_cnt = 0; + state->sub_succ_cnt = 0; + state->skip_cnt = 0; +} + +void test_prog_tests_framework(void) +{ + struct test_state *state = env.test_state; + + /* in all the ASSERT calls below we need to return on the first + * error due to the fact that we are cleaning the test state after + * each dummy subtest + */ + + /* test we properly count skipped tests with subtests */ + if (test__start_subtest("test_good_subtest")) + test__end_subtest(); + if (!ASSERT_EQ(state->skip_cnt, 0, "skip_cnt_check")) + return; + if (!ASSERT_EQ(state->error_cnt, 0, "error_cnt_check")) + return; + if (!ASSERT_EQ(state->subtest_num, 1, "subtest_num_check")) + return; + clear_test_state(state); + + if (test__start_subtest("test_skip_subtest")) { + test__skip(); + test__end_subtest(); + } + if (test__start_subtest("test_skip_subtest")) { + test__skip(); + test__end_subtest(); + } + if (!ASSERT_EQ(state->skip_cnt, 2, "skip_cnt_check")) + return; + if (!ASSERT_EQ(state->subtest_num, 3, "subtest_num_check")) + return; + clear_test_state(state); + + if (test__start_subtest("test_fail_subtest")) { + test__fail(); + test__end_subtest(); + } + if (!ASSERT_EQ(state->error_cnt, 1, "error_cnt_check")) + return; + if (!ASSERT_EQ(state->subtest_num, 4, "subtest_num_check")) + return; + clear_test_state(state); +} diff --git a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c index b9822f914eeb..d2743fc10032 100644 --- a/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c +++ b/tools/testing/selftests/bpf/prog_tests/queue_stack_map.c @@ -10,11 +10,18 @@ enum { static void test_queue_stack_map_by_type(int type) { const int MAP_SIZE = 32; - __u32 vals[MAP_SIZE], duration, retval, size, val; + __u32 vals[MAP_SIZE], val; int i, err, prog_fd, map_in_fd, map_out_fd; char file[32], buf[128]; struct bpf_object *obj; struct iphdr iph; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = 1, + ); /* Fill test values to be used */ for (i = 0; i < MAP_SIZE; i++) @@ -58,38 +65,37 @@ static void test_queue_stack_map_by_type(int type) pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5; } - err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), - buf, &size, &retval, &duration); - if (err || retval || size != sizeof(pkt_v4)) + topts.data_size_out = sizeof(buf); + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (err || topts.retval || + topts.data_size_out != sizeof(pkt_v4)) break; memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph)); if (iph.daddr != val) break; } - CHECK(err || retval || size != sizeof(pkt_v4) || iph.daddr != 
-	CHECK(err || retval || size != sizeof(pkt_v4) || iph.daddr != val,
-	      "bpf_map_pop_elem",
-	      "err %d errno %d retval %d size %d iph->daddr %u\n",
-	      err, errno, retval, size, iph.daddr);
+	ASSERT_OK(err, "bpf_map_pop_elem");
+	ASSERT_OK(topts.retval, "bpf_map_pop_elem test retval");
+	ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4),
+		  "bpf_map_pop_elem data_size_out");
+	ASSERT_EQ(iph.daddr, val, "bpf_map_pop_elem iph.daddr");
 
 	/* Queue is empty, program should return TC_ACT_SHOT */
-	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
-				buf, &size, &retval, &duration);
-	CHECK(err || retval != 2 /* TC_ACT_SHOT */|| size != sizeof(pkt_v4),
-	      "check-queue-stack-map-empty",
-	      "err %d errno %d retval %d size %d\n",
-	      err, errno, retval, size);
+	topts.data_size_out = sizeof(buf);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "check-queue-stack-map-empty");
+	ASSERT_EQ(topts.retval, 2 /* TC_ACT_SHOT */,
+		  "check-queue-stack-map-empty test retval");
+	ASSERT_EQ(topts.data_size_out, sizeof(pkt_v4),
+		  "check-queue-stack-map-empty data_size_out");
 
 	/* Check that the program pushed elements correctly */
 	for (i = 0; i < MAP_SIZE; i++) {
 		err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val);
-		if (err || val != vals[i] * 5)
-			break;
+		ASSERT_OK(err, "bpf_map_lookup_and_delete_elem");
+		ASSERT_EQ(val, vals[i] * 5, "bpf_map_push_elem val");
 	}
-
-	CHECK(i != MAP_SIZE && (err || val != vals[i] * 5),
-	      "bpf_map_push_elem", "err %d value %u\n", err, val);
-
 out:
 	pkt_v4.iph.saddr = 0;
 	bpf_object__close(obj);
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
index 41720a62c4fa..fe5b8fae2c36 100644
--- a/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
@@ -5,59 +5,54 @@
 #include "bpf/libbpf_internal.h"
 #include "test_raw_tp_test_run.skel.h"
 
-static int duration;
-
 void test_raw_tp_test_run(void)
 {
-	struct bpf_prog_test_run_attr test_attr = {};
 	int comm_fd = -1, err, nr_online, i, prog_fd;
 	__u64 args[2] = {0x1234ULL, 0x5678ULL};
 	int expected_retval = 0x1234 + 0x5678;
 	struct test_raw_tp_test_run *skel;
 	char buf[] = "new_name";
 	bool *online = NULL;
-	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
-			    .ctx_in = args,
-			    .ctx_size_in = sizeof(args),
-			    .flags = BPF_F_TEST_RUN_ON_CPU,
-			    );
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		    .ctx_in = args,
+		    .ctx_size_in = sizeof(args),
+		    .flags = BPF_F_TEST_RUN_ON_CPU,
+	);
 
 	err = parse_cpu_mask_file("/sys/devices/system/cpu/online",
				  &online, &nr_online);
-	if (CHECK(err, "parse_cpu_mask_file", "err %d\n", err))
+	if (!ASSERT_OK(err, "parse_cpu_mask_file"))
 		return;
 
 	skel = test_raw_tp_test_run__open_and_load();
-	if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
 		goto cleanup;
 
 	err = test_raw_tp_test_run__attach(skel);
-	if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+	if (!ASSERT_OK(err, "skel_attach"))
 		goto cleanup;
 
 	comm_fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
-	if (CHECK(comm_fd < 0, "open /proc/self/comm", "err %d\n", errno))
+	if (!ASSERT_GE(comm_fd, 0, "open /proc/self/comm"))
 		goto cleanup;
 
 	err = write(comm_fd, buf, sizeof(buf));
-	CHECK(err < 0, "task rename", "err %d", errno);
+	ASSERT_GE(err, 0, "task rename");
 
-	CHECK(skel->bss->count == 0, "check_count", "didn't increase\n");
-	CHECK(skel->data->on_cpu != 0xffffffff, "check_on_cpu", "got wrong value\n");
+	ASSERT_NEQ(skel->bss->count, 0, "check_count");
+	ASSERT_EQ(skel->data->on_cpu, 0xffffffff, "check_on_cpu");
 
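+	/* ctx_size_in below is deliberately too small (one u64 instead of
+	 * two), which the raw_tp test run is expected to reject
+	 */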
 	prog_fd = bpf_program__fd(skel->progs.rename);
-	test_attr.prog_fd = prog_fd;
-	test_attr.ctx_in = args;
-	test_attr.ctx_size_in = sizeof(__u64);
+	opts.ctx_in = args;
+	opts.ctx_size_in = sizeof(__u64);
 
-	err = bpf_prog_test_run_xattr(&test_attr);
-	CHECK(err == 0, "test_run", "should fail for too small ctx\n");
+	err = bpf_prog_test_run_opts(prog_fd, &opts);
+	ASSERT_NEQ(err, 0, "test_run should fail for too small ctx");
 
-	test_attr.ctx_size_in = sizeof(args);
-	err = bpf_prog_test_run_xattr(&test_attr);
-	CHECK(err < 0, "test_run", "err %d\n", errno);
-	CHECK(test_attr.retval != expected_retval, "check_retval",
-	      "expect 0x%x, got 0x%x\n", expected_retval, test_attr.retval);
+	opts.ctx_size_in = sizeof(args);
+	err = bpf_prog_test_run_opts(prog_fd, &opts);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(opts.retval, expected_retval, "check_retval");
 
 	for (i = 0; i < nr_online; i++) {
 		if (!online[i])
@@ -66,28 +61,23 @@ void test_raw_tp_test_run(void)
 		opts.cpu = i;
 		opts.retval = 0;
 		err = bpf_prog_test_run_opts(prog_fd, &opts);
-		CHECK(err < 0, "test_run_opts", "err %d\n", errno);
-		CHECK(skel->data->on_cpu != i, "check_on_cpu",
-		      "expect %d got %d\n", i, skel->data->on_cpu);
-		CHECK(opts.retval != expected_retval,
-		      "check_retval", "expect 0x%x, got 0x%x\n",
-		      expected_retval, opts.retval);
+		ASSERT_OK(err, "test_run_opts");
+		ASSERT_EQ(skel->data->on_cpu, i, "check_on_cpu");
+		ASSERT_EQ(opts.retval, expected_retval, "check_retval");
 	}
 
 	/* invalid cpu ID should fail with ENXIO */
 	opts.cpu = 0xffffffff;
 	err = bpf_prog_test_run_opts(prog_fd, &opts);
-	CHECK(err >= 0 || errno != ENXIO,
-	      "test_run_opts_fail",
-	      "should failed with ENXIO\n");
+	ASSERT_EQ(errno, ENXIO, "test_run_opts should fail with ENXIO");
+	ASSERT_ERR(err, "test_run_opts_fail");
 
 	/* non-zero cpu w/o BPF_F_TEST_RUN_ON_CPU should fail with EINVAL */
 	opts.cpu = 1;
 	opts.flags = 0;
 	err = bpf_prog_test_run_opts(prog_fd, &opts);
-	CHECK(err >= 0 || errno != EINVAL,
-	      "test_run_opts_fail",
-	      "should failed with EINVAL\n");
+	ASSERT_EQ(errno, EINVAL, "test_run_opts should fail with EINVAL");
+	ASSERT_ERR(err, "test_run_opts_fail");
 
 cleanup:
 	close(comm_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
index 239baccabccb..f4aa7dab4766 100644
--- a/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c
@@ -56,21 +56,23 @@ void serial_test_raw_tp_writable_test_run(void)
 		0,
 	};
 
-	__u32 prog_ret;
-	int err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0,
-				    0, &prog_ret, 0);
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = test_skb,
+		.data_size_in = sizeof(test_skb),
+		.repeat = 1,
+	);
+	int err = bpf_prog_test_run_opts(filter_fd, &topts);
 	CHECK(err != 42, "test_run",
 	      "tracepoint did not modify return value\n");
-	CHECK(prog_ret != 0, "test_run_ret",
+	CHECK(topts.retval != 0, "test_run_ret",
 	      "socket_filter did not return 0\n");
 
 	close(tp_fd);
 
-	err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0, 0,
-				&prog_ret, 0);
+	err = bpf_prog_test_run_opts(filter_fd, &topts);
 	CHECK(err != 0, "test_run_notrace",
 	      "test_run failed with %d errno %d\n", err, errno);
-	CHECK(prog_ret != 0, "test_run_ret_notrace",
+	CHECK(topts.retval != 0, "test_run_ret_notrace",
 	      "socket_filter did not return 0\n");
 
 out_filterfd:
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
index 873323fb18ba..739d2ea6ca55 100644
--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -1,21 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
 
-static void toggle_object_autoload_progs(const struct bpf_object *obj,
-					 const char *name_load)
-{
-	struct bpf_program *prog;
-
-	bpf_object__for_each_program(prog, obj) {
-		const char *name = bpf_program__name(prog);
-
-		if (!strcmp(name_load, name))
-			bpf_program__set_autoload(prog, true);
-		else
-			bpf_program__set_autoload(prog, false);
-	}
-}
-
 void test_reference_tracking(void)
 {
 	const char *file = "test_sk_lookup_kern.o";
@@ -39,6 +24,7 @@ void test_reference_tracking(void)
 		goto cleanup;
 
 	bpf_object__for_each_program(prog, obj_iter) {
+		struct bpf_program *p;
 		const char *name;
 
 		name = bpf_program__name(prog);
@@ -49,7 +35,12 @@ void test_reference_tracking(void)
 		if (!ASSERT_OK_PTR(obj, "obj_open_file"))
 			goto cleanup;
 
-		toggle_object_autoload_progs(obj, name);
+		/* no program is loaded by default, so just set autoload
+		 * to true for the single prog under test
+		 */
+		p = bpf_object__find_program_by_name(obj, name);
+		bpf_program__set_autoload(p, true);
+
 		/* Expect verifier failure if test name has 'err' */
 		if (strncmp(name, "err_", sizeof("err_") - 1) == 0) {
 			libbpf_print_fn_t old_print_fn;
diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
index e945195b24c9..eb5f7f5aa81a 100644
--- a/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c
@@ -50,18 +50,6 @@ void test_ringbuf_multi(void)
 	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
 		return;
 
-	err = bpf_map__set_max_entries(skel->maps.ringbuf1, page_size);
-	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
-		goto cleanup;
-
-	err = bpf_map__set_max_entries(skel->maps.ringbuf2, page_size);
-	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
-		goto cleanup;
-
-	err = bpf_map__set_max_entries(bpf_map__inner_map(skel->maps.ringbuf_arr), page_size);
-	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
-		goto cleanup;
-
 	proto_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, NULL, 0, 0, page_size, NULL);
 	if (CHECK(proto_fd < 0, "bpf_map_create", "bpf_map_create failed\n"))
 		goto cleanup;
diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c
index 776916b61c40..d71226e34c34 100644
--- a/tools/testing/selftests/bpf/prog_tests/send_signal.c
+++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c
@@ -4,11 +4,11 @@
 #include <sys/resource.h>
 #include "test_send_signal_kern.skel.h"
 
-int sigusr1_received = 0;
+static int sigusr1_received;
 
 static void sigusr1_handler(int signum)
 {
-	sigusr1_received++;
+	sigusr1_received = 1;
 }
 
 static void test_send_signal_common(struct perf_event_attr *attr,
@@ -40,9 +40,10 @@ static void test_send_signal_common(struct perf_event_attr *attr,
 
 	if (pid == 0) {
 		int old_prio;
+		volatile int j = 0;
 
 		/* install signal handler and notify parent */
-		signal(SIGUSR1, sigusr1_handler);
+		ASSERT_NEQ(signal(SIGUSR1, sigusr1_handler), SIG_ERR, "signal");
 
 		close(pipe_c2p[0]); /* close read */
 		close(pipe_p2c[1]); /* close write */
@@ -63,9 +64,11 @@ static void test_send_signal_common(struct perf_event_attr *attr,
 		ASSERT_EQ(read(pipe_p2c[0], buf, 1), 1, "pipe_read");
 
 		/* wait a little for signal handler */
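+		/* busy-wait instead of sleeping so the child stays on-CPU;
+		 * the volatile j keeps the loop from being optimized away
+		 */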
-		sleep(1);
+		for (int i = 0; i < 100000000 && !sigusr1_received; i++)
+			j /= i + j + 1;
 
 		buf[0] = sigusr1_received ? '2' : '0';
+		ASSERT_EQ(sigusr1_received, 1, "sigusr1_received");
 		ASSERT_EQ(write(pipe_c2p[1], buf, 1), 1, "pipe_write");
 
 		/* wait for parent notification and exit */
@@ -93,7 +96,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
 			goto destroy_skel;
 		}
 	} else {
-		pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1,
+		pmu_fd = syscall(__NR_perf_event_open, attr, pid, -1 /* cpu */,
 				 -1 /* group id */, 0 /* flags */);
 		if (!ASSERT_GE(pmu_fd, 0, "perf_event_open")) {
 			err = -1;
@@ -110,9 +113,9 @@ static void test_send_signal_common(struct perf_event_attr *attr,
 	ASSERT_EQ(read(pipe_c2p[0], buf, 1), 1, "pipe_read");
 
 	/* trigger the bpf send_signal */
-	skel->bss->pid = pid;
-	skel->bss->sig = SIGUSR1;
 	skel->bss->signal_thread = signal_thread;
+	skel->bss->sig = SIGUSR1;
+	skel->bss->pid = pid;
 
 	/* notify child that bpf program can send_signal now */
 	ASSERT_EQ(write(pipe_p2c[1], buf, 1), 1, "pipe_write");
diff --git a/tools/testing/selftests/bpf/prog_tests/signal_pending.c b/tools/testing/selftests/bpf/prog_tests/signal_pending.c
index aecfe662c070..70b49da5ca0a 100644
--- a/tools/testing/selftests/bpf/prog_tests/signal_pending.c
+++ b/tools/testing/selftests/bpf/prog_tests/signal_pending.c
@@ -13,10 +13,14 @@ static void test_signal_pending_by_type(enum bpf_prog_type prog_type)
 	struct itimerval timeo = {
 		.it_value.tv_usec = 100000, /* 100ms */
 	};
-	__u32 duration = 0, retval;
 	int prog_fd;
 	int err;
 	int i;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 0xffffffff,
+	);
 
 	for (i = 0; i < ARRAY_SIZE(prog); i++)
 		prog[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0);
@@ -24,20 +28,17 @@ static void test_signal_pending_by_type(enum bpf_prog_type prog_type)
 
 	prog_fd = bpf_test_load_program(prog_type, prog, ARRAY_SIZE(prog),
					"GPL", 0, NULL, 0);
-	CHECK(prog_fd < 0, "test-run", "errno %d\n", errno);
+	ASSERT_GE(prog_fd, 0, "test-run load");
 
 	err = sigaction(SIGALRM, &sigalrm_action, NULL);
-	CHECK(err, "test-run-signal-sigaction", "errno %d\n", errno);
+	ASSERT_OK(err, "test-run-signal-sigaction");
 
 	err = setitimer(ITIMER_REAL, &timeo, NULL);
-	CHECK(err, "test-run-signal-timer", "errno %d\n", errno);
+	ASSERT_OK(err, "test-run-signal-timer");
 
-	err = bpf_prog_test_run(prog_fd, 0xffffffff, &pkt_v4, sizeof(pkt_v4),
-				NULL, NULL, &retval, &duration);
-	CHECK(duration > 500000000, /* 500ms */
-	      "test-run-signal-duration",
-	      "duration %dns > 500ms\n",
-	      duration);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_LE(topts.duration, 500000000 /* 500ms */,
+		  "test-run-signal-duration");
 
 	signal(SIGALRM, SIG_DFL);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
index b5319ba2ee27..ce0e555b5e38 100644
--- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
+++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c
@@ -20,97 +20,72 @@ void test_skb_ctx(void)
 		.gso_size = 10,
 		.hwtstamp = 11,
 	};
-	struct bpf_prog_test_run_attr tattr = {
+	LIBBPF_OPTS(bpf_test_run_opts, tattr,
 		.data_in = &pkt_v4,
 		.data_size_in = sizeof(pkt_v4),
 		.ctx_in = &skb,
 		.ctx_size_in = sizeof(skb),
 		.ctx_out = &skb,
 		.ctx_size_out = sizeof(skb),
-	};
+	);
 	struct bpf_object *obj;
-	int err;
-	int i;
+	int err, prog_fd, i;
 
-	err = bpf_prog_test_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
-				 &tattr.prog_fd);
-	if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
+	err = bpf_prog_test_load("./test_skb_ctx.o", BPF_PROG_TYPE_SCHED_CLS,
				 &obj, &prog_fd);
+	if (!ASSERT_OK(err, "load"))
 		return;
 
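+	/* each rejected field below is reset before the next case, so every
+	 * run exercises exactly one invalid ctx member at a time
+	 */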
 	/* ctx_in != NULL, ctx_size_in == 0 */
 	tattr.ctx_size_in = 0;
-	err = bpf_prog_test_run_xattr(&tattr);
-	CHECK_ATTR(err == 0, "ctx_size_in", "err %d errno %d\n", err, errno);
+	err = bpf_prog_test_run_opts(prog_fd, &tattr);
+	ASSERT_NEQ(err, 0, "ctx_size_in");
 	tattr.ctx_size_in = sizeof(skb);
 
 	/* ctx_out != NULL, ctx_size_out == 0 */
 	tattr.ctx_size_out = 0;
-	err = bpf_prog_test_run_xattr(&tattr);
-	CHECK_ATTR(err == 0, "ctx_size_out", "err %d errno %d\n", err, errno);
+	err = bpf_prog_test_run_opts(prog_fd, &tattr);
+	ASSERT_NEQ(err, 0, "ctx_size_out");
 	tattr.ctx_size_out = sizeof(skb);
 
 	/* non-zero [len, tc_index] fields should be rejected */
 	skb.len = 1;
-	err = bpf_prog_test_run_xattr(&tattr);
-	CHECK_ATTR(err == 0, "len", "err %d errno %d\n", err, errno);
+	err = bpf_prog_test_run_opts(prog_fd, &tattr);
+	ASSERT_NEQ(err, 0, "len");
 	skb.len = 0;
 
 	skb.tc_index = 1;
-	err = bpf_prog_test_run_xattr(&tattr);
-	CHECK_ATTR(err == 0, "tc_index", "err %d errno %d\n", err, errno);
+	err = bpf_prog_test_run_opts(prog_fd, &tattr);
+	ASSERT_NEQ(err, 0, "tc_index");
 	skb.tc_index = 0;
 
 	/* non-zero [hash, sk] fields should be rejected */
 	skb.hash = 1;
-	err = bpf_prog_test_run_xattr(&tattr);
-	CHECK_ATTR(err == 0, "hash", "err %d errno %d\n", err, errno);
+	err = bpf_prog_test_run_opts(prog_fd, &tattr);
+	ASSERT_NEQ(err, 0, "hash");
 	skb.hash = 0;
 
 	skb.sk = (struct bpf_sock *)1;
-	err = bpf_prog_test_run_xattr(&tattr);
-	CHECK_ATTR(err == 0, "sk", "err %d errno %d\n", err, errno);
+	err = bpf_prog_test_run_opts(prog_fd, &tattr);
+	ASSERT_NEQ(err, 0, "sk");
 	skb.sk = 0;
 
-	err = bpf_prog_test_run_xattr(&tattr);
-	CHECK_ATTR(err != 0 || tattr.retval,
-		   "run",
-		   "err %d errno %d retval %d\n",
-		   err, errno, tattr.retval);
-
-	CHECK_ATTR(tattr.ctx_size_out != sizeof(skb),
-		   "ctx_size_out",
-		   "incorrect output size, want %zu have %u\n",
-		   sizeof(skb), tattr.ctx_size_out);
+	err = bpf_prog_test_run_opts(prog_fd, &tattr);
+	ASSERT_OK(err, "test_run");
+	ASSERT_OK(tattr.retval, "test_run retval");
+	ASSERT_EQ(tattr.ctx_size_out, sizeof(skb), "ctx_size_out");
 
 	for (i = 0; i < 5; i++)
-		CHECK_ATTR(skb.cb[i] != i + 2,
-			   "ctx_out_cb",
-			   "skb->cb[i] == %d, expected %d\n",
-			   skb.cb[i], i + 2);
-	CHECK_ATTR(skb.priority != 7,
-		   "ctx_out_priority",
-		   "skb->priority == %d, expected %d\n",
-		   skb.priority, 7);
-	CHECK_ATTR(skb.ifindex != 1,
-		   "ctx_out_ifindex",
-		   "skb->ifindex == %d, expected %d\n",
-		   skb.ifindex, 1);
-	CHECK_ATTR(skb.ingress_ifindex != 11,
-		   "ctx_out_ingress_ifindex",
-		   "skb->ingress_ifindex == %d, expected %d\n",
-		   skb.ingress_ifindex, 11);
-	CHECK_ATTR(skb.tstamp != 8,
-		   "ctx_out_tstamp",
-		   "skb->tstamp == %lld, expected %d\n",
-		   skb.tstamp, 8);
-	CHECK_ATTR(skb.mark != 10,
-		   "ctx_out_mark",
-		   "skb->mark == %u, expected %d\n",
-		   skb.mark, 10);
+		ASSERT_EQ(skb.cb[i], i + 2, "ctx_out_cb");
+	ASSERT_EQ(skb.priority, 7, "ctx_out_priority");
+	ASSERT_EQ(skb.ifindex, 1, "ctx_out_ifindex");
+	ASSERT_EQ(skb.ingress_ifindex, 11, "ctx_out_ingress_ifindex");
+	ASSERT_EQ(skb.tstamp, 8, "ctx_out_tstamp");
+	ASSERT_EQ(skb.mark, 10, "ctx_out_mark");
 
 	bpf_object__close(obj);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_helpers.c b/tools/testing/selftests/bpf/prog_tests/skb_helpers.c
index 6f802a1c0800..97dc8b14be48 100644
--- a/tools/testing/selftests/bpf/prog_tests/skb_helpers.c
+++ b/tools/testing/selftests/bpf/prog_tests/skb_helpers.c
@@ -9,22 +9,22 @@ void test_skb_helpers(void)
 {
 	struct __sk_buff skb = {
 		.gso_segs = 8,
 		.gso_size = 10,
 	};
-	struct bpf_prog_test_run_attr tattr = {
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
 		.data_in = &pkt_v4,
 		.data_size_in = sizeof(pkt_v4),
 		.ctx_in = &skb,
 		.ctx_size_in = sizeof(skb),
 		.ctx_out = &skb,
 		.ctx_size_out = sizeof(skb),
-	};
+	);
 	struct bpf_object *obj;
-	int err;
+	int err, prog_fd;
 
-	err = bpf_prog_test_load("./test_skb_helpers.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
-				 &tattr.prog_fd);
-	if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
+	err = bpf_prog_test_load("./test_skb_helpers.o",
				 BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+	if (!ASSERT_OK(err, "load"))
 		return;
-	err = bpf_prog_test_run_xattr(&tattr);
-	CHECK_ATTR(err, "len", "err %d errno %d\n", err, errno);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run");
 	bpf_object__close(obj);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c
new file mode 100644
index 000000000000..d7f83c0a40a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "skb_load_bytes.skel.h"
+
+void test_skb_load_bytes(void)
+{
+	struct skb_load_bytes *skel;
+	int err, prog_fd, test_result;
+	struct __sk_buff skb = { 0 };
+
+	LIBBPF_OPTS(bpf_test_run_opts, tattr,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.ctx_in = &skb,
+		.ctx_size_in = sizeof(skb),
+	);
+
+	skel = skb_load_bytes__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+		return;
+
+	prog_fd = bpf_program__fd(skel->progs.skb_process);
+	if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
+		goto out;
+
+	skel->bss->load_offset = (uint32_t)(-1);
+	err = bpf_prog_test_run_opts(prog_fd, &tattr);
+	if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+		goto out;
+	test_result = skel->bss->test_result;
+	if (!ASSERT_EQ(test_result, -EFAULT, "offset -1"))
+		goto out;
+
+	skel->bss->load_offset = (uint32_t)10;
+	err = bpf_prog_test_run_opts(prog_fd, &tattr);
+	if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+		goto out;
+	test_result = skel->bss->test_result;
+	if (!ASSERT_EQ(test_result, 0, "offset 10"))
+		goto out;
+
+out:
+	skb_load_bytes__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c
index 394ebfc3bbf3..4be6fdb78c6a 100644
--- a/tools/testing/selftests/bpf/prog_tests/snprintf.c
+++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c
@@ -83,8 +83,6 @@ cleanup:
 	test_snprintf__destroy(skel);
 }
 
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
 /* Loads an eBPF object calling bpf_snprintf with up to 10 characters of fmt */
 static int load_single_snprintf(char *fmt)
 {
@@ -95,7 +93,7 @@ static int load_single_snprintf(char *fmt)
 	if (!skel)
 		return -EINVAL;
 
-	memcpy(skel->rodata->fmt, fmt, min(strlen(fmt) + 1, 10));
+	memcpy(skel->rodata->fmt, fmt, MIN(strlen(fmt) + 1, 10));
 
 	ret = test_snprintf_single__load(skel);
 	test_snprintf_single__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_fields.c b/tools/testing/selftests/bpf/prog_tests/sock_fields.c
index 9fc040eaa482..9d211b5c22c4 100644
--- a/tools/testing/selftests/bpf/prog_tests/sock_fields.c
+++ b/tools/testing/selftests/bpf/prog_tests/sock_fields.c
@@ -1,9 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2019 Facebook */
 
+#define _GNU_SOURCE
 #include <netinet/in.h>
 #include <arpa/inet.h>
 #include <unistd.h>
+#include <sched.h>
 #include <stdlib.h>
 #include <string.h>
 #include <errno.h>
@@ -20,6 +22,7 @@
 enum bpf_linum_array_idx {
 	EGRESS_LINUM_IDX,
 	INGRESS_LINUM_IDX,
+	READ_SK_DST_PORT_LINUM_IDX,
 	__NR_BPF_LINUM_ARRAY_IDX,
 };
 
@@ -42,8 +45,16 @@ static __u64 child_cg_id;
 static int linum_map_fd;
 static __u32 duration;
 
-static __u32 egress_linum_idx = EGRESS_LINUM_IDX;
-static __u32 ingress_linum_idx = INGRESS_LINUM_IDX;
+static bool create_netns(void)
+{
+	if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns"))
+		return false;
+
+	if (!ASSERT_OK(system("ip link set dev lo up"), "bring up lo"))
+		return false;
+
+	return true;
+}
 
 static void print_sk(const struct bpf_sock *sk, const char *prefix)
 {
@@ -91,19 +102,24 @@ static void check_result(void)
 {
 	struct bpf_tcp_sock srv_tp, cli_tp, listen_tp;
 	struct bpf_sock srv_sk, cli_sk, listen_sk;
-	__u32 ingress_linum, egress_linum;
+	__u32 idx, ingress_linum, egress_linum, linum;
 	int err;
 
-	err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx,
-				  &egress_linum);
+	idx = EGRESS_LINUM_IDX;
+	err = bpf_map_lookup_elem(linum_map_fd, &idx, &egress_linum);
 	CHECK(err < 0, "bpf_map_lookup_elem(linum_map_fd)",
 	      "err:%d errno:%d\n", err, errno);
 
-	err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx,
-				  &ingress_linum);
+	idx = INGRESS_LINUM_IDX;
+	err = bpf_map_lookup_elem(linum_map_fd, &idx, &ingress_linum);
 	CHECK(err < 0, "bpf_map_lookup_elem(linum_map_fd)",
 	      "err:%d errno:%d\n", err, errno);
 
+	idx = READ_SK_DST_PORT_LINUM_IDX;
+	err = bpf_map_lookup_elem(linum_map_fd, &idx, &linum);
+	ASSERT_OK(err, "bpf_map_lookup_elem(linum_map_fd, READ_SK_DST_PORT_IDX)");
+	ASSERT_EQ(linum, 0, "failure in read_sk_dst_port on line");
+
 	memcpy(&srv_sk, &skel->bss->srv_sk, sizeof(srv_sk));
 	memcpy(&srv_tp, &skel->bss->srv_tp, sizeof(srv_tp));
 	memcpy(&cli_sk, &skel->bss->cli_sk, sizeof(cli_sk));
@@ -262,7 +278,7 @@ static void test(void)
 	char buf[DATA_LEN];
 
 	/* Prepare listen_fd */
-	listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+	listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0xcafe, 0);
 	/* start_server() has logged the error details */
 	if (CHECK_FAIL(listen_fd == -1))
 		goto done;
@@ -330,8 +346,12 @@ done:
 
 void serial_test_sock_fields(void)
 {
-	struct bpf_link *egress_link = NULL, *ingress_link = NULL;
 	int parent_cg_fd = -1, child_cg_fd = -1;
+	struct bpf_link *link;
+
+	/* Use a dedicated netns to have a fixed listen port */
+	if (!create_netns())
+		return;
 
 	/* Create a cgroup, get fd, and join it */
 	parent_cg_fd = test__join_cgroup(PARENT_CGROUP);
@@ -352,15 +372,20 @@ void serial_test_sock_fields(void)
 	if (CHECK(!skel, "test_sock_fields__open_and_load", "failed\n"))
 		goto done;
 
-	egress_link = bpf_program__attach_cgroup(skel->progs.egress_read_sock_fields,
-						 child_cg_fd);
-	if (!ASSERT_OK_PTR(egress_link, "attach_cgroup(egress)"))
+	link = bpf_program__attach_cgroup(skel->progs.egress_read_sock_fields, child_cg_fd);
+	if (!ASSERT_OK_PTR(link, "attach_cgroup(egress_read_sock_fields)"))
+		goto done;
+	skel->links.egress_read_sock_fields = link;
+
+	link = bpf_program__attach_cgroup(skel->progs.ingress_read_sock_fields, child_cg_fd);
+	if (!ASSERT_OK_PTR(link, "attach_cgroup(ingress_read_sock_fields)"))
 		goto done;
+	skel->links.ingress_read_sock_fields = link;
 
-	ingress_link = bpf_program__attach_cgroup(skel->progs.ingress_read_sock_fields,
-						  child_cg_fd);
-	if (!ASSERT_OK_PTR(ingress_link, "attach_cgroup(ingress)"))
+	link = bpf_program__attach_cgroup(skel->progs.read_sk_dst_port, child_cg_fd);
+	if (!ASSERT_OK_PTR(link, "attach_cgroup(read_sk_dst_port)"))
 		goto done;
+	skel->links.read_sk_dst_port = link;
 
 	linum_map_fd = bpf_map__fd(skel->maps.linum_map);
 	sk_pkt_out_cnt_fd = bpf_map__fd(skel->maps.sk_pkt_out_cnt);
@@ -369,8 +394,7 @@ void serial_test_sock_fields(void)
 	test();
 
 done:
-	bpf_link__destroy(egress_link);
-	bpf_link__destroy(ingress_link);
+	test_sock_fields__detach(skel);
 	test_sock_fields__destroy(skel);
 	if (child_cg_fd >= 0)
 		close(child_cg_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index 85db0f4cdd95..cec5c0882372 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -8,6 +8,7 @@
 #include "test_sockmap_update.skel.h"
 #include "test_sockmap_invalid_update.skel.h"
 #include "test_sockmap_skb_verdict_attach.skel.h"
+#include "test_sockmap_progs_query.skel.h"
 #include "bpf_iter_sockmap.skel.h"
 
 #define TCP_REPAIR		19	/* TCP sock is under repair right now */
@@ -139,12 +140,16 @@ out:
 
 static void test_sockmap_update(enum bpf_map_type map_type)
 {
-	struct bpf_prog_test_run_attr tattr;
 	int err, prog, src, duration = 0;
 	struct test_sockmap_update *skel;
 	struct bpf_map *dst_map;
 	const __u32 zero = 0;
 	char dummy[14] = {0};
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = dummy,
+		.data_size_in = sizeof(dummy),
+		.repeat = 1,
+	);
 	__s64 sk;
 
 	sk = connected_socket_v4();
@@ -166,16 +171,10 @@ static void test_sockmap_update(enum bpf_map_type map_type)
 	if (CHECK(err, "update_elem(src)", "errno=%u\n", errno))
 		goto out;
 
-	tattr = (struct bpf_prog_test_run_attr){
-		.prog_fd = prog,
-		.repeat = 1,
-		.data_in = dummy,
-		.data_size_in = sizeof(dummy),
-	};
-
-	err = bpf_prog_test_run_xattr(&tattr);
-	if (CHECK_ATTR(err || !tattr.retval, "bpf_prog_test_run",
-		       "errno=%u retval=%u\n", errno, tattr.retval))
+	err = bpf_prog_test_run_opts(prog, &topts);
+	if (!ASSERT_OK(err, "test_run"))
+		goto out;
+	if (!ASSERT_NEQ(topts.retval, 0, "test_run retval"))
 		goto out;
 
 	compare_cookies(skel->maps.src, dst_map);
@@ -315,6 +314,63 @@ out:
 	test_sockmap_skb_verdict_attach__destroy(skel);
 }
 
+static __u32 query_prog_id(int prog_fd)
+{
+	struct bpf_prog_info info = {};
+	__u32 info_len = sizeof(info);
+	int err;
+
+	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+	if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd") ||
+	    !ASSERT_EQ(info_len, sizeof(info), "bpf_obj_get_info_by_fd"))
+		return 0;
+
+	return info.id;
+}
+
+static void test_sockmap_progs_query(enum bpf_attach_type attach_type)
+{
+	struct test_sockmap_progs_query *skel;
+	int err, map_fd, verdict_fd;
+	__u32 attach_flags = 0;
+	__u32 prog_ids[3] = {};
+	__u32 prog_cnt = 3;
+
+	skel = test_sockmap_progs_query__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "test_sockmap_progs_query__open_and_load"))
+		return;
+
+	map_fd = bpf_map__fd(skel->maps.sock_map);
+
+	if (attach_type == BPF_SK_MSG_VERDICT)
+		verdict_fd = bpf_program__fd(skel->progs.prog_skmsg_verdict);
+	else
+		verdict_fd = bpf_program__fd(skel->progs.prog_skb_verdict);
+
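+	/* before anything is attached, the query must succeed and report
+	 * zero programs
+	 */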
+	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
+			     &attach_flags, prog_ids, &prog_cnt);
+	ASSERT_OK(err, "bpf_prog_query failed");
+	ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
+	ASSERT_EQ(prog_cnt, 0, "wrong program count on query");
+
+	err = bpf_prog_attach(verdict_fd, map_fd, attach_type, 0);
+	if (!ASSERT_OK(err, "bpf_prog_attach failed"))
+		goto out;
+
+	prog_cnt = 1;
+	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
+			     &attach_flags, prog_ids, &prog_cnt);
+	ASSERT_OK(err, "bpf_prog_query failed");
+	ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
+	ASSERT_EQ(prog_cnt, 1, "wrong program count on query");
+	ASSERT_EQ(prog_ids[0], query_prog_id(verdict_fd),
+		  "wrong prog_ids on query");
+
+	bpf_prog_detach2(verdict_fd, map_fd, attach_type);
+out:
+	test_sockmap_progs_query__destroy(skel);
+}
+
 void test_sockmap_basic(void)
 {
 	if (test__start_subtest("sockmap create_update_free"))
@@ -341,4 +397,12 @@ void test_sockmap_basic(void)
 		test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT,
						BPF_SK_SKB_VERDICT);
 	}
+	if (test__start_subtest("sockmap msg_verdict progs query"))
+		test_sockmap_progs_query(BPF_SK_MSG_VERDICT);
+	if (test__start_subtest("sockmap stream_parser progs query"))
+		test_sockmap_progs_query(BPF_SK_SKB_STREAM_PARSER);
+	if (test__start_subtest("sockmap stream_verdict progs query"))
+		test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT);
+	if (test__start_subtest("sockmap skb_verdict progs query"))
+		test_sockmap_progs_query(BPF_SK_SKB_VERDICT);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
index af293ea1542c..e172d89e92e1 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c
@@ -4,6 +4,7 @@
  * Tests for sockmap/sockhash holding kTLS sockets.
 */
 
+#include <netinet/tcp.h>
 #include "test_progs.h"
 
 #define MAX_TEST_NAME 80
@@ -92,9 +93,78 @@ close_srv:
 	close(srv);
 }
 
+static void test_sockmap_ktls_update_fails_when_sock_has_ulp(int family, int map)
+{
+	struct sockaddr_storage addr = {};
+	socklen_t len = sizeof(addr);
+	struct sockaddr_in6 *v6;
+	struct sockaddr_in *v4;
+	int err, s, zero = 0;
+
+	switch (family) {
+	case AF_INET:
+		v4 = (struct sockaddr_in *)&addr;
+		v4->sin_family = AF_INET;
+		break;
+	case AF_INET6:
+		v6 = (struct sockaddr_in6 *)&addr;
+		v6->sin6_family = AF_INET6;
+		break;
+	default:
+		PRINT_FAIL("unsupported socket family %d", family);
+		return;
+	}
+
+	s = socket(family, SOCK_STREAM, 0);
+	if (!ASSERT_GE(s, 0, "socket"))
+		return;
+
+	err = bind(s, (struct sockaddr *)&addr, len);
+	if (!ASSERT_OK(err, "bind"))
+		goto close;
+
+	err = getsockname(s, (struct sockaddr *)&addr, &len);
+	if (!ASSERT_OK(err, "getsockname"))
+		goto close;
+
+	err = connect(s, (struct sockaddr *)&addr, len);
+	if (!ASSERT_OK(err, "connect"))
+		goto close;
+
+	/* save sk->sk_prot and set it to tls_prots */
+	err = setsockopt(s, IPPROTO_TCP, TCP_ULP, "tls", strlen("tls"));
+	if (!ASSERT_OK(err, "setsockopt(TCP_ULP)"))
+		goto close;
+
+	/* sockmap update should not affect saved sk_prot */
+	err = bpf_map_update_elem(map, &zero, &s, BPF_ANY);
+	if (!ASSERT_ERR(err, "sockmap update elem"))
+		goto close;
+
+	/* call sk->sk_prot->setsockopt to dispatch to saved sk_prot */
+	err = setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &zero, sizeof(zero));
+	ASSERT_OK(err, "setsockopt(TCP_NODELAY)");
+
+close:
+	close(s);
+}
+
+static const char *fmt_test_name(const char *subtest_name, int family,
+				 enum bpf_map_type map_type)
+{
+	const char *map_type_str = map_type == BPF_MAP_TYPE_SOCKMAP ? "SOCKMAP" : "SOCKHASH";
+	const char *family_str = family == AF_INET ? "IPv4" : "IPv6";
+	static char test_name[MAX_TEST_NAME];
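+	/* test_name is a static buffer: each name returned must be consumed
+	 * by test__start_subtest() before fmt_test_name() is called again
+	 */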
"SOCKMAP" : "SOCKHASH"); - if (!test__start_subtest(test_name)) - return; - - test_sockmap_ktls_disconnect_after_delete(family, map); + if (test__start_subtest(fmt_test_name("disconnect_after_delete", family, map_type))) + test_sockmap_ktls_disconnect_after_delete(family, map); + if (test__start_subtest(fmt_test_name("update_fails_when_sock_has_ulp", family, map_type))) + test_sockmap_ktls_update_fails_when_sock_has_ulp(family, map); close(map); } diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c index 7e21bfab6358..2cf0c7a3fe23 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c @@ -1413,14 +1413,12 @@ close_srv1: static void test_ops_cleanup(const struct bpf_map *map) { - const struct bpf_map_def *def; int err, mapfd; u32 key; - def = bpf_map__def(map); mapfd = bpf_map__fd(map); - for (key = 0; key < def->max_entries; key++) { + for (key = 0; key < bpf_map__max_entries(map); key++) { err = bpf_map_delete_elem(mapfd, &key); if (err && errno != EINVAL && errno != ENOENT) FAIL_ERRNO("map_delete: expected EINVAL/ENOENT"); @@ -1443,13 +1441,13 @@ static const char *family_str(sa_family_t family) static const char *map_type_str(const struct bpf_map *map) { - const struct bpf_map_def *def; + int type; - def = bpf_map__def(map); - if (IS_ERR(def)) + if (!map) return "invalid"; + type = bpf_map__type(map); - switch (def->type) { + switch (type) { case BPF_MAP_TYPE_SOCKMAP: return "sockmap"; case BPF_MAP_TYPE_SOCKHASH: diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c index 4b937e5dbaca..30a99d2ed5c6 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c @@ -173,11 +173,11 @@ static int getsetsockopt(void) } memset(&buf, 0, sizeof(buf)); - buf.zc.address = 12345; /* rejected by BPF */ + buf.zc.address = 12345; /* Not page aligned. 
 	optlen = sizeof(buf.zc);
 	errno = 0;
 	err = getsockopt(fd, SOL_TCP, TCP_ZEROCOPY_RECEIVE, &buf, &optlen);
-	if (errno != EPERM) {
+	if (errno != EINVAL) {
 		log_err("Unexpected getsockopt(TCP_ZEROCOPY_RECEIVE) err=%d errno=%d",
 			err, errno);
 		goto err;
diff --git a/tools/testing/selftests/bpf/prog_tests/spinlock.c b/tools/testing/selftests/bpf/prog_tests/spinlock.c
index 6307f5d2b417..8e329eaee6d7 100644
--- a/tools/testing/selftests/bpf/prog_tests/spinlock.c
+++ b/tools/testing/selftests/bpf/prog_tests/spinlock.c
@@ -4,14 +4,16 @@
 
 static void *spin_lock_thread(void *arg)
 {
-	__u32 duration, retval;
 	int err, prog_fd = *(u32 *) arg;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 10000,
+	);
 
-	err = bpf_prog_test_run(prog_fd, 10000, &pkt_v4, sizeof(pkt_v4),
-				NULL, NULL, &retval, &duration);
-	CHECK(err || retval, "",
-	      "err %d errno %d retval %d duration %d\n",
-	      err, errno, retval, duration);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run");
+	ASSERT_OK(topts.retval, "test_run retval");
 	pthread_exit(arg);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
index e8399ae50e77..9ad09a6c538a 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
@@ -8,7 +8,7 @@ void test_stacktrace_build_id(void)
 	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
 	struct test_stacktrace_build_id *skel;
 	int err, stack_trace_len;
-	__u32 key, previous_key, val, duration = 0;
+	__u32 key, prev_key, val, duration = 0;
 	char buf[256];
 	int i, j;
 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@@ -58,7 +58,7 @@ retry:
 		  "err %d errno %d\n", err, errno))
 		goto cleanup;
 
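+	/* bpf_map__get_next_key() is the typed libbpf wrapper: it validates
+	 * sizeof(key) against the map definition instead of trusting the
+	 * caller the way the raw fd-based bpf_map_get_next_key() does
+	 */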
-	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+	err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
 	if (CHECK(err, "get_next_key from stackmap",
 		  "err %d, errno %d\n", err, errno))
 		goto cleanup;
@@ -79,8 +79,8 @@ retry:
 			if (strstr(buf, build_id) != NULL)
 				build_id_matches = 1;
 		}
-		previous_key = key;
-	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+		prev_key = key;
+	} while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);
 
 	/* stack_map_get_build_id_offset() is racy and sometimes can return
 	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
index 0a91d8d9954b..f4ea1a215ce4 100644
--- a/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
@@ -27,7 +27,7 @@ void test_stacktrace_build_id_nmi(void)
 		.type = PERF_TYPE_HARDWARE,
 		.config = PERF_COUNT_HW_CPU_CYCLES,
 	};
-	__u32 key, previous_key, val, duration = 0;
+	__u32 key, prev_key, val, duration = 0;
 	char buf[256];
 	int i, j;
 	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
@@ -42,7 +42,7 @@ retry:
 		return;
 
 	/* override program type */
-	bpf_program__set_perf_event(skel->progs.oncpu);
+	bpf_program__set_type(skel->progs.oncpu, BPF_PROG_TYPE_PERF_EVENT);
 
 	err = test_stacktrace_build_id__load(skel);
 	if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
@@ -100,7 +100,7 @@ retry:
 		  "err %d errno %d\n", err, errno))
 		goto cleanup;
 
-	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
+	err = bpf_map__get_next_key(skel->maps.stackmap, NULL, &key, sizeof(key));
 	if (CHECK(err, "get_next_key from stackmap",
 		  "err %d, errno %d\n", err, errno))
 		goto cleanup;
@@ -108,7 +108,8 @@ retry:
 	do {
 		char build_id[64];
 
-		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
+		err = bpf_map__lookup_elem(skel->maps.stackmap, &key, sizeof(key),
+					   id_offs, sizeof(id_offs), 0);
 		if (CHECK(err, "lookup_elem from stackmap",
 			  "err %d, errno %d\n", err, errno))
 			goto cleanup;
@@ -121,8 +122,8 @@ retry:
 			if (strstr(buf, build_id) != NULL)
 				build_id_matches = 1;
 		}
-		previous_key = key;
-	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+		prev_key = key;
+	} while (bpf_map__get_next_key(skel->maps.stackmap, &prev_key, &key, sizeof(key)) == 0);
 
 	/* stack_map_get_build_id_offset() is racy and sometimes can return
 	 * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
new file mode 100644
index 000000000000..1932b1e0685c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "stacktrace_map_skip.skel.h"
+
+#define TEST_STACK_DEPTH	2
+
+void test_stacktrace_map_skip(void)
+{
+	struct stacktrace_map_skip *skel;
+	int stackid_hmap_fd, stackmap_fd, stack_amap_fd;
+	int err, stack_trace_len;
+
+	skel = stacktrace_map_skip__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+		return;
+
+	/* find map fds */
+	stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
+	if (!ASSERT_GE(stackid_hmap_fd, 0, "stackid_hmap fd"))
+		goto out;
+
+	stackmap_fd = bpf_map__fd(skel->maps.stackmap);
+	if (!ASSERT_GE(stackmap_fd, 0, "stackmap fd"))
+		goto out;
+
+	stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);
+	if (!ASSERT_GE(stack_amap_fd, 0, "stack_amap fd"))
+		goto out;
+
+	skel->bss->pid = getpid();
+
+	err = stacktrace_map_skip__attach(skel);
+	if (!ASSERT_OK(err, "skel_attach"))
+		goto out;
+
+	/* give some time for bpf program run */
+	sleep(1);
+
+	/* disable stack trace collection */
+	skel->bss->control = 1;
+
+	/* for every element in stackid_hmap, we can find a corresponding one
+	 * in stackmap, and vice versa.
+	 */
+	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+	if (!ASSERT_OK(err, "compare_map_keys stackid_hmap vs. stackmap"))
+		goto out;
+
+	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+	if (!ASSERT_OK(err, "compare_map_keys stackmap vs. stackid_hmap"))
+		goto out;
+
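+	/* only TEST_STACK_DEPTH entries per stack are compared, matching
+	 * the shortened traces the skip-enabled BPF program records
+	 */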
stack_amap")) + goto out; + + if (!ASSERT_EQ(skel->bss->failed, 0, "skip_failed")) + goto out; + +out: + stacktrace_map_skip__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/subprogs.c b/tools/testing/selftests/bpf/prog_tests/subprogs.c index 3f3d2ac4dd57..903f35a9e62e 100644 --- a/tools/testing/selftests/bpf/prog_tests/subprogs.c +++ b/tools/testing/selftests/bpf/prog_tests/subprogs.c @@ -1,32 +1,83 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ #include <test_progs.h> -#include <time.h> #include "test_subprogs.skel.h" #include "test_subprogs_unused.skel.h" -static int duration; +struct toggler_ctx { + int fd; + bool stop; +}; -void test_subprogs(void) +static void *toggle_jit_harden(void *arg) +{ + struct toggler_ctx *ctx = arg; + char two = '2'; + char zero = '0'; + + while (!ctx->stop) { + lseek(ctx->fd, SEEK_SET, 0); + write(ctx->fd, &two, sizeof(two)); + lseek(ctx->fd, SEEK_SET, 0); + write(ctx->fd, &zero, sizeof(zero)); + } + + return NULL; +} + +static void test_subprogs_with_jit_harden_toggling(void) +{ + struct toggler_ctx ctx; + pthread_t toggler; + int err; + unsigned int i, loop = 10; + + ctx.fd = open("/proc/sys/net/core/bpf_jit_harden", O_RDWR); + if (!ASSERT_GE(ctx.fd, 0, "open bpf_jit_harden")) + return; + + ctx.stop = false; + err = pthread_create(&toggler, NULL, toggle_jit_harden, &ctx); + if (!ASSERT_OK(err, "new toggler")) + goto out; + + /* Make toggler thread to run */ + usleep(1); + + for (i = 0; i < loop; i++) { + struct test_subprogs *skel = test_subprogs__open_and_load(); + + if (!ASSERT_OK_PTR(skel, "skel open")) + break; + test_subprogs__destroy(skel); + } + + ctx.stop = true; + pthread_join(toggler, NULL); +out: + close(ctx.fd); +} + +static void test_subprogs_alone(void) { struct test_subprogs *skel; struct test_subprogs_unused *skel2; int err; skel = test_subprogs__open_and_load(); - if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + if (!ASSERT_OK_PTR(skel, "skel_open")) return; err = test_subprogs__attach(skel); - if (CHECK(err, "skel_attach", "failed to attach skeleton: %d\n", err)) + if (!ASSERT_OK(err, "skel attach")) goto cleanup; usleep(1); - CHECK(skel->bss->res1 != 12, "res1", "got %d, exp %d\n", skel->bss->res1, 12); - CHECK(skel->bss->res2 != 17, "res2", "got %d, exp %d\n", skel->bss->res2, 17); - CHECK(skel->bss->res3 != 19, "res3", "got %d, exp %d\n", skel->bss->res3, 19); - CHECK(skel->bss->res4 != 36, "res4", "got %d, exp %d\n", skel->bss->res4, 36); + ASSERT_EQ(skel->bss->res1, 12, "res1"); + ASSERT_EQ(skel->bss->res2, 17, "res2"); + ASSERT_EQ(skel->bss->res3, 19, "res3"); + ASSERT_EQ(skel->bss->res4, 36, "res4"); skel2 = test_subprogs_unused__open_and_load(); ASSERT_OK_PTR(skel2, "unused_progs_skel"); @@ -35,3 +86,11 @@ void test_subprogs(void) cleanup: test_subprogs__destroy(skel); } + +void test_subprogs(void) +{ + if (test__start_subtest("subprogs_alone")) + test_subprogs_alone(); + if (test__start_subtest("subprogs_and_jit_harden")) + test_subprogs_with_jit_harden_toggling(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/subskeleton.c b/tools/testing/selftests/bpf/prog_tests/subskeleton.c new file mode 100644 index 000000000000..9c31b7004f9c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/subskeleton.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
+
+#include <test_progs.h>
+#include "test_subskeleton.skel.h"
+#include "test_subskeleton_lib.subskel.h"
+
+static void subskeleton_lib_setup(struct bpf_object *obj)
+{
+	struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj);
+
+	if (!ASSERT_OK_PTR(lib, "open subskeleton"))
+		return;
+
+	*lib->rodata.var1 = 1;
+	*lib->data.var2 = 2;
+	lib->bss.var3->var3_1 = 3;
+	lib->bss.var3->var3_2 = 4;
+
+	test_subskeleton_lib__destroy(lib);
+}
+
+static int subskeleton_lib_subresult(struct bpf_object *obj)
+{
+	struct test_subskeleton_lib *lib = test_subskeleton_lib__open(obj);
+	int result;
+
+	if (!ASSERT_OK_PTR(lib, "open subskeleton"))
+		return -EINVAL;
+
+	result = *lib->bss.libout1;
+	ASSERT_EQ(result, 1 + 2 + 3 + 4 + 5 + 6, "lib subresult");
+
+	ASSERT_OK_PTR(lib->progs.lib_perf_handler, "lib_perf_handler");
+	ASSERT_STREQ(bpf_program__name(lib->progs.lib_perf_handler),
+		     "lib_perf_handler", "program name");
+
+	ASSERT_OK_PTR(lib->maps.map1, "map1");
+	ASSERT_STREQ(bpf_map__name(lib->maps.map1), "map1", "map name");
+
+	ASSERT_EQ(*lib->data.var5, 5, "__weak var5");
+	ASSERT_EQ(*lib->data.var6, 6, "extern var6");
+	ASSERT_TRUE(*lib->kconfig.CONFIG_BPF_SYSCALL, "CONFIG_BPF_SYSCALL");
+
+	test_subskeleton_lib__destroy(lib);
+	return result;
+}
+
+void test_subskeleton(void)
+{
+	int err, result;
+	struct test_subskeleton *skel;
+
+	skel = test_subskeleton__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		return;
+
+	skel->rodata->rovar1 = 10;
+	skel->rodata->var1 = 1;
+	subskeleton_lib_setup(skel->obj);
+
+	err = test_subskeleton__load(skel);
+	if (!ASSERT_OK(err, "skel_load"))
+		goto cleanup;
+
+	err = test_subskeleton__attach(skel);
+	if (!ASSERT_OK(err, "skel_attach"))
+		goto cleanup;
+
+	/* trigger tracepoint */
+	usleep(1);
+
+	result = subskeleton_lib_subresult(skel->obj) * 10;
+	ASSERT_EQ(skel->bss->out1, result, "unexpected calculation");
+
+cleanup:
+	test_subskeleton__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/syscall.c b/tools/testing/selftests/bpf/prog_tests/syscall.c
index 81e997a69f7a..f4d40001155a 100644
--- a/tools/testing/selftests/bpf/prog_tests/syscall.c
+++ b/tools/testing/selftests/bpf/prog_tests/syscall.c
@@ -20,20 +20,20 @@ void test_syscall(void)
 		.log_buf = (uintptr_t) verifier_log,
 		.log_size = sizeof(verifier_log),
 	};
-	struct bpf_prog_test_run_attr tattr = {
+	LIBBPF_OPTS(bpf_test_run_opts, tattr,
 		.ctx_in = &ctx,
 		.ctx_size_in = sizeof(ctx),
-	};
+	);
 	struct syscall *skel = NULL;
 	__u64 key = 12, value = 0;
-	int err;
+	int err, prog_fd;
 
 	skel = syscall__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "skel_load"))
 		goto cleanup;
 
-	tattr.prog_fd = bpf_program__fd(skel->progs.bpf_prog);
-	err = bpf_prog_test_run_xattr(&tattr);
+	prog_fd = bpf_program__fd(skel->progs.bpf_prog);
+	err = bpf_prog_test_run_opts(prog_fd, &tattr);
 	ASSERT_EQ(err, 0, "err");
 	ASSERT_EQ(tattr.retval, 1, "retval");
 	ASSERT_GT(ctx.map_fd, 0, "ctx.map_fd");
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index 5dc0f425bd11..19c70880cfb3 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -12,9 +12,13 @@ static void test_tailcall_1(void)
 	struct bpf_map *prog_array;
 	struct bpf_program *prog;
 	struct bpf_object *obj;
-	__u32 retval, duration;
 	char prog_name[32];
 	char buff[128] = {};
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = buff,
+		.data_size_in = sizeof(buff),
+		.repeat = 1,
+	);
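+	/* the classifiers only tail-call and return, so a zeroed dummy
+	 * frame is all the input these runs need
+	 */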
BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); @@ -37,7 +41,7 @@ static void test_tailcall_1(void) if (CHECK_FAIL(map_fd < 0)) goto out; - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); prog = bpf_object__find_program_by_name(obj, prog_name); @@ -53,23 +57,21 @@ static void test_tailcall_1(void) goto out; } - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != i, "tailcall", - "err %d errno %d retval %d\n", err, errno, retval); + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, i, "tailcall retval"); err = bpf_map_delete_elem(map_fd, &i); if (CHECK_FAIL(err)) goto out; } - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 3, "tailcall retval"); - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); prog = bpf_object__find_program_by_name(obj, prog_name); @@ -85,13 +87,12 @@ static void test_tailcall_1(void) goto out; } - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_OK(topts.retval, "tailcall retval"); - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { - j = bpf_map__def(prog_array)->max_entries - 1 - i; + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + j = bpf_map__max_entries(prog_array) - 1 - i; snprintf(prog_name, sizeof(prog_name), "classifier_%d", j); prog = bpf_object__find_program_by_name(obj, prog_name); @@ -107,33 +108,30 @@ static void test_tailcall_1(void) goto out; } - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { - j = bpf_map__def(prog_array)->max_entries - 1 - i; + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { + j = bpf_map__max_entries(prog_array) - 1 - i; - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != j, "tailcall", - "err %d errno %d retval %d\n", err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, j, "tailcall retval"); err = bpf_map_delete_elem(map_fd, &i); if (CHECK_FAIL(err)) goto out; } - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 3, "tailcall retval"); - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { err = bpf_map_delete_elem(map_fd, &i); if (CHECK_FAIL(err >= 0 || errno != ENOENT)) goto out; - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != 3, "tailcall", - "err 
%d errno %d retval %d\n", err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 3, "tailcall retval"); } out: @@ -150,9 +148,13 @@ static void test_tailcall_2(void) struct bpf_map *prog_array; struct bpf_program *prog; struct bpf_object *obj; - __u32 retval, duration; char prog_name[32]; char buff[128] = {}; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = buff, + .data_size_in = sizeof(buff), + .repeat = 1, + ); err = bpf_prog_test_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); @@ -175,7 +177,7 @@ static void test_tailcall_2(void) if (CHECK_FAIL(map_fd < 0)) goto out; - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); prog = bpf_object__find_program_by_name(obj, prog_name); @@ -191,30 +193,27 @@ static void test_tailcall_2(void) goto out; } - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != 2, "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 2, "tailcall retval"); i = 2; err = bpf_map_delete_elem(map_fd, &i); if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 1, "tailcall retval"); i = 0; err = bpf_map_delete_elem(map_fd, &i); if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 3, "tailcall retval"); out: bpf_object__close(obj); } @@ -225,8 +224,12 @@ static void test_tailcall_count(const char *which) struct bpf_map *prog_array, *data_map; struct bpf_program *prog; struct bpf_object *obj; - __u32 retval, duration; char buff[128] = {}; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = buff, + .data_size_in = sizeof(buff), + .repeat = 1, + ); err = bpf_prog_test_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); @@ -262,10 +265,9 @@ static void test_tailcall_count(const char *which) if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 1, "tailcall retval"); data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) @@ -277,18 +279,17 @@ static void test_tailcall_count(const char *which) i = 0; err = bpf_map_lookup_elem(data_fd, &i, &val); - CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n", - err, errno, val); + ASSERT_OK(err, "tailcall count"); + ASSERT_EQ(val, 33, "tailcall count"); i = 0; err = bpf_map_delete_elem(map_fd, &i); if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != 0, "tailcall", "err %d errno %d retval 
%d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_OK(topts.retval, "tailcall retval"); out: bpf_object__close(obj); } @@ -319,10 +320,14 @@ static void test_tailcall_4(void) struct bpf_map *prog_array, *data_map; struct bpf_program *prog; struct bpf_object *obj; - __u32 retval, duration; static const int zero = 0; char buff[128] = {}; char prog_name[32]; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = buff, + .data_size_in = sizeof(buff), + .repeat = 1, + ); err = bpf_prog_test_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); @@ -353,7 +358,7 @@ static void test_tailcall_4(void) if (CHECK_FAIL(map_fd < 0)) return; - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); prog = bpf_object__find_program_by_name(obj, prog_name); @@ -369,18 +374,17 @@ static void test_tailcall_4(void) goto out; } - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY); if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != i, "tailcall", - "err %d errno %d retval %d\n", err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, i, "tailcall retval"); } - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY); if (CHECK_FAIL(err)) goto out; @@ -389,10 +393,9 @@ static void test_tailcall_4(void) if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != 3, "tailcall", - "err %d errno %d retval %d\n", err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 3, "tailcall retval"); } out: bpf_object__close(obj); @@ -407,10 +410,14 @@ static void test_tailcall_5(void) struct bpf_map *prog_array, *data_map; struct bpf_program *prog; struct bpf_object *obj; - __u32 retval, duration; static const int zero = 0; char buff[128] = {}; char prog_name[32]; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = buff, + .data_size_in = sizeof(buff), + .repeat = 1, + ); err = bpf_prog_test_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); @@ -441,7 +448,7 @@ static void test_tailcall_5(void) if (CHECK_FAIL(map_fd < 0)) return; - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); prog = bpf_object__find_program_by_name(obj, prog_name); @@ -457,18 +464,17 @@ static void test_tailcall_5(void) goto out; } - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY); if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != i, "tailcall", - "err %d errno %d retval %d\n", err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, i, "tailcall retval"); } - for (i = 0; i 
< bpf_map__def(prog_array)->max_entries; i++) { + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY); if (CHECK_FAIL(err)) goto out; @@ -477,10 +483,9 @@ static void test_tailcall_5(void) if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != 3, "tailcall", - "err %d errno %d retval %d\n", err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 3, "tailcall retval"); } out: bpf_object__close(obj); @@ -495,8 +500,12 @@ static void test_tailcall_bpf2bpf_1(void) struct bpf_map *prog_array; struct bpf_program *prog; struct bpf_object *obj; - __u32 retval, duration; char prog_name[32]; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); err = bpf_prog_test_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); @@ -520,7 +529,7 @@ static void test_tailcall_bpf2bpf_1(void) goto out; /* nop -> jmp */ - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); prog = bpf_object__find_program_by_name(obj, prog_name); @@ -536,10 +545,9 @@ static void test_tailcall_bpf2bpf_1(void) goto out; } - err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, - 0, &retval, &duration); - CHECK(err || retval != 1, "tailcall", - "err %d errno %d retval %d\n", err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, 1, "tailcall retval"); /* jmp -> nop, call subprog that will do tailcall */ i = 1; @@ -547,10 +555,9 @@ static void test_tailcall_bpf2bpf_1(void) if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, - 0, &retval, &duration); - CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_OK(topts.retval, "tailcall retval"); /* make sure that subprog can access ctx and entry prog that * called this subprog can properly return @@ -560,11 +567,9 @@ static void test_tailcall_bpf2bpf_1(void) if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, - 0, &retval, &duration); - CHECK(err || retval != sizeof(pkt_v4) * 2, - "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval"); out: bpf_object__close(obj); } @@ -579,8 +584,12 @@ static void test_tailcall_bpf2bpf_2(void) struct bpf_map *prog_array, *data_map; struct bpf_program *prog; struct bpf_object *obj; - __u32 retval, duration; char buff[128] = {}; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = buff, + .data_size_in = sizeof(buff), + .repeat = 1, + ); err = bpf_prog_test_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); @@ -616,10 +625,9 @@ static void test_tailcall_bpf2bpf_2(void) if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + 
ASSERT_EQ(topts.retval, 1, "tailcall retval"); data_map = bpf_object__find_map_by_name(obj, "tailcall.bss"); if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map))) @@ -631,18 +639,17 @@ static void test_tailcall_bpf2bpf_2(void) i = 0; err = bpf_map_lookup_elem(data_fd, &i, &val); - CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n", - err, errno, val); + ASSERT_OK(err, "tailcall count"); + ASSERT_EQ(val, 33, "tailcall count"); i = 0; err = bpf_map_delete_elem(map_fd, &i); if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, - &duration, &retval, NULL); - CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_OK(topts.retval, "tailcall retval"); out: bpf_object__close(obj); } @@ -657,8 +664,12 @@ static void test_tailcall_bpf2bpf_3(void) struct bpf_map *prog_array; struct bpf_program *prog; struct bpf_object *obj; - __u32 retval, duration; char prog_name[32]; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); err = bpf_prog_test_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); @@ -681,7 +692,7 @@ static void test_tailcall_bpf2bpf_3(void) if (CHECK_FAIL(map_fd < 0)) goto out; - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + for (i = 0; i < bpf_map__max_entries(prog_array); i++) { snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); prog = bpf_object__find_program_by_name(obj, prog_name); @@ -697,33 +708,27 @@ static void test_tailcall_bpf2bpf_3(void) goto out; } - err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, - &duration, &retval, NULL); - CHECK(err || retval != sizeof(pkt_v4) * 3, - "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval"); i = 1; err = bpf_map_delete_elem(map_fd, &i); if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, - &duration, &retval, NULL); - CHECK(err || retval != sizeof(pkt_v4), - "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, sizeof(pkt_v4), "tailcall retval"); i = 0; err = bpf_map_delete_elem(map_fd, &i); if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, - &duration, &retval, NULL); - CHECK(err || retval != sizeof(pkt_v4) * 2, - "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 2, "tailcall retval"); out: bpf_object__close(obj); } @@ -754,8 +759,12 @@ static void test_tailcall_bpf2bpf_4(bool noise) struct bpf_map *prog_array, *data_map; struct bpf_program *prog; struct bpf_object *obj; - __u32 retval, duration; char prog_name[32]; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); err = bpf_prog_test_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); @@ -778,7 +787,7 @@ static void test_tailcall_bpf2bpf_4(bool noise) if (CHECK_FAIL(map_fd < 0)) goto out; - for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { + for (i = 0; i < bpf_map__max_entries(prog_array); 
i++) { snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); prog = bpf_object__find_program_by_name(obj, prog_name); @@ -809,20 +818,72 @@ static void test_tailcall_bpf2bpf_4(bool noise) if (CHECK_FAIL(err)) goto out; - err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0, - &duration, &retval, NULL); - CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n", - err, errno, retval); + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "tailcall"); + ASSERT_EQ(topts.retval, sizeof(pkt_v4) * 3, "tailcall retval"); i = 0; err = bpf_map_lookup_elem(data_fd, &i, &val); - CHECK(err || val.count != 31, "tailcall count", "err %d errno %d count %d\n", - err, errno, val.count); + ASSERT_OK(err, "tailcall count"); + ASSERT_EQ(val.count, 31, "tailcall count"); out: bpf_object__close(obj); } +#include "tailcall_bpf2bpf6.skel.h" + +/* Tail call counting works even when there is data on stack which is + * not aligned to 8 bytes. + */ +static void test_tailcall_bpf2bpf_6(void) +{ + struct tailcall_bpf2bpf6 *obj; + int err, map_fd, prog_fd, main_fd, data_fd, i, val; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + + obj = tailcall_bpf2bpf6__open_and_load(); + if (!ASSERT_OK_PTR(obj, "open and load")) + return; + + main_fd = bpf_program__fd(obj->progs.entry); + if (!ASSERT_GE(main_fd, 0, "entry prog fd")) + goto out; + + map_fd = bpf_map__fd(obj->maps.jmp_table); + if (!ASSERT_GE(map_fd, 0, "jmp_table map fd")) + goto out; + + prog_fd = bpf_program__fd(obj->progs.classifier_0); + if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd")) + goto out; + + i = 0; + err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY); + if (!ASSERT_OK(err, "jmp_table map update")) + goto out; + + err = bpf_prog_test_run_opts(main_fd, &topts); + ASSERT_OK(err, "entry prog test run"); + ASSERT_EQ(topts.retval, 0, "tailcall retval"); + + data_fd = bpf_map__fd(obj->maps.bss); + if (!ASSERT_GE(data_fd, 0, "bss map fd")) + goto out; + + i = 0; + err = bpf_map_lookup_elem(data_fd, &i, &val); + ASSERT_OK(err, "bss map lookup"); + ASSERT_EQ(val, 1, "done flag is set"); + +out: + tailcall_bpf2bpf6__destroy(obj); +} + void test_tailcalls(void) { if (test__start_subtest("tailcall_1")) @@ -847,4 +908,6 @@ void test_tailcalls(void) test_tailcall_bpf2bpf_4(false); if (test__start_subtest("tailcall_bpf2bpf_5")) test_tailcall_bpf2bpf_4(true); + if (test__start_subtest("tailcall_bpf2bpf_6")) + test_tailcall_bpf2bpf_6(); } diff --git a/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c index 37c20b5ffa70..61935e7e056a 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c +++ b/tools/testing/selftests/bpf/prog_tests/task_pt_regs.c @@ -3,18 +3,22 @@ #include <test_progs.h> #include "test_task_pt_regs.skel.h" +/* uprobe attach point */ +static void trigger_func(void) +{ + asm volatile (""); +} + void test_task_pt_regs(void) { struct test_task_pt_regs *skel; struct bpf_link *uprobe_link; - size_t uprobe_offset; - ssize_t base_addr; + ssize_t uprobe_offset; bool match; - base_addr = get_base_addr(); - if (!ASSERT_GT(base_addr, 0, "get_base_addr")) + uprobe_offset = get_uprobe_offset(&trigger_func); + if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset")) return; - uprobe_offset = get_uprobe_offset(&get_base_addr, base_addr); skel = test_task_pt_regs__open_and_load(); if (!ASSERT_OK_PTR(skel, "skel_open")) @@ -32,7 +36,7 @@ void test_task_pt_regs(void) 
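The task_pt_regs change above is part of a wider cleanup: instead of get_base_addr() plus a manual offset computation, tests declare a private trigger function and hand it straight to get_uprobe_offset(). For reference, a sketch of how such an offset feeds a manual uprobe attach (bpf_program__attach_uprobe is the libbpf call the skeleton link wraps; error handling trimmed):

    /* must have a real body so the compiler emits the symbol */
    static void trigger_func(void)
    {
        asm volatile ("");
    }

    static struct bpf_link *attach_trigger_uprobe(struct bpf_program *prog)
    {
        ssize_t offset = get_uprobe_offset(&trigger_func);

        if (offset < 0)
            return NULL;
        /* pid 0 = this process; offset is relative to the executable mapping */
        return bpf_program__attach_uprobe(prog, false /* retprobe */, 0,
                                          "/proc/self/exe", offset);
    }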
skel->links.handle_uprobe = uprobe_link; /* trigger & validate uprobe */ - get_base_addr(); + trigger_func(); if (!ASSERT_EQ(skel->bss->uprobe_res, 1, "check_uprobe_res")) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index c2426df58e17..958dae769c52 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -10,17 +10,15 @@ * to drop unexpected traffic. */ -#define _GNU_SOURCE - #include <arpa/inet.h> #include <linux/if.h> #include <linux/if_tun.h> #include <linux/limits.h> #include <linux/sysctl.h> -#include <sched.h> +#include <linux/time_types.h> +#include <linux/net_tstamp.h> #include <stdbool.h> #include <stdio.h> -#include <sys/mount.h> #include <sys/stat.h> #include <unistd.h> @@ -29,6 +27,11 @@ #include "test_tc_neigh_fib.skel.h" #include "test_tc_neigh.skel.h" #include "test_tc_peer.skel.h" +#include "test_tc_dtime.skel.h" + +#ifndef TCP_TX_DELAY +#define TCP_TX_DELAY 37 +#endif #define NS_SRC "ns_src" #define NS_FWD "ns_fwd" @@ -61,6 +64,7 @@ #define CHK_PROG_PIN_FILE "/sys/fs/bpf/test_tc_chk" #define TIMEOUT_MILLIS 10000 +#define NSEC_PER_SEC 1000000000ULL #define log_err(MSG, ...) \ fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \ @@ -84,91 +88,6 @@ static int write_file(const char *path, const char *newval) return 0; } -struct nstoken { - int orig_netns_fd; -}; - -static int setns_by_fd(int nsfd) -{ - int err; - - err = setns(nsfd, CLONE_NEWNET); - close(nsfd); - - if (!ASSERT_OK(err, "setns")) - return err; - - /* Switch /sys to the new namespace so that e.g. /sys/class/net - * reflects the devices in the new namespace. - */ - err = unshare(CLONE_NEWNS); - if (!ASSERT_OK(err, "unshare")) - return err; - - /* Make our /sys mount private, so the following umount won't - * trigger the global umount in case it's shared. - */ - err = mount("none", "/sys", NULL, MS_PRIVATE, NULL); - if (!ASSERT_OK(err, "remount private /sys")) - return err; - - err = umount2("/sys", MNT_DETACH); - if (!ASSERT_OK(err, "umount2 /sys")) - return err; - - err = mount("sysfs", "/sys", "sysfs", 0, NULL); - if (!ASSERT_OK(err, "mount /sys")) - return err; - - err = mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL); - if (!ASSERT_OK(err, "mount /sys/fs/bpf")) - return err; - - return 0; -} - -/** - * open_netns() - Switch to specified network namespace by name. - * - * Returns token with which to restore the original namespace - * using close_netns(). 
- */ -static struct nstoken *open_netns(const char *name) -{ - int nsfd; - char nspath[PATH_MAX]; - int err; - struct nstoken *token; - - token = malloc(sizeof(struct nstoken)); - if (!ASSERT_OK_PTR(token, "malloc token")) - return NULL; - - token->orig_netns_fd = open("/proc/self/ns/net", O_RDONLY); - if (!ASSERT_GE(token->orig_netns_fd, 0, "open /proc/self/ns/net")) - goto fail; - - snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name); - nsfd = open(nspath, O_RDONLY | O_CLOEXEC); - if (!ASSERT_GE(nsfd, 0, "open netns fd")) - goto fail; - - err = setns_by_fd(nsfd); - if (!ASSERT_OK(err, "setns_by_fd")) - goto fail; - - return token; -fail: - free(token); - return NULL; -} - -static void close_netns(struct nstoken *token) -{ - ASSERT_OK(setns_by_fd(token->orig_netns_fd), "setns_by_fd"); - free(token); -} - static int netns_setup_namespaces(const char *verb) { const char * const *ns = namespaces; @@ -440,6 +359,431 @@ static int set_forwarding(bool enable) return 0; } +static void rcv_tstamp(int fd, const char *expected, size_t s) +{ + struct __kernel_timespec pkt_ts = {}; + char ctl[CMSG_SPACE(sizeof(pkt_ts))]; + struct timespec now_ts; + struct msghdr msg = {}; + __u64 now_ns, pkt_ns; + struct cmsghdr *cmsg; + struct iovec iov; + char data[32]; + int ret; + + iov.iov_base = data; + iov.iov_len = sizeof(data); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = &ctl; + msg.msg_controllen = sizeof(ctl); + + ret = recvmsg(fd, &msg, 0); + if (!ASSERT_EQ(ret, s, "recvmsg")) + return; + ASSERT_STRNEQ(data, expected, s, "expected rcv data"); + + cmsg = CMSG_FIRSTHDR(&msg); + if (cmsg && cmsg->cmsg_level == SOL_SOCKET && + cmsg->cmsg_type == SO_TIMESTAMPNS_NEW) + memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts)); + + pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec; + ASSERT_NEQ(pkt_ns, 0, "pkt rcv tstamp"); + + ret = clock_gettime(CLOCK_REALTIME, &now_ts); + ASSERT_OK(ret, "clock_gettime"); + now_ns = now_ts.tv_sec * NSEC_PER_SEC + now_ts.tv_nsec; + + if (ASSERT_GE(now_ns, pkt_ns, "check rcv tstamp")) + ASSERT_LT(now_ns - pkt_ns, 5 * NSEC_PER_SEC, + "check rcv tstamp"); +} + +static void snd_tstamp(int fd, char *b, size_t s) +{ + struct sock_txtime opt = { .clockid = CLOCK_TAI }; + char ctl[CMSG_SPACE(sizeof(__u64))]; + struct timespec now_ts; + struct msghdr msg = {}; + struct cmsghdr *cmsg; + struct iovec iov; + __u64 now_ns; + int ret; + + ret = clock_gettime(CLOCK_TAI, &now_ts); + ASSERT_OK(ret, "clock_get_time(CLOCK_TAI)"); + now_ns = now_ts.tv_sec * NSEC_PER_SEC + now_ts.tv_nsec; + + iov.iov_base = b; + iov.iov_len = s; + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = &ctl; + msg.msg_controllen = sizeof(ctl); + + cmsg = CMSG_FIRSTHDR(&msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_TXTIME; + cmsg->cmsg_len = CMSG_LEN(sizeof(now_ns)); + *(__u64 *)CMSG_DATA(cmsg) = now_ns; + + ret = setsockopt(fd, SOL_SOCKET, SO_TXTIME, &opt, sizeof(opt)); + ASSERT_OK(ret, "setsockopt(SO_TXTIME)"); + + ret = sendmsg(fd, &msg, 0); + ASSERT_EQ(ret, s, "sendmsg"); +} + +static void test_inet_dtime(int family, int type, const char *addr, __u16 port) +{ + int opt = 1, accept_fd = -1, client_fd = -1, listen_fd, err; + char buf[] = "testing testing"; + struct nstoken *nstoken; + + nstoken = open_netns(NS_DST); + if (!ASSERT_OK_PTR(nstoken, "setns dst")) + return; + listen_fd = start_server(family, type, addr, port, 0); + close_netns(nstoken); + + if (!ASSERT_GE(listen_fd, 0, "listen")) + return; + + /* Ensure the kernel puts the (rcv) timestamp for all skb 
*/ + err = setsockopt(listen_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, + &opt, sizeof(opt)); + if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)")) + goto done; + + if (type == SOCK_STREAM) { + /* Ensure the kernel set EDT when sending out rst/ack + * from the kernel's ctl_sk. + */ + err = setsockopt(listen_fd, SOL_TCP, TCP_TX_DELAY, &opt, + sizeof(opt)); + if (!ASSERT_OK(err, "setsockopt(TCP_TX_DELAY)")) + goto done; + } + + nstoken = open_netns(NS_SRC); + if (!ASSERT_OK_PTR(nstoken, "setns src")) + goto done; + client_fd = connect_to_fd(listen_fd, TIMEOUT_MILLIS); + close_netns(nstoken); + + if (!ASSERT_GE(client_fd, 0, "connect_to_fd")) + goto done; + + if (type == SOCK_STREAM) { + int n; + + accept_fd = accept(listen_fd, NULL, NULL); + if (!ASSERT_GE(accept_fd, 0, "accept")) + goto done; + + n = write(client_fd, buf, sizeof(buf)); + if (!ASSERT_EQ(n, sizeof(buf), "send to server")) + goto done; + rcv_tstamp(accept_fd, buf, sizeof(buf)); + } else { + snd_tstamp(client_fd, buf, sizeof(buf)); + rcv_tstamp(listen_fd, buf, sizeof(buf)); + } + +done: + close(listen_fd); + if (accept_fd != -1) + close(accept_fd); + if (client_fd != -1) + close(client_fd); +} + +static int netns_load_dtime_bpf(struct test_tc_dtime *skel) +{ + struct nstoken *nstoken; + +#define PIN_FNAME(__file) "/sys/fs/bpf/" #__file +#define PIN(__prog) ({ \ + int err = bpf_program__pin(skel->progs.__prog, PIN_FNAME(__prog)); \ + if (!ASSERT_OK(err, "pin " #__prog)) \ + goto fail; \ + }) + + /* setup ns_src tc progs */ + nstoken = open_netns(NS_SRC); + if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC)) + return -1; + PIN(egress_host); + PIN(ingress_host); + SYS("tc qdisc add dev veth_src clsact"); + SYS("tc filter add dev veth_src ingress bpf da object-pinned " + PIN_FNAME(ingress_host)); + SYS("tc filter add dev veth_src egress bpf da object-pinned " + PIN_FNAME(egress_host)); + close_netns(nstoken); + + /* setup ns_dst tc progs */ + nstoken = open_netns(NS_DST); + if (!ASSERT_OK_PTR(nstoken, "setns " NS_DST)) + return -1; + PIN(egress_host); + PIN(ingress_host); + SYS("tc qdisc add dev veth_dst clsact"); + SYS("tc filter add dev veth_dst ingress bpf da object-pinned " + PIN_FNAME(ingress_host)); + SYS("tc filter add dev veth_dst egress bpf da object-pinned " + PIN_FNAME(egress_host)); + close_netns(nstoken); + + /* setup ns_fwd tc progs */ + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD)) + return -1; + PIN(ingress_fwdns_prio100); + PIN(egress_fwdns_prio100); + PIN(ingress_fwdns_prio101); + PIN(egress_fwdns_prio101); + SYS("tc qdisc add dev veth_dst_fwd clsact"); + SYS("tc filter add dev veth_dst_fwd ingress prio 100 bpf da object-pinned " + PIN_FNAME(ingress_fwdns_prio100)); + SYS("tc filter add dev veth_dst_fwd ingress prio 101 bpf da object-pinned " + PIN_FNAME(ingress_fwdns_prio101)); + SYS("tc filter add dev veth_dst_fwd egress prio 100 bpf da object-pinned " + PIN_FNAME(egress_fwdns_prio100)); + SYS("tc filter add dev veth_dst_fwd egress prio 101 bpf da object-pinned " + PIN_FNAME(egress_fwdns_prio101)); + SYS("tc qdisc add dev veth_src_fwd clsact"); + SYS("tc filter add dev veth_src_fwd ingress prio 100 bpf da object-pinned " + PIN_FNAME(ingress_fwdns_prio100)); + SYS("tc filter add dev veth_src_fwd ingress prio 101 bpf da object-pinned " + PIN_FNAME(ingress_fwdns_prio101)); + SYS("tc filter add dev veth_src_fwd egress prio 100 bpf da object-pinned " + PIN_FNAME(egress_fwdns_prio100)); + SYS("tc filter add dev veth_src_fwd egress prio 101 bpf da object-pinned " + 
PIN_FNAME(egress_fwdns_prio101)); + close_netns(nstoken); + +#undef PIN + + return 0; + +fail: + close_netns(nstoken); + return -1; +} + +enum { + INGRESS_FWDNS_P100, + INGRESS_FWDNS_P101, + EGRESS_FWDNS_P100, + EGRESS_FWDNS_P101, + INGRESS_ENDHOST, + EGRESS_ENDHOST, + SET_DTIME, + __MAX_CNT, +}; + +const char *cnt_names[] = { + "ingress_fwdns_p100", + "ingress_fwdns_p101", + "egress_fwdns_p100", + "egress_fwdns_p101", + "ingress_endhost", + "egress_endhost", + "set_dtime", +}; + +enum { + TCP_IP6_CLEAR_DTIME, + TCP_IP4, + TCP_IP6, + UDP_IP4, + UDP_IP6, + TCP_IP4_RT_FWD, + TCP_IP6_RT_FWD, + UDP_IP4_RT_FWD, + UDP_IP6_RT_FWD, + UKN_TEST, + __NR_TESTS, +}; + +const char *test_names[] = { + "tcp ip6 clear dtime", + "tcp ip4", + "tcp ip6", + "udp ip4", + "udp ip6", + "tcp ip4 rt fwd", + "tcp ip6 rt fwd", + "udp ip4 rt fwd", + "udp ip6 rt fwd", +}; + +static const char *dtime_cnt_str(int test, int cnt) +{ + static char name[64]; + + snprintf(name, sizeof(name), "%s %s", test_names[test], cnt_names[cnt]); + + return name; +} + +static const char *dtime_err_str(int test, int cnt) +{ + static char name[64]; + + snprintf(name, sizeof(name), "%s %s errs", test_names[test], + cnt_names[cnt]); + + return name; +} + +static void test_tcp_clear_dtime(struct test_tc_dtime *skel) +{ + int i, t = TCP_IP6_CLEAR_DTIME; + __u32 *dtimes = skel->bss->dtimes[t]; + __u32 *errs = skel->bss->errs[t]; + + skel->bss->test = t; + test_inet_dtime(AF_INET6, SOCK_STREAM, IP6_DST, 0); + + ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0, + dtime_cnt_str(t, INGRESS_FWDNS_P100)); + ASSERT_EQ(dtimes[INGRESS_FWDNS_P101], 0, + dtime_cnt_str(t, INGRESS_FWDNS_P101)); + ASSERT_GT(dtimes[EGRESS_FWDNS_P100], 0, + dtime_cnt_str(t, EGRESS_FWDNS_P100)); + ASSERT_EQ(dtimes[EGRESS_FWDNS_P101], 0, + dtime_cnt_str(t, EGRESS_FWDNS_P101)); + ASSERT_GT(dtimes[EGRESS_ENDHOST], 0, + dtime_cnt_str(t, EGRESS_ENDHOST)); + ASSERT_GT(dtimes[INGRESS_ENDHOST], 0, + dtime_cnt_str(t, INGRESS_ENDHOST)); + + for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++) + ASSERT_EQ(errs[i], 0, dtime_err_str(t, i)); +} + +static void test_tcp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd) +{ + __u32 *dtimes, *errs; + const char *addr; + int i, t; + + if (family == AF_INET) { + t = bpf_fwd ? TCP_IP4 : TCP_IP4_RT_FWD; + addr = IP4_DST; + } else { + t = bpf_fwd ? TCP_IP6 : TCP_IP6_RT_FWD; + addr = IP6_DST; + } + + dtimes = skel->bss->dtimes[t]; + errs = skel->bss->errs[t]; + + skel->bss->test = t; + test_inet_dtime(family, SOCK_STREAM, addr, 0); + + /* fwdns_prio100 prog does not read delivery_time_type, so + * the kernel puts the (rcv) timestamp in __sk_buff->tstamp + */ + ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0, + dtime_cnt_str(t, INGRESS_FWDNS_P100)); + for (i = INGRESS_FWDNS_P101; i < SET_DTIME; i++) + ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i)); + + for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++) + ASSERT_EQ(errs[i], 0, dtime_err_str(t, i)); +} + +static void test_udp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd) +{ + __u32 *dtimes, *errs; + const char *addr; + int i, t; + + if (family == AF_INET) { + t = bpf_fwd ? UDP_IP4 : UDP_IP4_RT_FWD; + addr = IP4_DST; + } else { + t = bpf_fwd ? 
UDP_IP6 : UDP_IP6_RT_FWD; + addr = IP6_DST; + } + + dtimes = skel->bss->dtimes[t]; + errs = skel->bss->errs[t]; + + skel->bss->test = t; + test_inet_dtime(family, SOCK_DGRAM, addr, 0); + + ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0, + dtime_cnt_str(t, INGRESS_FWDNS_P100)); + /* non-mono delivery time is not forwarded */ + ASSERT_EQ(dtimes[INGRESS_FWDNS_P101], 0, + dtime_cnt_str(t, INGRESS_FWDNS_P101)); + for (i = EGRESS_FWDNS_P100; i < SET_DTIME; i++) + ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i)); + + for (i = INGRESS_FWDNS_P100; i < __MAX_CNT; i++) + ASSERT_EQ(errs[i], 0, dtime_err_str(t, i)); +} + +static void test_tc_redirect_dtime(struct netns_setup_result *setup_result) +{ + struct test_tc_dtime *skel; + struct nstoken *nstoken; + int err; + + skel = test_tc_dtime__open(); + if (!ASSERT_OK_PTR(skel, "test_tc_dtime__open")) + return; + + skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd; + skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd; + + err = test_tc_dtime__load(skel); + if (!ASSERT_OK(err, "test_tc_dtime__load")) + goto done; + + if (netns_load_dtime_bpf(skel)) + goto done; + + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns fwd")) + goto done; + err = set_forwarding(false); + close_netns(nstoken); + if (!ASSERT_OK(err, "disable forwarding")) + goto done; + + test_tcp_clear_dtime(skel); + + test_tcp_dtime(skel, AF_INET, true); + test_tcp_dtime(skel, AF_INET6, true); + test_udp_dtime(skel, AF_INET, true); + test_udp_dtime(skel, AF_INET6, true); + + /* Test the kernel ip[6]_forward path instead + * of bpf_redirect_neigh(). + */ + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns fwd")) + goto done; + err = set_forwarding(true); + close_netns(nstoken); + if (!ASSERT_OK(err, "enable forwarding")) + goto done; + + test_tcp_dtime(skel, AF_INET, false); + test_tcp_dtime(skel, AF_INET6, false); + test_udp_dtime(skel, AF_INET, false); + test_udp_dtime(skel, AF_INET6, false); + +done: + test_tc_dtime__destroy(skel); +} + static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result) { struct nstoken *nstoken = NULL; @@ -605,7 +949,6 @@ fail: return -1; } -#define MAX(a, b) ((a) > (b) ? 
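The receive-side validation that all of these dtime flows rely on is the rcv_tstamp() helper shown earlier: it enables SO_TIMESTAMPNS_NEW on the receiving socket, after which the kernel attaches a struct __kernel_timespec control message to each received packet. A stripped-down sketch of that enable-and-read sequence (mirrors the test's includes; the setsockopt should happen before traffic arrives):

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/net_tstamp.h>
    #include <linux/time_types.h>

    static int read_rx_tstamp(int fd, struct __kernel_timespec *ts)
    {
        char data[32];
        char ctl[CMSG_SPACE(sizeof(*ts))];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
            .msg_iov = &iov, .msg_iovlen = 1,
            .msg_control = ctl, .msg_controllen = sizeof(ctl),
        };
        struct cmsghdr *cmsg;
        int one = 1;

        /* ask for nanosecond receive timestamps (the y2038-safe variant) */
        if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW, &one, sizeof(one)))
            return -1;
        if (recvmsg(fd, &msg, 0) < 0)
            return -1;
        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
            if (cmsg->cmsg_level == SOL_SOCKET &&
                cmsg->cmsg_type == SO_TIMESTAMPNS_NEW)
                memcpy(ts, CMSG_DATA(cmsg), sizeof(*ts));
        return 0;
    }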
(a) : (b)) enum { SRC_TO_TARGET = 0, TARGET_TO_SRC = 1, @@ -787,6 +1130,7 @@ static void *test_tc_redirect_run_tests(void *arg) RUN_TEST(tc_redirect_peer_l3); RUN_TEST(tc_redirect_neigh); RUN_TEST(tc_redirect_neigh_fib); + RUN_TEST(tc_redirect_dtime); return NULL; } diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c b/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c new file mode 100644 index 000000000000..c381faaae741 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright 2022 Sony Group Corporation */ +#include <sys/prctl.h> +#include <test_progs.h> +#include "bpf_syscall_macro.skel.h" + +void test_bpf_syscall_macro(void) +{ + struct bpf_syscall_macro *skel = NULL; + int err; + int exp_arg1 = 1001; + unsigned long exp_arg2 = 12; + unsigned long exp_arg3 = 13; + unsigned long exp_arg4 = 14; + unsigned long exp_arg5 = 15; + + /* check whether it can open program */ + skel = bpf_syscall_macro__open(); + if (!ASSERT_OK_PTR(skel, "bpf_syscall_macro__open")) + return; + + skel->rodata->filter_pid = getpid(); + + /* check whether it can load program */ + err = bpf_syscall_macro__load(skel); + if (!ASSERT_OK(err, "bpf_syscall_macro__load")) + goto cleanup; + + /* check whether it can attach kprobe */ + err = bpf_syscall_macro__attach(skel); + if (!ASSERT_OK(err, "bpf_syscall_macro__attach")) + goto cleanup; + + /* check whether args of syscall are copied correctly */ + prctl(exp_arg1, exp_arg2, exp_arg3, exp_arg4, exp_arg5); +#if defined(__aarch64__) || defined(__s390__) + ASSERT_NEQ(skel->bss->arg1, exp_arg1, "syscall_arg1"); +#else + ASSERT_EQ(skel->bss->arg1, exp_arg1, "syscall_arg1"); +#endif + ASSERT_EQ(skel->bss->arg2, exp_arg2, "syscall_arg2"); + ASSERT_EQ(skel->bss->arg3, exp_arg3, "syscall_arg3"); + /* it cannot copy arg4 when using PT_REGS_PARM4 on x86_64 */ +#ifdef __x86_64__ + ASSERT_NEQ(skel->bss->arg4_cx, exp_arg4, "syscall_arg4_from_cx"); +#else + ASSERT_EQ(skel->bss->arg4_cx, exp_arg4, "syscall_arg4_from_cx"); +#endif + ASSERT_EQ(skel->bss->arg4, exp_arg4, "syscall_arg4"); + ASSERT_EQ(skel->bss->arg5, exp_arg5, "syscall_arg5"); + + /* check whether args of syscall are copied correctly for CORE variants */ + ASSERT_EQ(skel->bss->arg1_core, exp_arg1, "syscall_arg1_core_variant"); + ASSERT_EQ(skel->bss->arg2_core, exp_arg2, "syscall_arg2_core_variant"); + ASSERT_EQ(skel->bss->arg3_core, exp_arg3, "syscall_arg3_core_variant"); + /* it cannot copy arg4 when using PT_REGS_PARM4_CORE on x86_64 */ +#ifdef __x86_64__ + ASSERT_NEQ(skel->bss->arg4_core_cx, exp_arg4, "syscall_arg4_from_cx_core_variant"); +#else + ASSERT_EQ(skel->bss->arg4_core_cx, exp_arg4, "syscall_arg4_from_cx_core_variant"); +#endif + ASSERT_EQ(skel->bss->arg4_core, exp_arg4, "syscall_arg4_core_variant"); + ASSERT_EQ(skel->bss->arg5_core, exp_arg5, "syscall_arg5_core_variant"); + + ASSERT_EQ(skel->bss->option_syscall, exp_arg1, "BPF_KPROBE_SYSCALL_option"); + ASSERT_EQ(skel->bss->arg2_syscall, exp_arg2, "BPF_KPROBE_SYSCALL_arg2"); + ASSERT_EQ(skel->bss->arg3_syscall, exp_arg3, "BPF_KPROBE_SYSCALL_arg3"); + ASSERT_EQ(skel->bss->arg4_syscall, exp_arg4, "BPF_KPROBE_SYSCALL_arg4"); + ASSERT_EQ(skel->bss->arg5_syscall, exp_arg5, "BPF_KPROBE_SYSCALL_arg5"); + +cleanup: + bpf_syscall_macro__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c index 509e21d5cb9d..b90ee47d3111 --- 
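The *_syscall counters checked last come from a program written with the BPF_KPROBE_SYSCALL wrapper (bpf_tracing.h), which fetches arguments from the syscall's real pt_regs and therefore sidesteps the x86-64 wrapper quirk that the raw PT_REGS_PARM4 assertions demonstrate. A sketch of what the BPF side can look like (program and variable names illustrative; the kprobe section name is arch-specific, and SYS_PREFIX is a selftests helper from bpf_misc.h, not a libbpf export):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    unsigned long arg2_syscall;

    SEC("kprobe/__x64_sys_prctl")  /* or "kprobe/" SYS_PREFIX "sys_prctl" */
    int BPF_KPROBE_SYSCALL(prctl_enter, int option, unsigned long arg2)
    {
        /* args are read from the syscall's pt_regs, not the kprobe's,
         * so this works even behind __x64_sys_* wrappers */
        arg2_syscall = arg2;
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";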
a/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c +++ b/tools/testing/selftests/bpf/prog_tests/test_global_funcs.c @@ -81,6 +81,7 @@ void test_test_global_funcs(void) { "test_global_func14.o", "reference type('FWD S') size cannot be determined" }, { "test_global_func15.o", "At program exit the register R0 has value" }, { "test_global_func16.o", "invalid indirect read from stack" }, + { "test_global_func17.o", "Caller passes invalid args into func#1" }, }; libbpf_print_fn_t old_print_fn = NULL; int err, i, duration = 0; diff --git a/tools/testing/selftests/bpf/prog_tests/test_ima.c b/tools/testing/selftests/bpf/prog_tests/test_ima.c index 97d8a6f84f4a..b13feceb38f1 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_ima.c +++ b/tools/testing/selftests/bpf/prog_tests/test_ima.c @@ -13,14 +13,17 @@ #include "ima.skel.h" -static int run_measured_process(const char *measured_dir, u32 *monitored_pid) +#define MAX_SAMPLES 4 + +static int _run_measured_process(const char *measured_dir, u32 *monitored_pid, + const char *cmd) { int child_pid, child_status; child_pid = fork(); if (child_pid == 0) { *monitored_pid = getpid(); - execlp("./ima_setup.sh", "./ima_setup.sh", "run", measured_dir, + execlp("./ima_setup.sh", "./ima_setup.sh", cmd, measured_dir, NULL); exit(errno); @@ -32,19 +35,39 @@ static int run_measured_process(const char *measured_dir, u32 *monitored_pid) return -EINVAL; } -static u64 ima_hash_from_bpf; +static int run_measured_process(const char *measured_dir, u32 *monitored_pid) +{ + return _run_measured_process(measured_dir, monitored_pid, "run"); +} + +static u64 ima_hash_from_bpf[MAX_SAMPLES]; +static int ima_hash_from_bpf_idx; static int process_sample(void *ctx, void *data, size_t len) { - ima_hash_from_bpf = *((u64 *)data); + if (ima_hash_from_bpf_idx >= MAX_SAMPLES) + return -ENOSPC; + + ima_hash_from_bpf[ima_hash_from_bpf_idx++] = *((u64 *)data); return 0; } +static void test_init(struct ima__bss *bss) +{ + ima_hash_from_bpf_idx = 0; + + bss->use_ima_file_hash = false; + bss->enable_bprm_creds_for_exec = false; + bss->enable_kernel_read_file = false; + bss->test_deny = false; +} + void test_test_ima(void) { char measured_dir_template[] = "/tmp/ima_measuredXXXXXX"; struct ring_buffer *ringbuf = NULL; const char *measured_dir; + u64 bin_true_sample; char cmd[256]; int err, duration = 0; @@ -72,13 +95,127 @@ void test_test_ima(void) if (CHECK(err, "failed to run command", "%s, errno = %d\n", cmd, errno)) goto close_clean; + /* + * Test #1 + * - Goal: obtain a sample with the bpf_ima_inode_hash() helper + * - Expected result: 1 sample (/bin/true) + */ + test_init(skel->bss); err = run_measured_process(measured_dir, &skel->bss->monitored_pid); - if (CHECK(err, "run_measured_process", "err = %d\n", err)) + if (CHECK(err, "run_measured_process #1", "err = %d\n", err)) goto close_clean; err = ring_buffer__consume(ringbuf); ASSERT_EQ(err, 1, "num_samples_or_err"); - ASSERT_NEQ(ima_hash_from_bpf, 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + + /* + * Test #2 + * - Goal: obtain samples with the bpf_ima_file_hash() helper + * - Expected result: 2 samples (./ima_setup.sh, /bin/true) + */ + test_init(skel->bss); + skel->bss->use_ima_file_hash = true; + err = run_measured_process(measured_dir, &skel->bss->monitored_pid); + if (CHECK(err, "run_measured_process #2", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 2, "num_samples_or_err"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + 
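All six IMA scenarios funnel through the ring-buffer consumer shown above: process_sample() appends each 8-byte digest into ima_hash_from_bpf[], and ring_buffer__consume() returns how many samples were drained, which doubles as the per-test sample-count assertion. The generic shape of that pattern, as a sketch (map_fd is assumed to be the fd of a BPF_MAP_TYPE_RINGBUF map):

    #include <errno.h>
    #include <linux/types.h>
    #include <bpf/libbpf.h>

    static __u64 samples[4];
    static int n;

    static int on_sample(void *ctx, void *data, size_t len)
    {
        if (n >= 4)
            return -ENOSPC;  /* a non-zero return stops consumption */
        samples[n++] = *(__u64 *)data;
        return 0;
    }

    /* setup once, then drain after each measured run:
     *   struct ring_buffer *rb = ring_buffer__new(map_fd, on_sample, NULL, NULL);
     *   int drained = ring_buffer__consume(rb);  // == number of samples
     */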
ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); + bin_true_sample = ima_hash_from_bpf[1]; + + /* + * Test #3 + * - Goal: confirm that bpf_ima_inode_hash() returns a non-fresh digest + * - Expected result: 2 samples (/bin/true: non-fresh, fresh) + */ + test_init(skel->bss); + + err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, + "modify-bin"); + if (CHECK(err, "modify-bin #3", "err = %d\n", err)) + goto close_clean; + + skel->bss->enable_bprm_creds_for_exec = true; + err = run_measured_process(measured_dir, &skel->bss->monitored_pid); + if (CHECK(err, "run_measured_process #3", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 2, "num_samples_or_err"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); + ASSERT_EQ(ima_hash_from_bpf[0], bin_true_sample, "sample_equal_or_err"); + /* IMA refreshed the digest. */ + ASSERT_NEQ(ima_hash_from_bpf[1], bin_true_sample, + "sample_different_or_err"); + + /* + * Test #4 + * - Goal: verify that bpf_ima_file_hash() returns a fresh digest + * - Expected result: 4 samples (./ima_setup.sh: fresh, fresh; + * /bin/true: fresh, fresh) + */ + test_init(skel->bss); + skel->bss->use_ima_file_hash = true; + skel->bss->enable_bprm_creds_for_exec = true; + err = run_measured_process(measured_dir, &skel->bss->monitored_pid); + if (CHECK(err, "run_measured_process #4", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 4, "num_samples_or_err"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[2], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[3], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[2], bin_true_sample, + "sample_different_or_err"); + ASSERT_EQ(ima_hash_from_bpf[3], ima_hash_from_bpf[2], + "sample_equal_or_err"); + + skel->bss->use_ima_file_hash = false; + skel->bss->enable_bprm_creds_for_exec = false; + err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, + "restore-bin"); + if (CHECK(err, "restore-bin #3", "err = %d\n", err)) + goto close_clean; + + /* + * Test #5 + * - Goal: obtain a sample from the kernel_read_file hook + * - Expected result: 2 samples (./ima_setup.sh, policy_test) + */ + test_init(skel->bss); + skel->bss->use_ima_file_hash = true; + skel->bss->enable_kernel_read_file = true; + err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, + "load-policy"); + if (CHECK(err, "run_measured_process #5", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 2, "num_samples_or_err"); + ASSERT_NEQ(ima_hash_from_bpf[0], 0, "ima_hash"); + ASSERT_NEQ(ima_hash_from_bpf[1], 0, "ima_hash"); + + /* + * Test #6 + * - Goal: ensure that the kernel_read_file hook denies an operation + * - Expected result: 0 samples + */ + test_init(skel->bss); + skel->bss->enable_kernel_read_file = true; + skel->bss->test_deny = true; + err = _run_measured_process(measured_dir, &skel->bss->monitored_pid, + "load-policy"); + if (CHECK(!err, "run_measured_process #6", "err = %d\n", err)) + goto close_clean; + + err = ring_buffer__consume(ringbuf); + ASSERT_EQ(err, 0, "num_samples_or_err"); close_clean: snprintf(cmd, sizeof(cmd), "./ima_setup.sh cleanup %s", measured_dir); diff --git a/tools/testing/selftests/bpf/prog_tests/test_profiler.c b/tools/testing/selftests/bpf/prog_tests/test_profiler.c index 4ca275101ee0..de24e8f0e738 100644 --- 
a/tools/testing/selftests/bpf/prog_tests/test_profiler.c +++ b/tools/testing/selftests/bpf/prog_tests/test_profiler.c @@ -8,20 +8,20 @@ static int sanity_run(struct bpf_program *prog) { - struct bpf_prog_test_run_attr test_attr = {}; + LIBBPF_OPTS(bpf_test_run_opts, test_attr); __u64 args[] = {1, 2, 3}; - __u32 duration = 0; int err, prog_fd; prog_fd = bpf_program__fd(prog); - test_attr.prog_fd = prog_fd; test_attr.ctx_in = args; test_attr.ctx_size_in = sizeof(args); - err = bpf_prog_test_run_xattr(&test_attr); - if (CHECK(err || test_attr.retval, "test_run", - "err %d errno %d retval %d duration %d\n", - err, errno, test_attr.retval, duration)) + err = bpf_prog_test_run_opts(prog_fd, &test_attr); + if (!ASSERT_OK(err, "test_run")) + return -1; + + if (!ASSERT_OK(test_attr.retval, "test_run retval")) return -1; + return 0; } diff --git a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c index cf1215531920..ae93411fd582 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c +++ b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c @@ -6,15 +6,18 @@ static int sanity_run(struct bpf_program *prog) { - __u32 duration, retval; int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); prog_fd = bpf_program__fd(prog); - err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), - NULL, NULL, &retval, &duration); - if (CHECK(err || retval != 123, "test_run", - "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration)) + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run")) + return -1; + if (!ASSERT_EQ(topts.retval, 123, "test_run retval")) return -1; return 0; } diff --git a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c index b57a3009465f..7ddd6615b7e7 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c +++ b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c @@ -44,16 +44,12 @@ static void strncmp_full_str_cmp(struct strncmp_test *skel, const char *name, static void test_strncmp_ret(void) { struct strncmp_test *skel; - struct bpf_program *prog; int err, got; skel = strncmp_test__open(); if (!ASSERT_OK_PTR(skel, "strncmp_test open")) return; - bpf_object__for_each_program(prog, skel->obj) - bpf_program__set_autoload(prog, false); - bpf_program__set_autoload(skel->progs.do_strncmp, true); err = strncmp_test__load(skel); @@ -91,18 +87,13 @@ out: static void test_strncmp_bad_not_const_str_size(void) { struct strncmp_test *skel; - struct bpf_program *prog; int err; skel = strncmp_test__open(); if (!ASSERT_OK_PTR(skel, "strncmp_test open")) return; - bpf_object__for_each_program(prog, skel->obj) - bpf_program__set_autoload(prog, false); - - bpf_program__set_autoload(skel->progs.strncmp_bad_not_const_str_size, - true); + bpf_program__set_autoload(skel->progs.strncmp_bad_not_const_str_size, true); err = strncmp_test__load(skel); ASSERT_ERR(err, "strncmp_test load bad_not_const_str_size"); @@ -113,18 +104,13 @@ static void test_strncmp_bad_not_const_str_size(void) static void test_strncmp_bad_writable_target(void) { struct strncmp_test *skel; - struct bpf_program *prog; int err; skel = strncmp_test__open(); if (!ASSERT_OK_PTR(skel, "strncmp_test open")) return; - bpf_object__for_each_program(prog, skel->obj) - bpf_program__set_autoload(prog, false); - - 
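The dropped bpf_object__for_each_program() loops were how each strncmp subtest disabled every program before enabling the one it wanted. They become unnecessary once the BPF source marks those programs as non-autoloaded, which is what the '?' section-name prefix (libbpf 0.8+) does; the userspace side then only opts in. This is an inference from the userspace hunks, since the BPF source is not part of this excerpt. Sketch of both halves:

    /* BPF side: '?' = do not load unless explicitly enabled */
    SEC("?tp/syscalls/sys_enter_nanosleep")
    int strncmp_bad_writable_target(void *ctx)
    {
        return 0;
    }

    /* userspace: enable just this one program, then load */
    bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target, true);
    err = strncmp_test__load(skel);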
bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target, - true); + bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target, true); err = strncmp_test__load(skel); ASSERT_ERR(err, "strncmp_test load bad_writable_target"); @@ -135,18 +121,13 @@ static void test_strncmp_bad_writable_target(void) static void test_strncmp_bad_not_null_term_target(void) { struct strncmp_test *skel; - struct bpf_program *prog; int err; skel = strncmp_test__open(); if (!ASSERT_OK_PTR(skel, "strncmp_test open")) return; - bpf_object__for_each_program(prog, skel->obj) - bpf_program__set_autoload(prog, false); - - bpf_program__set_autoload(skel->progs.strncmp_bad_not_null_term_target, - true); + bpf_program__set_autoload(skel->progs.strncmp_bad_not_null_term_target, true); err = strncmp_test__load(skel); ASSERT_ERR(err, "strncmp_test load bad_not_null_term_target"); diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c new file mode 100644 index 000000000000..3bba4a2a0530 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* + * End-to-end eBPF tunnel test suite + * This file tests the BPF network tunnel implementation. + * + * Topology: + * --------- + * root namespace | at_ns0 namespace + * | + * ----------- | ----------- + * | tnl dev | | | tnl dev | (overlay network) + * ----------- | ----------- + * metadata-mode | metadata-mode + * with bpf | with bpf + * | + * ---------- | ---------- + * | veth1 | --------- | veth0 | (underlay network) + * ---------- peer ---------- + * + * + * Device Configuration + * -------------------- + * root namespace with metadata-mode tunnel + BPF + * Device names and addresses: + * veth1 IP 1: 172.16.1.200, IPv6: 00::22 (underlay) + * IP 2: 172.16.1.20, IPv6: 00::bb (underlay) + * tunnel dev <type>11, ex: gre11, IPv4: 10.1.1.200, IPv6: 1::22 (overlay) + * + * Namespace at_ns0 with native tunnel + * Device names and addresses: + * veth0 IPv4: 172.16.1.100, IPv6: 00::11 (underlay) + * tunnel dev <type>00, ex: gre00, IPv4: 10.1.1.100, IPv6: 1::11 (overlay) + * + * + * End-to-end ping packet flow + * --------------------------- + * Most of the tests start with namespace creation and device configuration, + * then ping the underlay and overlay networks. When doing 'ping 10.1.1.100' + * from the root namespace, the following operations happen: + * 1) Route lookup shows 10.1.1.100/24 belongs to tnl dev, fwd to tnl dev. + * 2) Tnl device's egress BPF program is triggered and sets the tunnel metadata, + * with local_ip=172.16.1.200, remote_ip=172.16.1.100. The BPF program chooses + * the primary or secondary IP of veth1 as the local IP of the tunnel. The + * choice is made based on the value of the bpf map local_ip_map. + * 3) The outer tunnel header is prepended and the packet is routed to veth1's egress. + * 4) veth0's ingress queue receives the tunneled packet in namespace at_ns0. + * 5) The tunnel protocol handler, ex: vxlan_rcv, decaps the packet. + * 6) The packet is forwarded to the overlay tnl dev. 
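Step 2 of this flow ("sets the tunnel metadata") is the heart of the BPF side: on a metadata-mode (external) device, the egress program attaches a struct bpf_tunnel_key to the skb and the tunnel driver builds the outer header from it. A bare-bones sketch of such an egress program (names and constants illustrative, not the actual test_tunnel_kern.c program):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    SEC("tc")
    int vxlan_set_key(struct __sk_buff *skb)
    {
        struct bpf_tunnel_key key = {};

        key.remote_ipv4 = 0xac100164;  /* 172.16.1.100 (veth0) */
        key.tunnel_id = 2;
        key.tunnel_ttl = 64;

        /* stash the metadata on the skb; the vxlan driver consumes it
         * on transmit and builds the outer headers accordingly */
        if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
                                   BPF_F_ZERO_CSUM_TX))
            return 2;  /* TC_ACT_SHOT */
        return 0;      /* TC_ACT_OK */
    }

    char LICENSE[] SEC("license") = "GPL";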
+ */ + +#include <arpa/inet.h> +#include <linux/if_tun.h> +#include <linux/limits.h> +#include <linux/sysctl.h> +#include <linux/time_types.h> +#include <linux/net_tstamp.h> +#include <net/if.h> +#include <stdbool.h> +#include <stdio.h> +#include <sys/stat.h> +#include <unistd.h> + +#include "test_progs.h" +#include "network_helpers.h" +#include "test_tunnel_kern.skel.h" + +#define IP4_ADDR_VETH0 "172.16.1.100" +#define IP4_ADDR1_VETH1 "172.16.1.200" +#define IP4_ADDR2_VETH1 "172.16.1.20" +#define IP4_ADDR_TUNL_DEV0 "10.1.1.100" +#define IP4_ADDR_TUNL_DEV1 "10.1.1.200" + +#define IP6_ADDR_VETH0 "::11" +#define IP6_ADDR1_VETH1 "::22" +#define IP6_ADDR2_VETH1 "::bb" + +#define IP4_ADDR1_HEX_VETH1 0xac1001c8 +#define IP4_ADDR2_HEX_VETH1 0xac100114 +#define IP6_ADDR1_HEX_VETH1 0x22 +#define IP6_ADDR2_HEX_VETH1 0xbb + +#define MAC_TUNL_DEV0 "52:54:00:d9:01:00" +#define MAC_TUNL_DEV1 "52:54:00:d9:02:00" + +#define VXLAN_TUNL_DEV0 "vxlan00" +#define VXLAN_TUNL_DEV1 "vxlan11" +#define IP6VXLAN_TUNL_DEV0 "ip6vxlan00" +#define IP6VXLAN_TUNL_DEV1 "ip6vxlan11" + +#define PING_ARGS "-i 0.01 -c 3 -w 10 -q" + +#define SYS(fmt, ...) \ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + if (!ASSERT_OK(system(cmd), cmd)) \ + goto fail; \ + }) + +#define SYS_NOFAIL(fmt, ...) \ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + system(cmd); \ + }) + +static int config_device(void) +{ + SYS("ip netns add at_ns0"); + SYS("ip link add veth0 type veth peer name veth1"); + SYS("ip link set veth0 netns at_ns0"); + SYS("ip addr add " IP4_ADDR1_VETH1 "/24 dev veth1"); + SYS("ip addr add " IP4_ADDR2_VETH1 "/24 dev veth1"); + SYS("ip link set dev veth1 up mtu 1500"); + SYS("ip netns exec at_ns0 ip addr add " IP4_ADDR_VETH0 "/24 dev veth0"); + SYS("ip netns exec at_ns0 ip link set dev veth0 up mtu 1500"); + + return 0; +fail: + return -1; +} + +static void cleanup(void) +{ + SYS_NOFAIL("test -f /var/run/netns/at_ns0 && ip netns delete at_ns0"); + SYS_NOFAIL("ip link del veth1 2> /dev/null"); + SYS_NOFAIL("ip link del %s 2> /dev/null", VXLAN_TUNL_DEV1); + SYS_NOFAIL("ip link del %s 2> /dev/null", IP6VXLAN_TUNL_DEV1); +} + +static int add_vxlan_tunnel(void) +{ + /* at_ns0 namespace */ + SYS("ip netns exec at_ns0 ip link add dev %s type vxlan external gbp dstport 4789", + VXLAN_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip link set dev %s address %s up", + VXLAN_TUNL_DEV0, MAC_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip addr add dev %s %s/24", + VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip neigh add %s lladdr %s dev %s", + IP4_ADDR_TUNL_DEV1, MAC_TUNL_DEV1, VXLAN_TUNL_DEV0); + + /* root namespace */ + SYS("ip link add dev %s type vxlan external gbp dstport 4789", + VXLAN_TUNL_DEV1); + SYS("ip link set dev %s address %s up", VXLAN_TUNL_DEV1, MAC_TUNL_DEV1); + SYS("ip addr add dev %s %s/24", VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1); + SYS("ip neigh add %s lladdr %s dev %s", + IP4_ADDR_TUNL_DEV0, MAC_TUNL_DEV0, VXLAN_TUNL_DEV1); + + return 0; +fail: + return -1; +} + +static void delete_vxlan_tunnel(void) +{ + SYS_NOFAIL("ip netns exec at_ns0 ip link delete dev %s", + VXLAN_TUNL_DEV0); + SYS_NOFAIL("ip link delete dev %s", VXLAN_TUNL_DEV1); +} + +static int add_ip6vxlan_tunnel(void) +{ + SYS("ip netns exec at_ns0 ip -6 addr add %s/96 dev veth0", + IP6_ADDR_VETH0); + SYS("ip netns exec at_ns0 ip link set dev veth0 up"); + SYS("ip -6 addr add %s/96 dev veth1", IP6_ADDR1_VETH1); + SYS("ip -6 addr add %s/96 dev veth1", IP6_ADDR2_VETH1); + SYS("ip link set 
dev veth1 up"); + + /* at_ns0 namespace */ + SYS("ip netns exec at_ns0 ip link add dev %s type vxlan external dstport 4789", + IP6VXLAN_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip addr add dev %s %s/24", + IP6VXLAN_TUNL_DEV0, IP4_ADDR_TUNL_DEV0); + SYS("ip netns exec at_ns0 ip link set dev %s address %s up", + IP6VXLAN_TUNL_DEV0, MAC_TUNL_DEV0); + + /* root namespace */ + SYS("ip link add dev %s type vxlan external dstport 4789", + IP6VXLAN_TUNL_DEV1); + SYS("ip addr add dev %s %s/24", IP6VXLAN_TUNL_DEV1, IP4_ADDR_TUNL_DEV1); + SYS("ip link set dev %s address %s up", + IP6VXLAN_TUNL_DEV1, MAC_TUNL_DEV1); + + return 0; +fail: + return -1; +} + +static void delete_ip6vxlan_tunnel(void) +{ + SYS_NOFAIL("ip netns exec at_ns0 ip -6 addr delete %s/96 dev veth0", + IP6_ADDR_VETH0); + SYS_NOFAIL("ip -6 addr delete %s/96 dev veth1", IP6_ADDR1_VETH1); + SYS_NOFAIL("ip -6 addr delete %s/96 dev veth1", IP6_ADDR2_VETH1); + SYS_NOFAIL("ip netns exec at_ns0 ip link delete dev %s", + IP6VXLAN_TUNL_DEV0); + SYS_NOFAIL("ip link delete dev %s", IP6VXLAN_TUNL_DEV1); +} + +static int test_ping(int family, const char *addr) +{ + SYS("%s %s %s > /dev/null", ping_command(family), PING_ARGS, addr); + return 0; +fail: + return -1; +} + +static int attach_tc_prog(struct bpf_tc_hook *hook, int igr_fd, int egr_fd) +{ + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts1, .handle = 1, + .priority = 1, .prog_fd = igr_fd); + DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts2, .handle = 1, + .priority = 1, .prog_fd = egr_fd); + int ret; + + ret = bpf_tc_hook_create(hook); + if (!ASSERT_OK(ret, "create tc hook")) + return ret; + + if (igr_fd >= 0) { + hook->attach_point = BPF_TC_INGRESS; + ret = bpf_tc_attach(hook, &opts1); + if (!ASSERT_OK(ret, "bpf_tc_attach")) { + bpf_tc_hook_destroy(hook); + return ret; + } + } + + if (egr_fd >= 0) { + hook->attach_point = BPF_TC_EGRESS; + ret = bpf_tc_attach(hook, &opts2); + if (!ASSERT_OK(ret, "bpf_tc_attach")) { + bpf_tc_hook_destroy(hook); + return ret; + } + } + + return 0; +} + +static void test_vxlan_tunnel(void) +{ + struct test_tunnel_kern *skel = NULL; + struct nstoken *nstoken; + int local_ip_map_fd = -1; + int set_src_prog_fd, get_src_prog_fd; + int set_dst_prog_fd; + int key = 0, ifindex = -1; + uint local_ip; + int err; + DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, + .attach_point = BPF_TC_INGRESS); + + /* add vxlan tunnel */ + err = add_vxlan_tunnel(); + if (!ASSERT_OK(err, "add vxlan tunnel")) + goto done; + + /* load and attach bpf prog to tunnel dev tc hook point */ + skel = test_tunnel_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) + goto done; + ifindex = if_nametoindex(VXLAN_TUNL_DEV1); + if (!ASSERT_NEQ(ifindex, 0, "vxlan11 ifindex")) + goto done; + tc_hook.ifindex = ifindex; + get_src_prog_fd = bpf_program__fd(skel->progs.vxlan_get_tunnel_src); + set_src_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_src); + if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd")) + goto done; + if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd")) + goto done; + if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd)) + goto done; + + /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */ + nstoken = open_netns("at_ns0"); + if (!ASSERT_OK_PTR(nstoken, "setns src")) + goto done; + ifindex = if_nametoindex(VXLAN_TUNL_DEV0); + if (!ASSERT_NEQ(ifindex, 0, "vxlan00 ifindex")) + goto done; + tc_hook.ifindex = ifindex; + set_dst_prog_fd = bpf_program__fd(skel->progs.vxlan_set_tunnel_dst); + if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd")) + 
goto done; + if (attach_tc_prog(&tc_hook, -1, set_dst_prog_fd)) + goto done; + close_netns(nstoken); + + /* use veth1 ip 2 as tunnel source ip */ + local_ip_map_fd = bpf_map__fd(skel->maps.local_ip_map); + if (!ASSERT_GE(local_ip_map_fd, 0, "bpf_map__fd")) + goto done; + local_ip = IP4_ADDR2_HEX_VETH1; + err = bpf_map_update_elem(local_ip_map_fd, &key, &local_ip, BPF_ANY); + if (!ASSERT_OK(err, "update bpf local_ip_map")) + goto done; + + /* ping test */ + err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0); + if (!ASSERT_OK(err, "test_ping")) + goto done; + +done: + /* delete vxlan tunnel */ + delete_vxlan_tunnel(); + if (local_ip_map_fd >= 0) + close(local_ip_map_fd); + if (skel) + test_tunnel_kern__destroy(skel); +} + +static void test_ip6vxlan_tunnel(void) +{ + struct test_tunnel_kern *skel = NULL; + struct nstoken *nstoken; + int local_ip_map_fd = -1; + int set_src_prog_fd, get_src_prog_fd; + int set_dst_prog_fd; + int key = 0, ifindex = -1; + uint local_ip; + int err; + DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook, + .attach_point = BPF_TC_INGRESS); + + /* add vxlan tunnel */ + err = add_ip6vxlan_tunnel(); + if (!ASSERT_OK(err, "add_ip6vxlan_tunnel")) + goto done; + + /* load and attach bpf prog to tunnel dev tc hook point */ + skel = test_tunnel_kern__open_and_load(); + if (!ASSERT_OK_PTR(skel, "test_tunnel_kern__open_and_load")) + goto done; + ifindex = if_nametoindex(IP6VXLAN_TUNL_DEV1); + if (!ASSERT_NEQ(ifindex, 0, "ip6vxlan11 ifindex")) + goto done; + tc_hook.ifindex = ifindex; + get_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_get_tunnel_src); + set_src_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_src); + if (!ASSERT_GE(set_src_prog_fd, 0, "bpf_program__fd")) + goto done; + if (!ASSERT_GE(get_src_prog_fd, 0, "bpf_program__fd")) + goto done; + if (attach_tc_prog(&tc_hook, get_src_prog_fd, set_src_prog_fd)) + goto done; + + /* load and attach prog set_md to tunnel dev tc hook point at_ns0 */ + nstoken = open_netns("at_ns0"); + if (!ASSERT_OK_PTR(nstoken, "setns src")) + goto done; + ifindex = if_nametoindex(IP6VXLAN_TUNL_DEV0); + if (!ASSERT_NEQ(ifindex, 0, "ip6vxlan00 ifindex")) + goto done; + tc_hook.ifindex = ifindex; + set_dst_prog_fd = bpf_program__fd(skel->progs.ip6vxlan_set_tunnel_dst); + if (!ASSERT_GE(set_dst_prog_fd, 0, "bpf_program__fd")) + goto done; + if (attach_tc_prog(&tc_hook, -1, set_dst_prog_fd)) + goto done; + close_netns(nstoken); + + /* use veth1 ip 2 as tunnel source ip */ + local_ip_map_fd = bpf_map__fd(skel->maps.local_ip_map); + if (!ASSERT_GE(local_ip_map_fd, 0, "get local_ip_map fd")) + goto done; + local_ip = IP6_ADDR2_HEX_VETH1; + err = bpf_map_update_elem(local_ip_map_fd, &key, &local_ip, BPF_ANY); + if (!ASSERT_OK(err, "update bpf local_ip_map")) + goto done; + + /* ping test */ + err = test_ping(AF_INET, IP4_ADDR_TUNL_DEV0); + if (!ASSERT_OK(err, "test_ping")) + goto done; + +done: + /* delete ipv6 vxlan tunnel */ + delete_ip6vxlan_tunnel(); + if (local_ip_map_fd >= 0) + close(local_ip_map_fd); + if (skel) + test_tunnel_kern__destroy(skel); +} + +#define RUN_TEST(name) \ + ({ \ + if (test__start_subtest(#name)) { \ + test_ ## name(); \ + } \ + }) + +static void *test_tunnel_run_tests(void *arg) +{ + cleanup(); + config_device(); + + RUN_TEST(vxlan_tunnel); + RUN_TEST(ip6vxlan_tunnel); + + cleanup(); + + return NULL; +} + +void serial_test_tunnel(void) +{ + pthread_t test_thread; + int err; + + /* Run the tests in their own thread to isolate the namespace changes + * so they do not affect the environment of other tests. 
+ * (specifically needed because of unshare(CLONE_NEWNS) in open_netns()) + */ + err = pthread_create(&test_thread, NULL, &test_tunnel_run_tests, NULL); + if (ASSERT_OK(err, "pthread_create")) + ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join"); +} diff --git a/tools/testing/selftests/bpf/prog_tests/timer.c b/tools/testing/selftests/bpf/prog_tests/timer.c index 0f4e49e622cd..7eb049214859 100644 --- a/tools/testing/selftests/bpf/prog_tests/timer.c +++ b/tools/testing/selftests/bpf/prog_tests/timer.c @@ -6,7 +6,7 @@ static int timer(struct timer *timer_skel) { int err, prog_fd; - __u32 duration = 0, retval; + LIBBPF_OPTS(bpf_test_run_opts, topts); err = timer__attach(timer_skel); if (!ASSERT_OK(err, "timer_attach")) @@ -16,10 +16,9 @@ static int timer(struct timer *timer_skel) ASSERT_EQ(timer_skel->data->callback2_check, 52, "callback2_check1"); prog_fd = bpf_program__fd(timer_skel->progs.test1); - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); + err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "test_run"); - ASSERT_EQ(retval, 0, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); timer__detach(timer_skel); usleep(50); /* 10 usecs should be enough, but give it extra */ diff --git a/tools/testing/selftests/bpf/prog_tests/timer_mim.c b/tools/testing/selftests/bpf/prog_tests/timer_mim.c index 949a0617869d..9ff7843909e7 100644 --- a/tools/testing/selftests/bpf/prog_tests/timer_mim.c +++ b/tools/testing/selftests/bpf/prog_tests/timer_mim.c @@ -6,19 +6,18 @@ static int timer_mim(struct timer_mim *timer_skel) { - __u32 duration = 0, retval; __u64 cnt1, cnt2; int err, prog_fd, key1 = 1; + LIBBPF_OPTS(bpf_test_run_opts, topts); err = timer_mim__attach(timer_skel); if (!ASSERT_OK(err, "timer_attach")) return err; prog_fd = bpf_program__fd(timer_skel->progs.test1); - err = bpf_prog_test_run(prog_fd, 1, NULL, 0, - NULL, NULL, &retval, &duration); + err = bpf_prog_test_run_opts(prog_fd, &topts); ASSERT_OK(err, "test_run"); - ASSERT_EQ(retval, 0, "test_run"); + ASSERT_EQ(topts.retval, 0, "test_run"); timer_mim__detach(timer_skel); /* check that timer_cb[12] are incrementing 'cnt' */ @@ -36,7 +35,7 @@ static int timer_mim(struct timer_mim *timer_skel) ASSERT_EQ(timer_skel->bss->ok, 1 | 2, "ok"); close(bpf_map__fd(timer_skel->maps.inner_htab)); - err = bpf_map_delete_elem(bpf_map__fd(timer_skel->maps.outer_arr), &key1); + err = bpf_map__delete_elem(timer_skel->maps.outer_arr, &key1, sizeof(key1), 0); ASSERT_EQ(err, 0, "delete inner map"); /* check that timer_cb[12] are no longer running */
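The conversions above all follow the same recipe used throughout this series: the out-parameters of the removed bpf_prog_test_run() move into a bpf_test_run_opts struct. A minimal sketch of the new calling convention (prog_fd stands for any loaded program fd; field names are libbpf's bpf_test_run_opts members):

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,		/* optional input packet */
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,			/* run the program once */
	);
	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(topts.retval, 0, "retval");	/* replaces the old &retval out-param */
	/* topts.data_size_out and topts.duration replace &size and &duration */

diff --git a/tools/testing/selftests/bpf/prog_tests/trace_ext.c b/tools/testing/selftests/bpf/prog_tests/trace_ext.c index 924441d4362d..aabdff7bea3e 100644 --- a/tools/testing/selftests/bpf/prog_tests/trace_ext.c +++ b/tools/testing/selftests/bpf/prog_tests/trace_ext.c @@ -23,8 +23,12 @@ void test_trace_ext(void) int err, pkt_fd, ext_fd; struct bpf_program *prog; char buf[100]; - __u32 retval; __u64 len; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); /* open/load/attach test_pkt_md_access */ skel_pkt = test_pkt_md_access__open_and_load(); @@ -77,32 +81,32 @@ void test_trace_ext(void) /* load/attach tracing */ err = test_trace_ext_tracing__load(skel_trace); - if (CHECK(err, "setup", "tracing/test_pkt_md_access_new load failed\n")) { + if (!ASSERT_OK(err, "tracing/test_pkt_md_access_new load")) { libbpf_strerror(err, buf, sizeof(buf)); fprintf(stderr, "%s\n", buf); goto cleanup; } err =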
test_trace_ext_tracing__attach(skel_trace); - if (CHECK(err, "setup", "tracing/test_pkt_md_access_new attach failed: %d\n", err)) + if (!ASSERT_OK(err, "tracing/test_pkt_md_access_new attach")) goto cleanup; /* trigger the test */ - err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4), - NULL, NULL, &retval, &duration); - CHECK(err || retval, "run", "err %d errno %d retval %d\n", err, errno, retval); + err = bpf_prog_test_run_opts(pkt_fd, &topts); + ASSERT_OK(err, "test_run_opts err"); + ASSERT_OK(topts.retval, "test_run_opts retval"); bss_ext = skel_ext->bss; bss_trace = skel_trace->bss; len = bss_ext->ext_called; - CHECK(bss_ext->ext_called == 0, - "check", "failed to trigger freplace/test_pkt_md_access\n"); - CHECK(bss_trace->fentry_called != len, - "check", "failed to trigger fentry/test_pkt_md_access_new\n"); - CHECK(bss_trace->fexit_called != len, - "check", "failed to trigger fexit/test_pkt_md_access_new\n"); + ASSERT_NEQ(bss_ext->ext_called, 0, + "failed to trigger freplace/test_pkt_md_access"); + ASSERT_EQ(bss_trace->fentry_called, len, + "failed to trigger fentry/test_pkt_md_access_new"); + ASSERT_EQ(bss_trace->fexit_called, len, + "failed to trigger fexit/test_pkt_md_access_new"); cleanup: test_trace_ext_tracing__destroy(skel_trace); diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c index 9c795ee52b7b..b0acbda6dbf5 100644 --- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c +++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c @@ -1,126 +1,94 @@ // SPDX-License-Identifier: GPL-2.0-only #define _GNU_SOURCE -#include <sched.h> -#include <sys/prctl.h> #include <test_progs.h> #define MAX_TRAMP_PROGS 38 struct inst { struct bpf_object *obj; - struct bpf_link *link_fentry; - struct bpf_link *link_fexit; + struct bpf_link *link; }; -static int test_task_rename(void) +static struct bpf_program *load_prog(char *file, char *name, struct inst *inst) { - int fd, duration = 0, err; - char buf[] = "test_overhead"; + struct bpf_object *obj; + struct bpf_program *prog; + int err; - fd = open("/proc/self/comm", O_WRONLY|O_TRUNC); - if (CHECK(fd < 0, "open /proc", "err %d", errno)) - return -1; - err = write(fd, buf, sizeof(buf)); - if (err < 0) { - CHECK(err < 0, "task rename", "err %d", errno); - close(fd); - return -1; - } - close(fd); - return 0; -} + obj = bpf_object__open_file(file, NULL); + if (!ASSERT_OK_PTR(obj, "obj_open_file")) + return NULL; -static struct bpf_link *load(struct bpf_object *obj, const char *name) -{ - struct bpf_program *prog; - int duration = 0; + inst->obj = obj; + + err = bpf_object__load(obj); + if (!ASSERT_OK(err, "obj_load")) + return NULL; prog = bpf_object__find_program_by_name(obj, name); - if (CHECK(!prog, "find_probe", "prog '%s' not found\n", name)) - return ERR_PTR(-EINVAL); - return bpf_program__attach_trace(prog); + if (!ASSERT_OK_PTR(prog, "obj_find_prog")) + return NULL; + + return prog; } /* TODO: use different target function to run in concurrent mode */ void serial_test_trampoline_count(void) { - const char *fentry_name = "prog1"; - const char *fexit_name = "prog2"; - const char *object = "test_trampoline_count.o"; - struct inst inst[MAX_TRAMP_PROGS] = {}; - int err, i = 0, duration = 0; - struct bpf_object *obj; + char *file = "test_trampoline_count.o"; + char *const progs[] = { "fentry_test", "fmod_ret_test", "fexit_test" }; + struct inst inst[MAX_TRAMP_PROGS + 1] = {}; + struct bpf_program *prog; struct bpf_link *link; - char comm[16] = {}; 
+ int prog_fd, err, i; + LIBBPF_OPTS(bpf_test_run_opts, opts); /* attach 'allowed' trampoline programs */ for (i = 0; i < MAX_TRAMP_PROGS; i++) { - obj = bpf_object__open_file(object, NULL); - if (!ASSERT_OK_PTR(obj, "obj_open_file")) { - obj = NULL; + prog = load_prog(file, progs[i % ARRAY_SIZE(progs)], &inst[i]); + if (!prog) goto cleanup; - } - err = bpf_object__load(obj); - if (CHECK(err, "obj_load", "err %d\n", err)) + link = bpf_program__attach(prog); + if (!ASSERT_OK_PTR(link, "attach_prog")) goto cleanup; - inst[i].obj = obj; - obj = NULL; - if (rand() % 2) { - link = load(inst[i].obj, fentry_name); - if (!ASSERT_OK_PTR(link, "attach_prog")) { - link = NULL; - goto cleanup; - } - inst[i].link_fentry = link; - } else { - link = load(inst[i].obj, fexit_name); - if (!ASSERT_OK_PTR(link, "attach_prog")) { - link = NULL; - goto cleanup; - } - inst[i].link_fexit = link; - } + inst[i].link = link; } /* and try 1 extra.. */ - obj = bpf_object__open_file(object, NULL); - if (!ASSERT_OK_PTR(obj, "obj_open_file")) { - obj = NULL; + prog = load_prog(file, "fmod_ret_test", &inst[i]); + if (!prog) goto cleanup; - } - - err = bpf_object__load(obj); - if (CHECK(err, "obj_load", "err %d\n", err)) - goto cleanup_extra; /* ..that needs to fail */ - link = load(obj, fentry_name); - err = libbpf_get_error(link); - if (!ASSERT_ERR_PTR(link, "cannot attach over the limit")) { - bpf_link__destroy(link); - goto cleanup_extra; + link = bpf_program__attach(prog); + if (!ASSERT_ERR_PTR(link, "attach_prog")) { + inst[i].link = link; + goto cleanup; } /* with E2BIG error */ - ASSERT_EQ(err, -E2BIG, "proper error check"); - ASSERT_EQ(link, NULL, "ptr_is_null"); + if (!ASSERT_EQ(libbpf_get_error(link), -E2BIG, "E2BIG")) + goto cleanup; + if (!ASSERT_EQ(link, NULL, "ptr_is_null")) + goto cleanup; /* and finally execute the probe */ - if (CHECK_FAIL(prctl(PR_GET_NAME, comm, 0L, 0L, 0L))) - goto cleanup_extra; - CHECK_FAIL(test_task_rename()); - CHECK_FAIL(prctl(PR_SET_NAME, comm, 0L, 0L, 0L)); + prog_fd = bpf_program__fd(prog); + if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd")) + goto cleanup; + + err = bpf_prog_test_run_opts(prog_fd, &opts); + if (!ASSERT_OK(err, "bpf_prog_test_run_opts")) + goto cleanup; + + ASSERT_EQ(opts.retval & 0xffff, 4, "bpf_modify_return_test.result"); + ASSERT_EQ(opts.retval >> 16, 1, "bpf_modify_return_test.side_effect"); -cleanup_extra: - bpf_object__close(obj); cleanup: - if (i >= MAX_TRAMP_PROGS) - i = MAX_TRAMP_PROGS - 1; for (; i >= 0; i--) { - bpf_link__destroy(inst[i].link_fentry); - bpf_link__destroy(inst[i].link_fexit); + bpf_link__destroy(inst[i].link); bpf_object__close(inst[i].obj); } }
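A note on the two masked assertions that close serial_test_trampoline_count(): fmod_ret_test attaches to the kernel's bpf_modify_return_test() function, and the test-run harness is understood to fold both the fmod_ret side effect and the function's result into a single retval word. The packing below is inferred from the assertions themselves, not quoted from kernel source:

	/* illustrative: side effect in the high half, result in the low half */
	static inline __u32 pack_modify_return(__u16 side_effect, __u16 result)
	{
		return ((__u32)side_effect << 16) | result;
	}
	/* hence retval & 0xffff == 4 checks the function result, and
	 * retval >> 16 == 1 checks that the side effect ran exactly once */

diff --git a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c new file mode 100644 index 000000000000..1ed3cc2092db --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c @@ -0,0 +1,312 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022, Oracle and/or its affiliates. */ + +#include <test_progs.h> +#include <bpf/btf.h> + +#include "test_unpriv_bpf_disabled.skel.h" + +#include "cap_helpers.h" + +/* Using CAP_LAST_CAP is risky here, since it can get pulled in from + * an old /usr/include/linux/capability.h and be < CAP_BPF; as a result + * CAP_BPF would not be included in ALL_CAPS. Instead use CAP_BPF as + * we know its value is correct since it is explicitly defined in + * cap_helpers.h.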
+ * (With CAP_BPF == 39, its value in cap_helpers.h, ALL_CAPS works out + * to (1ULL << 40) - 1: bits 0..39 set, one bit for every capability up + * to and including CAP_BPF.) + */ +#define ALL_CAPS ((2ULL << CAP_BPF) - 1) + +#define PINPATH "/sys/fs/bpf/unpriv_bpf_disabled_" +#define NUM_MAPS 7 + +static __u32 got_perfbuf_val; +static __u32 got_ringbuf_val; + +static int process_ringbuf(void *ctx, void *data, size_t len) +{ + if (ASSERT_EQ(len, sizeof(__u32), "ringbuf_size_valid")) + got_ringbuf_val = *(__u32 *)data; + return 0; +} + +static void process_perfbuf(void *ctx, int cpu, void *data, __u32 len) +{ + if (ASSERT_EQ(len, sizeof(__u32), "perfbuf_size_valid")) + got_perfbuf_val = *(__u32 *)data; +} + +static int sysctl_set(const char *sysctl_path, char *old_val, const char *new_val) +{ + int ret = 0; + FILE *fp; + + fp = fopen(sysctl_path, "r+"); + if (!fp) + return -errno; + if (old_val && fscanf(fp, "%s", old_val) <= 0) { + ret = -ENOENT; + } else if (!old_val || strcmp(old_val, new_val) != 0) { + fseek(fp, 0, SEEK_SET); + if (fprintf(fp, "%s", new_val) < 0) + ret = -errno; + } + fclose(fp); + + return ret; +} + +static void test_unpriv_bpf_disabled_positive(struct test_unpriv_bpf_disabled *skel, + __u32 prog_id, int prog_fd, int perf_fd, + char **map_paths, int *map_fds) +{ + struct perf_buffer *perfbuf = NULL; + struct ring_buffer *ringbuf = NULL; + int i, nr_cpus, link_fd = -1; + + nr_cpus = bpf_num_possible_cpus(); + + skel->bss->perfbuf_val = 1; + skel->bss->ringbuf_val = 2; + + /* Positive tests for unprivileged BPF disabled. Verify we can + * - retrieve and interact with pinned maps; + * - set up and interact with perf buffer; + * - set up and interact with ring buffer; + * - create a link + */ + perfbuf = perf_buffer__new(bpf_map__fd(skel->maps.perfbuf), 8, process_perfbuf, NULL, NULL, + NULL); + if (!ASSERT_OK_PTR(perfbuf, "perf_buffer__new")) + goto cleanup; + + ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf), process_ringbuf, NULL, NULL); + if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new")) + goto cleanup; + + /* trigger & validate perf event, ringbuf output */ + usleep(1); + + ASSERT_GT(perf_buffer__poll(perfbuf, 100), -1, "perf_buffer__poll"); + ASSERT_EQ(got_perfbuf_val, skel->bss->perfbuf_val, "check_perfbuf_val"); + ASSERT_EQ(ring_buffer__consume(ringbuf), 1, "ring_buffer__consume"); + ASSERT_EQ(got_ringbuf_val, skel->bss->ringbuf_val, "check_ringbuf_val"); + + for (i = 0; i < NUM_MAPS; i++) { + map_fds[i] = bpf_obj_get(map_paths[i]); + if (!ASSERT_GT(map_fds[i], -1, "obj_get")) + goto cleanup; + } + + for (i = 0; i < NUM_MAPS; i++) { + bool prog_array = strstr(map_paths[i], "prog_array") != NULL; + bool array = strstr(map_paths[i], "array") != NULL; + bool buf = strstr(map_paths[i], "buf") != NULL; + __u32 key = 0, vals[nr_cpus], lookup_vals[nr_cpus]; + __u32 expected_val = 1; + int j; + + /* skip ringbuf, perfbuf */ + if (buf) + continue; + + for (j = 0; j < nr_cpus; j++) + vals[j] = expected_val; + + if (prog_array) { + /* need valid prog array value */ + vals[0] = prog_fd; + /* prog array lookup returns prog id, not fd */ + expected_val = prog_id; + } + ASSERT_OK(bpf_map_update_elem(map_fds[i], &key, vals, 0), "map_update_elem"); + ASSERT_OK(bpf_map_lookup_elem(map_fds[i], &key, &lookup_vals), "map_lookup_elem"); + ASSERT_EQ(lookup_vals[0], expected_val, "map_lookup_elem_values"); + if (!array) + ASSERT_OK(bpf_map_delete_elem(map_fds[i], &key), "map_delete_elem"); + } + + link_fd = bpf_link_create(bpf_program__fd(skel->progs.handle_perf_event), perf_fd, + BPF_PERF_EVENT, NULL); + ASSERT_GT(link_fd, 0, "link_create"); + +cleanup: + if (link_fd >= 0) + close(link_fd); + if (perfbuf) + perf_buffer__free(perfbuf); + if (ringbuf) + 
ring_buffer__free(ringbuf); +} + +static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *skel, + __u32 prog_id, int prog_fd, int perf_fd, + char **map_paths, int *map_fds) +{ + const struct bpf_insn prog_insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn); + LIBBPF_OPTS(bpf_prog_load_opts, load_opts); + struct bpf_map_info map_info = {}; + __u32 map_info_len = sizeof(map_info); + struct bpf_link_info link_info = {}; + __u32 link_info_len = sizeof(link_info); + struct btf *btf = NULL; + __u32 attach_flags = 0; + __u32 prog_ids[3] = {}; + __u32 prog_cnt = 3; + __u32 next; + int i; + + /* Negative tests for unprivileged BPF disabled. Verify we cannot + * - load BPF programs; + * - create BPF maps; + * - get a prog/map/link fd by id; + * - get next prog/map/link id + * - query prog + * - BTF load + */ + ASSERT_EQ(bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "simple_prog", "GPL", + prog_insns, prog_insn_cnt, &load_opts), + -EPERM, "prog_load_fails"); + + for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_BLOOM_FILTER; i++) + ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL), + -EPERM, "map_create_fails"); + + ASSERT_EQ(bpf_prog_get_fd_by_id(prog_id), -EPERM, "prog_get_fd_by_id_fails"); + ASSERT_EQ(bpf_prog_get_next_id(prog_id, &next), -EPERM, "prog_get_next_id_fails"); + ASSERT_EQ(bpf_prog_get_next_id(0, &next), -EPERM, "prog_get_next_id_fails"); + + if (ASSERT_OK(bpf_obj_get_info_by_fd(map_fds[0], &map_info, &map_info_len), + "obj_get_info_by_fd")) { + ASSERT_EQ(bpf_map_get_fd_by_id(map_info.id), -EPERM, "map_get_fd_by_id_fails"); + ASSERT_EQ(bpf_map_get_next_id(map_info.id, &next), -EPERM, + "map_get_next_id_fails"); + } + ASSERT_EQ(bpf_map_get_next_id(0, &next), -EPERM, "map_get_next_id_fails"); + + if (ASSERT_OK(bpf_obj_get_info_by_fd(bpf_link__fd(skel->links.sys_nanosleep_enter), + &link_info, &link_info_len), + "obj_get_info_by_fd")) { + ASSERT_EQ(bpf_link_get_fd_by_id(link_info.id), -EPERM, "link_get_fd_by_id_fails"); + ASSERT_EQ(bpf_link_get_next_id(link_info.id, &next), -EPERM, + "link_get_next_id_fails"); + } + ASSERT_EQ(bpf_link_get_next_id(0, &next), -EPERM, "link_get_next_id_fails"); + + ASSERT_EQ(bpf_prog_query(prog_fd, BPF_TRACE_FENTRY, 0, &attach_flags, prog_ids, + &prog_cnt), -EPERM, "prog_query_fails"); + + btf = btf__new_empty(); + if (ASSERT_OK_PTR(btf, "empty_btf") && + ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "unpriv_int_type")) { + const void *raw_btf_data; + __u32 raw_btf_size; + + raw_btf_data = btf__raw_data(btf, &raw_btf_size); + if (ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good")) + ASSERT_EQ(bpf_btf_load(raw_btf_data, raw_btf_size, NULL), -EPERM, + "bpf_btf_load_fails"); + } + btf__free(btf); +} + +void test_unpriv_bpf_disabled(void) +{ + char *map_paths[NUM_MAPS] = { PINPATH "array", + PINPATH "percpu_array", + PINPATH "hash", + PINPATH "percpu_hash", + PINPATH "perfbuf", + PINPATH "ringbuf", + PINPATH "prog_array" }; + int map_fds[NUM_MAPS]; + struct test_unpriv_bpf_disabled *skel; + char unprivileged_bpf_disabled_orig[32] = {}; + char perf_event_paranoid_orig[32] = {}; + struct bpf_prog_info prog_info = {}; + __u32 prog_info_len = sizeof(prog_info); + struct perf_event_attr attr = {}; + int prog_fd, perf_fd = -1, i, ret; + __u64 save_caps = 0; + __u32 prog_id; + + skel = test_unpriv_bpf_disabled__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + skel->bss->test_pid = getpid(); + + map_fds[0] = 
bpf_map__fd(skel->maps.array); + map_fds[1] = bpf_map__fd(skel->maps.percpu_array); + map_fds[2] = bpf_map__fd(skel->maps.hash); + map_fds[3] = bpf_map__fd(skel->maps.percpu_hash); + map_fds[4] = bpf_map__fd(skel->maps.perfbuf); + map_fds[5] = bpf_map__fd(skel->maps.ringbuf); + map_fds[6] = bpf_map__fd(skel->maps.prog_array); + + for (i = 0; i < NUM_MAPS; i++) + ASSERT_OK(bpf_obj_pin(map_fds[i], map_paths[i]), "pin map_fd"); + + /* allow user without caps to use perf events */ + if (!ASSERT_OK(sysctl_set("/proc/sys/kernel/perf_event_paranoid", perf_event_paranoid_orig, + "-1"), + "set_perf_event_paranoid")) + goto cleanup; + /* ensure unprivileged bpf disabled is set */ + ret = sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled", + unprivileged_bpf_disabled_orig, "2"); + if (ret == -EPERM) { + /* if unprivileged_bpf_disabled=1, we get -EPERM back; that's okay. */ + if (!ASSERT_OK(strcmp(unprivileged_bpf_disabled_orig, "1"), + "unprivileged_bpf_disabled_on")) + goto cleanup; + } else { + if (!ASSERT_OK(ret, "set unprivileged_bpf_disabled")) + goto cleanup; + } + + prog_fd = bpf_program__fd(skel->progs.sys_nanosleep_enter); + ASSERT_OK(bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len), + "obj_get_info_by_fd"); + prog_id = prog_info.id; + ASSERT_GT(prog_id, 0, "valid_prog_id"); + + attr.size = sizeof(attr); + attr.type = PERF_TYPE_SOFTWARE; + attr.config = PERF_COUNT_SW_CPU_CLOCK; + attr.freq = 1; + attr.sample_freq = 1000; + perf_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); + if (!ASSERT_GE(perf_fd, 0, "perf_fd")) + goto cleanup; + + if (!ASSERT_OK(test_unpriv_bpf_disabled__attach(skel), "skel_attach")) + goto cleanup; + + if (!ASSERT_OK(cap_disable_effective(ALL_CAPS, &save_caps), "disable caps")) + goto cleanup; + + if (test__start_subtest("unpriv_bpf_disabled_positive")) + test_unpriv_bpf_disabled_positive(skel, prog_id, prog_fd, perf_fd, map_paths, + map_fds); + + if (test__start_subtest("unpriv_bpf_disabled_negative")) + test_unpriv_bpf_disabled_negative(skel, prog_id, prog_fd, perf_fd, map_paths, + map_fds); + +cleanup: + close(perf_fd); + if (save_caps) + cap_enable_effective(save_caps, NULL); + if (strlen(perf_event_paranoid_orig) > 0) + sysctl_set("/proc/sys/kernel/perf_event_paranoid", NULL, perf_event_paranoid_orig); + if (strlen(unprivileged_bpf_disabled_orig) > 0) + sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled", NULL, + unprivileged_bpf_disabled_orig); + for (i = 0; i < NUM_MAPS; i++) + unlink(map_paths[i]); + test_unpriv_bpf_disabled__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c new file mode 100644 index 000000000000..35b87c7ba5be --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022, Oracle and/or its affiliates. 
*/ + +#include <test_progs.h> +#include "test_uprobe_autoattach.skel.h" + +/* uprobe attach point */ +static noinline int autoattach_trigger_func(int arg) +{ + asm volatile (""); + return arg + 1; +} + +void test_uprobe_autoattach(void) +{ + struct test_uprobe_autoattach *skel; + int trigger_val = 100, trigger_ret; + size_t malloc_sz = 1; + char *mem; + + skel = test_uprobe_autoattach__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + if (!ASSERT_OK(test_uprobe_autoattach__attach(skel), "skel_attach")) + goto cleanup; + + skel->bss->test_pid = getpid(); + + /* trigger & validate uprobe & uretprobe */ + trigger_ret = autoattach_trigger_func(trigger_val); + + skel->bss->test_pid = getpid(); + + /* trigger & validate shared library u[ret]probes attached by name */ + mem = malloc(malloc_sz); + + ASSERT_EQ(skel->bss->uprobe_byname_parm1, trigger_val, "check_uprobe_byname_parm1"); + ASSERT_EQ(skel->bss->uprobe_byname_ran, 1, "check_uprobe_byname_ran"); + ASSERT_EQ(skel->bss->uretprobe_byname_rc, trigger_ret, "check_uretprobe_byname_rc"); + ASSERT_EQ(skel->bss->uretprobe_byname_ran, 2, "check_uretprobe_byname_ran"); + ASSERT_EQ(skel->bss->uprobe_byname2_parm1, malloc_sz, "check_uprobe_byname2_parm1"); + ASSERT_EQ(skel->bss->uprobe_byname2_ran, 3, "check_uprobe_byname2_ran"); + ASSERT_EQ(skel->bss->uretprobe_byname2_rc, mem, "check_uretprobe_byname2_rc"); + ASSERT_EQ(skel->bss->uretprobe_byname2_ran, 4, "check_uretprobe_byname2_ran"); + + free(mem); +cleanup: + test_uprobe_autoattach__destroy(skel); +}
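The bare skeleton __attach() call above needs no manual bpf_program__attach_uprobe() plumbing because, in the companion BPF source test_uprobe_autoattach.c (not shown in this hunk), the SEC() names encode the attach target for libbpf's uprobe auto-attach. A sketch of that convention (handler names and bodies are assumed, for illustration only):

	SEC("uprobe//proc/self/exe:autoattach_trigger_func")
	int BPF_KPROBE(handle_uprobe_byname, int arg)
	{
		/* records uprobe_byname_parm1 and uprobe_byname_ran */
		return 0;
	}

	SEC("uretprobe//proc/self/exe:autoattach_trigger_func")
	int BPF_KRETPROBE(handle_uretprobe_byname, int ret)
	{
		/* records uretprobe_byname_rc and uretprobe_byname_ran */
		return 0;
	}

diff --git a/tools/testing/selftests/bpf/prog_tests/usdt.c b/tools/testing/selftests/bpf/prog_tests/usdt.c new file mode 100644 index 000000000000..5f733d50b0d7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/usdt.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates.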
*/ +#include <test_progs.h> + +#define _SDT_HAS_SEMAPHORES 1 +#include "../sdt.h" + +#include "test_usdt.skel.h" +#include "test_urandom_usdt.skel.h" + +int lets_test_this(int); + +static volatile int idx = 2; +static volatile __u64 bla = 0xFEDCBA9876543210ULL; +static volatile short nums[] = {-1, -2, -3, }; + +static volatile struct { + int x; + signed char y; +} t1 = { 1, -127 }; + +#define SEC(name) __attribute__((section(name), used)) + +unsigned short test_usdt0_semaphore SEC(".probes"); +unsigned short test_usdt3_semaphore SEC(".probes"); +unsigned short test_usdt12_semaphore SEC(".probes"); + +static void __always_inline trigger_func(int x) { + long y = 42; + + if (test_usdt0_semaphore) + STAP_PROBE(test, usdt0); + if (test_usdt3_semaphore) + STAP_PROBE3(test, usdt3, x, y, &bla); + if (test_usdt12_semaphore) { + STAP_PROBE12(test, usdt12, + x, x + 1, y, x + y, 5, + y / 7, bla, &bla, -9, nums[x], + nums[idx], t1.y); + } +} + +static void subtest_basic_usdt(void) +{ + LIBBPF_OPTS(bpf_usdt_opts, opts); + struct test_usdt *skel; + struct test_usdt__bss *bss; + int err; + + skel = test_usdt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bss = skel->bss; + bss->my_pid = getpid(); + + err = test_usdt__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + /* usdt0 won't be auto-attached */ + opts.usdt_cookie = 0xcafedeadbeeffeed; + skel->links.usdt0 = bpf_program__attach_usdt(skel->progs.usdt0, + 0 /*self*/, "/proc/self/exe", + "test", "usdt0", &opts); + if (!ASSERT_OK_PTR(skel->links.usdt0, "usdt0_link")) + goto cleanup; + + trigger_func(1); + + ASSERT_EQ(bss->usdt0_called, 1, "usdt0_called"); + ASSERT_EQ(bss->usdt3_called, 1, "usdt3_called"); + ASSERT_EQ(bss->usdt12_called, 1, "usdt12_called"); + + ASSERT_EQ(bss->usdt0_cookie, 0xcafedeadbeeffeed, "usdt0_cookie"); + ASSERT_EQ(bss->usdt0_arg_cnt, 0, "usdt0_arg_cnt"); + ASSERT_EQ(bss->usdt0_arg_ret, -ENOENT, "usdt0_arg_ret"); + + /* auto-attached usdt3 gets default zero cookie value */ + ASSERT_EQ(bss->usdt3_cookie, 0, "usdt3_cookie"); + ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt"); + + ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret"); + ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret"); + ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret"); + ASSERT_EQ(bss->usdt3_args[0], 1, "usdt3_arg1"); + ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2"); + ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3"); + + /* auto-attached usdt12 gets default zero cookie value */ + ASSERT_EQ(bss->usdt12_cookie, 0, "usdt12_cookie"); + ASSERT_EQ(bss->usdt12_arg_cnt, 12, "usdt12_arg_cnt"); + + ASSERT_EQ(bss->usdt12_args[0], 1, "usdt12_arg1"); + ASSERT_EQ(bss->usdt12_args[1], 1 + 1, "usdt12_arg2"); + ASSERT_EQ(bss->usdt12_args[2], 42, "usdt12_arg3"); + ASSERT_EQ(bss->usdt12_args[3], 42 + 1, "usdt12_arg4"); + ASSERT_EQ(bss->usdt12_args[4], 5, "usdt12_arg5"); + ASSERT_EQ(bss->usdt12_args[5], 42 / 7, "usdt12_arg6"); + ASSERT_EQ(bss->usdt12_args[6], bla, "usdt12_arg7"); + ASSERT_EQ(bss->usdt12_args[7], (uintptr_t)&bla, "usdt12_arg8"); + ASSERT_EQ(bss->usdt12_args[8], -9, "usdt12_arg9"); + ASSERT_EQ(bss->usdt12_args[9], nums[1], "usdt12_arg10"); + ASSERT_EQ(bss->usdt12_args[10], nums[idx], "usdt12_arg11"); + ASSERT_EQ(bss->usdt12_args[11], t1.y, "usdt12_arg12"); + + /* trigger_func() is marked __always_inline, so USDT invocations will be + * inlined in two different places, meaning that each USDT will have + * at least 2 different places to be attached to. 
This verifies that + * bpf_program__attach_usdt() handles this properly and attaches to + * all possible places of USDT invocation. + */ + trigger_func(2); + + ASSERT_EQ(bss->usdt0_called, 2, "usdt0_called"); + ASSERT_EQ(bss->usdt3_called, 2, "usdt3_called"); + ASSERT_EQ(bss->usdt12_called, 2, "usdt12_called"); + + /* only check values that depend on trigger_func()'s input value */ + ASSERT_EQ(bss->usdt3_args[0], 2, "usdt3_arg1"); + + ASSERT_EQ(bss->usdt12_args[0], 2, "usdt12_arg1"); + ASSERT_EQ(bss->usdt12_args[1], 2 + 1, "usdt12_arg2"); + ASSERT_EQ(bss->usdt12_args[3], 42 + 2, "usdt12_arg4"); + ASSERT_EQ(bss->usdt12_args[9], nums[2], "usdt12_arg10"); + + /* detach and re-attach usdt3 */ + bpf_link__destroy(skel->links.usdt3); + + opts.usdt_cookie = 0xBADC00C51E; + skel->links.usdt3 = bpf_program__attach_usdt(skel->progs.usdt3, -1 /* any pid */, + "/proc/self/exe", "test", "usdt3", &opts); + if (!ASSERT_OK_PTR(skel->links.usdt3, "usdt3_reattach")) + goto cleanup; + + trigger_func(3); + + ASSERT_EQ(bss->usdt3_called, 3, "usdt3_called"); + /* this time usdt3 has custom cookie */ + ASSERT_EQ(bss->usdt3_cookie, 0xBADC00C51E, "usdt3_cookie"); + ASSERT_EQ(bss->usdt3_arg_cnt, 3, "usdt3_arg_cnt"); + + ASSERT_EQ(bss->usdt3_arg_rets[0], 0, "usdt3_arg1_ret"); + ASSERT_EQ(bss->usdt3_arg_rets[1], 0, "usdt3_arg2_ret"); + ASSERT_EQ(bss->usdt3_arg_rets[2], 0, "usdt3_arg3_ret"); + ASSERT_EQ(bss->usdt3_args[0], 3, "usdt3_arg1"); + ASSERT_EQ(bss->usdt3_args[1], 42, "usdt3_arg2"); + ASSERT_EQ(bss->usdt3_args[2], (uintptr_t)&bla, "usdt3_arg3"); + +cleanup: + test_usdt__destroy(skel); +} + +unsigned short test_usdt_100_semaphore SEC(".probes"); +unsigned short test_usdt_300_semaphore SEC(".probes"); +unsigned short test_usdt_400_semaphore SEC(".probes"); + +#define R10(F, X) F(X+0); F(X+1);F(X+2); F(X+3); F(X+4); \ + F(X+5); F(X+6); F(X+7); F(X+8); F(X+9); +#define R100(F, X) R10(F,X+ 0);R10(F,X+10);R10(F,X+20);R10(F,X+30);R10(F,X+40); \ + R10(F,X+50);R10(F,X+60);R10(F,X+70);R10(F,X+80);R10(F,X+90); + +/* carefully control that we get exactly 100 inlined call sites by forcing + * inlining of f100() and keeping trigger_100_usdts() __weak so that it is + * not itself inlined or duplicated */ +static void __always_inline f100(int x) +{ + STAP_PROBE1(test, usdt_100, x); +} + +__weak void trigger_100_usdts(void) +{ + R100(f100, 0); +}
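Worked expansion of the helper macros, for clarity (not part of the patch):

	/* R10(f100, 0) expands to
	 *	f100(0); f100(1); f100(2); ... f100(9);
	 * and R100(f100, 0) chains ten R10()s into f100(0) ... f100(99):
	 * 100 distinct call sites, each inlining STAP_PROBE1() with its
	 * own constant argument.
	 */

+ +/* we shouldn't be able to attach to test:usdt_300 USDT as we don't have as + * many slots for specs.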
It's important that each STAP_PROBE1() invocation + * (after unrolling) gets a different arg spec because the compiler inlines + * a different constant at each site + */ +static void __always_inline f300(int x) +{ + STAP_PROBE1(test, usdt_300, x); +} + +__weak void trigger_300_usdts(void) +{ + R100(f300, 0); + R100(f300, 100); + R100(f300, 200); +} + +static void __always_inline f400(int x __attribute__((unused))) +{ + STAP_PROBE1(test, usdt_400, 400); +} + +/* this time we have 400 different USDT call sites, but they have uniform + * argument location, so libbpf's spec string deduplication logic should keep + * the number of specs in use very small, and so we should be able to attach + * to all 400 call sites +*/ +__weak void trigger_400_usdts(void) +{ + R100(f400, 0); + R100(f400, 100); + R100(f400, 200); + R100(f400, 300); +} + +static void subtest_multispec_usdt(void) +{ + LIBBPF_OPTS(bpf_usdt_opts, opts); + struct test_usdt *skel; + struct test_usdt__bss *bss; + int err, i; + + skel = test_usdt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + bss = skel->bss; + bss->my_pid = getpid(); + + err = test_usdt__attach(skel); + if (!ASSERT_OK(err, "skel_attach")) + goto cleanup; + + /* usdt_100 is auto-attached and there are 100 inlined call sites, + * let's validate that all of them are properly attached to and + * handled from BPF side + */ + trigger_100_usdts(); + + ASSERT_EQ(bss->usdt_100_called, 100, "usdt_100_called"); + ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum"); + + /* Stress test free spec ID tracking. By default libbpf allows up to + * 256 specs to be used, so if we don't return free spec IDs back + * after few detachments and re-attachments we should run out of + * available spec IDs. + */ + for (i = 0; i < 2; i++) { + bpf_link__destroy(skel->links.usdt_100); + + skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, + "/proc/self/exe", + "test", "usdt_100", NULL); + if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_100_reattach")) + goto cleanup; + + bss->usdt_100_sum = 0; + trigger_100_usdts(); + + ASSERT_EQ(bss->usdt_100_called, (i + 1) * 100 + 100, "usdt_100_called"); + ASSERT_EQ(bss->usdt_100_sum, 99 * 100 / 2, "usdt_100_sum"); + } + + /* Now let's step it up and try to attach USDT that requires more than + * 256 attach points with different specs for each. + * Note that we need trigger_300_usdts() only to actually have 300 + * USDT call sites, we are not going to actually trace them. + */ + trigger_300_usdts(); + + /* we'll reuse usdt_100 BPF program for usdt_300 test */ + bpf_link__destroy(skel->links.usdt_100); + skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, "/proc/self/exe", + "test", "usdt_300", NULL); + err = -errno; + if (!ASSERT_ERR_PTR(skel->links.usdt_100, "usdt_300_bad_attach")) + goto cleanup; + ASSERT_EQ(err, -E2BIG, "usdt_300_attach_err"); + + /* let's check that there are no "dangling" BPF programs attached due + * to partial success of the above test:usdt_300 attachment + */ + bss->usdt_100_called = 0; + bss->usdt_100_sum = 0; + + f300(777); /* this is 301st instance of usdt_300 */ + + ASSERT_EQ(bss->usdt_100_called, 0, "usdt_301_called"); + ASSERT_EQ(bss->usdt_100_sum, 0, "usdt_301_sum"); +
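The "specs" being counted here are the stapsdt argument descriptors that each STAP_PROBE*() site records in its ELF note. An illustrative readelf -n view (x86-64 operand syntax assumed) of why usdt_300 exhausts the spec slots while usdt_400 deduplicates to one:

	/* every f300 site inlines a different constant, so every note differs:
	 *	Provider: test  Name: usdt_300  Arguments: -4@$0
	 *	Provider: test  Name: usdt_300  Arguments: -4@$1
	 *	... 300 distinct arg specs > 256 available slots -> -E2BIG
	 * every f400 site inlines the same constant, so all notes match:
	 *	Provider: test  Name: usdt_400  Arguments: -4@$400
	 *	... one spec after deduplication -> all 400 sites attach
	 */

+ /* This time we have USDT with 400 inlined invocations, but arg specs + * should be the same across all sites, so libbpf will only need to + * use one spec and thus we'll be able to attach 400 uprobes + * successfully. + * + * Again, we are reusing usdt_100 BPF program.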
+ */ + skel->links.usdt_100 = bpf_program__attach_usdt(skel->progs.usdt_100, -1, + "/proc/self/exe", + "test", "usdt_400", NULL); + if (!ASSERT_OK_PTR(skel->links.usdt_100, "usdt_400_attach")) + goto cleanup; + + trigger_400_usdts(); + + ASSERT_EQ(bss->usdt_100_called, 400, "usdt_400_called"); + ASSERT_EQ(bss->usdt_100_sum, 400 * 400, "usdt_400_sum"); + +cleanup: + test_usdt__destroy(skel); +} + +static FILE *urand_spawn(int *pid) +{ + FILE *f; + + /* urandom_read's stdout is wired into f */ + f = popen("./urandom_read 1 report-pid", "r"); + if (!f) + return NULL; + + if (fscanf(f, "%d", pid) != 1) { + pclose(f); + return NULL; + } + + return f; +} + +static int urand_trigger(FILE **urand_pipe) +{ + int exit_code; + + /* pclose() waits for the child process to exit and returns its exit code */ + exit_code = pclose(*urand_pipe); + *urand_pipe = NULL; + + return exit_code; +} + +static void subtest_urandom_usdt(bool auto_attach) +{ + struct test_urandom_usdt *skel; + struct test_urandom_usdt__bss *bss; + struct bpf_link *l; + FILE *urand_pipe = NULL; + int err, urand_pid = 0; + + skel = test_urandom_usdt__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + urand_pipe = urand_spawn(&urand_pid); + if (!ASSERT_OK_PTR(urand_pipe, "urand_spawn")) + goto cleanup; + + bss = skel->bss; + bss->urand_pid = urand_pid; + + if (auto_attach) { + err = test_urandom_usdt__attach(skel); + if (!ASSERT_OK(err, "skel_auto_attach")) + goto cleanup; + } else { + l = bpf_program__attach_usdt(skel->progs.urand_read_without_sema, + urand_pid, "./urandom_read", + "urand", "read_without_sema", NULL); + if (!ASSERT_OK_PTR(l, "urand_without_sema_attach")) + goto cleanup; + skel->links.urand_read_without_sema = l; + + l = bpf_program__attach_usdt(skel->progs.urand_read_with_sema, + urand_pid, "./urandom_read", + "urand", "read_with_sema", NULL); + if (!ASSERT_OK_PTR(l, "urand_with_sema_attach")) + goto cleanup; + skel->links.urand_read_with_sema = l; + + l = bpf_program__attach_usdt(skel->progs.urandlib_read_without_sema, + urand_pid, "./liburandom_read.so", + "urandlib", "read_without_sema", NULL); + if (!ASSERT_OK_PTR(l, "urandlib_without_sema_attach")) + goto cleanup; + skel->links.urandlib_read_without_sema = l; + + l = bpf_program__attach_usdt(skel->progs.urandlib_read_with_sema, + urand_pid, "./liburandom_read.so", + "urandlib", "read_with_sema", NULL); + if (!ASSERT_OK_PTR(l, "urandlib_with_sema_attach")) + goto cleanup; + skel->links.urandlib_read_with_sema = l; + + } + + /* trigger urandom_read USDTs */ + ASSERT_OK(urand_trigger(&urand_pipe), "urand_exit_code"); + + ASSERT_EQ(bss->urand_read_without_sema_call_cnt, 1, "urand_wo_sema_cnt"); + ASSERT_EQ(bss->urand_read_without_sema_buf_sz_sum, 256, "urand_wo_sema_sum"); + + ASSERT_EQ(bss->urand_read_with_sema_call_cnt, 1, "urand_w_sema_cnt"); + ASSERT_EQ(bss->urand_read_with_sema_buf_sz_sum, 256, "urand_w_sema_sum"); + + ASSERT_EQ(bss->urandlib_read_without_sema_call_cnt, 1, "urandlib_wo_sema_cnt"); + ASSERT_EQ(bss->urandlib_read_without_sema_buf_sz_sum, 256, "urandlib_wo_sema_sum"); + + ASSERT_EQ(bss->urandlib_read_with_sema_call_cnt, 1, "urandlib_w_sema_cnt"); + ASSERT_EQ(bss->urandlib_read_with_sema_buf_sz_sum, 256, "urandlib_w_sema_sum"); + +cleanup: + if (urand_pipe) + pclose(urand_pipe); + test_urandom_usdt__destroy(skel); +} + +void test_usdt(void) +{ + if (test__start_subtest("basic")) + subtest_basic_usdt(); + if (test__start_subtest("multispec")) + subtest_multispec_usdt(); + if (test__start_subtest("urand_auto_attach")) + 
subtest_urandom_usdt(true /* auto_attach */); + if (test__start_subtest("urand_pid_attach")) + subtest_urandom_usdt(false /* auto_attach */); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp.c b/tools/testing/selftests/bpf/prog_tests/xdp.c index ac65456b7ab8..ec21c53cb1da 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp.c @@ -13,8 +13,14 @@ void test_xdp(void) char buf[128]; struct ipv6hdr iph6; struct iphdr iph; - __u32 duration, retval, size; int err, prog_fd, map_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = 1, + ); err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); if (CHECK_FAIL(err)) @@ -26,21 +32,23 @@ void test_xdp(void) bpf_map_update_elem(map_fd, &key4, &value4, 0); bpf_map_update_elem(map_fd, &key6, &value6, 0); - err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), - buf, &size, &retval, &duration); + err = bpf_prog_test_run_opts(prog_fd, &topts); memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph)); - CHECK(err || retval != XDP_TX || size != 74 || - iph.protocol != IPPROTO_IPIP, "ipv4", - "err %d errno %d retval %d size %d\n", - err, errno, retval, size); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, XDP_TX, "ipv4 test_run retval"); + ASSERT_EQ(topts.data_size_out, 74, "ipv4 test_run data_size_out"); + ASSERT_EQ(iph.protocol, IPPROTO_IPIP, "ipv4 test_run iph.protocol"); - err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6), - buf, &size, &retval, &duration); + topts.data_in = &pkt_v6; + topts.data_size_in = sizeof(pkt_v6); + topts.data_size_out = sizeof(buf); + + err = bpf_prog_test_run_opts(prog_fd, &topts); memcpy(&iph6, buf + sizeof(struct ethhdr), sizeof(iph6)); - CHECK(err || retval != XDP_TX || size != 114 || - iph6.nexthdr != IPPROTO_IPV6, "ipv6", - "err %d errno %d retval %d size %d\n", - err, errno, retval, size); + ASSERT_OK(err, "test_run"); + ASSERT_EQ(topts.retval, XDP_TX, "ipv6 test_run retval"); + ASSERT_EQ(topts.data_size_out, 114, "ipv6 test_run data_size_out"); + ASSERT_EQ(iph6.nexthdr, IPPROTO_IPV6, "ipv6 test_run iph6.nexthdr"); out: bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c new file mode 100644 index 000000000000..2f033da4cd45 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <network_helpers.h> + +static void test_xdp_update_frags(void) +{ + const char *file = "./test_xdp_update_frags.o"; + int err, prog_fd, max_skb_frags, buf_size, num; + struct bpf_program *prog; + struct bpf_object *obj; + __u32 *offset; + __u8 *buf; + FILE *f; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + obj = bpf_object__open(file); + if (libbpf_get_error(obj)) + return; + + prog = bpf_object__next_program(obj, NULL); + if (bpf_object__load(obj)) + return; + + prog_fd = bpf_program__fd(prog); + + buf = malloc(128); + if (!ASSERT_OK_PTR(buf, "alloc buf 128b")) + goto out; + + memset(buf, 0, 128); + offset = (__u32 *)buf; + *offset = 16; + buf[*offset] = 0xaa; /* marker at offset 16 (head) */ + buf[*offset + 15] = 0xaa; /* marker at offset 31 (head) */ + + topts.data_in = buf; + topts.data_out = buf; + topts.data_size_in = 128; + topts.data_size_out = 128; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + + /* 
test_xdp_update_frags: buf[16,31]: 0xaa -> 0xbb */ + ASSERT_OK(err, "xdp_update_frag"); + ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval"); + ASSERT_EQ(buf[16], 0xbb, "xdp_update_frag buf[16]"); + ASSERT_EQ(buf[31], 0xbb, "xdp_update_frag buf[31]"); + + free(buf); + + buf = malloc(9000); + if (!ASSERT_OK_PTR(buf, "alloc buf 9Kb")) + goto out; + + memset(buf, 0, 9000); + offset = (__u32 *)buf; + *offset = 5000; + buf[*offset] = 0xaa; /* marker at offset 5000 (frag0) */ + buf[*offset + 15] = 0xaa; /* marker at offset 5015 (frag0) */ + + topts.data_in = buf; + topts.data_out = buf; + topts.data_size_in = 9000; + topts.data_size_out = 9000; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + + /* test_xdp_update_frags: buf[5000,5015]: 0xaa -> 0xbb */ + ASSERT_OK(err, "xdp_update_frag"); + ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval"); + ASSERT_EQ(buf[5000], 0xbb, "xdp_update_frag buf[5000]"); + ASSERT_EQ(buf[5015], 0xbb, "xdp_update_frag buf[5015]"); + + memset(buf, 0, 9000); + offset = (__u32 *)buf; + *offset = 3510; + buf[*offset] = 0xaa; /* marker at offset 3510 (head) */ + buf[*offset + 15] = 0xaa; /* marker at offset 3525 (frag0) */ + + err = bpf_prog_test_run_opts(prog_fd, &topts); + + /* test_xdp_update_frags: buf[3510,3525]: 0xaa -> 0xbb */ + ASSERT_OK(err, "xdp_update_frag"); + ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval"); + ASSERT_EQ(buf[3510], 0xbb, "xdp_update_frag buf[3510]"); + ASSERT_EQ(buf[3525], 0xbb, "xdp_update_frag buf[3525]"); + + memset(buf, 0, 9000); + offset = (__u32 *)buf; + *offset = 7606; + buf[*offset] = 0xaa; /* marker at offset 7606 (frag0) */ + buf[*offset + 15] = 0xaa; /* marker at offset 7621 (frag1) */ + + err = bpf_prog_test_run_opts(prog_fd, &topts); + + /* test_xdp_update_frags: buf[7606,7621]: 0xaa -> 0xbb */ + ASSERT_OK(err, "xdp_update_frag"); + ASSERT_EQ(topts.retval, XDP_PASS, "xdp_update_frag retval"); + ASSERT_EQ(buf[7606], 0xbb, "xdp_update_frag buf[7606]"); + ASSERT_EQ(buf[7621], 0xbb, "xdp_update_frag buf[7621]"); + + free(buf); + + /* test_xdp_update_frags: unsupported buffer size */ + f = fopen("/proc/sys/net/core/max_skb_frags", "r"); + if (!ASSERT_OK_PTR(f, "max_skb_frag file pointer")) + goto out; + + num = fscanf(f, "%d", &max_skb_frags); + fclose(f); + + if (!ASSERT_EQ(num, 1, "max_skb_frags read failed")) + goto out;
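For reference, the BPF side of these marker rewrites (progs/test_xdp_update_frags.c) is understood to read the target offset from the first four bytes of the packet and flip the 0xaa markers to 0xbb with the frag-aware byte helpers introduced alongside this test. A minimal sketch, simplified to a single marker (exact program body assumed; includes omitted):

	SEC("xdp.frags")
	int xdp_frags_prog(struct xdp_md *xdp)
	{
		__u32 offset;
		__u8 val;

		/* the first 4 bytes of the packet carry the marker offset */
		if (bpf_xdp_load_bytes(xdp, 0, &offset, sizeof(offset)))
			return XDP_DROP;
		if (bpf_xdp_load_bytes(xdp, offset, &val, sizeof(val)) || val != 0xaa)
			return XDP_DROP;
		val = 0xbb;	/* rewrite the marker, possibly inside a frag */
		if (bpf_xdp_store_bytes(xdp, offset, &val, sizeof(val)))
			return XDP_DROP;
		return XDP_PASS;
	}

+ + /* xdp_buff linear area size is always set to 4096 in the + * bpf_prog_test_run_xdp routine.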
+ */ + buf_size = 4096 + (max_skb_frags + 1) * sysconf(_SC_PAGE_SIZE); + buf = malloc(buf_size); + if (!ASSERT_OK_PTR(buf, "alloc buf")) + goto out; + + memset(buf, 0, buf_size); + offset = (__u32 *)buf; + *offset = 16; + buf[*offset] = 0xaa; + buf[*offset + 15] = 0xaa; + + topts.data_in = buf; + topts.data_out = buf; + topts.data_size_in = buf_size; + topts.data_size_out = buf_size; + + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_EQ(err, -ENOMEM, + "unsupported buf size, possible non-default /proc/sys/net/core/max_skb_frags?"); + free(buf); +out: + bpf_object__close(obj); +} + +void test_xdp_adjust_frags(void) +{ + if (test__start_subtest("xdp_adjust_frags")) + test_xdp_update_frags(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index 3f5a17c38be5..21ceac24e174 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -5,28 +5,35 @@ static void test_xdp_adjust_tail_shrink(void) { const char *file = "./test_xdp_adjust_tail_shrink.o"; - __u32 duration, retval, size, expect_sz; + __u32 expect_sz; struct bpf_object *obj; int err, prog_fd; char buf[128]; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = 1, + ); err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); - if (CHECK_FAIL(err)) + if (!ASSERT_OK(err, "test_xdp_adjust_tail_shrink")) return; - err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), - buf, &size, &retval, &duration); - - CHECK(err || retval != XDP_DROP, - "ipv4", "err %d errno %d retval %d size %d\n", - err, errno, retval, size); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "ipv4"); + ASSERT_EQ(topts.retval, XDP_DROP, "ipv4 retval"); expect_sz = sizeof(pkt_v6) - 20; /* Test shrink with 20 bytes */ - err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6), - buf, &size, &retval, &duration); - CHECK(err || retval != XDP_TX || size != expect_sz, - "ipv6", "err %d errno %d retval %d size %d expect-size %d\n", - err, errno, retval, size, expect_sz); + topts.data_in = &pkt_v6; + topts.data_size_in = sizeof(pkt_v6); + topts.data_size_out = sizeof(buf); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "ipv6"); + ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval"); + ASSERT_EQ(topts.data_size_out, expect_sz, "ipv6 size"); + bpf_object__close(obj); } @@ -35,25 +42,31 @@ static void test_xdp_adjust_tail_grow(void) { const char *file = "./test_xdp_adjust_tail_grow.o"; struct bpf_object *obj; char buf[4096]; /* avoid segfault: large buf to hold grow results */ - __u32 duration, retval, size, expect_sz; + __u32 expect_sz; int err, prog_fd; + LIBBPF_OPTS(bpf_test_run_opts, topts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .data_out = buf, + .data_size_out = sizeof(buf), + .repeat = 1, + ); err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); - if (CHECK_FAIL(err)) + if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow")) return; - err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), - buf, &size, &retval, &duration); - CHECK(err || retval != XDP_DROP, - "ipv4", "err %d errno %d retval %d size %d\n", - err, errno, retval, size); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "ipv4"); + ASSERT_EQ(topts.retval, XDP_DROP, "ipv4 retval"); expect_sz = sizeof(pkt_v6) + 40; /* Test grow with 40 bytes */ - err = 
bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6) /* 74 */, - buf, &size, &retval, &duration); - CHECK(err || retval != XDP_TX || size != expect_sz, - "ipv6", "err %d errno %d retval %d size %d expect-size %d\n", - err, errno, retval, size, expect_sz); + topts.data_in = &pkt_v6; + topts.data_size_in = sizeof(pkt_v6); + err = bpf_prog_test_run_opts(prog_fd, &topts); + ASSERT_OK(err, "ipv6"); + ASSERT_EQ(topts.retval, XDP_TX, "ipv6 retval"); + ASSERT_EQ(topts.data_size_out, expect_sz, "ipv6 size"); bpf_object__close(obj); } @@ -65,18 +78,18 @@ static void test_xdp_adjust_tail_grow2(void) int tailroom = 320; /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info))*/; struct bpf_object *obj; int err, cnt, i; - int max_grow; + int max_grow, prog_fd; - struct bpf_prog_test_run_attr tattr = { + LIBBPF_OPTS(bpf_test_run_opts, tattr, .repeat = 1, .data_in = &buf, .data_out = &buf, .data_size_in = 0, /* Per test */ .data_size_out = 0, /* Per test */ - }; + ); - err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &tattr.prog_fd); - if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno)) + err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); + if (!ASSERT_OK(err, "test_xdp_adjust_tail_grow2")) return; /* Test case-64 */ @@ -84,49 +97,171 @@ static void test_xdp_adjust_tail_grow2(void) tattr.data_size_in = 64; /* Determine test case via pkt size */ tattr.data_size_out = 128; /* Limit copy_size */ /* Kernel side alloc packet memory area that is zero init */ - err = bpf_prog_test_run_xattr(&tattr); + err = bpf_prog_test_run_opts(prog_fd, &tattr); - CHECK_ATTR(errno != ENOSPC /* Due limit copy_size in bpf_test_finish */ - || tattr.retval != XDP_TX - || tattr.data_size_out != 192, /* Expected grow size */ - "case-64", - "err %d errno %d retval %d size %d\n", - err, errno, tattr.retval, tattr.data_size_out); + ASSERT_EQ(errno, ENOSPC, "case-64 errno"); /* Due to the copy_size limit in bpf_test_finish */ + ASSERT_EQ(tattr.retval, XDP_TX, "case-64 retval"); + ASSERT_EQ(tattr.data_size_out, 192, "case-64 data_size_out"); /* Expected grow size */ /* Extra checks for data contents */ - CHECK_ATTR(tattr.data_size_out != 192 - || buf[0] != 1 || buf[63] != 1 /* 0-63 memset to 1 */ - || buf[64] != 0 || buf[127] != 0 /* 64-127 memset to 0 */ - || buf[128] != 1 || buf[191] != 1, /*128-191 memset to 1 */ - "case-64-data", - "err %d errno %d retval %d size %d\n", - err, errno, tattr.retval, tattr.data_size_out); + ASSERT_EQ(buf[0], 1, "case-64-data buf[0]"); /* 0-63 memset to 1 */ + ASSERT_EQ(buf[63], 1, "case-64-data buf[63]"); + ASSERT_EQ(buf[64], 0, "case-64-data buf[64]"); /* 64-127 memset to 0 */ + ASSERT_EQ(buf[127], 0, "case-64-data buf[127]"); + ASSERT_EQ(buf[128], 1, "case-64-data buf[128]"); /* 128-191 memset to 1 */ + ASSERT_EQ(buf[191], 1, "case-64-data buf[191]"); /* Test case-128 */ memset(buf, 2, sizeof(buf)); tattr.data_size_in = 128; /* Determine test case via pkt size */ tattr.data_size_out = sizeof(buf); /* Copy everything */ - err = bpf_prog_test_run_xattr(&tattr); + err = bpf_prog_test_run_opts(prog_fd, &tattr); max_grow = 4096 - XDP_PACKET_HEADROOM - tailroom; /* 3520 */ - CHECK_ATTR(err - || tattr.retval != XDP_TX - || tattr.data_size_out != max_grow,/* Expect max grow size */ - "case-128", - "err %d errno %d retval %d size %d expect-size %d\n", - err, errno, tattr.retval, tattr.data_size_out, max_grow); + ASSERT_OK(err, "case-128"); + ASSERT_EQ(tattr.retval, XDP_TX, "case-128 retval"); + ASSERT_EQ(tattr.data_size_out, max_grow, "case-128 data_size_out"); /* Expect max grow */ /* Extra 
checks for data content: Count grow size, will contain zeros */ for (i = 0, cnt = 0; i < sizeof(buf); i++) { if (buf[i] == 0) cnt++; } - CHECK_ATTR((cnt != (max_grow - tattr.data_size_in)) /* Grow increase */ - || tattr.data_size_out != max_grow, /* Total grow size */ - "case-128-data", - "err %d errno %d retval %d size %d grow-size %d\n", - err, errno, tattr.retval, tattr.data_size_out, cnt); + ASSERT_EQ(cnt, max_grow - tattr.data_size_in, "case-128-data cnt"); /* Grow increase */ + ASSERT_EQ(tattr.data_size_out, max_grow, "case-128-data data_size_out"); /* Total grow */ + + bpf_object__close(obj); +} + +static void test_xdp_adjust_frags_tail_shrink(void) +{ + const char *file = "./test_xdp_adjust_tail_shrink.o"; + __u32 exp_size; + struct bpf_program *prog; + struct bpf_object *obj; + int err, prog_fd; + __u8 *buf; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + /* For the individual test cases, the first byte in the packet + * indicates which test will be run. + */ + obj = bpf_object__open(file); + if (libbpf_get_error(obj)) + return; + + prog = bpf_object__next_program(obj, NULL); + if (bpf_object__load(obj)) + return; + + prog_fd = bpf_program__fd(prog); + + buf = malloc(9000); + if (!ASSERT_OK_PTR(buf, "alloc buf 9Kb")) + goto out; + + memset(buf, 0, 9000); + + /* Test case removing 10 bytes from last frag, NOT freeing it */ + exp_size = 8990; /* 9000 - 10 */ + topts.data_in = buf; + topts.data_out = buf; + topts.data_size_in = 9000; + topts.data_size_out = 9000; + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "9Kb-10b"); + ASSERT_EQ(topts.retval, XDP_TX, "9Kb-10b retval"); + ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-10b size"); + + /* Test case removing one of two pages, assuming 4K pages */ + buf[0] = 1; + exp_size = 4900; /* 9000 - 4100 */ + + topts.data_size_out = 9000; /* reset from previous invocation */ + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "9Kb-4Kb"); + ASSERT_EQ(topts.retval, XDP_TX, "9Kb-4Kb retval"); + ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-4Kb size"); + + /* Test case removing two pages resulting in a linear xdp_buff */ + buf[0] = 2; + exp_size = 800; /* 9000 - 8200 */ + topts.data_size_out = 9000; /* reset from previous invocation */ + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "9Kb-9Kb"); + ASSERT_EQ(topts.retval, XDP_TX, "9Kb-9Kb retval"); + ASSERT_EQ(topts.data_size_out, exp_size, "9Kb-9Kb size"); + + free(buf); +out: + bpf_object__close(obj); +} + +static void test_xdp_adjust_frags_tail_grow(void) +{ + const char *file = "./test_xdp_adjust_tail_grow.o"; + __u32 exp_size; + struct bpf_program *prog; + struct bpf_object *obj; + int err, i, prog_fd; + __u8 *buf; + LIBBPF_OPTS(bpf_test_run_opts, topts); + + obj = bpf_object__open(file); + if (libbpf_get_error(obj)) + return; + + prog = bpf_object__next_program(obj, NULL); + if (bpf_object__load(obj)) + return; + + prog_fd = bpf_program__fd(prog); + + buf = malloc(16384); + if (!ASSERT_OK_PTR(buf, "alloc buf 16Kb")) + goto out; + + /* Test case add 10 bytes to last frag */ + memset(buf, 1, 16384); + exp_size = 9000 + 10; + + topts.data_in = buf; + topts.data_out = buf; + topts.data_size_in = 9000; + topts.data_size_out = 16384; + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "9Kb+10b"); + ASSERT_EQ(topts.retval, XDP_TX, "9Kb+10b retval"); + ASSERT_EQ(topts.data_size_out, exp_size, "9Kb+10b size"); + + for (i = 0; i < 9000; i++) + ASSERT_EQ(buf[i], 1, "9Kb+10b-old"); + + for (i = 9000; i < 9010; i++) + ASSERT_EQ(buf[i], 0, 
"9Kb+10b-new"); + + for (i = 9010; i < 16384; i++) + ASSERT_EQ(buf[i], 1, "9Kb+10b-untouched"); + + /* Test a too large grow */ + memset(buf, 1, 16384); + exp_size = 9001; + + topts.data_in = topts.data_out = buf; + topts.data_size_in = 9001; + topts.data_size_out = 16384; + err = bpf_prog_test_run_opts(prog_fd, &topts); + + ASSERT_OK(err, "9Kb+10b"); + ASSERT_EQ(topts.retval, XDP_DROP, "9Kb+10b retval"); + ASSERT_EQ(topts.data_size_out, exp_size, "9Kb+10b size"); + free(buf); +out: bpf_object__close(obj); } @@ -138,4 +273,8 @@ void test_xdp_adjust_tail(void) test_xdp_adjust_tail_grow(); if (test__start_subtest("xdp_adjust_tail_grow2")) test_xdp_adjust_tail_grow2(); + if (test__start_subtest("xdp_adjust_frags_tail_shrink")) + test_xdp_adjust_frags_tail_shrink(); + if (test__start_subtest("xdp_adjust_frags_tail_grow")) + test_xdp_adjust_frags_tail_grow(); } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c index c6fa390e3aa1..62aa3edda5e6 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c @@ -11,8 +11,7 @@ void serial_test_xdp_attach(void) const char *file = "./test_xdp.o"; struct bpf_prog_info info = {}; int err, fd1, fd2, fd3; - DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts, - .old_fd = -1); + LIBBPF_OPTS(bpf_xdp_attach_opts, opts); len = sizeof(info); @@ -38,49 +37,47 @@ void serial_test_xdp_attach(void) if (CHECK_FAIL(err)) goto out_2; - err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd1, XDP_FLAGS_REPLACE, - &opts); + err = bpf_xdp_attach(IFINDEX_LO, fd1, XDP_FLAGS_REPLACE, &opts); if (CHECK(err, "load_ok", "initial load failed")) goto out_close; - err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0); + err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0); if (CHECK(err || id0 != id1, "id1_check", "loaded prog id %u != id1 %u, err %d", id0, id1, err)) goto out_close; - err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, XDP_FLAGS_REPLACE, - &opts); + err = bpf_xdp_attach(IFINDEX_LO, fd2, XDP_FLAGS_REPLACE, &opts); if (CHECK(!err, "load_fail", "load with expected id didn't fail")) goto out; - opts.old_fd = fd1; - err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, 0, &opts); + opts.old_prog_fd = fd1; + err = bpf_xdp_attach(IFINDEX_LO, fd2, 0, &opts); if (CHECK(err, "replace_ok", "replace valid old_fd failed")) goto out; - err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0); + err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0); if (CHECK(err || id0 != id2, "id2_check", "loaded prog id %u != id2 %u, err %d", id0, id2, err)) goto out_close; - err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd3, 0, &opts); + err = bpf_xdp_attach(IFINDEX_LO, fd3, 0, &opts); if (CHECK(!err, "replace_fail", "replace invalid old_fd didn't fail")) goto out; - err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, 0, &opts); + err = bpf_xdp_detach(IFINDEX_LO, 0, &opts); if (CHECK(!err, "remove_fail", "remove invalid old_fd didn't fail")) goto out; - opts.old_fd = fd2; - err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, 0, &opts); + opts.old_prog_fd = fd2; + err = bpf_xdp_detach(IFINDEX_LO, 0, &opts); if (CHECK(err, "remove_ok", "remove valid old_fd failed")) goto out; - err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0); + err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0); if (CHECK(err || id0 != 0, "unload_check", "loaded prog id %u != 0, err %d", id0, err)) goto out_close; out: - bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0); + bpf_xdp_detach(IFINDEX_LO, 0, NULL); out_close: bpf_object__close(obj3); out_2: diff --git 
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
index c98a897ad692..76967d8ace9c 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c
@@ -10,40 +10,101 @@ struct meta {
 	int pkt_len;
 };
 
+struct test_ctx_s {
+	bool passed;
+	int pkt_size;
+};
+
+struct test_ctx_s test_ctx;
+
 static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 {
-	int duration = 0;
 	struct meta *meta = (struct meta *)data;
 	struct ipv4_packet *trace_pkt_v4 = data + sizeof(*meta);
+	unsigned char *raw_pkt = data + sizeof(*meta);
+	struct test_ctx_s *tst_ctx = ctx;
 
-	if (CHECK(size < sizeof(pkt_v4) + sizeof(*meta),
-		  "check_size", "size %u < %zu\n",
-		  size, sizeof(pkt_v4) + sizeof(*meta)))
-		return;
+	ASSERT_GE(size, sizeof(pkt_v4) + sizeof(*meta), "check_size");
+	ASSERT_EQ(meta->ifindex, if_nametoindex("lo"), "check_meta_ifindex");
+	ASSERT_EQ(meta->pkt_len, tst_ctx->pkt_size, "check_meta_pkt_len");
+	ASSERT_EQ(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)), 0,
+		  "check_packet_content");
+
+	if (meta->pkt_len > sizeof(pkt_v4)) {
+		for (int i = 0; i < meta->pkt_len - sizeof(pkt_v4); i++)
+			ASSERT_EQ(raw_pkt[i + sizeof(pkt_v4)], (unsigned char)i,
+				  "check_packet_content");
+	}
+
+	tst_ctx->passed = true;
+}
+
+#define BUF_SZ 9000
+
+static void run_xdp_bpf2bpf_pkt_size(int pkt_fd, struct perf_buffer *pb,
+				     struct test_xdp_bpf2bpf *ftrace_skel,
+				     int pkt_size)
+{
+	__u8 *buf, *buf_in;
+	int err;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
 
-	if (CHECK(meta->ifindex != if_nametoindex("lo"), "check_meta_ifindex",
-		  "meta->ifindex = %d\n", meta->ifindex))
+	if (!ASSERT_LE(pkt_size, BUF_SZ, "pkt_size") ||
+	    !ASSERT_GE(pkt_size, sizeof(pkt_v4), "pkt_size"))
 		return;
 
-	if (CHECK(meta->pkt_len != sizeof(pkt_v4), "check_meta_pkt_len",
-		  "meta->pkt_len = %zd\n", sizeof(pkt_v4)))
+	buf_in = malloc(BUF_SZ);
+	if (!ASSERT_OK_PTR(buf_in, "buf_in malloc()"))
 		return;
 
-	if (CHECK(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)),
-		  "check_packet_content", "content not the same\n"))
+	buf = malloc(BUF_SZ);
+	if (!ASSERT_OK_PTR(buf, "buf malloc()")) {
+		free(buf_in);
 		return;
+	}
 
-	*(bool *)ctx = true;
+	test_ctx.passed = false;
+	test_ctx.pkt_size = pkt_size;
+
+	memcpy(buf_in, &pkt_v4, sizeof(pkt_v4));
+	if (pkt_size > sizeof(pkt_v4)) {
+		for (int i = 0; i < (pkt_size - sizeof(pkt_v4)); i++)
+			buf_in[i + sizeof(pkt_v4)] = i;
+	}
+
+	/* Run test program */
+	topts.data_in = buf_in;
+	topts.data_size_in = pkt_size;
+	topts.data_out = buf;
+	topts.data_size_out = BUF_SZ;
+
+	err = bpf_prog_test_run_opts(pkt_fd, &topts);
+
+	ASSERT_OK(err, "ipv4");
+	ASSERT_EQ(topts.retval, XDP_PASS, "ipv4 retval");
+	ASSERT_EQ(topts.data_size_out, pkt_size, "ipv4 size");
+
+	/* Make sure bpf_xdp_output() was triggered and it sent the expected
+	 * data to the perf ring buffer.
+	 */
+	err = perf_buffer__poll(pb, 100);
+
+	ASSERT_GE(err, 0, "perf_buffer__poll");
+	ASSERT_TRUE(test_ctx.passed, "test passed");
+	/* Verify test results */
+	ASSERT_EQ(ftrace_skel->bss->test_result_fentry, if_nametoindex("lo"),
+		  "fentry result");
+	ASSERT_EQ(ftrace_skel->bss->test_result_fexit, XDP_PASS, "fexit result");
+
+	free(buf);
+	free(buf_in);
 }
 
 void test_xdp_bpf2bpf(void)
 {
-	__u32 duration = 0, retval, size;
-	char buf[128];
 	int err, pkt_fd, map_fd;
-	bool passed = false;
-	struct iphdr iph;
-	struct iptnl_info value4 = {.family = AF_INET};
+	int pkt_sizes[] = {sizeof(pkt_v4), 1024, 4100, 8200};
+	struct iptnl_info value4 = {.family = AF_INET6};
 	struct test_xdp *pkt_skel = NULL;
 	struct test_xdp_bpf2bpf *ftrace_skel = NULL;
 	struct vip key4 = {.protocol = 6, .family = AF_INET};
@@ -52,7 +113,7 @@ void test_xdp_bpf2bpf(void)
 
 	/* Load XDP program to introspect */
 	pkt_skel = test_xdp__open_and_load();
-	if (CHECK(!pkt_skel, "pkt_skel_load", "test_xdp skeleton failed\n"))
+	if (!ASSERT_OK_PTR(pkt_skel, "test_xdp__open_and_load"))
 		return;
 
 	pkt_fd = bpf_program__fd(pkt_skel->progs._xdp_tx_iptunnel);
@@ -62,7 +123,7 @@ void test_xdp_bpf2bpf(void)
 
 	/* Load trace program */
 	ftrace_skel = test_xdp_bpf2bpf__open();
-	if (CHECK(!ftrace_skel, "__open", "ftrace skeleton failed\n"))
+	if (!ASSERT_OK_PTR(ftrace_skel, "test_xdp_bpf2bpf__open"))
 		goto out;
 
 	/* Demonstrate the bpf_program__set_attach_target() API rather than
@@ -77,50 +138,24 @@ void test_xdp_bpf2bpf(void)
 	bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");
 
 	err = test_xdp_bpf2bpf__load(ftrace_skel);
-	if (CHECK(err, "__load", "ftrace skeleton failed\n"))
+	if (!ASSERT_OK(err, "test_xdp_bpf2bpf__load"))
 		goto out;
 
 	err = test_xdp_bpf2bpf__attach(ftrace_skel);
-	if (CHECK(err, "ftrace_attach", "ftrace attach failed: %d\n", err))
+	if (!ASSERT_OK(err, "test_xdp_bpf2bpf__attach"))
 		goto out;
 
 	/* Set up perf buffer */
-	pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 1,
-			      on_sample, NULL, &passed, NULL);
+	pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 8,
+			      on_sample, NULL, &test_ctx, NULL);
 	if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
 		goto out;
 
-	/* Run test program */
-	err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4),
-				buf, &size, &retval, &duration);
-	memcpy(&iph, buf + sizeof(struct ethhdr), sizeof(iph));
-	if (CHECK(err || retval != XDP_TX || size != 74 ||
-		  iph.protocol != IPPROTO_IPIP, "ipv4",
-		  "err %d errno %d retval %d size %d\n",
-		  err, errno, retval, size))
-		goto out;
-
-	/* Make sure bpf_xdp_output() was triggered and it sent the expected
-	 * data to the perf ring buffer.
-	 */
-	err = perf_buffer__poll(pb, 100);
-	if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
-		goto out;
-
-	CHECK_FAIL(!passed);
-
-	/* Verify test results */
-	if (CHECK(ftrace_skel->bss->test_result_fentry != if_nametoindex("lo"),
-		  "result", "fentry failed err %llu\n",
-		  ftrace_skel->bss->test_result_fentry))
-		goto out;
-
-	CHECK(ftrace_skel->bss->test_result_fexit != XDP_TX, "result",
-	      "fexit failed err %llu\n", ftrace_skel->bss->test_result_fexit);
-
+	for (int i = 0; i < ARRAY_SIZE(pkt_sizes); i++)
+		run_xdp_bpf2bpf_pkt_size(pkt_fd, pb, ftrace_skel,
+					 pkt_sizes[i]);
 out:
-	if (pb)
-		perf_buffer__free(pb);
+	perf_buffer__free(pb);
 	test_xdp__destroy(pkt_skel);
 	test_xdp_bpf2bpf__destroy(ftrace_skel);
 }
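
The rework threads state to the perf-buffer callback through the context argument of perf_buffer__new(): the pointer passed there (&test_ctx here, previously &passed) is handed back verbatim as the first argument of the sample callback. A small sketch of that plumbing under assumed names (my_ctx, my_sample_cb; map_fd is the fd of a BPF_MAP_TYPE_PERF_EVENT_ARRAY):

struct my_ctx {
	bool passed;
	__u32 expected;
};

static void my_sample_cb(void *ctx, int cpu, void *data, __u32 size)
{
	struct my_ctx *c = ctx; /* same pointer given to perf_buffer__new() */

	if (size >= sizeof(__u32) && *(__u32 *)data == c->expected)
		c->passed = true;
}

/* at the call site: */
	struct my_ctx c = { .expected = 0x42 };
	struct perf_buffer *pb;

	/* 8 = per-CPU ring size in pages; libbpf requires a power of two */
	pb = perf_buffer__new(map_fd, 8, my_sample_cb,
			      NULL /* lost-samples cb */, &c, NULL /* opts */);
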
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
index fd812bd43600..f775a1613833 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
@@ -3,11 +3,12 @@
 #include <linux/if_link.h>
 #include <test_progs.h>
 
+#include "test_xdp_with_cpumap_frags_helpers.skel.h"
 #include "test_xdp_with_cpumap_helpers.skel.h"
 
 #define IFINDEX_LO 1
 
-void serial_test_xdp_cpumap_attach(void)
+static void test_xdp_with_cpumap_helpers(void)
 {
 	struct test_xdp_with_cpumap_helpers *skel;
 	struct bpf_prog_info info = {};
@@ -23,11 +24,11 @@ void serial_test_xdp_cpumap_attach(void)
 		return;
 
 	prog_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
-	err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE);
+	err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
 	if (!ASSERT_OK(err, "Generic attach of program with 8-byte CPUMAP"))
 		goto out_close;
 
-	err = bpf_set_link_xdp_fd(IFINDEX_LO, -1, XDP_FLAGS_SKB_MODE);
+	err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
 	ASSERT_OK(err, "XDP program detach");
 
 	prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm);
@@ -45,15 +46,76 @@ void serial_test_xdp_cpumap_attach(void)
 	ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to cpumap entry prog_id");
 
 	/* can not attach BPF_XDP_CPUMAP program to a device */
-	err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE);
+	err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
 	if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_CPUMAP program"))
-		bpf_set_link_xdp_fd(IFINDEX_LO, -1, XDP_FLAGS_SKB_MODE);
+		bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
 
 	val.qsize = 192;
 	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog);
 	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
 	ASSERT_NEQ(err, 0, "Add non-BPF_XDP_CPUMAP program to cpumap entry");
 
+	/* Try to attach BPF_XDP program with frags to cpumap when we have
+	 * already loaded a BPF_XDP program on the map
+	 */
+	idx = 1;
+	val.qsize = 192;
+	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_cm_frags);
+	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+	ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to cpumap entry");
+
 out_close:
 	test_xdp_with_cpumap_helpers__destroy(skel);
 }
+
+static void test_xdp_with_cpumap_frags_helpers(void)
+{
+	struct test_xdp_with_cpumap_frags_helpers *skel;
+	struct bpf_prog_info info = {};
+	__u32 len = sizeof(info);
+	struct bpf_cpumap_val val = {
+		.qsize = 192,
+	};
+	int err, frags_prog_fd, map_fd;
+	__u32 idx = 0;
+
+	skel = test_xdp_with_cpumap_frags_helpers__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "test_xdp_with_cpumap_helpers__open_and_load"))
+		return;
+
+	frags_prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm_frags);
+	map_fd = bpf_map__fd(skel->maps.cpu_map);
+	err = bpf_obj_get_info_by_fd(frags_prog_fd, &info, &len);
+	if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd"))
+		goto out_close;
+
+	val.bpf_prog.fd = frags_prog_fd;
+	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+	ASSERT_OK(err, "Add program to cpumap entry");
+
+	err = bpf_map_lookup_elem(map_fd, &idx, &val);
+	ASSERT_OK(err, "Read cpumap entry");
+	ASSERT_EQ(info.id, val.bpf_prog.id,
+		  "Match program id to cpumap entry prog_id");
+
+	/* Try to attach BPF_XDP program to cpumap when we have
+	 * already loaded a BPF_XDP program with frags on the map
+	 */
+	idx = 1;
+	val.qsize = 192;
+	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_cm);
+	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+	ASSERT_NEQ(err, 0, "Add BPF_XDP program to cpumap entry");
+
+out_close:
+	test_xdp_with_cpumap_frags_helpers__destroy(skel);
+}
+
+void serial_test_xdp_cpumap_attach(void)
+{
+	if (test__start_subtest("CPUMAP with programs in entries"))
+		test_xdp_with_cpumap_helpers();
+
+	if (test__start_subtest("CPUMAP with frags programs in entries"))
+		test_xdp_with_cpumap_frags_helpers();
+}
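
Both subtests lean on the fact that a cpumap entry can carry an optional per-entry XDP program: struct bpf_cpumap_val pairs the ring size with a program fd, and program compatibility (expected attach type BPF_XDP_CPUMAP, and now the frags capability) is validated when the entry is installed, which is exactly what the failing bpf_map_update_elem() cases assert. A sketch of installing and reading back such an entry, with cpu_map_fd and prog_fd assumed:

struct bpf_cpumap_val val = {
	.qsize = 192,           /* size of the per-CPU ptr_ring */
	.bpf_prog.fd = prog_fd, /* BPF_XDP_CPUMAP program; 0 = none */
};
__u32 cpu = 0;
int err;

err = bpf_map_update_elem(cpu_map_fd, &cpu, &val, 0);

/* on lookup the kernel reports the program id, not the fd */
err = bpf_map_lookup_elem(cpu_map_fd, &cpu, &val);
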
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
index 3079d5568f8f..ead40016c324 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c
@@ -4,6 +4,7 @@
 #include <test_progs.h>
 
 #include "test_xdp_devmap_helpers.skel.h"
+#include "test_xdp_with_devmap_frags_helpers.skel.h"
 #include "test_xdp_with_devmap_helpers.skel.h"
 
 #define IFINDEX_LO 1
@@ -25,11 +26,11 @@ static void test_xdp_with_devmap_helpers(void)
 		return;
 
 	dm_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
-	err = bpf_set_link_xdp_fd(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE);
+	err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL);
 	if (!ASSERT_OK(err, "Generic attach of program with 8-byte devmap"))
 		goto out_close;
 
-	err = bpf_set_link_xdp_fd(IFINDEX_LO, -1, XDP_FLAGS_SKB_MODE);
+	err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
 	ASSERT_OK(err, "XDP program detach");
 
 	dm_fd = bpf_program__fd(skel->progs.xdp_dummy_dm);
@@ -47,15 +48,24 @@ static void test_xdp_with_devmap_helpers(void)
 	ASSERT_EQ(info.id, val.bpf_prog.id, "Match program id to devmap entry prog_id");
 
 	/* can not attach BPF_XDP_DEVMAP program to a device */
-	err = bpf_set_link_xdp_fd(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE);
+	err = bpf_xdp_attach(IFINDEX_LO, dm_fd, XDP_FLAGS_SKB_MODE, NULL);
 	if (!ASSERT_NEQ(err, 0, "Attach of BPF_XDP_DEVMAP program"))
-		bpf_set_link_xdp_fd(IFINDEX_LO, -1, XDP_FLAGS_SKB_MODE);
+		bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_SKB_MODE, NULL);
 
 	val.ifindex = 1;
 	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog);
 	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
 	ASSERT_NEQ(err, 0, "Add non-BPF_XDP_DEVMAP program to devmap entry");
 
+	/* Try to attach BPF_XDP program with frags to devmap when we have
+	 * already loaded a BPF_XDP program on the map
+	 */
+	idx = 1;
+	val.ifindex = 1;
+	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_dm_frags);
+	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+	ASSERT_NEQ(err, 0, "Add BPF_XDP program with frags to devmap entry");
+
 out_close:
 	test_xdp_with_devmap_helpers__destroy(skel);
 }
@@ -71,12 +81,57 @@ static void test_neg_xdp_devmap_helpers(void)
 	}
 }
 
+static void test_xdp_with_devmap_frags_helpers(void)
+{
+	struct test_xdp_with_devmap_frags_helpers *skel;
+	struct bpf_prog_info info = {};
+	struct bpf_devmap_val val = {
+		.ifindex = IFINDEX_LO,
+	};
+	__u32 len = sizeof(info);
+	int err, dm_fd_frags, map_fd;
+	__u32 idx = 0;
+
+	skel = test_xdp_with_devmap_frags_helpers__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "test_xdp_with_devmap_helpers__open_and_load"))
+		return;
+
+	dm_fd_frags = bpf_program__fd(skel->progs.xdp_dummy_dm_frags);
+	map_fd = bpf_map__fd(skel->maps.dm_ports);
+	err = bpf_obj_get_info_by_fd(dm_fd_frags, &info, &len);
+	if (!ASSERT_OK(err, "bpf_obj_get_info_by_fd"))
+		goto out_close;
+
+	val.bpf_prog.fd = dm_fd_frags;
+	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+	ASSERT_OK(err, "Add frags program to devmap entry");
+
+	err = bpf_map_lookup_elem(map_fd, &idx, &val);
+	ASSERT_OK(err, "Read devmap entry");
+	ASSERT_EQ(info.id, val.bpf_prog.id,
+		  "Match program id to devmap entry prog_id");
+
+	/* Try to attach BPF_XDP program to devmap when we have
+	 * already loaded a BPF_XDP program with frags on the map
+	 */
+	idx = 1;
+	val.ifindex = 1;
+	val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_dm);
+	err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+	ASSERT_NEQ(err, 0, "Add BPF_XDP program to devmap entry");
+
+out_close:
+	test_xdp_with_devmap_frags_helpers__destroy(skel);
+}
+
 void serial_test_xdp_devmap_attach(void)
 {
 	if (test__start_subtest("DEVMAP with programs in entries"))
 		test_xdp_with_devmap_helpers();
 
+	if (test__start_subtest("DEVMAP with frags programs in entries"))
+		test_xdp_with_devmap_frags_helpers();
+
 	if (test__start_subtest("Verifier check of DEVMAP programs"))
 		test_neg_xdp_devmap_helpers();
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
new file mode 100644
index 000000000000..a50971c6cf4a
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <net/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ipv6.h>
+#include <linux/in6.h>
+#include <linux/udp.h>
+#include <bpf/bpf_endian.h>
+#include "test_xdp_do_redirect.skel.h"
+
+#define SYS(fmt, ...)						\
+	({							\
+		char cmd[1024];					\
+		snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__);	\
+		if (!ASSERT_OK(system(cmd), cmd))		\
+			goto out;				\
+	})
+
+struct udp_packet {
+	struct ethhdr eth;
+	struct ipv6hdr iph;
+	struct udphdr udp;
+	__u8 payload[64 - sizeof(struct udphdr)
+		     - sizeof(struct ethhdr) - sizeof(struct ipv6hdr)];
+} __packed;
+
+static struct udp_packet pkt_udp = {
+	.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
+	.eth.h_dest = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55},
+	.eth.h_source = {0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb},
+	.iph.version = 6,
+	.iph.nexthdr = IPPROTO_UDP,
+	.iph.payload_len = bpf_htons(sizeof(struct udp_packet)
+				     - offsetof(struct udp_packet, udp)),
+	.iph.hop_limit = 2,
+	.iph.saddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(1)},
+	.iph.daddr.s6_addr16 = {bpf_htons(0xfc00), 0, 0, 0, 0, 0, 0, bpf_htons(2)},
+	.udp.source = bpf_htons(1),
+	.udp.dest = bpf_htons(1),
+	.udp.len = bpf_htons(sizeof(struct udp_packet)
+			     - offsetof(struct udp_packet, udp)),
+	.payload = {0x42}, /* receiver XDP program matches on this */
+};
+
+static int attach_tc_prog(struct bpf_tc_hook *hook, int fd)
+{
+	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd);
+	int ret;
+
+	ret = bpf_tc_hook_create(hook);
+	if (!ASSERT_OK(ret, "create tc hook"))
+		return ret;
+
+	ret = bpf_tc_attach(hook, &opts);
+	if (!ASSERT_OK(ret, "bpf_tc_attach")) {
+		bpf_tc_hook_destroy(hook);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* The maximum permissible size is: PAGE_SIZE - sizeof(struct xdp_page_head) -
+ * sizeof(struct skb_shared_info) - XDP_PACKET_HEADROOM = 3368 bytes
+ */
+#define MAX_PKT_SIZE 3368
+static void test_max_pkt_size(int fd)
+{
+	char data[MAX_PKT_SIZE + 1] = {};
+	int err;
+	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+			    .data_in = &data,
+			    .data_size_in = MAX_PKT_SIZE,
+			    .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+			    .repeat = 1,
+		);
+	err = bpf_prog_test_run_opts(fd, &opts);
+	ASSERT_OK(err, "prog_run_max_size");
+
+	opts.data_size_in += 1;
+	err = bpf_prog_test_run_opts(fd, &opts);
+	ASSERT_EQ(err, -EINVAL, "prog_run_too_big");
+}
+
+#define NUM_PKTS 10000
+void test_xdp_do_redirect(void)
+{
+	int err, xdp_prog_fd, tc_prog_fd, ifindex_src, ifindex_dst;
+	char data[sizeof(pkt_udp) + sizeof(__u32)];
+	struct test_xdp_do_redirect *skel = NULL;
+	struct nstoken *nstoken = NULL;
+	struct bpf_link *link;
+
+	struct xdp_md ctx_in = { .data = sizeof(__u32),
+				 .data_end = sizeof(data) };
+	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+			    .data_in = &data,
+			    .data_size_in = sizeof(data),
+			    .ctx_in = &ctx_in,
+			    .ctx_size_in = sizeof(ctx_in),
+			    .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
+			    .repeat = NUM_PKTS,
+			    .batch_size = 64,
+		);
+	DECLARE_LIBBPF_OPTS(bpf_tc_hook, tc_hook,
+			    .attach_point = BPF_TC_INGRESS);
+
+	memcpy(&data[sizeof(__u32)], &pkt_udp, sizeof(pkt_udp));
+	*((__u32 *)data) = 0x42; /* metadata test value */
+
+	skel = test_xdp_do_redirect__open();
+	if (!ASSERT_OK_PTR(skel, "skel"))
+		return;
+
+	/* The XDP program we run with bpf_prog_run() will cycle through all
+	 * three xmit (PASS/TX/REDIRECT) return codes starting from above, and
+	 * ending up with PASS, so we should end up with two packets on the dst
+	 * iface and NUM_PKTS-2 in the TC hook. We match the packets on the UDP
+	 * payload.
+	 */
+	SYS("ip netns add testns");
+	nstoken = open_netns("testns");
+	if (!ASSERT_OK_PTR(nstoken, "setns"))
+		goto out;
+
+	SYS("ip link add veth_src type veth peer name veth_dst");
+	SYS("ip link set dev veth_src address 00:11:22:33:44:55");
+	SYS("ip link set dev veth_dst address 66:77:88:99:aa:bb");
+	SYS("ip link set dev veth_src up");
+	SYS("ip link set dev veth_dst up");
+	SYS("ip addr add dev veth_src fc00::1/64");
+	SYS("ip addr add dev veth_dst fc00::2/64");
+	SYS("ip neigh add fc00::2 dev veth_src lladdr 66:77:88:99:aa:bb");
+
+	/* We enable forwarding in the test namespace because that will cause
+	 * the packets that go through the kernel stack (with XDP_PASS) to be
+	 * forwarded back out the same interface (because of the packet dst
+	 * combined with the interface addresses). When this happens, the
+	 * regular forwarding path will end up going through the same
+	 * veth_xdp_xmit() call as the XDP_REDIRECT code, which can cause a
+	 * deadlock if it happens on the same CPU. There's a local_bh_disable()
+	 * in the test_run code to prevent this, but an earlier version of the
	 * code didn't have this, so we keep the test behaviour to make sure the
+	 * bug doesn't resurface.
+	 */
+	SYS("sysctl -qw net.ipv6.conf.all.forwarding=1");
+
+	ifindex_src = if_nametoindex("veth_src");
+	ifindex_dst = if_nametoindex("veth_dst");
+	if (!ASSERT_NEQ(ifindex_src, 0, "ifindex_src") ||
+	    !ASSERT_NEQ(ifindex_dst, 0, "ifindex_dst"))
+		goto out;
+
+	memcpy(skel->rodata->expect_dst, &pkt_udp.eth.h_dest, ETH_ALEN);
+	skel->rodata->ifindex_out = ifindex_src; /* redirect back to the same iface */
+	skel->rodata->ifindex_in = ifindex_src;
+	ctx_in.ingress_ifindex = ifindex_src;
+	tc_hook.ifindex = ifindex_src;
+
+	if (!ASSERT_OK(test_xdp_do_redirect__load(skel), "load"))
+		goto out;
+
+	link = bpf_program__attach_xdp(skel->progs.xdp_count_pkts, ifindex_dst);
+	if (!ASSERT_OK_PTR(link, "prog_attach"))
+		goto out;
+	skel->links.xdp_count_pkts = link;
+
+	tc_prog_fd = bpf_program__fd(skel->progs.tc_count_pkts);
+	if (attach_tc_prog(&tc_hook, tc_prog_fd))
+		goto out;
+
+	xdp_prog_fd = bpf_program__fd(skel->progs.xdp_redirect);
+	err = bpf_prog_test_run_opts(xdp_prog_fd, &opts);
+	if (!ASSERT_OK(err, "prog_run"))
+		goto out_tc;
+
+	/* wait for the packets to be flushed */
+	kern_sync_rcu();
+
+	/* There will be one packet sent through XDP_REDIRECT and one through
+	 * XDP_TX; these will show up on the XDP counting program, while the
+	 * rest will be counted at the TC ingress hook (and the counting program
+	 * resets the packet payload so they don't get counted twice even though
+	 * they are re-xmited out the veth device
+	 */
+	ASSERT_EQ(skel->bss->pkts_seen_xdp, 2, "pkt_count_xdp");
+	ASSERT_EQ(skel->bss->pkts_seen_zero, 2, "pkt_count_zero");
+	ASSERT_EQ(skel->bss->pkts_seen_tc, NUM_PKTS - 2, "pkt_count_tc");
+
+	test_max_pkt_size(bpf_program__fd(skel->progs.xdp_count_pkts));
+
+out_tc:
+	bpf_tc_hook_destroy(&tc_hook);
+out:
+	if (nstoken)
+		close_netns(nstoken);
+	system("ip netns del testns");
+	test_xdp_do_redirect__destroy(skel);
+}
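
The new test relies on BPF_F_TEST_XDP_LIVE_FRAMES, which changes the semantics of BPF_PROG_RUN: instead of returning the (possibly modified) frame and verdict to userspace, the kernel acts on the program's return code, so XDP_TX/XDP_REDIRECT frames really enter the data path; hence the counting programs at the XDP and TC hooks rather than any inspection of data_out. A minimal sketch of a live-mode run, with prog_fd and pkt assumed (data_out and retval are not reported back in this mode):

LIBBPF_OPTS(bpf_test_run_opts, opts,
	.data_in = &pkt,
	.data_size_in = sizeof(pkt),
	.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
	.repeat = 10000,   /* inject the frame this many times */
	.batch_size = 64,  /* frames per processing batch; 0 = default */
);
int err = bpf_prog_test_run_opts(prog_fd, &opts);
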
"get_xdp_none", "errno=%d\n", errno)) return; if (CHECK(prog_id, "prog_id_none", "unexpected prog_id=%u\n", prog_id)) return; - err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_SKB_MODE); + err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_SKB_MODE, &prog_id); if (CHECK(err, "get_xdp_none_skb", "errno=%d\n", errno)) return; if (CHECK(prog_id, "prog_id_none_skb", "unexpected prog_id=%u\n", @@ -37,32 +37,32 @@ void serial_test_xdp_info(void) if (CHECK(err, "get_prog_info", "errno=%d\n", errno)) goto out_close; - err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE); + err = bpf_xdp_attach(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE, NULL); if (CHECK(err, "set_xdp_skb", "errno=%d\n", errno)) goto out_close; /* Get prog_id for single prog mode */ - err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, 0); + err = bpf_xdp_query_id(IFINDEX_LO, 0, &prog_id); if (CHECK(err, "get_xdp", "errno=%d\n", errno)) goto out; if (CHECK(prog_id != info.id, "prog_id", "prog_id not available\n")) goto out; - err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_SKB_MODE); + err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_SKB_MODE, &prog_id); if (CHECK(err, "get_xdp_skb", "errno=%d\n", errno)) goto out; if (CHECK(prog_id != info.id, "prog_id_skb", "prog_id not available\n")) goto out; - err = bpf_get_link_xdp_id(IFINDEX_LO, &prog_id, XDP_FLAGS_DRV_MODE); + err = bpf_xdp_query_id(IFINDEX_LO, XDP_FLAGS_DRV_MODE, &prog_id); if (CHECK(err, "get_xdp_drv", "errno=%d\n", errno)) goto out; if (CHECK(prog_id, "prog_id_drv", "unexpected prog_id=%u\n", prog_id)) goto out; out: - bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0); + bpf_xdp_detach(IFINDEX_LO, 0, NULL); out_close: bpf_object__close(obj); } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_link.c b/tools/testing/selftests/bpf/prog_tests/xdp_link.c index b2b357f8c74c..3e9d5c5521f0 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_link.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_link.c @@ -8,9 +8,9 @@ void serial_test_xdp_link(void) { - DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts, .old_fd = -1); struct test_xdp_link *skel1 = NULL, *skel2 = NULL; __u32 id1, id2, id0 = 0, prog_fd1, prog_fd2; + LIBBPF_OPTS(bpf_xdp_attach_opts, opts); struct bpf_link_info link_info; struct bpf_prog_info prog_info; struct bpf_link *link; @@ -41,12 +41,12 @@ void serial_test_xdp_link(void) id2 = prog_info.id; /* set initial prog attachment */ - err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, prog_fd1, XDP_FLAGS_REPLACE, &opts); + err = bpf_xdp_attach(IFINDEX_LO, prog_fd1, XDP_FLAGS_REPLACE, &opts); if (!ASSERT_OK(err, "fd_attach")) goto cleanup; /* validate prog ID */ - err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0); + err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0); if (!ASSERT_OK(err, "id1_check_err") || !ASSERT_EQ(id0, id1, "id1_check_val")) goto cleanup; @@ -55,14 +55,14 @@ void serial_test_xdp_link(void) if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { bpf_link__destroy(link); /* best-effort detach prog */ - opts.old_fd = prog_fd1; - bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, XDP_FLAGS_REPLACE, &opts); + opts.old_prog_fd = prog_fd1; + bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_REPLACE, &opts); goto cleanup; } /* detach BPF program */ - opts.old_fd = prog_fd1; - err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, XDP_FLAGS_REPLACE, &opts); + opts.old_prog_fd = prog_fd1; + err = bpf_xdp_detach(IFINDEX_LO, XDP_FLAGS_REPLACE, &opts); if (!ASSERT_OK(err, "prog_detach")) goto cleanup; @@ -73,23 +73,23 @@ void serial_test_xdp_link(void) skel1->links.xdp_handler = link; /* validate 
 	/* validate prog ID */
-	err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
+	err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
 	if (!ASSERT_OK(err, "id1_check_err") || !ASSERT_EQ(id0, id1, "id1_check_val"))
 		goto cleanup;
 
 	/* BPF prog attach is not allowed to replace BPF link */
-	opts.old_fd = prog_fd1;
-	err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, prog_fd2, XDP_FLAGS_REPLACE, &opts);
+	opts.old_prog_fd = prog_fd1;
+	err = bpf_xdp_attach(IFINDEX_LO, prog_fd2, XDP_FLAGS_REPLACE, &opts);
 	if (!ASSERT_ERR(err, "prog_attach_fail"))
 		goto cleanup;
 
 	/* Can't force-update when BPF link is active */
-	err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd2, 0);
+	err = bpf_xdp_attach(IFINDEX_LO, prog_fd2, 0, NULL);
 	if (!ASSERT_ERR(err, "prog_update_fail"))
 		goto cleanup;
 
 	/* Can't force-detach when BPF link is active */
-	err = bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0);
+	err = bpf_xdp_detach(IFINDEX_LO, 0, NULL);
 	if (!ASSERT_ERR(err, "prog_detach_fail"))
 		goto cleanup;
 
@@ -109,7 +109,7 @@ void serial_test_xdp_link(void)
 		goto cleanup;
 	skel2->links.xdp_handler = link;
 
-	err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
+	err = bpf_xdp_query_id(IFINDEX_LO, 0, &id0);
 	if (!ASSERT_OK(err, "id2_check_err") || !ASSERT_EQ(id0, id2, "id2_check_val"))
 		goto cleanup;
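
The opts rename preserves the old_fd semantics: when opts.old_prog_fd is set, libbpf requests an atomic replace, and (assuming the kernel's XDP_FLAGS_REPLACE behaviour) the operation fails with -EEXIST if the program currently attached is not the one named, which is why the forced update and forced detach in this test must fail while a BPF link holds the attachment. A sketch of a guarded detach under those assumptions:

LIBBPF_OPTS(bpf_xdp_attach_opts, opts,
	.old_prog_fd = prog_fd, /* the program we believe is attached */
);
int err;

/* succeeds only if prog_fd is still the attached program */
err = bpf_xdp_detach(ifindex, XDP_FLAGS_REPLACE, &opts);
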
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
index 0281095de266..92ef0aa50866 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
@@ -25,43 +25,49 @@ void test_xdp_noinline(void)
 		__u8 flags;
 	} real_def = {.dst = MAGIC_VAL};
 	__u32 ch_key = 11, real_num = 3;
-	__u32 duration = 0, retval, size;
 	int err, i;
 	__u64 bytes = 0, pkts = 0;
 	char buf[128];
 	u32 *magic = (u32 *)buf;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		    .data_in = &pkt_v4,
+		    .data_size_in = sizeof(pkt_v4),
+		    .data_out = buf,
+		    .data_size_out = sizeof(buf),
+		    .repeat = NUM_ITER,
+	);
 
 	skel = test_xdp_noinline__open_and_load();
-	if (CHECK(!skel, "skel_open_and_load", "failed\n"))
+	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
 		return;
 
 	bpf_map_update_elem(bpf_map__fd(skel->maps.vip_map), &key, &value, 0);
 	bpf_map_update_elem(bpf_map__fd(skel->maps.ch_rings), &ch_key, &real_num, 0);
 	bpf_map_update_elem(bpf_map__fd(skel->maps.reals), &real_num, &real_def, 0);
 
-	err = bpf_prog_test_run(bpf_program__fd(skel->progs.balancer_ingress_v4),
-				NUM_ITER, &pkt_v4, sizeof(pkt_v4),
-				buf, &size, &retval, &duration);
-	CHECK(err || retval != 1 || size != 54 ||
-	      *magic != MAGIC_VAL, "ipv4",
-	      "err %d errno %d retval %d size %d magic %x\n",
-	      err, errno, retval, size, *magic);
+	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.balancer_ingress_v4), &topts);
+	ASSERT_OK(err, "ipv4 test_run");
+	ASSERT_EQ(topts.retval, 1, "ipv4 test_run retval");
+	ASSERT_EQ(topts.data_size_out, 54, "ipv4 test_run data_size_out");
+	ASSERT_EQ(*magic, MAGIC_VAL, "ipv4 test_run magic");
 
-	err = bpf_prog_test_run(bpf_program__fd(skel->progs.balancer_ingress_v6),
-				NUM_ITER, &pkt_v6, sizeof(pkt_v6),
-				buf, &size, &retval, &duration);
-	CHECK(err || retval != 1 || size != 74 ||
-	      *magic != MAGIC_VAL, "ipv6",
-	      "err %d errno %d retval %d size %d magic %x\n",
-	      err, errno, retval, size, *magic);
+	topts.data_in = &pkt_v6;
+	topts.data_size_in = sizeof(pkt_v6);
+	topts.data_out = buf;
+	topts.data_size_out = sizeof(buf);
+
+	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.balancer_ingress_v6), &topts);
+	ASSERT_OK(err, "ipv6 test_run");
+	ASSERT_EQ(topts.retval, 1, "ipv6 test_run retval");
+	ASSERT_EQ(topts.data_size_out, 74, "ipv6 test_run data_size_out");
+	ASSERT_EQ(*magic, MAGIC_VAL, "ipv6 test_run magic");
 
 	bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats), &stats_key, stats);
 	for (i = 0; i < nr_cpus; i++) {
 		bytes += stats[i].bytes;
 		pkts += stats[i].pkts;
 	}
-	CHECK(bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2,
-	      "stats", "bytes %lld pkts %lld\n",
-	      (unsigned long long)bytes, (unsigned long long)pkts);
+	ASSERT_EQ(bytes, MAGIC_BYTES * NUM_ITER * 2, "stats bytes");
+	ASSERT_EQ(pkts, NUM_ITER * 2, "stats pkts");
 	test_xdp_noinline__destroy(skel);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c
index 15a3900e4370..f543d1bd21b8 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_perf.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_perf.c
@@ -4,22 +4,25 @@
 void test_xdp_perf(void)
 {
 	const char *file = "./xdp_dummy.o";
-	__u32 duration, retval, size;
 	struct bpf_object *obj;
 	char in[128], out[128];
 	int err, prog_fd;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		    .data_in = in,
+		    .data_size_in = sizeof(in),
+		    .data_out = out,
+		    .data_size_out = sizeof(out),
+		    .repeat = 1000000,
+	);
 
 	err = bpf_prog_test_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
 	if (CHECK_FAIL(err))
 		return;
 
-	err = bpf_prog_test_run(prog_fd, 1000000, &in[0], 128,
-				out, &size, &retval, &duration);
-
-	CHECK(err || retval != XDP_PASS || size != 128,
-	      "xdp-perf",
-	      "err %d errno %d retval %d size %d\n",
-	      err, errno, retval, size);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(topts.retval, XDP_PASS, "test_run retval");
+	ASSERT_EQ(topts.data_size_out, 128, "test_run data_size_out");
 
 	bpf_object__close(obj);
 }
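
xdp_perf keeps its million-run repeat count through the conversion: with .repeat > 1 the kernel loops the program inside a single syscall and reports the mean per-run time, so the same opts struct doubles as a micro-benchmark harness. A minimal sketch of reading that measurement, with prog_fd assumed (duration is in nanoseconds, averaged over the repetitions):

char in[128] = {}, out[128];
LIBBPF_OPTS(bpf_test_run_opts, topts,
	.data_in = in,
	.data_size_in = sizeof(in),
	.data_out = out,
	.data_size_out = sizeof(out),
	.repeat = 1000000,
);
int err = bpf_prog_test_run_opts(prog_fd, &topts);

if (!err)
	printf("avg runtime: %u ns\n", topts.duration);
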
