The skb metadata tests for BPF programs which don't yet have metadata
access have no observable side effects. Hence, we can't detect
breakage.

Count each successful BPF program pass, when taking the expected path,
as a side effect to test for.

Signed-off-by: Jakub Sitnicki
---
 .../bpf/prog_tests/xdp_context_test_run.c          | 19 ++++++++++++++++-
 tools/testing/selftests/bpf/progs/test_xdp_meta.c  | 24 ++++++++++++++--------
 2 files changed, 34 insertions(+), 9 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
index 4cf8e009a054..b9c9f874f1b4 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c
@@ -294,13 +294,15 @@ void test_xdp_context_veth(void)
 static void test_tuntap(struct bpf_program *xdp_prog,
 			struct bpf_program *tc_prio_1_prog,
 			struct bpf_program *tc_prio_2_prog,
-			struct bpf_program *nf_prog,
+			struct bpf_program *nf_prog, __u32 *prog_run_cnt,
 			struct bpf_map *result_map)
 {
 	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
 	struct bpf_link *nf_link = NULL;
 	struct netns_obj *ns = NULL;
 	__u8 packet[PACKET_LEN];
+	__u32 want_run_cnt = 0;
+	__u32 have_run_cnt;
 	int tap_fd = -1;
 	int tap_ifindex;
 	int ret;
@@ -336,6 +338,7 @@ static void test_tuntap(struct bpf_program *xdp_prog,
 		ret = bpf_tc_attach(&tc_hook, &tc_opts);
 		if (!ASSERT_OK(ret, "bpf_tc_attach"))
 			goto close;
+		want_run_cnt++;
 	}
 
 	if (tc_prio_2_prog) {
@@ -345,6 +348,7 @@
 		ret = bpf_tc_attach(&tc_hook, &tc_opts);
 		if (!ASSERT_OK(ret, "bpf_tc_attach"))
 			goto close;
+		want_run_cnt++;
 	}
 
 	if (nf_prog) {
@@ -354,18 +358,25 @@
 		nf_link = bpf_program__attach_netfilter(nf_prog, &nf_opts);
 		if (!ASSERT_OK_PTR(nf_link, "attach_netfilter"))
 			goto close;
+		want_run_cnt++;
 	}
 
 	ret = bpf_xdp_attach(tap_ifindex, bpf_program__fd(xdp_prog), 0, NULL);
 	if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
 		goto close;
+	want_run_cnt++;
 
 	init_test_packet(packet);
 	ret = write(tap_fd, packet, sizeof(packet));
 	if (!ASSERT_EQ(ret, sizeof(packet), "write packet"))
 		goto close;
 
+	have_run_cnt = __atomic_exchange_n(prog_run_cnt, 0, __ATOMIC_SEQ_CST);
+	if (!ASSERT_EQ(have_run_cnt, want_run_cnt,
+		       "unexpected bpf prog runs"))
+		goto close;
+
 	if (result_map)
 		assert_test_result(result_map);
@@ -390,36 +401,42 @@ void test_xdp_context_tuntap(void)
 			    skel->progs.ing_cls,
 			    NULL, /* tc prio 2 */
 			    NULL, /* netfilter */
+			    &skel->bss->prog_run_cnt,
 			    skel->maps.test_result);
 	if (test__start_subtest("dynptr_read"))
 		test_tuntap(skel->progs.ing_xdp,
 			    skel->progs.ing_cls_dynptr_read,
 			    NULL, /* tc prio 2 */
 			    NULL, /* netfilter */
+			    &skel->bss->prog_run_cnt,
 			    skel->maps.test_result);
 	if (test__start_subtest("dynptr_slice"))
 		test_tuntap(skel->progs.ing_xdp,
 			    skel->progs.ing_cls_dynptr_slice,
 			    NULL, /* tc prio 2 */
 			    NULL, /* netfilter */
+			    &skel->bss->prog_run_cnt,
 			    skel->maps.test_result);
 	if (test__start_subtest("dynptr_write"))
 		test_tuntap(skel->progs.ing_xdp_zalloc_meta,
 			    skel->progs.ing_cls_dynptr_write,
 			    skel->progs.ing_cls_dynptr_read,
 			    NULL, /* netfilter */
+			    &skel->bss->prog_run_cnt,
 			    skel->maps.test_result);
 	if (test__start_subtest("dynptr_slice_rdwr"))
 		test_tuntap(skel->progs.ing_xdp_zalloc_meta,
 			    skel->progs.ing_cls_dynptr_slice_rdwr,
 			    skel->progs.ing_cls_dynptr_slice,
 			    NULL, /* netfilter */
+			    &skel->bss->prog_run_cnt,
 			    skel->maps.test_result);
 	if (test__start_subtest("dynptr_nf_hook"))
 		test_tuntap(skel->progs.ing_xdp,
 			    NULL, /* tc prio 1 */
 			    NULL, /* tc prio 2 */
 			    skel->progs.ing_nf,
+			    &skel->bss->prog_run_cnt,
 			    NULL /* ignore result for now */);
 
 	test_xdp_meta__destroy(skel);

diff --git a/tools/testing/selftests/bpf/progs/test_xdp_meta.c b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
index 41411d164190..ae149e45cf0c 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_meta.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_meta.c
@@ -35,6 +35,14 @@ struct {
 	__uint(value_size, META_SIZE);
 } test_result SEC(".maps");
 
+__u32 prog_run_cnt = 0;
+
+static __always_inline int run_ok(int retval)
+{
+	__sync_fetch_and_add(&prog_run_cnt, 1);
+	return retval;
+}
+
 SEC("tc")
 int ing_cls(struct __sk_buff *ctx)
 {
@@ -49,7 +57,7 @@ int ing_cls(struct __sk_buff *ctx)
 
 	bpf_map_update_elem(&test_result, &key, data_meta, BPF_ANY);
 
-	return TC_ACT_SHOT;
+	return run_ok(TC_ACT_SHOT);
 }
 
 /* Read from metadata using bpf_dynptr_read helper */
@@ -67,7 +75,7 @@ int ing_cls_dynptr_read(struct __sk_buff *ctx)
 	bpf_dynptr_from_skb(ctx, BPF_DYNPTR_F_SKB_METADATA, &meta);
 	bpf_dynptr_read(dst, META_SIZE, &meta, 0, 0);
 
-	return TC_ACT_SHOT;
+	return run_ok(TC_ACT_SHOT);
 }
 
 /* Check that we can't get a dynptr slice to skb metadata yet */
@@ -81,7 +89,7 @@ int ing_nf(struct bpf_nf_ctx *ctx)
 	if (bpf_dynptr_size(&meta) != 0)
 		return NF_DROP;
 
-	return NF_ACCEPT;
+	return run_ok(NF_ACCEPT);
 }
 
 /* Write to metadata using bpf_dynptr_write helper */
@@ -99,7 +107,7 @@ int ing_cls_dynptr_write(struct __sk_buff *ctx)
 	bpf_dynptr_from_skb(ctx, BPF_DYNPTR_F_SKB_METADATA, &meta);
 	bpf_dynptr_write(&meta, 0, src, META_SIZE, 0);
 
-	return TC_ACT_UNSPEC; /* pass */
+	return run_ok(TC_ACT_UNSPEC); /* pass */
 }
 
 /* Read from metadata using read-only dynptr slice */
@@ -121,7 +129,7 @@ int ing_cls_dynptr_slice(struct __sk_buff *ctx)
 
 	__builtin_memcpy(dst, src, META_SIZE);
 
-	return TC_ACT_SHOT;
+	return run_ok(TC_ACT_SHOT);
 }
 
 /* Write to metadata using writeable dynptr slice */
@@ -143,7 +151,7 @@ int ing_cls_dynptr_slice_rdwr(struct __sk_buff *ctx)
 
 	__builtin_memcpy(dst, src, META_SIZE);
 
-	return TC_ACT_UNSPEC; /* pass */
+	return run_ok(TC_ACT_UNSPEC); /* pass */
 }
 
 /* Reserve and clear space for metadata but don't populate it */
@@ -174,7 +182,7 @@ int ing_xdp_zalloc_meta(struct xdp_md *ctx)
 
 	__builtin_memset(meta, 0, META_SIZE);
 
-	return XDP_PASS;
+	return run_ok(XDP_PASS);
 }
 
 SEC("xdp")
@@ -205,7 +213,7 @@ int ing_xdp(struct xdp_md *ctx)
 		return XDP_DROP;
 
 	__builtin_memcpy(data_meta, payload, META_SIZE);
-	return XDP_PASS;
+	return run_ok(XDP_PASS);
 }
 
 char _license[] SEC("license") = "GPL";
-- 
2.43.0
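
A minimal user-space sketch of the same count-and-reset idiom, for
readers who want to see it in isolation. It assumes nothing beyond
the GCC/Clang atomic builtins already used in the patch; the
prog_run_cnt global here is only a stand-in for the BPF .bss global
that the skeleton exposes as skel->bss->prog_run_cnt:

#include <assert.h>
#include <stdio.h>

static unsigned int prog_run_cnt;	/* stand-in for the .bss global */

/* Producer side, mirroring run_ok() in test_xdp_meta.c: bump the
 * counter only when a program takes its expected (successful) path. */
static int run_ok(int retval)
{
	__sync_fetch_and_add(&prog_run_cnt, 1);
	return retval;
}

int main(void)
{
	unsigned int want_run_cnt = 2, have_run_cnt;

	run_ok(0);	/* e.g. the XDP program passed as expected */
	run_ok(0);	/* e.g. the TC program passed as expected */

	/* Consumer side: read and zero the counter in one atomic step. */
	have_run_cnt = __atomic_exchange_n(&prog_run_cnt, 0,
					   __ATOMIC_SEQ_CST);
	assert(have_run_cnt == want_run_cnt);
	printf("have_run_cnt = %u\n", have_run_cnt);
	return 0;
}

The exchange, rather than a plain load, is what keeps the per-subtest
expectation exact: any leftover increments from an earlier subtest are
cleared at the same moment they are observed.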