From: Mykyta Yatsenko

Add a consistency subtest to htab_reuse that detects torn writes caused
by the BPF_F_LOCK lockless update racing with element reallocation in
alloc_htab_elem().

The test uses three thread roles started simultaneously via a pipe:
 - locked updaters: BPF_F_LOCK|BPF_EXIST in-place updates
 - delete+update workers: delete then BPF_ANY|BPF_F_LOCK insert
 - locked readers: BPF_F_LOCK lookup checking value consistency

Signed-off-by: Mykyta Yatsenko
---
 .../testing/selftests/bpf/prog_tests/htab_reuse.c  | 172 ++++++++++++++++++++-
 tools/testing/selftests/bpf/progs/htab_reuse.c     |  16 ++
 2 files changed, 187 insertions(+), 1 deletion(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/htab_reuse.c b/tools/testing/selftests/bpf/prog_tests/htab_reuse.c
index a742dd994d60..d7c3df165adc 100644
--- a/tools/testing/selftests/bpf/prog_tests/htab_reuse.c
+++ b/tools/testing/selftests/bpf/prog_tests/htab_reuse.c
@@ -59,7 +59,7 @@ static void *htab_update_fn(void *arg)
 	return NULL;
 }
 
-void test_htab_reuse(void)
+static void test_htab_reuse_basic(void)
 {
 	unsigned int i, wr_nr = 1, rd_nr = 4;
 	pthread_t tids[wr_nr + rd_nr];
@@ -99,3 +99,173 @@ void test_htab_reuse(void)
 	}
 	htab_reuse__destroy(skel);
 }
+
+/*
+ * Writes consistency test for BPF_F_LOCK update
+ *
+ * The race:
+ * 1. Thread A: BPF_F_LOCK|BPF_EXIST update
+ * 2. Thread B: delete element then update it with BPF_ANY
+ */
+
+struct htab_val_large {
+	struct bpf_spin_lock lock;
+	__u32 seq;
+	__u64 data[256];
+};
+
+struct consistency_ctx {
+	int fd;
+	int start_fd;
+	int loop;
+	volatile bool torn_write;
+};
+
+/* Block until the pipe write end is closed (EOF acts as the start gun). */
+static void wait_for_start(int fd)
+{
+	char buf;
+
+	read(fd, &buf, 1);
+}
+
+/* Locked in-place updater: BPF_F_LOCK|BPF_EXIST on an existing element. */
+static void *locked_update_fn(void *arg)
+{
+	struct consistency_ctx *ctx = arg;
+	struct htab_val_large value;
+	unsigned int key = 1;
+	int i;
+
+	memset(&value, 0xAA, sizeof(value));
+	wait_for_start(ctx->start_fd);
+
+	for (i = 0; i < ctx->loop; i++) {
+		value.seq = i;
+		bpf_map_update_elem(ctx->fd, &key, &value,
+				    BPF_F_LOCK | BPF_EXIST);
+	}
+
+	return NULL;
+}
+
+/* Delete + update: removes the element then re-creates it with BPF_ANY. */
+static void *delete_update_fn(void *arg)
+{
+	struct consistency_ctx *ctx = arg;
+	struct htab_val_large value;
+	unsigned int key = 1;
+	int i;
+
+	memset(&value, 0xBB, sizeof(value));
+
+	wait_for_start(ctx->start_fd);
+
+	for (i = 0; i < ctx->loop; i++) {
+		value.seq = i;
+		bpf_map_delete_elem(ctx->fd, &key);
+		bpf_map_update_elem(ctx->fd, &key, &value, BPF_ANY | BPF_F_LOCK);
+	}
+
+	return NULL;
+}
+
+/*
+ * Locked reader: every byte of data[] was written from a single memset
+ * pattern (0xAA or 0xBB), so any element mixing the two patterns proves
+ * a torn write slipped past the BPF_F_LOCK protection.
+ */
+static void *locked_lookup_fn(void *arg)
+{
+	struct consistency_ctx *ctx = arg;
+	struct htab_val_large value;
+	unsigned int key = 1;
+	int i, j;
+
+	wait_for_start(ctx->start_fd);
+
+	for (i = 0; i < ctx->loop && !ctx->torn_write; i++) {
+		if (bpf_map_lookup_elem_flags(ctx->fd, &key, &value, BPF_F_LOCK))
+			continue;
+
+		for (j = 0; j < 256; j++) {
+			if (value.data[j] != value.data[0]) {
+				ctx->torn_write = true;
+				return NULL;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+static void test_htab_reuse_consistency(void)
+{
+	int threads_total = 6, threads = 2;
+	pthread_t tids[threads_total];
+	struct consistency_ctx ctx;
+	struct htab_val_large seed;
+	struct htab_reuse *skel;
+	unsigned int key = 1, i;
+	int pipefd[2];
+	int err;
+
+	skel = htab_reuse__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "htab_reuse__open_and_load"))
+		return;
+
+	if (!ASSERT_OK(pipe(pipefd), "pipe"))
+		goto out;
+
+	ctx.fd = bpf_map__fd(skel->maps.htab_lock_consistency);
+	ctx.start_fd = pipefd[0];
+	ctx.loop = 100000;
+	ctx.torn_write = false;
+
+	/* Seed the element so locked updaters have something to find */
+	memset(&seed, 0xBB, sizeof(seed));
+	err = bpf_map_update_elem(ctx.fd, &key, &seed, BPF_ANY);
+	if (!ASSERT_OK(err, "seed_element"))
+		goto close_pipe;
+
+	memset(tids, 0, sizeof(tids));
+	for (i = 0; i < threads; i++) {
+		err = pthread_create(&tids[i], NULL, locked_update_fn, &ctx);
+		if (!ASSERT_OK(err, "pthread_create"))
+			goto stop;
+	}
+	for (i = 0; i < threads; i++) {
+		err = pthread_create(&tids[threads + i], NULL, delete_update_fn, &ctx);
+		if (!ASSERT_OK(err, "pthread_create"))
+			goto stop;
+	}
+	for (i = 0; i < threads; i++) {
+		err = pthread_create(&tids[threads * 2 + i], NULL, locked_lookup_fn, &ctx);
+		if (!ASSERT_OK(err, "pthread_create"))
+			goto stop;
+	}
+
+	/* Release all threads simultaneously. Reached on the error path as
+	 * well: the write end must be closed before joining, otherwise
+	 * already-created threads block forever in wait_for_start().
+	 */
+stop:
+	if (pipefd[1] >= 0) {
+		close(pipefd[1]);
+		pipefd[1] = -1;
+	}
+	for (i = 0; i < threads_total; i++) {
+		if (!tids[i])
+			continue;
+		pthread_join(tids[i], NULL);
+	}
+
+	ASSERT_FALSE(ctx.torn_write, "no torn writes detected");
+
+close_pipe:
+	if (pipefd[1] >= 0)
+		close(pipefd[1]);
+	close(pipefd[0]);
+out:
+	htab_reuse__destroy(skel);
+}
+
+void test_htab_reuse(void)
+{
+	if (test__start_subtest("basic"))
+		test_htab_reuse_basic();
+	if (test__start_subtest("consistency"))
+		test_htab_reuse_consistency();
+}
diff --git a/tools/testing/selftests/bpf/progs/htab_reuse.c b/tools/testing/selftests/bpf/progs/htab_reuse.c
index 7f7368cb3095..1c7fa7ee45ee 100644
--- a/tools/testing/selftests/bpf/progs/htab_reuse.c
+++ b/tools/testing/selftests/bpf/progs/htab_reuse.c
@@ -17,3 +17,19 @@ struct {
 	__type(value, struct htab_val);
 	__uint(map_flags, BPF_F_NO_PREALLOC);
 } htab SEC(".maps");
+
+#define HTAB_NDATA 256
+
+struct htab_val_large {
+	struct bpf_spin_lock lock;
+	__u32 seq;
+	__u64 data[HTAB_NDATA];
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(max_entries, 8);
+	__type(key, unsigned int);
+	__type(value, struct htab_val_large);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+} htab_lock_consistency SEC(".maps");
-- 
2.52.0