From: Mykyta Yatsenko

Add tests validating that the resizable hash map handles the
BPF_F_LOCK flag as expected.

Signed-off-by: Mykyta Yatsenko
---
 tools/testing/selftests/bpf/prog_tests/rhash.c | 99 ++++++++++++++++++++++++++
 tools/testing/selftests/bpf/progs/rhash.c      | 35 +++++++++
 2 files changed, 134 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/rhash.c b/tools/testing/selftests/bpf/prog_tests/rhash.c
index be6e5e01bde6b127268f313d0ef25dcb33373b97..d1557405e062851ba2ad3d7378049c93395e7943 100644
--- a/tools/testing/selftests/bpf/prog_tests/rhash.c
+++ b/tools/testing/selftests/bpf/prog_tests/rhash.c
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include
 
 static void rhash_run(const char *prog_name)
 {
@@ -37,6 +38,100 @@ static void rhash_run(const char *prog_name)
 	rhash__destroy(skel);
 }
 
+struct lock_thread_args {
+	int prog_fd;
+	int map_fd;
+};
+
+struct lock_elem {
+	struct bpf_spin_lock lock;
+	int var[16];
+};
+
+static void *spin_lock_thread(void *arg)
+{
+	struct lock_thread_args *args = arg;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 10000,
+	);
+	int err;
+
+	err = bpf_prog_test_run_opts(args->prog_fd, &topts);
+	if (err || topts.retval)
+		return (void *)1;
+
+	return (void *)0;
+}
+
+static void *parallel_map_access(void *arg)
+{
+	struct lock_thread_args *args = arg;
+	int i, j, key = 0;
+	int err;
+	struct lock_elem val;
+
+	for (i = 0; i < 10000; i++) {
+		err = bpf_map_lookup_elem_flags(args->map_fd, &key, &val, BPF_F_LOCK);
+		if (err)
+			return (void *)1;
+		if (val.lock.val)
+			return (void *)1;
+		for (j = 1; j < 16; j++) {
+			if (val.var[j] != val.var[0])
+				return (void *)1;
+		}
+	}
+
+	return (void *)0;
+}
+
+static void rhash_spin_lock_test(void)
+{
+	struct lock_thread_args args;
+	struct rhash *skel;
+	struct lock_elem val = {};
+	pthread_t thread_id[4];
+	int err, key = 0, i;
+	void *ret;
+
+	skel = rhash__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "rhash__open_and_load"))
+		return;
+
+	args.prog_fd = bpf_program__fd(skel->progs.test_rhash_spin_lock);
+	args.map_fd = bpf_map__fd(skel->maps.rhmap_lock);
+
+	/* Insert initial element with BPF_F_LOCK */
+	err = bpf_map_update_elem(args.map_fd, &key, &val, BPF_F_LOCK);
+	if (!ASSERT_OK(err, "initial update"))
+		goto cleanup;
+
+	/* Spawn 2 threads running BPF program (uses bpf_spin_lock) */
+	for (i = 0; i < 2; i++)
+		if (!ASSERT_OK(pthread_create(&thread_id[i], NULL,
+					      &spin_lock_thread, &args),
+			       "pthread_create spin_lock"))
+			goto cleanup;
+
+	/* Spawn 2 threads doing parallel map access with BPF_F_LOCK */
+	for (i = 2; i < 4; i++)
+		if (!ASSERT_OK(pthread_create(&thread_id[i], NULL,
+					      &parallel_map_access, &args),
+			       "pthread_create parallel_map_access"))
+			goto cleanup;
+
+	/* Wait for all threads */
+	for (i = 0; i < 4; i++)
+		if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join") ||
+		    !ASSERT_OK((long)ret, "thread ret"))
+			goto cleanup;
+
+cleanup:
+	rhash__destroy(skel);
+}
+
 void test_rhash(void)
 {
 	if (test__start_subtest("test_rhash_lookup_update"))
@@ -59,4 +154,8 @@ void test_rhash(void)
 
 	if (test__start_subtest("test_rhash_delete_nonexistent"))
 		rhash_run("test_rhash_delete_nonexistent");
+
+	if (test__start_subtest("test_rhash_spin_lock"))
+		rhash_spin_lock_test();
 }
+
diff --git a/tools/testing/selftests/bpf/progs/rhash.c b/tools/testing/selftests/bpf/progs/rhash.c
index 246cc4538c493902c720498bf7ab9bc5587d4643..fba55b5d9ed2575a1a31c4d2247c1d76f10d6bb1 100644
--- a/tools/testing/selftests/bpf/progs/rhash.c
+++ b/tools/testing/selftests/bpf/progs/rhash.c
@@ -247,3 +247,38 @@ int test_rhash_delete_nonexistent(void *ctx)
 	return 0;
 }
 
+#define VAR_NUM 16
+
+struct lock_elem {
+	struct bpf_spin_lock lock;
+	int var[VAR_NUM];
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_RHASH);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, struct lock_elem);
+} rhmap_lock SEC(".maps");
+
+SEC("cgroup/skb")
+int test_rhash_spin_lock(struct __sk_buff *skb)
+{
+	struct lock_elem *val;
+	int rnd = bpf_get_prandom_u32();
+	int key = 0, i;
+
+	val = bpf_map_lookup_elem(&rhmap_lock, &key);
+	if (!val)
+		return 1;
+
+	/* spin_lock in resizable hash map */
+	bpf_spin_lock(&val->lock);
+	for (i = 0; i < VAR_NUM; i++)
+		val->var[i] = rnd;
+	bpf_spin_unlock(&val->lock);
+
+	return 0;
+}
+
-- 
2.53.0
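
For context on the locking contract the new subtest relies on: with
BPF_F_LOCK, the map update and lookup syscalls copy the value in and out
while the kernel holds the bpf_spin_lock embedded in that element, so a
reader can never observe a half-written var[] array as long as writers
also take the lock. The following is a minimal standalone userspace
sketch of that pattern, not part of the patch: the pin path
/sys/fs/bpf/rhmap_lock is illustrative only (the selftest uses the map
fd from the rhash skeleton instead), and it assumes the map was created
with BTF describing the spin lock at the start of the value.

/* Standalone sketch of the BPF_F_LOCK usage exercised by the selftest.
 * Assumes a map whose value begins with struct bpf_spin_lock has been
 * pinned at the illustrative path below; build with -lbpf.
 */
#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

struct lock_elem {
	struct bpf_spin_lock lock;
	int var[16];
};

int main(void)
{
	struct lock_elem val = {};
	int key = 0, fd, i;

	fd = bpf_obj_get("/sys/fs/bpf/rhmap_lock");	/* illustrative pin path */
	if (fd < 0)
		return 1;

	for (i = 0; i < 16; i++)
		val.var[i] = 42;

	/* Value is copied into the element while the kernel holds its lock. */
	if (bpf_map_update_elem(fd, &key, &val, BPF_F_LOCK))
		return 1;

	/* Value is copied back out under the same lock, so var[] is never torn. */
	if (bpf_map_lookup_elem_flags(fd, &key, &val, BPF_F_LOCK))
		return 1;

	printf("var[0]=%d var[15]=%d\n", val.var[0], val.var[15]);
	return 0;
}

The selftest drives the same two libbpf calls through the skeleton
(initial update in rhash_spin_lock_test(), repeated locked lookups in
parallel_map_access()) while test_rhash_spin_lock() rewrites all VAR_NUM
slots under bpf_spin_lock(), so the var[j] != var[0] check can only trip
if locking is broken on the resizable map.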