From: Mykyta Yatsenko

Test get_next_key behavior under concurrent modification:

* Resize test: verify that all elements are visited after a resize
  has been triggered
* Stress test: run concurrent iterator and modifier threads to
  detect races

Signed-off-by: Mykyta Yatsenko
---
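The new subtests can be run in isolation with the usual test_progs
subtest filter; the invocation below is illustrative only and not part
of the patch:

  cd tools/testing/selftests/bpf
  ./test_progs -t rhash/test_rhash_get_next_key_resize
  ./test_progs -t rhash/test_rhash_get_next_key_stress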
 tools/testing/selftests/bpf/prog_tests/rhash.c | 156 +++++++++++++++++++++++++
 tools/testing/selftests/bpf/progs/rhash.c      |   8 ++
 2 files changed, 164 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/rhash.c b/tools/testing/selftests/bpf/prog_tests/rhash.c
index d1557405e062851ba2ad3d7378049c93395e7943..689ba6d73a724e94c3845acaeca3c0a7b45a8c6d 100644
--- a/tools/testing/selftests/bpf/prog_tests/rhash.c
+++ b/tools/testing/selftests/bpf/prog_tests/rhash.c
@@ -132,6 +132,156 @@ static void rhash_spin_lock_test(void)
 	rhash__destroy(skel);
 }
 
+struct iter_thread_args {
+	int map_fd;
+	int stop;
+	int error;
+};
+
+/*
+ * Repeatedly walk the whole map using the get_next_key protocol: a
+ * NULL key returns the first key, each subsequent call returns the
+ * key after the one passed in, and a non-zero return means there is
+ * no next key.
+ */
+static void *get_next_key_thread(void *arg)
+{
+	struct iter_thread_args *args = arg;
+	int key, next_key;
+	int i;
+
+	for (i = 0; i < 1000; i++) {
+		if (READ_ONCE(args->stop))
+			break;
+
+		if (bpf_map_get_next_key(args->map_fd, NULL, &next_key) != 0) {
+			WRITE_ONCE(args->error, 1);
+			continue;
+		}
+
+		key = next_key;
+		while (bpf_map_get_next_key(args->map_fd, &key, &next_key) == 0)
+			key = next_key;
+	}
+
+	return NULL;
+}
+
+/* Insert a stream of new keys to grow the table and force resizes. */
+static void *modifier_thread(void *arg)
+{
+	struct iter_thread_args *args = arg;
+	int key, value;
+	int i;
+
+	for (i = 0; i < 10000; i++) {
+		if (READ_ONCE(args->stop))
+			break;
+
+		key = i;
+		value = i;
+		if (bpf_map_update_elem(args->map_fd, &key, &value, BPF_ANY))
+			WRITE_ONCE(args->error, 1);
+	}
+
+	return NULL;
+}
+
+static void rhash_get_next_key_stress_test(void)
+{
+	struct iter_thread_args args = {};
+	struct rhash *skel;
+	pthread_t iter_threads[2];
+	pthread_t mod_threads[2];
+	int nr_iter = 0, nr_mod = 0;
+	int key, value;
+	int err, i;
+	void *ret;
+
+	skel = rhash__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "rhash__open_and_load"))
+		return;
+
+	args.map_fd = bpf_map__fd(skel->maps.rhmap_iter);
+
+	/* Pre-populate the map so iterators never see it empty */
+	for (i = 0; i < 50; i++) {
+		key = i;
+		value = i;
+		err = bpf_map_update_elem(args.map_fd, &key, &value, BPF_NOEXIST);
+		if (!ASSERT_OK(err, "initial insert"))
+			goto cleanup;
+	}
+
+	/* Iterator threads */
+	for (i = 0; i < 2; i++) {
+		err = pthread_create(&iter_threads[i], NULL,
+				     &get_next_key_thread, &args);
+		if (!ASSERT_OK(err, "pthread_create iter"))
+			goto stop_threads;
+		nr_iter++;
+	}
+
+	/* Modifier threads */
+	for (i = 0; i < 2; i++) {
+		err = pthread_create(&mod_threads[i], NULL,
+				     &modifier_thread, &args);
+		if (!ASSERT_OK(err, "pthread_create mod"))
+			goto stop_threads;
+		nr_mod++;
+	}
+
+	/* Wait for modifier threads to finish */
+	for (i = 0; i < nr_mod; i++)
+		ASSERT_OK(pthread_join(mod_threads[i], &ret),
+			  "pthread_join mod");
+	nr_mod = 0;
+
+stop_threads:
+	/*
+	 * Signal iterator threads to stop, then reap every thread that
+	 * was created, including on the error paths above, so that no
+	 * thread outlives args on this stack frame.
+	 */
+	WRITE_ONCE(args.stop, 1);
+	for (i = 0; i < nr_mod; i++)
+		pthread_join(mod_threads[i], &ret);
+	for (i = 0; i < nr_iter; i++)
+		ASSERT_OK(pthread_join(iter_threads[i], &ret),
+			  "pthread_join iter");
+
+	ASSERT_EQ(args.error, 0, "no iteration errors");
+
+cleanup:
+	rhash__destroy(skel);
+}
+
+static void rhash_get_next_key_resize_test(void)
+{
+	struct rhash *skel;
+	int key, next_key, value;
+	int visited[128] = {0};
+	int map_fd;
+	int err, i;
+
+	skel = rhash__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "rhash__open_and_load"))
+		return;
+
+	map_fd = bpf_map__fd(skel->maps.rhmap_iter);
+
+	/* Insert 100 elements, enough to trigger a resize */
+	for (i = 0; i < 100; i++) {
+		key = i;
+		value = i;
+		err = bpf_map_update_elem(map_fd, &key, &value, BPF_NOEXIST);
+		if (!ASSERT_OK(err, "insert elem"))
+			goto cleanup;
+	}
+
+	/* Iterate all elements; each key must be seen exactly once */
+	err = bpf_map_get_next_key(map_fd, NULL, &next_key);
+	if (!ASSERT_OK(err, "get first key"))
+		goto cleanup;
+
+	do {
+		key = next_key;
+		ASSERT_NEQ(visited[key], 1, "unique keys");
+		visited[key] = 1;
+	} while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0);
+
+	/* Verify all elements were visited */
+	for (i = 0; i < 100; i++)
+		ASSERT_TRUE(visited[i], "element visited");
+
+cleanup:
+	rhash__destroy(skel);
+}
+
 void test_rhash(void)
 {
 	if (test__start_subtest("test_rhash_lookup_update"))
@@ -157,5 +307,11 @@ void test_rhash(void)
 
 	if (test__start_subtest("test_rhash_spin_lock"))
 		rhash_spin_lock_test();
+
+	if (test__start_subtest("test_rhash_get_next_key_resize"))
+		rhash_get_next_key_resize_test();
+
+	if (test__start_subtest("test_rhash_get_next_key_stress"))
+		rhash_get_next_key_stress_test();
 }
 
diff --git a/tools/testing/selftests/bpf/progs/rhash.c b/tools/testing/selftests/bpf/progs/rhash.c
index fba55b5d9ed2575a1a31c4d2247c1d76f10d6bb1..61eda618cf55dfd4938c818eb4db41a842869599 100644
--- a/tools/testing/selftests/bpf/progs/rhash.c
+++ b/tools/testing/selftests/bpf/progs/rhash.c
@@ -262,6 +262,14 @@ struct {
 	__type(value, struct lock_elem);
 } rhmap_lock SEC(".maps");
 
+struct {
+	__uint(type, BPF_MAP_TYPE_RHASH);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__uint(max_entries, 65536);
+	__type(key, int);
+	__type(value, int);
+} rhmap_iter SEC(".maps");
+
 SEC("cgroup/skb")
 int test_rhash_spin_lock(struct __sk_buff *skb)
 {
-- 
2.53.0