From: Hui Zhu

Replace hardcoded enum values with bpf_core_enum_value() calls in the
cgroup_iter_memcg test to improve portability across different kernel
versions.

The change adds runtime enum value resolution for:
- node_stat_item: NR_ANON_MAPPED, NR_SHMEM, NR_FILE_PAGES, NR_FILE_MAPPED
- memcg_stat_item: MEMCG_KMEM
- vm_event_item: PGFAULT

This ensures the BPF program can adapt to enum value changes between
kernel versions, returning early if any enum value is unavailable
(bpf_core_enum_value() yields 0 in that case).

Signed-off-by: Hui Zhu
---
 .../selftests/bpf/progs/cgroup_iter_memcg.c   | 41 +++++++++++++++----
 1 file changed, 34 insertions(+), 7 deletions(-)

diff --git a/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c b/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
index 59fb70a3cc50..b020951dd7e6 100644
--- a/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
+++ b/tools/testing/selftests/bpf/progs/cgroup_iter_memcg.c
@@ -15,6 +15,8 @@ int cgroup_memcg_query(struct bpf_iter__cgroup *ctx)
 	struct cgroup *cgrp = ctx->cgroup;
 	struct cgroup_subsys_state *css;
 	struct mem_cgroup *memcg;
+	int ret = 1;
+	int idx;
 
 	if (!cgrp)
 		return 1;
@@ -26,14 +28,39 @@ int cgroup_memcg_query(struct bpf_iter__cgroup *ctx)
 
 	bpf_mem_cgroup_flush_stats(memcg);
 
-	memcg_query.nr_anon_mapped = bpf_mem_cgroup_page_state(memcg, NR_ANON_MAPPED);
-	memcg_query.nr_shmem = bpf_mem_cgroup_page_state(memcg, NR_SHMEM);
-	memcg_query.nr_file_pages = bpf_mem_cgroup_page_state(memcg, NR_FILE_PAGES);
-	memcg_query.nr_file_mapped = bpf_mem_cgroup_page_state(memcg, NR_FILE_MAPPED);
-	memcg_query.memcg_kmem = bpf_mem_cgroup_page_state(memcg, MEMCG_KMEM);
-	memcg_query.pgfault = bpf_mem_cgroup_vm_events(memcg, PGFAULT);
+	idx = bpf_core_enum_value(enum node_stat_item, NR_ANON_MAPPED);
+	if (idx == 0)
+		goto out;
+	memcg_query.nr_anon_mapped = bpf_mem_cgroup_page_state(memcg, idx);
+
+	idx = bpf_core_enum_value(enum node_stat_item, NR_SHMEM);
+	if (idx == 0)
+		goto out;
+	memcg_query.nr_shmem = bpf_mem_cgroup_page_state(memcg, idx);
+
+	idx = bpf_core_enum_value(enum node_stat_item, NR_FILE_PAGES);
+	if (idx == 0)
+		goto out;
+	memcg_query.nr_file_pages = bpf_mem_cgroup_page_state(memcg, idx);
+
+	idx = bpf_core_enum_value(enum node_stat_item, NR_FILE_MAPPED);
+	if (idx == 0)
+		goto out;
+	memcg_query.nr_file_mapped = bpf_mem_cgroup_page_state(memcg, idx);
+
+	idx = bpf_core_enum_value(enum memcg_stat_item, MEMCG_KMEM);
+	if (idx == 0)
+		goto out;
+	memcg_query.memcg_kmem = bpf_mem_cgroup_page_state(memcg, idx);
+
+	idx = bpf_core_enum_value(enum vm_event_item, PGFAULT);
+	if (idx == 0)
+		goto out;
+	memcg_query.pgfault = bpf_mem_cgroup_vm_events(memcg, idx);
 
+	ret = 0;
+out:
 	bpf_put_mem_cgroup(memcg);
 
-	return 0;
+	return ret;
 }
-- 
2.43.0
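For context, a minimal sketch of the CO-RE pattern the patch relies on
(not part of the series; the program name and attach point below are
illustrative, and a vmlinux.h build plus libbpf's bpf_core_read.h are
assumed). bpf_core_enum_value() is resolved against the running
kernel's BTF when the object is loaded, and libbpf also provides
bpf_core_enum_value_exists() as a guard for enumerators that may be
absent from the target kernel entirely:

	/* SPDX-License-Identifier: GPL-2.0 */
	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_core_read.h>

	char _license[] SEC("license") = "GPL";

	SEC("iter/cgroup")
	int enum_probe(struct bpf_iter__cgroup *ctx)
	{
		int idx;

		/* Skip kernels whose BTF lacks the enumerator at all. */
		if (!bpf_core_enum_value_exists(enum node_stat_item, NR_SHMEM))
			return 1;

		/* Relocated at load time: the value NR_SHMEM has on the
		 * running kernel, not on the one vmlinux.h was built from.
		 */
		idx = bpf_core_enum_value(enum node_stat_item, NR_SHMEM);
		bpf_printk("NR_SHMEM = %d on this kernel", idx);
		return 0;
	}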
From: Hui Zhu

When back-porting test_progs to different kernel versions, I
encountered an issue where the test_cgroup_iter_memcg test would
falsely pass even when bpf_mem_cgroup_page_state() failed.

This patch adds explicit checks that bpf_mem_cgroup_page_state() did
not return -1 before the actual statistics values are validated.

Signed-off-by: Hui Zhu
---
 .../selftests/bpf/prog_tests/cgroup_iter_memcg.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
index a5afd16705f0..897b17b58df3 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
@@ -53,6 +53,8 @@ static void test_anon(struct bpf_link *link, struct memcg_query *memcg_query)
 	if (!ASSERT_OK(read_stats(link), "read stats"))
 		goto cleanup;
 
+	ASSERT_NEQ(memcg_query->nr_anon_mapped, (unsigned long)-1,
+		   "bpf_mem_cgroup_page_state NR_ANON_MAPPED");
 	ASSERT_GT(memcg_query->nr_anon_mapped, 0, "final anon mapped val");
 
 cleanup:
@@ -88,6 +90,10 @@ static void test_file(struct bpf_link *link, struct memcg_query *memcg_query)
 	if (!ASSERT_OK(read_stats(link), "read stats"))
 		goto cleanup_map;
 
+	ASSERT_NEQ(memcg_query->nr_file_pages, (unsigned long)-1,
+		   "bpf_mem_cgroup_page_state NR_FILE_PAGES");
+	ASSERT_NEQ(memcg_query->nr_file_mapped, (unsigned long)-1,
+		   "bpf_mem_cgroup_page_state NR_FILE_MAPPED");
 	ASSERT_GT(memcg_query->nr_file_pages, 0, "final file value");
 	ASSERT_GT(memcg_query->nr_file_mapped, 0, "final file mapped value");
 
@@ -119,6 +125,8 @@ static void test_shmem(struct bpf_link *link, struct memcg_query *memcg_query)
 	if (!ASSERT_OK(read_stats(link), "read stats"))
 		goto cleanup;
 
+	ASSERT_NEQ(memcg_query->nr_shmem, (unsigned long)-1,
+		   "bpf_mem_cgroup_page_state NR_SHMEM");
 	ASSERT_GT(memcg_query->nr_shmem, 0, "final shmem value");
 
 cleanup:
@@ -143,6 +151,8 @@ static void test_kmem(struct bpf_link *link, struct memcg_query *memcg_query)
 	if (!ASSERT_OK(read_stats(link), "read stats"))
 		goto cleanup;
 
+	ASSERT_NEQ(memcg_query->memcg_kmem, (unsigned long)-1,
+		   "bpf_mem_cgroup_page_state MEMCG_KMEM");
 	ASSERT_GT(memcg_query->memcg_kmem, 0, "kmem value");
 
 cleanup:
@@ -170,6 +180,8 @@ static void test_pgfault(struct bpf_link *link, struct memcg_query *memcg_query)
 	if (!ASSERT_OK(read_stats(link), "read stats"))
 		goto cleanup;
 
+	ASSERT_NEQ(memcg_query->pgfault, (unsigned long)-1,
+		   "bpf_mem_cgroup_vm_events PGFAULT");
 	ASSERT_GT(memcg_query->pgfault, 0, "final pgfault val");
 
 cleanup:
-- 
2.43.0
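Why the old assertions could pass despite a kfunc failure: the -1 error
value is stored in an unsigned long field (hence the casts above), where
it becomes ULONG_MAX and satisfies ASSERT_GT(..., 0). A standalone
illustration of the comparison, not part of the series:

	#include <stdio.h>

	int main(void)
	{
		/* The -1 error sentinel, stored unsigned, is ULONG_MAX... */
		unsigned long v = (unsigned long)-1;

		/* ...so a "greater than zero" check cannot catch it. */
		printf("v = %lu, v > 0: %d\n", v, v > 0);
		return 0;
	}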
From: Hui Zhu

When cgroup.memory=nokmem is set on the kernel command line, kmem
accounting is disabled. This causes the test_kmem subtest of
cgroup_iter_memcg to fail, because it expects non-zero kmem values.

Fix this by checking /proc/cmdline for the nokmem parameter. If it is
found, verify that the kmem value is zero and return early, skipping
the pipe creation that would otherwise make the test fail.

Signed-off-by: Hui Zhu
---
 .../bpf/prog_tests/cgroup_iter_memcg.c        | 30 +++++++++++++++++++
 1 file changed, 30 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c b/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
index 897b17b58df3..2b9c148cebf0 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_iter_memcg.c
@@ -134,11 +134,41 @@ static void test_shmem(struct bpf_link *link, struct memcg_query *memcg_query)
 	shm_unlink("/tmp_shmem");
 }
 
+static bool cmdline_has(const char *arg)
+{
+	char cmdline[4096];
+	int fd;
+	ssize_t len;
+	bool ret = false;
+
+	fd = open("/proc/cmdline", O_RDONLY);
+	if (fd < 0)
+		return false;
+
+	len = read(fd, cmdline, sizeof(cmdline) - 1);
+	close(fd);
+	if (len < 0)
+		return false;
+
+	cmdline[len] = '\0';
+	if (strstr(cmdline, arg))
+		ret = true;
+
+	return ret;
+}
+
 #define NR_PIPES 64
 static void test_kmem(struct bpf_link *link, struct memcg_query *memcg_query)
 {
 	int fds[NR_PIPES][2], i;
 
+	if (cmdline_has("cgroup.memory=nokmem")) {
+		if (!ASSERT_OK(read_stats(link), "read stats"))
+			return;
+		ASSERT_EQ(memcg_query->memcg_kmem, 0, "kmem value");
+		return;
+	}
+
 	/*
 	 * Increase kmem value by creating pipes which will allocate some
 	 * kernel buffers.
-- 
2.43.0
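The cmdline_has() helper is self-contained, so when back-porting it can
be sanity-checked outside the selftests tree. A throwaway harness along
these lines (hypothetical; it duplicates the helper verbatim) prints
whether the running kernel was booted with the parameter:

	#include <fcntl.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Same logic as the helper added by the patch: a plain
	 * substring match against /proc/cmdline. */
	static bool cmdline_has(const char *arg)
	{
		char cmdline[4096];
		ssize_t len;
		int fd;

		fd = open("/proc/cmdline", O_RDONLY);
		if (fd < 0)
			return false;

		len = read(fd, cmdline, sizeof(cmdline) - 1);
		close(fd);
		if (len < 0)
			return false;

		cmdline[len] = '\0';
		return strstr(cmdline, arg) != NULL;
	}

	int main(void)
	{
		printf("cgroup.memory=nokmem: %s\n",
		       cmdline_has("cgroup.memory=nokmem") ? "yes" : "no");
		return 0;
	}

Note that strstr() matches substrings rather than whole boot parameters,
which is adequate for this test's purpose.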