Add libbpf support for the BPF_F_CPU flag for percpu_array maps by embedding the cpu info into the high 32 bits of: 1. **flags**: bpf_map_lookup_elem_flags(), bpf_map__lookup_elem(), bpf_map_update_elem() and bpf_map__update_elem() 2. **opts->elem_flags**: bpf_map_lookup_batch() and bpf_map_update_batch() Behavior: * If cpu is (u32)~0, the update is applied across all CPUs. * Otherwise, it updates the value only on the specified CPU. * If cpu is (u32)~0, values are looked up across all CPUs. * Otherwise, it looks up the value only from the specified CPU. Signed-off-by: Leon Hwang --- tools/lib/bpf/bpf.h | 5 +++++ tools/lib/bpf/libbpf.c | 28 ++++++++++++++++++++++------ tools/lib/bpf/libbpf.h | 17 ++++++----------- 3 files changed, 33 insertions(+), 17 deletions(-) diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 7252150e7ad35..8dea8216d5992 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -286,6 +286,11 @@ LIBBPF_API int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, * Update spin_lock-ed map elements. This must be * specified if the map value contains a spinlock. * + * **BPF_F_CPU** + * As for percpu map, the cpu info is embedded into the high 32 bits of + * **opts->elem_flags**. Update value across all CPUs if cpu is (__u32)~0, + * or on specified CPU otherwise. 
+ * * @param fd BPF map file descriptor * @param keys pointer to an array of *count* keys * @param values pointer to an array of *count* values diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index fb4d92c5c3394..29ada5d62ba26 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -10593,8 +10593,10 @@ bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name) } static int validate_map_op(const struct bpf_map *map, size_t key_sz, - size_t value_sz, bool check_value_sz) + size_t value_sz, bool check_value_sz, __u64 flags) { + __u32 cpu; + if (!map_is_created(map)) /* map is not yet created */ return -ENOENT; @@ -10612,6 +10614,20 @@ static int validate_map_op(const struct bpf_map *map, size_t key_sz, if (!check_value_sz) return 0; + if (flags & BPF_F_CPU) { + if (map->def.type != BPF_MAP_TYPE_PERCPU_ARRAY) + return -EINVAL; + cpu = flags >> 32; + if (cpu != BPF_ALL_CPUS && cpu >= libbpf_num_possible_cpus()) + return -ERANGE; + if (map->def.value_size != value_sz) { + pr_warn("map '%s': unexpected value size %zu provided, expected %u\n", + map->name, value_sz, map->def.value_size); + return -EINVAL; + } + return 0; + } + switch (map->def.type) { case BPF_MAP_TYPE_PERCPU_ARRAY: case BPF_MAP_TYPE_PERCPU_HASH: @@ -10644,7 +10660,7 @@ int bpf_map__lookup_elem(const struct bpf_map *map, { int err; - err = validate_map_op(map, key_sz, value_sz, true); + err = validate_map_op(map, key_sz, value_sz, true, flags); if (err) return libbpf_err(err); @@ -10657,7 +10673,7 @@ int bpf_map__update_elem(const struct bpf_map *map, { int err; - err = validate_map_op(map, key_sz, value_sz, true); + err = validate_map_op(map, key_sz, value_sz, true, flags); if (err) return libbpf_err(err); @@ -10669,7 +10685,7 @@ int bpf_map__delete_elem(const struct bpf_map *map, { int err; - err = validate_map_op(map, key_sz, 0, false /* check_value_sz */); + err = validate_map_op(map, key_sz, 0, false /* check_value_sz */, 0); if (err) return 
libbpf_err(err); @@ -10682,7 +10698,7 @@ int bpf_map__lookup_and_delete_elem(const struct bpf_map *map, { int err; - err = validate_map_op(map, key_sz, value_sz, true); + err = validate_map_op(map, key_sz, value_sz, true, 0); if (err) return libbpf_err(err); @@ -10694,7 +10710,7 @@ int bpf_map__get_next_key(const struct bpf_map *map, { int err; - err = validate_map_op(map, key_sz, 0, false /* check_value_sz */); + err = validate_map_op(map, key_sz, 0, false /* check_value_sz */, 0); if (err) return libbpf_err(err); diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index d1cf813a057bc..bde22b017a3ce 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -1169,10 +1169,11 @@ LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map); * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size** * @param value pointer to memory in which looked up value will be stored * @param value_sz size in byte of value data memory; it has to match BPF map - * definition's **value_size**. For per-CPU BPF maps value size has to be - * a product of BPF map value size and number of possible CPUs in the system - * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for - * per-CPU values value size has to be aligned up to closest 8 bytes for + * definition's **value_size**. For per-CPU BPF maps, value size can be + * definition's **value_size** if **BPF_F_CPU** is specified in **flags**, + * otherwise a product of BPF map value size and number of possible CPUs in the + * system (could be fetched with **libbpf_num_possible_cpus()**). Note also that + * for per-CPU values value size has to be aligned up to closest 8 bytes for * alignment reasons, so expected size is: `round_up(value_size, 8) * * libbpf_num_possible_cpus()`. 
* @flags extra flags passed to kernel for this operation @@ -1192,13 +1193,7 @@ LIBBPF_API int bpf_map__lookup_elem(const struct bpf_map *map, * @param key pointer to memory containing bytes of the key * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size** * @param value pointer to memory containing bytes of the value - @param value_sz size in byte of value data memory; it has to match BPF map - * definition's **value_size**. For per-CPU BPF maps value size has to be - * a product of BPF map value size and number of possible CPUs in the system - * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for - * per-CPU values value size has to be aligned up to closest 8 bytes for - * alignment reasons, so expected size is: `round_up(value_size, 8) - * * libbpf_num_possible_cpus()`. + * @param value_sz refer to **bpf_map__lookup_elem**'s description. * @flags extra flags passed to kernel for this operation * @return 0, on success; negative error, otherwise * -- 2.50.1