Introduce BPF_F_ALL_CPUS flag support for percpu_cgroup_storage maps to
allow updating values for all CPUs with a single value via the
update_elem API.

Introduce BPF_F_CPU flag support for percpu_cgroup_storage maps to
allow:

* updating the value for a specified CPU via the update_elem API.
* looking up the value for a specified CPU via the lookup_elem API.

The BPF_F_CPU flag is passed via map_flags along with the embedded cpu
info.

Signed-off-by: Leon Hwang
---
 include/linux/bpf-cgroup.h |  4 ++--
 kernel/bpf/local_storage.c | 46 +++++++++++++++++++++++++++++---------
 kernel/bpf/syscall.c       |  3 ++-
 3 files changed, 39 insertions(+), 14 deletions(-)

diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index aedf573bdb426..013f4db9903fd 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -172,7 +172,7 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
 void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
 int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
 
-int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value, u64 flags);
 int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 				     void *value, u64 flags);
 
@@ -467,7 +467,7 @@ static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
 static inline void bpf_cgroup_storage_free(
 	struct bpf_cgroup_storage *storage) {}
 static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
-						 void *value) {
+						 void *value, u64 flags) {
 	return 0;
 }
 static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index c93a756e035c0..1abfbb27449ee 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -180,18 +180,22 @@ static long cgroup_storage_update_elem(struct bpf_map *map, void *key,
 }
 
 int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
-				   void *value)
+				   void *value, u64 map_flags)
 {
 	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
 	struct bpf_cgroup_storage *storage;
-	int cpu, off = 0;
+	int err, cpu, off = 0;
 	u32 size;
 
+	err = bpf_map_check_cpu_flags(map_flags, false);
+	if (err)
+		return err;
+
 	rcu_read_lock();
 	storage = cgroup_storage_lookup(map, key, false);
 	if (!storage) {
-		rcu_read_unlock();
-		return -ENOENT;
+		err = -ENOENT;
+		goto unlock;
 	}
 
 	/* per_cpu areas are zero-filled and bpf programs can only
@@ -199,13 +203,19 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key,
 	 * will not leak any kernel data
 	 */
 	size = round_up(_map->value_size, 8);
+	if (map_flags & BPF_F_CPU) {
+		cpu = map_flags >> 32;
+		bpf_long_memcpy(value, per_cpu_ptr(storage->percpu_buf, cpu), size);
+		goto unlock;
+	}
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(value + off,
 				per_cpu_ptr(storage->percpu_buf, cpu), size);
 		off += size;
 	}
+unlock:
 	rcu_read_unlock();
-	return 0;
+	return err;
 }
 
 int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
@@ -213,17 +223,21 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
 {
 	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
 	struct bpf_cgroup_storage *storage;
-	int cpu, off = 0;
+	int err, cpu, off = 0;
 	u32 size;
 
-	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
+	if ((u32)map_flags & ~(BPF_ANY | BPF_EXIST | BPF_F_CPU | BPF_F_ALL_CPUS))
 		return -EINVAL;
 
+	err = bpf_map_check_cpu_flags(map_flags, true);
+	if (err)
+		return err;
+
 	rcu_read_lock();
 	storage = cgroup_storage_lookup(map, key, false);
 	if (!storage) {
-		rcu_read_unlock();
-		return -ENOENT;
+		err = -ENOENT;
+		goto unlock;
 	}
 
 	/* the user space will provide round_up(value_size, 8) bytes that
@@ -233,13 +247,23 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
 	 * so no kernel data leaks possible
 	 */
 	size = round_up(_map->value_size, 8);
+	if (map_flags & BPF_F_CPU) {
+		cpu = map_flags >> 32;
+		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu), value, size);
+		goto unlock;
+	}
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
 				value + off, size);
-		off += size;
+		/* same user-provided value is used if BPF_F_ALL_CPUS is
+		 * specified, otherwise value is an array of per-CPU values.
+		 */
+		if (!(map_flags & BPF_F_ALL_CPUS))
+			off += size;
 	}
+unlock:
 	rcu_read_unlock();
-	return 0;
+	return err;
 }
 
 static int cgroup_storage_get_next_key(struct bpf_map *_map, void *key,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index d01424e0560ce..dbb715719f40c 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -137,6 +137,7 @@ static bool bpf_map_supports_cpu_flags(enum bpf_map_type map_type)
 	case BPF_MAP_TYPE_PERCPU_ARRAY:
 	case BPF_MAP_TYPE_PERCPU_HASH:
 	case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
 		return true;
 	default:
 		return false;
@@ -347,7 +348,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 		err = bpf_percpu_array_copy(map, key, value, flags);
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
-		err = bpf_percpu_cgroup_storage_copy(map, key, value);
+		err = bpf_percpu_cgroup_storage_copy(map, key, value, flags);
 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
 		err = bpf_stackmap_copy(map, key, value);
 	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
-- 
2.50.1
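
For reference, below is a minimal userspace sketch (not part of this patch)
of how the two flags are meant to be exercised against a
percpu_cgroup_storage map through libbpf's bpf_map_update_elem() and
bpf_map_lookup_elem_flags(). It assumes BPF_F_CPU/BPF_F_ALL_CPUS are already
exposed by the updated UAPI header from earlier in the series, that map_fd
refers to an attached percpu_cgroup_storage map with a u64 value, and that
the demo_cpu_flags() helper name is made up for illustration; the
cpu-in-upper-32-bits encoding mirrors the kernel code above.

#include <bpf/bpf.h>
#include <linux/bpf.h>

static int demo_cpu_flags(int map_fd,
			  const struct bpf_cgroup_storage_key *key,
			  __u32 cpu)
{
	__u64 value = 42;	/* assumes the map's value type is u64 */
	__u64 flags;
	int err;

	/* One value applied to every possible CPU. */
	err = bpf_map_update_elem(map_fd, key, &value, BPF_F_ALL_CPUS);
	if (err)
		return err;

	/* The CPU number travels in the upper 32 bits of map_flags. */
	flags = BPF_F_CPU | ((__u64)cpu << 32);

	/* Update only this CPU's copy ... */
	value = 100;
	err = bpf_map_update_elem(map_fd, key, &value, flags);
	if (err)
		return err;

	/* ... and read back only this CPU's copy; the value buffer holds
	 * a single per-CPU value here instead of one slot per possible CPU.
	 */
	return bpf_map_lookup_elem_flags(map_fd, key, &value, flags);
}

Without either flag, both paths keep their existing semantics and the value
buffer still spans all possible CPUs.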