Unify map flags checking for the lookup_elem, update_elem, lookup_batch
and update_batch APIs in a common helper. This makes it convenient to
check the BPF_F_CPU and BPF_F_ALL_CPUS flags for these APIs in one place
in the next patch.

Signed-off-by: Leon Hwang
---
 include/linux/bpf.h  | 31 +++++++++++++++++++++++++++++++
 kernel/bpf/syscall.c | 34 +++++++++++-----------------------
 2 files changed, 42 insertions(+), 23 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index ce523a49dc20c..55c98c7d52510 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -3735,4 +3735,35 @@ int bpf_prog_get_file_line(struct bpf_prog *prog, unsigned long ip, const char *
 			   const char **linep, int *nump);
 struct bpf_prog *bpf_prog_find_from_stack(void);
 
+static inline int bpf_map_check_op_flags(struct bpf_map *map, u64 flags, u64 allowed_flags)
+{
+	if (flags & ~allowed_flags)
+		return -EINVAL;
+
+	if ((flags & BPF_F_LOCK) && !btf_record_has_field(map->record, BPF_SPIN_LOCK))
+		return -EINVAL;
+
+	return 0;
+}
+
+static inline int bpf_map_check_lookup_flags(struct bpf_map *map, u64 flags)
+{
+	return bpf_map_check_op_flags(map, flags, BPF_F_LOCK);
+}
+
+static inline int bpf_map_check_update_flags(struct bpf_map *map, u64 flags)
+{
+	return bpf_map_check_op_flags(map, flags, ~0);
+}
+
+static inline int bpf_map_check_lookup_batch_flags(struct bpf_map *map, u64 flags)
+{
+	return bpf_map_check_lookup_flags(map, flags);
+}
+
+static inline int bpf_map_check_update_batch_flags(struct bpf_map *map, u64 flags)
+{
+	return bpf_map_check_op_flags(map, flags, BPF_F_LOCK);
+}
+
 #endif /* _LINUX_BPF_H */
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0fbfa8532c392..3a51836f18915 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1669,9 +1669,6 @@ static int map_lookup_elem(union bpf_attr *attr)
 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
 		return -EINVAL;
 
-	if (attr->flags & ~BPF_F_LOCK)
-		return -EINVAL;
-
 	CLASS(fd, f)(attr->map_fd);
 	map = __bpf_map_get(f);
 	if (IS_ERR(map))
@@ -1679,9 +1676,9 @@ static int map_lookup_elem(union bpf_attr *attr)
 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ))
 		return -EPERM;
 
-	if ((attr->flags & BPF_F_LOCK) &&
-	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
-		return -EINVAL;
+	err = bpf_map_check_lookup_flags(map, attr->flags);
+	if (err)
+		return err;
 
 	key = __bpf_copy_key(ukey, map->key_size);
 	if (IS_ERR(key))
@@ -1744,11 +1741,9 @@ static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
 		goto err_put;
 	}
 
-	if ((attr->flags & BPF_F_LOCK) &&
-	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
-		err = -EINVAL;
+	err = bpf_map_check_update_flags(map, attr->flags);
+	if (err)
 		goto err_put;
-	}
 
 	key = ___bpf_copy_key(ukey, map->key_size);
 	if (IS_ERR(key)) {
@@ -1952,13 +1947,9 @@ int generic_map_update_batch(struct bpf_map *map, struct file *map_file,
 	void *key, *value;
 	int err = 0;
 
-	if (attr->batch.elem_flags & ~BPF_F_LOCK)
-		return -EINVAL;
-
-	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
-	    !btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
-		return -EINVAL;
-	}
+	err = bpf_map_check_update_batch_flags(map, attr->batch.elem_flags);
+	if (err)
+		return err;
 
 	value_size = bpf_map_value_size(map);
 
@@ -2015,12 +2006,9 @@ int generic_map_lookup_batch(struct bpf_map *map,
 	u32 value_size, cp, max_count;
 	int err;
 
-	if (attr->batch.elem_flags & ~BPF_F_LOCK)
-		return -EINVAL;
-
-	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
-	    !btf_record_has_field(map->record, BPF_SPIN_LOCK))
-		return -EINVAL;
+	err = bpf_map_check_lookup_batch_flags(map, attr->batch.elem_flags);
+	if (err)
+		return err;
 
 	value_size = bpf_map_value_size(map);
 
-- 
2.50.1
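
Note (illustration only, not part of this patch): because all four syscall
paths now funnel through bpf_map_check_op_flags(), a follow-up only has to
widen the allowed_flags mask, plus any extra validation, in one spot. A rough
sketch of what that could look like for the lookup path, assuming BPF_F_CPU
and BPF_F_ALL_CPUS are introduced as new uapi flags; the actual next patch
may differ and may add per-map-type restrictions:

	/*
	 * Hypothetical follow-up change: accept the new flags for lookups by
	 * extending the mask passed to the shared helper.  Any rule such as
	 * "only valid for percpu maps" would also live inside the helper, so
	 * lookup_elem, update_elem and both batch paths pick it up together.
	 */
	static inline int bpf_map_check_lookup_flags(struct bpf_map *map, u64 flags)
	{
		return bpf_map_check_op_flags(map, flags,
					      BPF_F_LOCK | BPF_F_CPU | BPF_F_ALL_CPUS);
	}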