From: Kaitao Cheng

Add a new kfunc, bpf_list_del(), which removes an arbitrary node from a
BPF linked list. If a user holds a reference to a node in the middle of
a list, they can remove it directly, without being restricted to
deleting from the head or the tail. This is typically paired with
bpf_refcount. Because bpf_list_del() returns an owning reference to the
removed node in addition to the reference the caller already holds, it
is generally necessary to drop the node twice after calling it, to
prevent reference count leaks.

Signed-off-by: Kaitao Cheng
---
 kernel/bpf/helpers.c  | 19 +++++++++++++++++++
 kernel/bpf/verifier.c |  6 +++++-
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 6eb6c82ed2ee..19d88da8e694 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2459,6 +2459,24 @@ __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
 	return __bpf_list_del(head, true);
 }
 
+__bpf_kfunc struct bpf_list_node *bpf_list_del(struct bpf_list_head *head,
+					       struct bpf_list_node *node)
+{
+	struct bpf_list_node_kern *knode = (struct bpf_list_node_kern *)node;
+	struct list_head *h = (void *)head;
+
+	if (unlikely(!knode))
+		return NULL;
+
+	if (WARN_ON_ONCE(READ_ONCE(knode->owner) != h))
+		return NULL;
+
+	list_del_init(&knode->list_head);
+	WRITE_ONCE(knode->owner, NULL);
+
+	return node;
+}
+
 __bpf_kfunc struct bpf_list_node *bpf_list_front(struct bpf_list_head *head)
 {
 	struct list_head *h = (struct list_head *)head;
@@ -4545,6 +4563,7 @@ BTF_ID_FLAGS(func, bpf_list_push_front_impl)
 BTF_ID_FLAGS(func, bpf_list_push_back_impl)
 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_list_del, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 67c09b43a497..c9557d3fb8dd 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12461,6 +12461,7 @@ enum special_kfunc_type {
 	KF_bpf_list_push_back_impl,
 	KF_bpf_list_pop_front,
 	KF_bpf_list_pop_back,
+	KF_bpf_list_del,
 	KF_bpf_list_front,
 	KF_bpf_list_back,
 	KF_bpf_cast_to_kern_ctx,
@@ -12521,6 +12522,7 @@ BTF_ID(func, bpf_list_push_front_impl)
 BTF_ID(func, bpf_list_push_back_impl)
 BTF_ID(func, bpf_list_pop_front)
 BTF_ID(func, bpf_list_pop_back)
+BTF_ID(func, bpf_list_del)
 BTF_ID(func, bpf_list_front)
 BTF_ID(func, bpf_list_back)
 BTF_ID(func, bpf_cast_to_kern_ctx)
@@ -12996,6 +12998,7 @@ static bool is_bpf_list_api_kfunc(u32 btf_id)
 	       btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
 	       btf_id == special_kfunc_list[KF_bpf_list_pop_front] ||
 	       btf_id == special_kfunc_list[KF_bpf_list_pop_back] ||
+	       btf_id == special_kfunc_list[KF_bpf_list_del] ||
 	       btf_id == special_kfunc_list[KF_bpf_list_front] ||
 	       btf_id == special_kfunc_list[KF_bpf_list_back];
 }
@@ -13118,7 +13121,8 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
 	switch (node_field_type) {
 	case BPF_LIST_NODE:
 		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
-		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl]);
+		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
+		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_del]);
 		break;
 	case BPF_RB_NODE:
 		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
-- 
2.50.1 (Apple Git-155)
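For illustration, here is a minimal usage sketch of bpf_list_del() from a
BPF program. It is not part of this series: the includes and the
private()/__contains() declarations mirror the refcounted_kptr selftests,
and struct node_data, head, and lock are illustrative names. The two
bpf_obj_drop() calls at the end show the double drop described above, one
for the reference acquired with bpf_refcount_acquire() and one for the
owning reference returned by bpf_list_del().

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"

/* Illustrative refcounted node type living in the list 'head' */
struct node_data {
        long key;
        struct bpf_refcount ref;
        struct bpf_list_node l;
};

#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock lock;
private(A) struct bpf_list_head head __contains(node_data, l);

SEC("tc")
long del_by_pointer(void *ctx)
{
        struct node_data *n, *m;
        struct bpf_list_node *l;

        n = bpf_obj_new(typeof(*n));
        if (!n)
                return 1;
        m = bpf_refcount_acquire(n);    /* keep a second owning reference */
        if (!m) {
                bpf_obj_drop(n);
                return 2;
        }

        bpf_spin_lock(&lock);
        bpf_list_push_back(&head, &n->l);       /* consumes n's reference */
        l = bpf_list_del(&head, &m->l);         /* returns an owning reference */
        bpf_spin_unlock(&lock);

        bpf_obj_drop(m);        /* drop #1: the reference we acquired */
        if (l)                  /* drop #2: the reference bpf_list_del returned */
                bpf_obj_drop(container_of(l, struct node_data, l));
        return 0;
}

char _license[] SEC("license") = "GPL";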
From: Kaitao Cheng

Add selftests for bpf_list_del: insert a node into both an rbtree and a
list, retrieve the node from the rbtree, use the obtained node pointer
to remove it from the list, and finally free the node. To verify that
bpf_list_del is properly gated, also expect the verifier to reject
calls to bpf_list_del made without holding the spin_lock.

Signed-off-by: Kaitao Cheng
---
 .../testing/selftests/bpf/bpf_experimental.h | 11 +++
 .../selftests/bpf/progs/refcounted_kptr.c    | 71 +++++++++++++++++++
 2 files changed, 82 insertions(+)

diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 4b7210c318dd..54ec9d307fdc 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -99,6 +99,17 @@ extern struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) __ks
  */
 extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksym;
 
+/* Description
+ *	Remove 'node' from the BPF linked list with head 'head'.
+ *	The node must be in the list. Caller receives ownership of the
+ *	removed node and must release it with bpf_obj_drop.
+ * Returns
+ *	Pointer to the removed bpf_list_node, or NULL if 'node' is NULL
+ *	or not in the list.
+ */
+extern struct bpf_list_node *bpf_list_del(struct bpf_list_head *head,
+					  struct bpf_list_node *node) __ksym;
+
 /* Description
  *	Remove 'node' from rbtree with root 'root'
  * Returns
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
index 1aca85d86aeb..ac7672cfefb8 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -367,6 +367,77 @@ long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx)	\
 INSERT_STASH_READ(true, "insert_stash_read: remove from tree");
 INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree");
 
+/* Insert node_data into both rbtree and list, remove from tree, then remove
+ * from list via bpf_list_del using the node obtained from the tree.
+ */
+SEC("tc")
+__description("test_bpf_list_del: remove an arbitrary node from the list")
+__success __retval(0)
+long test_bpf_list_del(void *ctx)
+{
+	long err;
+	struct bpf_rb_node *rb;
+	struct bpf_list_node *l;
+	struct node_data *n;
+
+	err = __insert_in_tree_and_list(&head, &root, &lock);
+	if (err)
+		return err;
+
+	bpf_spin_lock(&lock);
+	rb = bpf_rbtree_first(&root);
+	if (!rb) {
+		bpf_spin_unlock(&lock);
+		return -4;
+	}
+
+	rb = bpf_rbtree_remove(&root, rb);
+	if (!rb) {
+		bpf_spin_unlock(&lock);
+		return -5;
+	}
+
+	n = container_of(rb, struct node_data, r);
+	l = bpf_list_del(&head, &n->l);
+	bpf_spin_unlock(&lock);
+	bpf_obj_drop(n);
+	if (!l)
+		return -6;
+
+	bpf_obj_drop(container_of(l, struct node_data, l));
+	return 0;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=32 must be held for bpf_list_head")
+long list_del_without_lock_fail(void *ctx)
+{
+	struct bpf_rb_node *rb;
+	struct bpf_list_node *l;
+	struct node_data *n;
+
+	bpf_spin_lock(&lock);
+	rb = bpf_rbtree_first(&root);
+	if (!rb) {
+		bpf_spin_unlock(&lock);
+		return -4;
+	}
+
+	rb = bpf_rbtree_remove(&root, rb);
+	bpf_spin_unlock(&lock);
+	if (!rb)
+		return -5;
+
+	n = container_of(rb, struct node_data, r);
+	l = bpf_list_del(&head, &n->l);
+	bpf_obj_drop(n);
+	if (!l)
+		return -6;
+
+	bpf_obj_drop(container_of(l, struct node_data, l));
+	return 0;
+}
+
 SEC("tc")
 __success
 long rbtree_refcounted_node_ref_escapes(void *ctx)
-- 
2.50.1 (Apple Git-155)
From: Kaitao Cheng

Add a new kfunc bpf_list_add_impl(head, new, prev, meta, off) that
inserts 'new' after 'prev' in the BPF linked list headed by 'head'.
'prev' must already be in that list; 'new' must not be in any list. The
new node must be an owning reference (e.g. from bpf_obj_new); the kfunc
consumes that reference, and the node becomes non-owning once inserted.

The bpf_list_head *head parameter is part of the signature because the
verifier needs it to check that the spin_lock protecting the list is
held.

Returns 0 on success and -EINVAL on failure: 'prev' is not in the list
headed by 'head', or 'new' is already in a list (including duplicate
insertion). Since the verifier treats the reference to 'new' as
consumed by the call, the kernel drops the passed-in node on every
failure path.

Signed-off-by: Kaitao Cheng
---
 kernel/bpf/helpers.c  | 25 +++++++++++++++++++++++++
 kernel/bpf/verifier.c | 14 ++++++++++----
 2 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 19d88da8e694..488810da8f30 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2497,6 +2497,30 @@ __bpf_kfunc struct bpf_list_node *bpf_list_back(struct bpf_list_head *head)
 	return (struct bpf_list_node *)h->prev;
 }
 
+__bpf_kfunc int bpf_list_add_impl(struct bpf_list_head *head,
+				  struct bpf_list_node *new,
+				  struct bpf_list_node *prev,
+				  void *meta__ign, u64 off)
+{
+	struct bpf_list_node_kern *kn = (void *)new, *kp = (void *)prev;
+	struct btf_struct_meta *meta = meta__ign;
+	struct list_head *n = &kn->list_head, *p = &kp->list_head;
+
+	/* The verifier releases the program's reference to 'new' when this
+	 * kfunc is called, so every failure path must drop the node to
+	 * avoid leaking it.
+	 */
+	if (unlikely(!head) || WARN_ON_ONCE(READ_ONCE(kp->owner) != head) ||
+	    cmpxchg(&kn->owner, NULL, BPF_PTR_POISON)) {
+		__bpf_obj_drop_impl((void *)n - off,
+				    meta ? meta->record : NULL, false);
+		return -EINVAL;
+	}
+
+	list_add(n, p);
+	WRITE_ONCE(kn->owner, head);
+	return 0;
+}
+
 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
 						  struct bpf_rb_node *node)
 {
@@ -4566,6 +4590,7 @@ BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_list_del, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_list_add_impl)
 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c9557d3fb8dd..6dfd4afff1cf 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12464,6 +12464,7 @@ enum special_kfunc_type {
 	KF_bpf_list_del,
 	KF_bpf_list_front,
 	KF_bpf_list_back,
+	KF_bpf_list_add_impl,
 	KF_bpf_cast_to_kern_ctx,
 	KF_bpf_rdonly_cast,
 	KF_bpf_rcu_read_lock,
@@ -12525,6 +12526,7 @@ BTF_ID(func, bpf_list_pop_back)
 BTF_ID(func, bpf_list_del)
 BTF_ID(func, bpf_list_front)
 BTF_ID(func, bpf_list_back)
+BTF_ID(func, bpf_list_add_impl)
 BTF_ID(func, bpf_cast_to_kern_ctx)
 BTF_ID(func, bpf_rdonly_cast)
 BTF_ID(func, bpf_rcu_read_lock)
@@ -13000,7 +13002,8 @@ static bool is_bpf_list_api_kfunc(u32 btf_id)
 	       btf_id == special_kfunc_list[KF_bpf_list_pop_back] ||
 	       btf_id == special_kfunc_list[KF_bpf_list_del] ||
 	       btf_id == special_kfunc_list[KF_bpf_list_front] ||
-	       btf_id == special_kfunc_list[KF_bpf_list_back];
+	       btf_id == special_kfunc_list[KF_bpf_list_back] ||
+	       btf_id == special_kfunc_list[KF_bpf_list_add_impl];
 }
 
 static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
@@ -13122,7 +13125,8 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
 	case BPF_LIST_NODE:
 		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
 		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
-		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_del]);
+		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_del] ||
+		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_add_impl]);
 		break;
 	case BPF_RB_NODE:
 		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
@@ -14264,6 +14268,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 
 	if (meta.func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
 	    meta.func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
+	    meta.func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
 	    meta.func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
 		release_ref_obj_id = regs[BPF_REG_2].ref_obj_id;
 		insn_aux->insert_off = regs[BPF_REG_2].off;
@@ -23230,13 +23235,14 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		*cnt = 3;
 	} else if (desc->func_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
 		   desc->func_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
+		   desc->func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
 		   desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
 		struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta;
 		int struct_meta_reg = BPF_REG_3;
 		int node_offset_reg = BPF_REG_4;
 
-		/* rbtree_add has extra 'less' arg, so args-to-fixup are in diff regs */
-		if (desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
+		if (desc->func_id == special_kfunc_list[KF_bpf_list_add_impl] ||
+		    desc->func_id == special_kfunc_list[KF_bpf_rbtree_add_impl]) {
 			struct_meta_reg = BPF_REG_4;
 			node_offset_reg = BPF_REG_5;
 		}
-- 
2.50.1 (Apple Git-155)
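Again for illustration and not part of this series, a hypothetical caller
showing the raw calling convention, reusing the illustrative
node_data/head/lock declarations from the sketch after the first patch.
The 'meta' and 'off' arguments are passed as NULL and 0 by the program
and rewritten by the verifier in fixup_kfunc_call(), just as for
bpf_rbtree_add_impl(); the selftest patch that follows wraps this in a
bpf_list_add() convenience macro. Note the error path: the call consumes
the reference to the new node even on failure, so the program must not
drop it afterwards.

SEC("tc")
long insert_after(void *ctx)
{
        struct node_data *n_prev, *m_prev, *n_new;
        long err;

        n_prev = bpf_obj_new(typeof(*n_prev));
        if (!n_prev)
                return 1;
        m_prev = bpf_refcount_acquire(n_prev);  /* kept to use as 'prev' */
        if (!m_prev) {
                bpf_obj_drop(n_prev);
                return 1;
        }
        n_new = bpf_obj_new(typeof(*n_new));
        if (!n_new) {
                bpf_obj_drop(n_prev);
                bpf_obj_drop(m_prev);
                return 1;
        }

        bpf_spin_lock(&lock);
        bpf_list_push_back(&head, &n_prev->l);  /* consumes n_prev's reference */
        /* args 4 and 5 are verifier-rewritten; the program passes NULL and 0 */
        err = bpf_list_add_impl(&head, &n_new->l, &m_prev->l, NULL, 0);
        bpf_spin_unlock(&lock);
        /* on failure the kernel has already dropped n_new; no drop here */

        bpf_obj_drop(m_prev);
        return err ? 2 : 0;
}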
From: Kaitao Cheng

Extend the refcounted_kptr test to exercise bpf_list_add: add a second
node after the first, then remove both nodes with bpf_list_del. To
verify that bpf_list_add is properly gated, also expect the verifier to
reject calls to bpf_list_add made without holding the spin_lock.

Signed-off-by: Kaitao Cheng
---
 .../testing/selftests/bpf/bpf_experimental.h |  16 +++
 .../selftests/bpf/progs/refcounted_kptr.c    | 122 ++++++++++++++++--
 2 files changed, 124 insertions(+), 14 deletions(-)

diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 54ec9d307fdc..fdcc7a054095 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -110,6 +110,22 @@ extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksy
 extern struct bpf_list_node *bpf_list_del(struct bpf_list_head *head,
 					  struct bpf_list_node *node) __ksym;
 
+/* Description
+ *	Insert 'new' after 'prev' in the BPF linked list with head 'head'.
+ *	The bpf_spin_lock protecting the list must be held. 'prev' must
+ *	already be in that list; 'new' must not be in any list. The 'meta'
+ *	and 'off' parameters are rewritten by the verifier, no need for
+ *	BPF programs to set them.
+ * Returns
+ *	0 on success, -EINVAL if head is NULL, prev is not in the list
+ *	with head, or new is already in a list (in which case the kernel
+ *	drops the passed-in node).
+ */
+extern int bpf_list_add_impl(struct bpf_list_head *head, struct bpf_list_node *new,
+			     struct bpf_list_node *prev, void *meta, __u64 off) __ksym;
+
+/* Convenience macro to wrap over bpf_list_add_impl */
+#define bpf_list_add(head, new, prev) bpf_list_add_impl(head, new, prev, NULL, 0)
+
 /* Description
  *	Remove 'node' from rbtree with root 'root'
  * Returns
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
index ac7672cfefb8..5a83274e1d26 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -367,18 +367,19 @@ long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx)	\
 INSERT_STASH_READ(true, "insert_stash_read: remove from tree");
 INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree");
 
-/* Insert node_data into both rbtree and list, remove from tree, then remove
- * from list via bpf_list_del using the node obtained from the tree.
+/* Insert one node in tree and list, remove it from tree, add a second
+ * node after it in list with bpf_list_add, then remove both nodes from
+ * list via bpf_list_del.
  */
 SEC("tc")
-__description("test_bpf_list_del: remove an arbitrary node from the list")
+__description("test_list_add_del: test bpf_list_add/del")
 __success __retval(0)
-long test_bpf_list_del(void *ctx)
+long test_list_add_del(void *ctx)
 {
-	long err;
+	long err = 0;
 	struct bpf_rb_node *rb;
-	struct bpf_list_node *l;
-	struct node_data *n;
+	struct bpf_list_node *l, *l_1;
+	struct node_data *n, *n_1, *m_1;
 
 	err = __insert_in_tree_and_list(&head, &root, &lock);
 	if (err)
@@ -392,20 +393,48 @@ long test_list_add_del(void *ctx)
 	}
 
 	rb = bpf_rbtree_remove(&root, rb);
-	if (!rb) {
-		bpf_spin_unlock(&lock);
+	bpf_spin_unlock(&lock);
+	if (!rb)
 		return -5;
-	}
 
 	n = container_of(rb, struct node_data, r);
+	n_1 = bpf_obj_new(typeof(*n_1));
+	if (!n_1) {
+		bpf_obj_drop(n);
+		return -1;
+	}
+	m_1 = bpf_refcount_acquire(n_1);
+	if (!m_1) {
+		bpf_obj_drop(n);
+		bpf_obj_drop(n_1);
+		return -1;
+	}
+
+	bpf_spin_lock(&lock);
+	if (bpf_list_add(&head, &n_1->l, &n->l)) {
+		bpf_spin_unlock(&lock);
+		bpf_obj_drop(n);
+		bpf_obj_drop(m_1);
+		return -8;
+	}
+
 	l = bpf_list_del(&head, &n->l);
+	l_1 = bpf_list_del(&head, &m_1->l);
 	bpf_spin_unlock(&lock);
 	bpf_obj_drop(n);
-	if (!l)
-		return -6;
+	bpf_obj_drop(m_1);
 
-	bpf_obj_drop(container_of(l, struct node_data, l));
-	return 0;
+	if (l)
+		bpf_obj_drop(container_of(l, struct node_data, l));
+	else
+		err = -6;
+
+	if (l_1)
+		bpf_obj_drop(container_of(l_1, struct node_data, l));
+	else
+		err = -6;
+
+	return err;
 }
 
 SEC("?tc")
@@ -438,6 +467,71 @@ long list_del_without_lock_fail(void *ctx)
 	return 0;
 }
 
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=32 must be held for bpf_list_head")
+long list_add_without_lock_fail(void *ctx)
+{
+	long err = 0;
+	struct bpf_rb_node *rb;
+	struct bpf_list_node *l, *l_1;
+	struct node_data *n, *n_1, *m_1;
+
+	err = __insert_in_tree_and_list(&head, &root, &lock);
+	if (err)
+		return err;
+
+	bpf_spin_lock(&lock);
+	rb = bpf_rbtree_first(&root);
+	if (!rb) {
+		bpf_spin_unlock(&lock);
+		return -4;
+	}
+
+	rb = bpf_rbtree_remove(&root, rb);
+	bpf_spin_unlock(&lock);
+	if (!rb)
+		return -5;
+
+	n = container_of(rb, struct node_data, r);
+	n_1 = bpf_obj_new(typeof(*n_1));
+	if (!n_1) {
+		bpf_obj_drop(n);
+		return -1;
+	}
+	m_1 = bpf_refcount_acquire(n_1);
+	if (!m_1) {
+		bpf_obj_drop(n);
+		bpf_obj_drop(n_1);
+		return -1;
+	}
+
+	/* Intentionally no lock: verifier should reject bpf_list_add without lock */
+	if (bpf_list_add(&head, &n_1->l, &n->l)) {
+		bpf_obj_drop(n);
+		bpf_obj_drop(m_1);
+		return -8;
+	}
+
+	bpf_spin_lock(&lock);
+	l = bpf_list_del(&head, &n->l);
+	l_1 = bpf_list_del(&head, &m_1->l);
+	bpf_spin_unlock(&lock);
+	bpf_obj_drop(n);
+	bpf_obj_drop(m_1);
+
+	if (l)
+		bpf_obj_drop(container_of(l, struct node_data, l));
+	else
+		err = -6;
+
+	if (l_1)
+		bpf_obj_drop(container_of(l_1, struct node_data, l));
+	else
+		err = -6;
+
+	return err;
+}
+
 SEC("tc")
 __success
 long rbtree_refcounted_node_ref_escapes(void *ctx)
-- 
2.50.1 (Apple Git-155)
From: Kaitao Cheng

Add three kfuncs for querying a BPF linked list:

- bpf_list_is_first(head, node): true if 'node' is the first node in
  the list.
- bpf_list_is_last(head, node): true if 'node' is the last node in the
  list.
- bpf_list_empty(head): true if the list has no entries.

Previously, implementing these checks required calling
bpf_list_pop_front/back to retrieve the first or last node, comparing
it against the node of interest, and then pushing it back with
bpf_list_push_front/back, which was very inefficient. With the
bpf_list_is_first/is_last/empty kfuncs, a program can check whether a
node is first or last, or whether the list is empty, without removing
and re-inserting any node.

Signed-off-by: Kaitao Cheng
---
 kernel/bpf/helpers.c  | 41 +++++++++++++++++++++++++++++++++++++++++
 kernel/bpf/verifier.c | 15 +++++++++++++--
 2 files changed, 54 insertions(+), 2 deletions(-)

diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 488810da8f30..3a64a9bab06b 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -2521,6 +2521,44 @@ __bpf_kfunc int bpf_list_add_impl(struct bpf_list_head *head,
 	return 0;
 }
 
+__bpf_kfunc bool bpf_list_is_first(struct bpf_list_head *head, struct bpf_list_node *node)
+{
+	struct list_head *h = (struct list_head *)head;
+	struct bpf_list_node_kern *n = (struct bpf_list_node_kern *)node;
+
+	if (unlikely(!h->next) || list_empty(h))
+		return false;
+
+	if (READ_ONCE(n->owner) != head)
+		return false;
+
+	return h->next == &n->list_head;
+}
+
+__bpf_kfunc bool bpf_list_is_last(struct bpf_list_head *head, struct bpf_list_node *node)
+{
+	struct list_head *h = (struct list_head *)head;
+	struct bpf_list_node_kern *n = (struct bpf_list_node_kern *)node;
+
+	if (unlikely(!h->next) || list_empty(h))
+		return false;
+
+	if (READ_ONCE(n->owner) != head)
+		return false;
+
+	return h->prev == &n->list_head;
+}
+
+__bpf_kfunc bool bpf_list_empty(struct bpf_list_head *head)
+{
+	struct list_head *h = (struct list_head *)head;
+
+	if (unlikely(!h->next))
+		return true;
+
+	return list_empty(h);
+}
+
 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
 						  struct bpf_rb_node *node)
 {
@@ -4591,6 +4629,9 @@ BTF_ID_FLAGS(func, bpf_list_del, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_list_front, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_list_back, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_list_add_impl)
+BTF_ID_FLAGS(func, bpf_list_is_first)
+BTF_ID_FLAGS(func, bpf_list_is_last)
+BTF_ID_FLAGS(func, bpf_list_empty)
 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 6dfd4afff1cf..78652ccd2a89 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12465,6 +12465,9 @@ enum special_kfunc_type {
 	KF_bpf_list_front,
 	KF_bpf_list_back,
 	KF_bpf_list_add_impl,
+	KF_bpf_list_is_first,
+	KF_bpf_list_is_last,
+	KF_bpf_list_empty,
 	KF_bpf_cast_to_kern_ctx,
 	KF_bpf_rdonly_cast,
 	KF_bpf_rcu_read_lock,
@@ -12527,6 +12530,9 @@ BTF_ID(func, bpf_list_del)
 BTF_ID(func, bpf_list_front)
 BTF_ID(func, bpf_list_back)
 BTF_ID(func, bpf_list_add_impl)
+BTF_ID(func, bpf_list_is_first)
+BTF_ID(func, bpf_list_is_last)
+BTF_ID(func, bpf_list_empty)
 BTF_ID(func, bpf_cast_to_kern_ctx)
 BTF_ID(func, bpf_rdonly_cast)
 BTF_ID(func, bpf_rcu_read_lock)
@@ -13003,7 +13009,10 @@ static bool is_bpf_list_api_kfunc(u32 btf_id)
 	       btf_id == special_kfunc_list[KF_bpf_list_del] ||
 	       btf_id == special_kfunc_list[KF_bpf_list_front] ||
 	       btf_id == special_kfunc_list[KF_bpf_list_back] ||
-	       btf_id == special_kfunc_list[KF_bpf_list_add_impl];
+	       btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
+	       btf_id == special_kfunc_list[KF_bpf_list_is_first] ||
+	       btf_id == special_kfunc_list[KF_bpf_list_is_last] ||
+	       btf_id == special_kfunc_list[KF_bpf_list_empty];
 }
 
 static bool is_bpf_rbtree_api_kfunc(u32 btf_id)
@@ -13126,7 +13135,9 @@ static bool check_kfunc_is_graph_node_api(struct bpf_verifier_env *env,
 		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_front_impl] ||
 		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_push_back_impl] ||
 		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_del] ||
-		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_add_impl]);
+		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_add_impl] ||
+		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_is_first] ||
+		       kfunc_btf_id == special_kfunc_list[KF_bpf_list_is_last]);
 		break;
 	case BPF_RB_NODE:
 		ret = (kfunc_btf_id == special_kfunc_list[KF_bpf_rbtree_remove] ||
-- 
2.50.1 (Apple Git-155)
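One more illustrative fragment, not part of this series, using the same
illustrative declarations as the earlier sketches. With a single node
queued, bpf_list_is_first() and bpf_list_is_last() should both hold, and
the list should test empty again once the node is removed; the
pop/compare/push-back sequence described in the log above is no longer
needed. All three query kfuncs are called under the lock, which the
verifier permits since this patch adds them to is_bpf_list_api_kfunc().

SEC("tc")
long query_position(void *ctx)
{
        struct node_data *n, *m;
        struct bpf_list_node *l;
        long ret = 0;

        n = bpf_obj_new(typeof(*n));
        if (!n)
                return 1;
        m = bpf_refcount_acquire(n);
        if (!m) {
                bpf_obj_drop(n);
                return 1;
        }

        bpf_spin_lock(&lock);
        bpf_list_push_back(&head, &n->l);       /* consumes n's reference */
        if (!bpf_list_is_first(&head, &m->l) || !bpf_list_is_last(&head, &m->l))
                ret = 2;        /* a sole node must be both first and last */
        l = bpf_list_del(&head, &m->l);
        if (!bpf_list_empty(&head))
                ret = 3;        /* another program queued a node meanwhile */
        bpf_spin_unlock(&lock);

        bpf_obj_drop(m);
        if (l)
                bpf_obj_drop(container_of(l, struct node_data, l));
        return ret;
}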
From: Kaitao Cheng

Rename test_list_add_del to list_add_del_and_check and extend it to
cover the new kfuncs: assert that the list is non-empty after the
initial insert, that is_first(n) and is_last(m_1) hold after
bpf_list_add, and that the list is empty after removing both nodes.

Signed-off-by: Kaitao Cheng
---
 .../testing/selftests/bpf/bpf_experimental.h | 15 +++++++++++
 .../selftests/bpf/progs/refcounted_kptr.c    | 27 +++++++++++++++++++------
 2 files changed, 37 insertions(+), 5 deletions(-)

diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index fdcc7a054095..056b2de19b06 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -126,6 +126,21 @@ extern int bpf_list_add_impl(struct bpf_list_head *head, struct bpf_list_node *n
 /* Convenience macro to wrap over bpf_list_add_impl */
 #define bpf_list_add(head, new, prev) bpf_list_add_impl(head, new, prev, NULL, 0)
 
+/* Description
+ *	Return true if 'node' is the first node in the list with head 'head'.
+ */
+extern bool bpf_list_is_first(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
+
+/* Description
+ *	Return true if 'node' is the last node in the list with head 'head'.
+ */
+extern bool bpf_list_is_last(struct bpf_list_head *head, struct bpf_list_node *node) __ksym;
+
+/* Description
+ *	Return true if the list with head 'head' has no entries.
+ */
+extern bool bpf_list_empty(struct bpf_list_head *head) __ksym;
+
 /* Description
  *	Remove 'node' from rbtree with root 'root'
  * Returns
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
index 5a83274e1d26..1b119fe16f5a 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -367,14 +367,14 @@ long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx)	\
 INSERT_STASH_READ(true, "insert_stash_read: remove from tree");
 INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree");
 
-/* Insert one node in tree and list, remove it from tree, add a second
- * node after it in list with bpf_list_add, then remove both nodes from
- * list via bpf_list_del.
+/* Insert one node in tree and list, remove it from tree, add a second node
+ * after it with bpf_list_add, check bpf_list_is_first/is_last/empty, then
+ * remove both nodes from list via bpf_list_del.
  */
 SEC("tc")
-__description("test_list_add_del: test bpf_list_add/del")
+__description("list_add_del_and_check: test bpf_list_add/del/is_first/is_last/empty")
 __success __retval(0)
-long test_list_add_del(void *ctx)
+long list_add_del_and_check(void *ctx)
 {
 	long err = 0;
 	struct bpf_rb_node *rb;
@@ -386,6 +386,11 @@ long list_add_del_and_check(void *ctx)
 		return err;
 
 	bpf_spin_lock(&lock);
+	if (bpf_list_empty(&head)) {
+		bpf_spin_unlock(&lock);
+		return -7;
+	}
+
 	rb = bpf_rbtree_first(&root);
 	if (!rb) {
 		bpf_spin_unlock(&lock);
@@ -418,6 +423,14 @@ long list_add_del_and_check(void *ctx)
 		return -8;
 	}
 
+	if (!bpf_list_is_first(&head, &n->l) ||
+	    !bpf_list_is_last(&head, &m_1->l)) {
+		bpf_spin_unlock(&lock);
+		bpf_obj_drop(n);
+		bpf_obj_drop(m_1);
+		return -9;
+	}
+
 	l = bpf_list_del(&head, &n->l);
 	l_1 = bpf_list_del(&head, &m_1->l);
 	bpf_spin_unlock(&lock);
@@ -434,6 +447,10 @@ long list_add_del_and_check(void *ctx)
 	else
 		err = -6;
 
+	bpf_spin_lock(&lock);
+	if (!bpf_list_empty(&head))
+		err = -7;
+	bpf_spin_unlock(&lock);
 	return err;
 }
-- 
2.50.1 (Apple Git-155)