The range tree currently uses bpf_mem_alloc(), which is safe to call from
any context because it serves allocations from a pre-allocated pool of
memory. Replace bpf_mem_alloc() with kmalloc_nolock(), which is equally
safe to call from any context but more scalable. Also remove the
migrate_disable()/migrate_enable() pairs: they were only needed because
bpf_mem_alloc() performs per-CPU operations; kmalloc_nolock() does not.

Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
---
 kernel/bpf/range_tree.c | 22 +++++++---------------
 1 file changed, 7 insertions(+), 15 deletions(-)

diff --git a/kernel/bpf/range_tree.c b/kernel/bpf/range_tree.c
index 37b80a23ae1a..2f28886f3ff7 100644
--- a/kernel/bpf/range_tree.c
+++ b/kernel/bpf/range_tree.c
@@ -2,7 +2,6 @@
 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
 #include <linux/interval_tree_generic.h>
 #include <linux/slab.h>
-#include <linux/bpf_mem_alloc.h>
 #include <linux/bpf.h>
 #include "range_tree.h"
 
@@ -21,7 +20,7 @@
  * in commit 6772fcc8890a ("xfs: convert xbitmap to interval tree").
  *
  * The implementation relies on external lock to protect rbtree-s.
- * The alloc/free of range_node-s is done via bpf_mem_alloc.
+ * The alloc/free of range_node-s is done via kmalloc_nolock().
  *
  * bpf arena is using range_tree to represent unallocated slots.
  * At init time:
@@ -150,9 +149,8 @@ int range_tree_clear(struct range_tree *rt, u32 start, u32 len)
 			range_it_insert(rn, rt);
 
 			/* Add a range */
-			migrate_disable();
-			new_rn = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
-			migrate_enable();
+			new_rn = kmalloc_nolock(sizeof(struct range_node), __GFP_ACCOUNT,
+						NUMA_NO_NODE);
 			if (!new_rn)
 				return -ENOMEM;
 			new_rn->rn_start = last + 1;
@@ -172,9 +170,7 @@ int range_tree_clear(struct range_tree *rt, u32 start, u32 len)
 		} else {
 			/* in the middle of the clearing range */
 			range_it_remove(rn, rt);
-			migrate_disable();
-			bpf_mem_free(&bpf_global_ma, rn);
-			migrate_enable();
+			kfree_nolock(rn);
 		}
 	}
 	return 0;
@@ -227,9 +223,7 @@ int range_tree_set(struct range_tree *rt, u32 start, u32 len)
 		range_it_remove(right, rt);
 		left->rn_last = right->rn_last;
 		range_it_insert(left, rt);
-		migrate_disable();
-		bpf_mem_free(&bpf_global_ma, right);
-		migrate_enable();
+		kfree_nolock(right);
 	} else if (left) {
 		/* Combine with the left range */
 		range_it_remove(left, rt);
@@ -241,9 +235,7 @@ int range_tree_set(struct range_tree *rt, u32 start, u32 len)
 		right->rn_start = start;
 		range_it_insert(right, rt);
 	} else {
-		migrate_disable();
-		left = bpf_mem_alloc(&bpf_global_ma, sizeof(struct range_node));
-		migrate_enable();
+		left = kmalloc_nolock(sizeof(struct range_node), __GFP_ACCOUNT, NUMA_NO_NODE);
 		if (!left)
 			return -ENOMEM;
 		left->rn_start = start;
@@ -259,7 +251,7 @@ void range_tree_destroy(struct range_tree *rt)
 
 	while ((rn = range_it_iter_first(rt, 0, -1U))) {
 		range_it_remove(rn, rt);
-		bpf_mem_free(&bpf_global_ma, rn);
+		kfree_nolock(rn);
 	}
 }
 
-- 
2.47.3
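
For reference, a minimal sketch of the allocation pattern this patch adopts.
kmalloc_nolock(), kfree_nolock(), __GFP_ACCOUNT, and NUMA_NO_NODE are the
real kernel APIs used in the diff above; struct demo_node, demo_alloc(), and
demo_free() are hypothetical names for illustration only:

#include <linux/slab.h>		/* kmalloc_nolock(), kfree_nolock() */
#include <linux/numa.h>		/* NUMA_NO_NODE */
#include <linux/types.h>	/* u32 */

/* Hypothetical payload type, standing in for struct range_node. */
struct demo_node {
	u32 start;
	u32 last;
};

static struct demo_node *demo_alloc(void)
{
	/*
	 * Safe to call from any context; no migrate_disable()/
	 * migrate_enable() bracketing is needed, unlike bpf_mem_alloc(),
	 * which performs per-CPU operations. Returns NULL when memory
	 * cannot be obtained without blocking, so callers must handle
	 * failure.
	 */
	return kmalloc_nolock(sizeof(struct demo_node), __GFP_ACCOUNT,
			      NUMA_NO_NODE);
}

static void demo_free(struct demo_node *node)
{
	/* Memory from kmalloc_nolock() must be freed with kfree_nolock(). */
	kfree_nolock(node);
}

Passing __GFP_ACCOUNT keeps these allocations charged to the caller's memory
cgroup, matching the memcg accounting that bpf_mem_alloc() applied to its
pre-allocated pool.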