The open-coded task_vma iterator holds mmap_lock for the entire duration
of iteration, increasing contention on this highly contended lock. Switch
to per-VMA locking. Find the next VMA via an RCU-protected maple tree
walk and lock it with lock_vma_under_rcu(). lock_next_vma() is not used
because its fallback takes mmap_read_lock(), and the iterator must work
in non-sleepable contexts.

lock_vma_under_rcu() is a point lookup (mas_walk) that finds the VMA
containing a given address but cannot iterate across gaps. An
RCU-protected vma_next() walk (mas_find) first locates the next VMA's
vm_start to pass to lock_vma_under_rcu().

Between the RCU walk and the lock, the VMA may be removed, shrunk, or
write-locked. On failure, advance past it using vm_end from the RCU
walk. Because the VMA slab is SLAB_TYPESAFE_BY_RCU, vm_end may be stale;
fall back to PAGE_SIZE advancement when it does not make forward
progress. Concurrent VMA insertions at addresses already passed by the
iterator are not detected.

CONFIG_PER_VMA_LOCK is required; return -EOPNOTSUPP without it.

Signed-off-by: Puranjay Mohan
---
 kernel/bpf/task_iter.c | 98 +++++++++++++++++++++++++++++++++---------
 1 file changed, 77 insertions(+), 21 deletions(-)

diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index 718f0f9b6396..ddaf1cf0ecae 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include "mmap_unlock_work.h"
@@ -798,8 +799,8 @@ const struct bpf_func_proto bpf_find_vma_proto = {
 struct bpf_iter_task_vma_kern_data {
        struct task_struct *task;
        struct mm_struct *mm;
-       struct mmap_unlock_irq_work *work;
-       struct vma_iterator vmi;
+       struct vm_area_struct *locked_vma;
+       u64 next_addr;
 };
 
 struct bpf_iter_task_vma {
@@ -820,15 +821,19 @@ __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
                                      struct task_struct *task, u64 addr)
 {
        struct bpf_iter_task_vma_kern *kit = (void *)it;
-       bool irq_work_busy = false;
        int err;
 
        BUILD_BUG_ON(sizeof(struct bpf_iter_task_vma_kern) != sizeof(struct bpf_iter_task_vma));
        BUILD_BUG_ON(__alignof__(struct bpf_iter_task_vma_kern) != __alignof__(struct bpf_iter_task_vma));
 
+       if (!IS_ENABLED(CONFIG_PER_VMA_LOCK)) {
+               kit->data = NULL;
+               return -EOPNOTSUPP;
+       }
+
        /*
         * Reject irqs-disabled contexts including NMI. Operations used
-        * by _next() and _destroy() (mmap_read_unlock, mmput_async)
+        * by _next() and _destroy() (vma_end_read, mmput_async)
         * can take spinlocks with IRQs disabled (pi_lock, pool->lock).
         * Running from NMI or from a tracepoint that fires with those
         * locks held could deadlock.
@@ -853,28 +858,15 @@ __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
                goto err_cleanup_iter;
        }
 
-       /* kit->data->work == NULL is valid after bpf_mmap_unlock_get_irq_work */
-       irq_work_busy = bpf_mmap_unlock_get_irq_work(&kit->data->work);
-       if (irq_work_busy) {
-               err = -EBUSY;
-               goto err_cleanup_iter;
-       }
-
        if (!mmget_not_zero(kit->data->mm)) {
                err = -ENOENT;
                goto err_cleanup_iter;
        }
 
-       if (!mmap_read_trylock(kit->data->mm)) {
-               err = -EBUSY;
-               goto err_cleanup_mmget;
-       }
-
-       vma_iter_init(&kit->data->vmi, kit->data->mm, addr);
+       kit->data->locked_vma = NULL;
+       kit->data->next_addr = addr;
 
        return 0;
 
-err_cleanup_mmget:
-       mmput_async(kit->data->mm);
 err_cleanup_iter:
        put_task_struct(kit->data->task);
        bpf_mem_free(&bpf_global_ma, kit->data);
@@ -883,13 +875,76 @@ __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
        return err;
 }
 
+/*
+ * Find and lock the next VMA at or after data->next_addr.
+ *
+ * lock_vma_under_rcu() is a point lookup (mas_walk): it finds the VMA
+ * containing a given address but cannot iterate. An RCU-protected
+ * maple tree walk with vma_next() (mas_find) is needed first to locate
+ * the next VMA's vm_start across any gap.
+ *
+ * Between the RCU walk and the lock, the VMA may be removed, shrunk,
+ * or write-locked. On failure, advance past it using vm_end from the
+ * RCU walk. SLAB_TYPESAFE_BY_RCU can make vm_end stale, so fall back
+ * to PAGE_SIZE advancement to guarantee forward progress.
+ */
+static struct vm_area_struct *
+bpf_iter_task_vma_find_next(struct bpf_iter_task_vma_kern_data *data)
+{
+       struct vm_area_struct *vma;
+       struct vma_iterator vmi;
+       unsigned long start, end;
+
+retry:
+       rcu_read_lock();
+       vma_iter_init(&vmi, data->mm, data->next_addr);
+       vma = vma_next(&vmi);
+       if (!vma) {
+               rcu_read_unlock();
+               return NULL;
+       }
+       start = vma->vm_start;
+       end = vma->vm_end;
+       rcu_read_unlock();
+
+       vma = lock_vma_under_rcu(data->mm, start);
+       if (!vma) {
+               if (end > data->next_addr)
+                       data->next_addr = end;
+               else
+                       data->next_addr += PAGE_SIZE;
+               goto retry;
+       }
+
+       if (unlikely(data->next_addr >= vma->vm_end)) {
+               data->next_addr += PAGE_SIZE;
+               vma_end_read(vma);
+               goto retry;
+       }
+
+       return vma;
+}
+
 __bpf_kfunc struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it)
 {
        struct bpf_iter_task_vma_kern *kit = (void *)it;
+       struct vm_area_struct *vma;
 
        if (!kit->data) /* bpf_iter_task_vma_new failed */
                return NULL;
-       return vma_next(&kit->data->vmi);
+
+       if (kit->data->locked_vma) {
+               vma_end_read(kit->data->locked_vma);
+               kit->data->locked_vma = NULL;
+       }
+
+       vma = bpf_iter_task_vma_find_next(kit->data);
+       if (!vma)
+               return NULL;
+
+       kit->data->locked_vma = vma;
+       kit->data->next_addr = vma->vm_end;
+       return vma;
 }
 
 __bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it)
@@ -897,7 +952,8 @@ __bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it)
        struct bpf_iter_task_vma_kern *kit = (void *)it;
 
        if (kit->data) {
-               bpf_mmap_unlock_mm(kit->data->work, kit->data->mm);
+               if (kit->data->locked_vma)
+                       vma_end_read(kit->data->locked_vma);
                put_task_struct(kit->data->task);
                mmput_async(kit->data->mm);
                bpf_mem_free(&bpf_global_ma, kit->data);
-- 
2.52.0
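
For context, a minimal sketch of how a BPF program consumes this open-coded
iterator. The kfunc signatures match the ones patched above; the attach point,
program body, and the __ksym declarations (normally pulled in via the
selftests' bpf_experimental.h) are illustrative assumptions, not part of the
patch:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Mirrors the extern declarations used by the BPF selftests. */
extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
                                 struct task_struct *task, u64 addr) __ksym;
extern struct vm_area_struct *
bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym;
extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym;

char LICENSE[] SEC("license") = "GPL";

SEC("raw_tp/sched_switch")      /* illustrative, non-sleepable attach point */
int dump_vmas(void *ctx)
{
        struct task_struct *task = bpf_get_current_task_btf();
        struct bpf_iter_task_vma vma_it;
        struct vm_area_struct *vma;

        /* Fails with -EOPNOTSUPP when CONFIG_PER_VMA_LOCK is off; _next()
         * then returns NULL and _destroy() remains safe to call. */
        bpf_iter_task_vma_new(&vma_it, task, 0);
        while ((vma = bpf_iter_task_vma_next(&vma_it)))
                bpf_printk("vma %lx-%lx", vma->vm_start, vma->vm_end);
        bpf_iter_task_vma_destroy(&vma_it);
        return 0;
}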
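
Separately, a small userspace model of the forward-progress rule applied when
lock_vma_under_rcu() fails. This is a sketch, not kernel code: advance() and
the PAGE_SIZE constant are local stand-ins. It shows why vm_end from the
unlocked walk is trusted only when it moves the cursor forward: with
SLAB_TYPESAFE_BY_RCU the object may have been freed and reused, so the vm_end
read under RCU can land at or behind next_addr and would otherwise loop
forever:

/* Userspace model of the retry advancement; not kernel API. */
#include <assert.h>

#define PAGE_SIZE 4096UL

/* Pick the next search address after a failed lock attempt. @end was
 * read under RCU from a VMA that may since have been freed and reused
 * (SLAB_TYPESAFE_BY_RCU), so it can be stale. */
static unsigned long advance(unsigned long next_addr, unsigned long end)
{
        if (end > next_addr)
                return end;                   /* fresh vm_end: skip the VMA */
        return next_addr + PAGE_SIZE;         /* stale vm_end: force progress */
}

int main(void)
{
        /* plausible vm_end moves the cursor past the whole VMA */
        assert(advance(0x1000, 0x8000) == 0x8000);
        /* reused object reported vm_end at/behind the cursor: still advance */
        assert(advance(0x8000, 0x3000) == 0x8000 + PAGE_SIZE);
        return 0;
}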