The per-process BPF-THP mode is unsuitable for managing shared resources
such as shmem THP and file-backed THP. This aligns with the known cgroup
limitations for similar scenarios [0]. Introduce a global BPF-THP mode to
address this gap.

When a global instance is registered:
- All existing per-process instances are disabled
- New per-process registrations are blocked (-EBUSY)
- Existing per-process instances remain registered (no forced
  unregistration)

The global mode takes precedence over per-process instances. Updates are
scope-isolated: a global instance can only be updated by another global
instance, and a per-process instance only by another per-process instance.

Link: https://lore.kernel.org/linux-mm/YwNold0GMOappUxc@slm.duckdns.org/ [0]
Signed-off-by: Yafang Shao
---
 mm/huge_memory_bpf.c | 111 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 109 insertions(+), 2 deletions(-)

diff --git a/mm/huge_memory_bpf.c b/mm/huge_memory_bpf.c
index f69c5851ea61..f8383c2a299f 100644
--- a/mm/huge_memory_bpf.c
+++ b/mm/huge_memory_bpf.c
@@ -35,6 +35,30 @@ struct bpf_thp_ops {
 };
 
 static DEFINE_SPINLOCK(thp_ops_lock);
+static struct bpf_thp_ops __rcu *bpf_thp_global; /* global mode */
+
+static unsigned long
+bpf_hook_thp_get_orders_global(struct vm_area_struct *vma,
+                               enum tva_type type,
+                               unsigned long orders)
+{
+        struct bpf_thp_ops *bpf_thp;
+        int bpf_order;
+
+        rcu_read_lock();
+        bpf_thp = rcu_dereference(bpf_thp_global);
+        if (!bpf_thp || !bpf_thp->thp_get_order)
+                goto out;
+
+        bpf_order = bpf_thp->thp_get_order(vma, type, orders);
+        if (bpf_order < 0)
+                goto out;
+        orders &= BIT(bpf_order);
+
+out:
+        rcu_read_unlock();
+        return orders;
+}
 
 unsigned long bpf_hook_thp_get_orders(struct vm_area_struct *vma,
                                       enum tva_type type,
@@ -47,6 +71,10 @@ unsigned long bpf_hook_thp_get_orders(struct vm_area_struct *vma,
         if (!mm)
                 return orders;
 
+        /* Global BPF-THP takes precedence over per-process BPF-THP. */
+        if (rcu_access_pointer(bpf_thp_global))
+                return bpf_hook_thp_get_orders_global(vma, type, orders);
+
         rcu_read_lock();
         bpf_thp = rcu_dereference(mm->bpf_mm.bpf_thp);
         if (!bpf_thp || !bpf_thp->thp_get_order)
@@ -181,6 +209,23 @@ static int bpf_thp_init_member(const struct btf_type *t,
         return 0;
 }
 
+static int bpf_thp_reg_global(void *kdata, struct bpf_link *link)
+{
+        struct bpf_thp_ops *ops = kdata;
+
+        /* Protect the global pointer bpf_thp_global from concurrent writes. */
+        spin_lock(&thp_ops_lock);
+        /* Only one global instance is allowed. */
+        if (rcu_access_pointer(bpf_thp_global)) {
+                spin_unlock(&thp_ops_lock);
+                return -EBUSY;
+        }
+
+        rcu_assign_pointer(bpf_thp_global, ops);
+        spin_unlock(&thp_ops_lock);
+        return 0;
+}
+
 static int bpf_thp_reg(void *kdata, struct bpf_link *link)
 {
         struct bpf_thp_ops *bpf_thp = kdata;
@@ -191,6 +236,11 @@ static int bpf_thp_reg(void *kdata, struct bpf_link *link)
         pid_t pid;
 
         pid = bpf_thp->pid;
+
+        /* Fall back to global mode if pid is not set. */
+        if (!pid)
+                return bpf_thp_reg_global(kdata, link);
+
         p = find_get_task_by_vpid(pid);
         if (!p)
                 return -ESRCH;
@@ -209,8 +259,10 @@ static int bpf_thp_reg(void *kdata, struct bpf_link *link)
          * might register this task simultaneously.
          */
         spin_lock(&thp_ops_lock);
-        /* Each process is exclusively managed by a single BPF-THP. */
-        if (rcu_access_pointer(mm->bpf_mm.bpf_thp)) {
+        /* Each process is exclusively managed by a single BPF-THP.
+         * Global mode disables per-process instances.
+         */
+        if (rcu_access_pointer(mm->bpf_mm.bpf_thp) || rcu_access_pointer(bpf_thp_global)) {
                 err = -EBUSY;
                 goto out;
         }
@@ -226,12 +278,33 @@ static int bpf_thp_reg(void *kdata, struct bpf_link *link)
         return err;
 }
 
+static void bpf_thp_unreg_global(void *kdata, struct bpf_link *link)
+{
+        struct bpf_thp_ops *bpf_thp;
+
+        spin_lock(&thp_ops_lock);
+        if (!rcu_access_pointer(bpf_thp_global)) {
+                spin_unlock(&thp_ops_lock);
+                return;
+        }
+
+        bpf_thp = rcu_replace_pointer(bpf_thp_global, NULL,
+                                      lockdep_is_held(&thp_ops_lock));
+        WARN_ON_ONCE(!bpf_thp);
+        spin_unlock(&thp_ops_lock);
+
+        synchronize_rcu();
+}
+
 static void bpf_thp_unreg(void *kdata, struct bpf_link *link)
 {
         struct bpf_thp_ops *bpf_thp = kdata;
         struct bpf_mm_ops *bpf_mm;
         struct list_head *pos, *n;
 
+        if (!bpf_thp->pid)
+                return bpf_thp_unreg_global(kdata, link);
+
         spin_lock(&thp_ops_lock);
         list_for_each_safe(pos, n, &bpf_thp->mm_list) {
                 bpf_mm = list_entry(pos, struct bpf_mm_ops, bpf_thp_list);
@@ -244,6 +317,31 @@ static void bpf_thp_unreg(void *kdata, struct bpf_link *link)
         synchronize_rcu();
 }
 
+static int bpf_thp_update_global(void *kdata, void *old_kdata, struct bpf_link *link)
+{
+        struct bpf_thp_ops *old_bpf_thp = old_kdata;
+        struct bpf_thp_ops *bpf_thp = kdata;
+        struct bpf_thp_ops *old_global;
+
+        if (!old_bpf_thp || !bpf_thp)
+                return -EINVAL;
+
+        spin_lock(&thp_ops_lock);
+        /* The global BPF-THP instance has already been removed. */
+        if (!rcu_access_pointer(bpf_thp_global)) {
+                spin_unlock(&thp_ops_lock);
+                return -ENOENT;
+        }
+
+        old_global = rcu_replace_pointer(bpf_thp_global, bpf_thp,
+                                         lockdep_is_held(&thp_ops_lock));
+        WARN_ON_ONCE(!old_global);
+        spin_unlock(&thp_ops_lock);
+
+        synchronize_rcu();
+        return 0;
+}
+
 static int bpf_thp_update(void *kdata, void *old_kdata, struct bpf_link *link)
 {
         struct bpf_thp_ops *old_bpf_thp = old_kdata;
@@ -251,6 +349,15 @@ static int bpf_thp_update(void *kdata, void *old_kdata, struct bpf_link *link)
         struct bpf_mm_ops *bpf_mm;
         struct list_head *pos, *n;
 
+        /* Updates are confined to instances of the same scope:
+         * global to global, process-local to process-local.
+         */
+        if (!!old_bpf_thp->pid != !!bpf_thp->pid)
+                return -EINVAL;
+
+        if (!old_bpf_thp->pid)
+                return bpf_thp_update_global(kdata, old_kdata, link);
+
         INIT_LIST_HEAD(&bpf_thp->mm_list);
 
         /* Could be optimized to a per-instance lock if this lock becomes a bottleneck. */
-- 
2.47.3
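
For reviewers, here is a minimal, untested sketch of what a global BPF-THP
policy could look like on the BPF side. It assumes struct bpf_thp_ops and
enum tva_type are visible through vmlinux.h and that the struct_ops map is
attached link-based; the map name thp_global_policy and the order-9 example
policy are purely illustrative and not part of this series.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only: registers a global BPF-THP instance by
 * leaving .pid at 0, which bpf_thp_reg() above routes to
 * bpf_thp_reg_global().
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("struct_ops/thp_get_order")
int BPF_PROG(thp_get_order, struct vm_area_struct *vma,
             enum tva_type type, unsigned long orders)
{
        /* Example policy: if PMD-sized THP (order 9 on x86-64) is among the
         * offered orders, pick it. A negative return leaves @orders
         * untouched in bpf_hook_thp_get_orders_global(), so the kernel
         * default applies.
         */
        if (orders & (1UL << 9))
                return 9;
        return -1;
}

SEC(".struct_ops.link")
struct bpf_thp_ops thp_global_policy = {
        .thp_get_order  = (void *)thp_get_order,
        /* .pid left at 0 selects the global mode introduced here. */
};

Attaching this from user space (e.g. via bpf_map__attach_struct_ops()) would
take the pid == 0 path in bpf_thp_reg(); a second global attach should then
fail with -EBUSY, and a link update goes through bpf_thp_update_global().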