Currently, unhash_nsid() scans the entire system for each netns being
killed, leading to O(M_batch * N_system * log(N_ids)) complexity.
Optimize this to O(N_system * N_ids) by batching the unhash operations.

Move unhash_nsid() out of the per-netns loop in cleanup_net() and
perform a single-pass traversal over the surviving namespaces using
idr_for_each(). Dying peers are identified by an 'is_dying' flag,
which is set under the net_rwsem write lock after the netns has been
removed from the global list. This batches the unhashing work and
eliminates the O(M_batch) multiplier.

Clean up the now-redundant nsid_lock usage and simplify the
destruction loop, since unhashing is centralized.

Signed-off-by: Qiliang Yuan
---
v4:
 - Move unhash_nsid() out of the batch loop to reduce complexity
   from O(M*N) to O(N).
 - Use idr_for_each() for efficient, single-pass IDR traversal.
 - Mark 'is_dying' under net_rwsem to safely identify and batch
   unhashing.
 - Simplify destruction loop by removing redundant locking and
   per-netns unhash logic.
v3:
 - Update target tree to net-next.
 - Post as a new thread instead of a reply.
v2:
 - Move 'is_dying' setting to __put_net() to eliminate the
   O(M_batch) loop.
 - Remove redundant initializations in preinit_net().
v1:
 - Initial implementation of batch unhash_nsid().

 include/net/net_namespace.h |  1 +
 net/core/net_namespace.c    | 41 +++++++++++++++++++++++--------------
 2 files changed, 27 insertions(+), 15 deletions(-)
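For reviewers: the sketch below is editorial and not part of the patch.
It is a self-contained userspace model of how many times a survivor's
netns_ids IDR gets walked per cleanup batch before and after this
change; the batch sizes are invented for illustration and no kernel
APIs are used.

#include <stdio.h>

int main(void)
{
	const unsigned long m_batch = 128;	/* netns dying in one batch */
	const unsigned long n_system = 1024;	/* surviving netns */

	/* Old scheme: unhash_nsid(net, last) ran once per dying
	 * netns and walked every survivor each time.
	 */
	unsigned long old_walks = m_batch * n_system;

	/* New scheme: unhash_nsid(last) runs once per batch; each
	 * survivor's IDR is walked a single time by idr_for_each(),
	 * and the callback filters dying peers via ->is_dying.
	 */
	unsigned long new_walks = n_system;

	printf("IDR walks per batch: old=%lu new=%lu\n",
	       old_walks, new_walks);
	return 0;
}
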
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index cb664f6e3558..bd1acc6056ac 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -69,6 +69,7 @@ struct net {
 
 	unsigned int		dev_base_seq;	/* protected by rtnl_mutex */
 	u32			ifindex;
+	bool			is_dying;
 
 	spinlock_t		nsid_lock;
 	atomic_t		fnhe_genid;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index a6e6a964a287..9ea05fb9df5b 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -624,9 +624,29 @@ void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid)
 }
 EXPORT_SYMBOL_GPL(net_ns_get_ownership);
 
-static void unhash_nsid(struct net *net, struct net *last)
+static int unhash_nsid_callback(int id, void *p, void *data)
+{
+	struct net *tmp = data;
+	struct net *peer = p;
+
+	if (peer->is_dying) {
+		spin_lock(&tmp->nsid_lock);
+		if (idr_find(&tmp->netns_ids, id) == peer)
+			idr_remove(&tmp->netns_ids, id);
+		else
+			peer = NULL;
+		spin_unlock(&tmp->nsid_lock);
+
+		if (peer)
+			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL, GFP_KERNEL);
+	}
+	return 0;
+}
+
+static void unhash_nsid(struct net *last)
 {
 	struct net *tmp;
+
 	/* This function is only called from cleanup_net() work,
 	 * and this work is the only process, that may delete
 	 * a net from net_namespace_list. So, when the below
@@ -634,22 +654,10 @@ static void unhash_nsid(struct net *net, struct net *last)
 	 * use for_each_net_rcu() or net_rwsem.
 	 */
 	for_each_net(tmp) {
-		int id;
-
-		spin_lock(&tmp->nsid_lock);
-		id = __peernet2id(tmp, net);
-		if (id >= 0)
-			idr_remove(&tmp->netns_ids, id);
-		spin_unlock(&tmp->nsid_lock);
-		if (id >= 0)
-			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
-					  GFP_KERNEL);
+		idr_for_each(&tmp->netns_ids, unhash_nsid_callback, tmp);
 		if (tmp == last)
 			break;
 	}
-	spin_lock(&net->nsid_lock);
-	idr_destroy(&net->netns_ids);
-	spin_unlock(&net->nsid_lock);
 }
 
 static LLIST_HEAD(cleanup_list);
@@ -674,6 +682,7 @@ static void cleanup_net(struct work_struct *work)
 	llist_for_each_entry(net, net_kill_list, cleanup_list) {
 		ns_tree_remove(net);
 		list_del_rcu(&net->list);
+		net->is_dying = true;
 	}
 	/* Cache last net. After we unlock rtnl, no one new net
 	 * added to net_namespace_list can assign nsid pointer
@@ -688,8 +697,10 @@ static void cleanup_net(struct work_struct *work)
 		last = list_last_entry(&net_namespace_list, struct net, list);
 	up_write(&net_rwsem);
 
+	unhash_nsid(last);
+
 	llist_for_each_entry(net, net_kill_list, cleanup_list) {
-		unhash_nsid(net, last);
+		idr_destroy(&net->netns_ids);
 		list_add_tail(&net->exit_list, &net_exit_list);
 	}
 
-- 
2.51.0
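
Postscript (editorial, not part of the patch): idr_for_each() above
runs without tmp->nsid_lock held, so unhash_nsid_callback() re-checks
the slot under the lock before removing it. The snippet below is a
hypothetical, self-contained userspace rendering of that
revalidate-under-lock idiom; struct slot_table and remove_if_current()
are invented names, not kernel APIs.

#include <pthread.h>
#include <stddef.h>

/* Hypothetical stand-in for an IDR: a fixed array of slots
 * guarded by a lock. Invented for illustration only.
 */
struct slot_table {
	pthread_mutex_t lock;
	void *slots[64];
};

/* Mirrors the shape of unhash_nsid_callback(): "seen" was observed
 * without the lock, so confirm the slot still holds it before
 * removing. Returns the removed object, or NULL if the slot changed
 * underneath us and we must not act on it.
 */
void *remove_if_current(struct slot_table *t, int id, void *seen)
{
	void *removed = NULL;

	pthread_mutex_lock(&t->lock);
	if (t->slots[id] == seen) {	/* re-check under the lock */
		removed = t->slots[id];
		t->slots[id] = NULL;
	}
	pthread_mutex_unlock(&t->lock);

	return removed;	/* caller notifies only when non-NULL */
}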