From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU
read-side critical section and the associated grace period. That means
that spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock(). Simplify the code and
remove the inner rcu_read_lock() invocation.

Cc: "Rafael J. Wysocki"
Cc: Tony Luck
Cc: Hanjun Guo
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 drivers/acpi/apei/ghes.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index a0d54993edb3..97ee19f2cae0 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1207,12 +1207,10 @@ static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
 	int ret = NOTIFY_DONE;
 
 	spin_lock_irqsave(&ghes_notify_lock_irq, flags);
-	rcu_read_lock();
 	list_for_each_entry_rcu(ghes, &ghes_hed, list) {
 		if (!ghes_proc(ghes))
 			ret = NOTIFY_OK;
 	}
-	rcu_read_unlock();
 	spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
 
 	return ret;
-- 
2.34.1

From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU
read-side critical section and the associated grace period. That means
that spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock(). Simplify the code and
remove the inner rcu_read_lock() invocation.

Cc: Jani Nikula
Cc: Joonas Lahtinen
Cc: Tvrtko Ursulin
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 drivers/gpu/drm/i915/gt/intel_ring_submission.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 2a6d79abf25b..bf73166a1337 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -392,14 +392,12 @@ static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	rq = NULL;
 
 	spin_lock_irqsave(&engine->sched_engine->lock, flags);
-	rcu_read_lock();
 	list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
 		if (!__i915_request_is_complete(pos)) {
 			rq = pos;
 			break;
 		}
 	}
-	rcu_read_unlock();
 
 	/*
	 * The guilty request will get skipped on a hung engine.
-- 
2.34.1

From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU
read-side critical section and the associated grace period. That means
that spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock(). Simplify the code and
remove the inner rcu_read_lock() invocation.
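
Unlike the other conversions in this series, this function also
dereferences RCU-protected pointers. With the explicit read section
gone, a bare rcu_dereference() would trigger an RCU-lockdep splat
under CONFIG_PROVE_RCU even though the access is safe, so the
dereferences are converted to rcu_dereference_check() with the held
spinlock as the justification. The resulting pattern, as a minimal
sketch (simplified from the hunk below):

	spin_lock(&mm->ioctx_lock);
	/* Updates to mm->ioctx_table are serialized by ioctx_lock,
	 * so holding it is a valid substitute for rcu_read_lock().
	 */
	table = rcu_dereference_check(mm->ioctx_table,
				      lockdep_is_held(&mm->ioctx_lock));
	spin_unlock(&mm->ioctx_lock);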

Cc: Benjamin LaHaise
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 fs/aio.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index 7fc7b6221312..e3f9a5a391b5 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -359,15 +359,14 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
 	int i, res = -EINVAL;
 
 	spin_lock(&mm->ioctx_lock);
-	rcu_read_lock();
-	table = rcu_dereference(mm->ioctx_table);
+	table = rcu_dereference_check(mm->ioctx_table, lockdep_is_held(&mm->ioctx_lock));
 	if (!table)
 		goto out_unlock;
 
 	for (i = 0; i < table->nr; i++) {
 		struct kioctx *ctx;
 
-		ctx = rcu_dereference(table->table[i]);
+		ctx = rcu_dereference_check(table->table[i], lockdep_is_held(&mm->ioctx_lock));
 		if (ctx && ctx->aio_ring_file == file) {
 			if (!atomic_read(&ctx->dead)) {
 				ctx->user_id = ctx->mmap_base = vma->vm_start;
@@ -378,7 +377,6 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
 	}
 
 out_unlock:
-	rcu_read_unlock();
 	spin_unlock(&mm->ioctx_lock);
 	return res;
 }
-- 
2.34.1

From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU
read-side critical section and the associated grace period. That means
that spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock(). Simplify the code and
remove the inner rcu_read_lock() invocation.

Cc: Trond Myklebust
Cc: Anna Schumaker
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 fs/nfs/callback_proc.c |  2 --
 fs/nfs/nfs4state.c     |  2 --
 fs/nfs/pnfs.c          | 12 +-----------
 fs/nfs/pnfs_dev.c      |  4 ----
 4 files changed, 1 insertion(+), 19 deletions(-)

diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 8397c43358bd..16144db39335 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -721,7 +721,6 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
 		return cpu_to_be32(NFS4ERR_DELAY);
 
 	spin_lock(&cps->clp->cl_lock);
-	rcu_read_lock();
 	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
 				client_link) {
 		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
@@ -736,7 +735,6 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
 		}
 	}
 out:
-	rcu_read_unlock();
 	if (!found) {
 		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
 		nfs4_copy_cb_args(copy, args);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 7612e977e80b..598229fc07ed 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -241,13 +241,11 @@ const struct cred *nfs4_get_renew_cred(struct nfs_client *clp)
 		goto out;
 
 	spin_lock(&clp->cl_lock);
-	rcu_read_lock();
 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
 		cred = nfs4_get_renew_cred_server_locked(server);
 		if (cred != NULL)
 			break;
 	}
-	rcu_read_unlock();
 	spin_unlock(&clp->cl_lock);
 
 out:
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index a3135b5af7ee..650f86fa144a 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -838,13 +838,12 @@ pnfs_layout_add_bulk_destroy_list(struct inode *inode,
 	return ret;
 }
 
-/* Caller must hold rcu_read_lock and clp->cl_lock */
+/* Caller must hold clp->cl_lock (implies rcu_read_lock) */
 static int
 pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
 		struct nfs_server *server,
 		struct list_head *layout_list)
 	__must_hold(&clp->cl_lock)
-	__must_hold(RCU)
 {
 	struct pnfs_layout_hdr *lo, *next;
 	struct inode *inode;
@@ -862,16 +861,13 @@ pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
 			if (pnfs_layout_add_bulk_destroy_list(inode,
 							      layout_list))
 				continue;
-			rcu_read_unlock();
 			spin_unlock(&clp->cl_lock);
 			iput(inode);
 		} else {
-			rcu_read_unlock();
 			spin_unlock(&clp->cl_lock);
 		}
 		nfs_sb_deactive(server->super);
 		spin_lock(&clp->cl_lock);
-		rcu_read_lock();
 		return -EAGAIN;
 	}
 	return 0;
@@ -922,7 +918,6 @@ int pnfs_layout_destroy_byfsid(struct nfs_client *clp, struct nfs_fsid *fsid,
 	LIST_HEAD(layout_list);
 
 	spin_lock(&clp->cl_lock);
-	rcu_read_lock();
 restart:
 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
 		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
@@ -932,7 +927,6 @@ int pnfs_layout_destroy_byfsid(struct nfs_client *clp, struct nfs_fsid *fsid,
 					   &layout_list) != 0)
 			goto restart;
 	}
-	rcu_read_unlock();
 	spin_unlock(&clp->cl_lock);
 
 	return pnfs_layout_free_bulk_destroy_list(&layout_list, mode);
@@ -944,14 +938,12 @@ static void pnfs_layout_build_destroy_list_byclient(struct nfs_client *clp,
 	struct nfs_server *server;
 
 	spin_lock(&clp->cl_lock);
-	rcu_read_lock();
restart:
 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
 		if (pnfs_layout_bulk_destroy_byserver_locked(clp, server,
 							     list) != 0)
 			goto restart;
 	}
-	rcu_read_unlock();
 	spin_unlock(&clp->cl_lock);
 }
 
@@ -990,7 +982,6 @@ static void pnfs_layout_build_recover_list_byclient(struct nfs_client *clp,
 	struct nfs_server *server;
 
 	spin_lock(&clp->cl_lock);
-	rcu_read_lock();
restart:
 	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
 		if (!(server->caps & NFS_CAP_REBOOT_LAYOUTRETURN))
@@ -999,7 +990,6 @@ static void pnfs_layout_build_recover_list_byclient(struct nfs_client *clp,
 					list) != 0)
 			goto restart;
 	}
-	rcu_read_unlock();
 	spin_unlock(&clp->cl_lock);
 }
 
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index bf0f2d67e96c..d19752ec1a95 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -231,9 +231,7 @@ nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
 	struct nfs4_deviceid_node *d;
 
 	spin_lock(&nfs4_deviceid_lock);
-	rcu_read_lock();
 	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
-	rcu_read_unlock();
 	if (!d) {
 		spin_unlock(&nfs4_deviceid_lock);
 		return;
@@ -331,14 +329,12 @@ _deviceid_purge_client(const struct nfs_client *clp, long hash)
 	HLIST_HEAD(tmp);
 
 	spin_lock(&nfs4_deviceid_lock);
-	rcu_read_lock();
 	hlist_for_each_entry_rcu(d, &nfs4_deviceid_cache[hash], node)
 		if (d->nfs_client == clp && atomic_read(&d->ref)) {
 			hlist_del_init_rcu(&d->node);
 			hlist_add_head(&d->tmpnode, &tmp);
 			clear_bit(NFS_DEVICEID_NOCACHE, &d->flags);
 		}
-	rcu_read_unlock();
 	spin_unlock(&nfs4_deviceid_lock);
 
 	if (hlist_empty(&tmp))
-- 
2.34.1

From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU
read-side critical section and the associated grace period. That means
that spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock(). Simplify the code and
remove the inner rcu_read_lock() invocation.
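
For illustration, every hunk in this series reduces the same shape of
code; a minimal sketch with made-up names, not taken from any one
file:

	/* Before: nested, redundant RCU read section */
	spin_lock(&lock);
	rcu_read_lock();
	list_for_each_entry_rcu(p, &head, node)
		do_something(p);
	rcu_read_unlock();
	spin_unlock(&lock);

	/* After: the spinlock section is already an RCU reader */
	spin_lock(&lock);
	list_for_each_entry_rcu(p, &head, node)
		do_something(p);
	spin_unlock(&lock);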

Cc: Harald Freudenberger
Cc: Holger Dengler
Cc: Vasily Gorbik
Cc: Alexander Gordeev
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 drivers/s390/crypto/pkey_base.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/s390/crypto/pkey_base.c b/drivers/s390/crypto/pkey_base.c
index b15741461a63..4c4a9feecccc 100644
--- a/drivers/s390/crypto/pkey_base.c
+++ b/drivers/s390/crypto/pkey_base.c
@@ -48,16 +48,13 @@ int pkey_handler_register(struct pkey_handler *handler)
 
 	spin_lock(&handler_list_write_lock);
-	rcu_read_lock();
 	list_for_each_entry_rcu(h, &handler_list, list) {
 		if (h == handler) {
-			rcu_read_unlock();
 			spin_unlock(&handler_list_write_lock);
 			module_put(handler->module);
 			return -EEXIST;
 		}
 	}
-	rcu_read_unlock();
 	list_add_rcu(&handler->list, &handler_list);
 	spin_unlock(&handler_list_write_lock);
 
-- 
2.34.1

From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU
read-side critical section and the associated grace period. That means
that spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock(). Simplify the code and
remove the inner rcu_read_lock() invocation.

Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 ipc/msg.c  | 1 -
 ipc/sem.c  | 1 -
 ipc/shm.c  | 1 -
 ipc/util.c | 2 --
 4 files changed, 5 deletions(-)

diff --git a/ipc/msg.c b/ipc/msg.c
index ee6af4fe52bf..1e579b57023f 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -179,7 +179,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 	}
 
 	ipc_unlock_object(&msq->q_perm);
-	rcu_read_unlock();
 
 	return msq->q_perm.id;
 }
diff --git a/ipc/sem.c b/ipc/sem.c
index a39cdc7bf88f..38ad57b2b558 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -579,7 +579,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 	ns->used_sems += nsems;
 
 	sem_unlock(sma, -1);
-	rcu_read_unlock();
 
 	return sma->sem_perm.id;
 }
diff --git a/ipc/shm.c b/ipc/shm.c
index a9310b6dbbc3..61fae1b6a18e 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -795,7 +795,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 	error = shp->shm_perm.id;
 
 	ipc_unlock_object(&shp->shm_perm);
-	rcu_read_unlock();
 	return error;
 
 no_id:
diff --git a/ipc/util.c b/ipc/util.c
index cae60f11d9c2..1be691b5dcad 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -293,7 +293,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int limit)
 	idr_preload(GFP_KERNEL);
 
 	spin_lock_init(&new->lock);
-	rcu_read_lock();
 	spin_lock(&new->lock);
 
 	current_euid_egid(&euid, &egid);
@@ -316,7 +315,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int limit)
 	if (idx < 0) {
 		new->deleted = true;
 		spin_unlock(&new->lock);
-		rcu_read_unlock();
 		return idx;
 	}
 
-- 
2.34.1

From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU
read-side critical section and the associated grace period. That means
that spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock(). Simplify the code and
remove the inner rcu_read_lock() invocation.
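
As an aside, if RCU-lockdep documentation of the locking requirement
is wanted once the explicit read section is gone, the RCU list walker
can carry the condition itself via its optional lockdep argument; a
sketch only, not part of this patch:

	list_for_each_entry_rcu(relation, &ptracer_relations, node,
				lockdep_is_held(&ptracer_relations_lock)) {
		/* loop body unchanged */
	}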

Cc: Kees Cook
Cc: Paul Moore
Cc: James Morris
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 security/yama/yama_lsm.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 3d064dd4e03f..60d38deb181b 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -117,14 +117,12 @@ static void yama_relation_cleanup(struct work_struct *work)
 	struct ptrace_relation *relation;
 
 	spin_lock(&ptracer_relations_lock);
-	rcu_read_lock();
 	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
 		if (relation->invalid) {
 			list_del_rcu(&relation->node);
 			kfree_rcu(relation, rcu);
 		}
 	}
-	rcu_read_unlock();
 	spin_unlock(&ptracer_relations_lock);
 }
 
@@ -152,7 +150,6 @@ static int yama_ptracer_add(struct task_struct *tracer,
 	added->invalid = false;
 
 	spin_lock(&ptracer_relations_lock);
-	rcu_read_lock();
 	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
 		if (relation->invalid)
 			continue;
@@ -166,7 +163,6 @@ static int yama_ptracer_add(struct task_struct *tracer,
 	list_add_rcu(&added->node, &ptracer_relations);
 
 out:
-	rcu_read_unlock();
 	spin_unlock(&ptracer_relations_lock);
 	return 0;
 }
-- 
2.34.1

From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU
read-side critical section and the associated grace period. That means
that spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock(). Simplify the code and
remove the inner rcu_read_lock() invocation.

Cc: Tejun Heo
Cc: Johannes Weiner
Cc: Waiman Long
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 kernel/cgroup/cgroup.c | 2 --
 kernel/cgroup/debug.c  | 4 ----
 2 files changed, 6 deletions(-)

diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 312c6a8b55bb..db9e00a559df 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2944,14 +2944,12 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
 
 	/* look up all src csets */
 	spin_lock_irq(&css_set_lock);
-	rcu_read_lock();
 	task = leader;
 	do {
 		cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx);
 		if (!threadgroup)
 			break;
 	} while_each_thread(leader, task);
-	rcu_read_unlock();
 	spin_unlock_irq(&css_set_lock);
 
 	/* prepare dst csets and commit */
diff --git a/kernel/cgroup/debug.c b/kernel/cgroup/debug.c
index 80aa3f027ac3..81ea38dd6f9d 100644
--- a/kernel/cgroup/debug.c
+++ b/kernel/cgroup/debug.c
@@ -49,7 +49,6 @@ static int current_css_set_read(struct seq_file *seq, void *v)
 		return -ENODEV;
 
 	spin_lock_irq(&css_set_lock);
-	rcu_read_lock();
 	cset = task_css_set(current);
 	refcnt = refcount_read(&cset->refcount);
 	seq_printf(seq, "css_set %pK %d", cset, refcnt);
@@ -67,7 +66,6 @@ static int current_css_set_read(struct seq_file *seq, void *v)
 		seq_printf(seq, "%2d: %-4s\t- %p[%d]\n", ss->id, ss->name,
 			   css, css->id);
 	}
-	rcu_read_unlock();
 	spin_unlock_irq(&css_set_lock);
 	cgroup_kn_unlock(of->kn);
 	return 0;
@@ -95,7 +93,6 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
 		return -ENOMEM;
 
 	spin_lock_irq(&css_set_lock);
-	rcu_read_lock();
 	cset = task_css_set(current);
 	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
 		struct cgroup *c = link->cgrp;
@@ -104,7 +101,6 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
 		seq_printf(seq, "Root %d group %s\n",
 			   c->root->hierarchy_id, name_buf);
 	}
-	rcu_read_unlock();
 	spin_unlock_irq(&css_set_lock);
 	kfree(name_buf);
 	return 0;
-- 
2.34.1

From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU
read-side critical section and the associated grace period. That means
that spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock(). Simplify the code and
remove the inner rcu_read_lock() invocation.

Cc: Waiman Long
Cc: Johannes Weiner
Acked-by: Waiman Long
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 kernel/cgroup/cpuset.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 27adb04df675..9b7e8e8e9411 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -4073,7 +4073,6 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 	struct cpuset *cs;
 
 	spin_lock_irqsave(&callback_lock, flags);
-	rcu_read_lock();
 
 	cs = task_cs(tsk);
 	if (cs != &top_cpuset)
@@ -4095,7 +4094,6 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 		cpumask_copy(pmask, possible_mask);
 	}
 
-	rcu_read_unlock();
 	spin_unlock_irqrestore(&callback_lock, flags);
 }
 
@@ -4168,9 +4166,7 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
 	unsigned long flags;
 
 	spin_lock_irqsave(&callback_lock, flags);
-	rcu_read_lock();
 	guarantee_online_mems(task_cs(tsk), &mask);
-	rcu_read_unlock();
 	spin_unlock_irqrestore(&callback_lock, flags);
 
 	return mask;
@@ -4265,10 +4261,8 @@ bool cpuset_current_node_allowed(int node, gfp_t gfp_mask)
 
 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
 	spin_lock_irqsave(&callback_lock, flags);
-	rcu_read_lock();
 	cs = nearest_hardwall_ancestor(task_cs(current));
 	allowed = node_isset(node, cs->mems_allowed);
-	rcu_read_unlock();
 	spin_unlock_irqrestore(&callback_lock, flags);
 
 	return allowed;
-- 
2.34.1

From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU
read-side critical section and the associated grace period. That means
that spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock(). Simplify the code and
remove the inner rcu_read_lock() invocation.
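
The same reasoning covers the _bh flavor used below: spin_lock_bh()
disables BH, and since the flavor consolidation a BH-disabled region
is itself a full RCU read-side critical section that update-side
grace periods wait for. A minimal sketch with made-up names, not code
from this patch:

	/* update side */
	spin_lock_bh(&lock);
	list_del_rcu(&entry->node);
	spin_unlock_bh(&lock);
	kfree_rcu(entry, rcu);	/* grace period also waits for
				 * BH-disabled readers
				 */

	/* read side: no explicit rcu_read_lock() needed */
	spin_lock_bh(&lock);
	list_for_each_entry_rcu(entry, &head, node)
		do_something(entry);
	spin_unlock_bh(&lock);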

Cc: Johannes Berg
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 net/mac80211/cfg.c            | 2 --
 net/mac80211/debugfs.c        | 2 --
 net/mac80211/debugfs_netdev.c | 2 --
 net/mac80211/debugfs_sta.c    | 2 --
 net/mac80211/sta_info.c       | 2 --
 5 files changed, 10 deletions(-)

diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 2ed07fa121ab..4fe50d4c461d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -4825,7 +4825,6 @@ static int ieee80211_get_txq_stats(struct wiphy *wiphy,
 	int ret = 0;
 
 	spin_lock_bh(&local->fq.lock);
-	rcu_read_lock();
 
 	if (wdev) {
 		sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
@@ -4851,7 +4850,6 @@ static int ieee80211_get_txq_stats(struct wiphy *wiphy,
 	}
 
 out:
-	rcu_read_unlock();
 	spin_unlock_bh(&local->fq.lock);
 
 	return ret;
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index e8b78ec682da..82099f4cedbe 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -82,7 +82,6 @@ static ssize_t aqm_read(struct file *file,
 	int len = 0;
 
 	spin_lock_bh(&local->fq.lock);
-	rcu_read_lock();
 
 	len = scnprintf(buf, sizeof(buf),
 			"access name value\n"
@@ -105,7 +104,6 @@ static ssize_t aqm_read(struct file *file,
 			fq->limit,
 			fq->quantum);
 
-	rcu_read_unlock();
 	spin_unlock_bh(&local->fq.lock);
 
 	return simple_read_from_buffer(user_buf, count, ppos,
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 1dac78271045..30a5a978a678 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -625,7 +625,6 @@ static ssize_t ieee80211_if_fmt_aqm(
 	txqi = to_txq_info(sdata->vif.txq);
 
 	spin_lock_bh(&local->fq.lock);
-	rcu_read_lock();
 
 	len = scnprintf(buf,
 			buflen,
@@ -642,7 +641,6 @@ static ssize_t ieee80211_if_fmt_aqm(
 			txqi->tin.tx_bytes,
 			txqi->tin.tx_packets);
 
-	rcu_read_unlock();
 	spin_unlock_bh(&local->fq.lock);
 
 	return len;
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 49061bd4151b..ef75255d47d5 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -148,7 +148,6 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
 		return -ENOMEM;
 
 	spin_lock_bh(&local->fq.lock);
-	rcu_read_lock();
 
 	p += scnprintf(p,
 		       bufsz + buf - p,
@@ -178,7 +177,6 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
 			       test_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ?
 			       " DIRTY" : "");
 	}
-	rcu_read_unlock();
 	spin_unlock_bh(&local->fq.lock);
 
 	rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 8c550aab9bdc..663318a75d7f 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2637,13 +2637,11 @@ static void sta_set_tidstats(struct sta_info *sta,
 
 	if (link_id < 0 && tid < IEEE80211_NUM_TIDS) {
 		spin_lock_bh(&local->fq.lock);
-		rcu_read_lock();
 
 		tidstats->filled |= BIT(NL80211_TID_STATS_TXQ_STATS);
 		ieee80211_fill_txq_stats(&tidstats->txq_stats,
 					 to_txq_info(sta->sta.txq[tid]));
 
-		rcu_read_unlock();
 		spin_unlock_bh(&local->fq.lock);
 	}
 }
-- 
2.34.1

From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU
read-side critical section and the associated grace period. That means
that spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock(). Simplify the code and
remove the inner rcu_read_lock() invocation.
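
Note that the implication holds on PREEMPT_RT as well, where spinlocks
are sleeping rtmutex-based locks and cannot rely on disabled
preemption: there the lock implementation enters an RCU read section
itself. Roughly, paraphrased from kernel/locking/spinlock_rt.c (not
verbatim):

	static __always_inline void __rt_spin_lock(spinlock_t *lock)
	{
		rtlock_lock(&lock->lock);	/* may sleep */
		rcu_read_lock();	/* locked region stays an RCU reader */
		migrate_disable();
	}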

Cc: Samuel Mendoza-Jonas
Cc: Paul Fertser
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 net/ncsi/ncsi-manage.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 446e4e3b9553..6e36cd64a31e 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -650,7 +650,6 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 
 	spin_lock_irqsave(&nc->lock, flags);
 
-	rcu_read_lock();
 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
 		vid = vlan->vid;
 		for (i = 0; i < ncf->n_vids; i++)
@@ -661,7 +660,6 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
 		if (vid)
 			break;
 	}
-	rcu_read_unlock();
 
 	if (!vid) {
 		/* No VLAN ID is not set */
-- 
2.34.1

From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU
read-side critical section and the associated grace period. That means
that spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock(). Simplify the code and
remove the inner rcu_read_lock() invocation.

Cc: Taehee Yoo
Cc: Andrew Lunn
Cc: "David S. Miller"
Cc: Eric Dumazet
Cc: Jakub Kicinski
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 drivers/net/amt.c | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index ed86537b2f61..aaed9fbc7526 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -295,7 +295,6 @@ static void amt_source_work(struct work_struct *work)
 	tunnel = gnode->tunnel_list;
 
 	spin_lock_bh(&tunnel->lock);
-	rcu_read_lock();
 	if (gnode->filter_mode == MCAST_INCLUDE) {
 		amt_destroy_source(snode);
 		if (!gnode->nr_sources)
@@ -306,7 +305,6 @@ static void amt_source_work(struct work_struct *work)
 		 */
 		snode->status = AMT_SOURCE_STATUS_D_FWD;
 	}
-	rcu_read_unlock();
 	spin_unlock_bh(&tunnel->lock);
 }
 
@@ -426,7 +424,6 @@ static void amt_group_work(struct work_struct *work)
 		goto out;
 	}
 
-	rcu_read_lock();
 	for (i = 0; i < buckets; i++) {
 		hlist_for_each_entry_safe(snode, t,
 					  &gnode->sources[i], node) {
@@ -443,7 +440,6 @@ static void amt_group_work(struct work_struct *work)
 		amt_del_group(amt, gnode);
 	else
 		gnode->filter_mode = MCAST_INCLUDE;
-	rcu_read_unlock();
 	spin_unlock_bh(&tunnel->lock);
 out:
 	dev_put(amt->dev);
@@ -1327,11 +1323,9 @@ static void amt_clear_groups(struct amt_tunnel_list *tunnel)
 	int i;
 
 	spin_lock_bh(&tunnel->lock);
-	rcu_read_lock();
 	for (i = 0; i < amt->hash_buckets; i++)
 		hlist_for_each_entry_safe(gnode, t, &tunnel->groups[i], node)
 			amt_del_group(amt, gnode);
-	rcu_read_unlock();
 	spin_unlock_bh(&tunnel->lock);
 }
 
@@ -1343,11 +1337,9 @@ static void amt_tunnel_expire(struct work_struct *work)
 	struct amt_dev *amt = tunnel->amt;
 
 	spin_lock_bh(&amt->lock);
-	rcu_read_lock();
 	list_del_rcu(&tunnel->list);
 	amt->nr_tunnels--;
 	amt_clear_groups(tunnel);
-	rcu_read_unlock();
 	spin_unlock_bh(&amt->lock);
 	kfree_rcu(tunnel, rcu);
 }
-- 
2.34.1

From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU
read-side critical section and the associated grace period. That means
that spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock(). Simplify the code and
remove the inner rcu_read_lock() invocation.

Cc: Jay Vosburgh
Cc: Paolo Abeni
Cc: Jakub Kicinski
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 drivers/net/bonding/bond_3ad.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 4edc8e6b6b64..c53ea73f103a 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2485,7 +2485,6 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 	 * concurrently due to incoming LACPDU as well.
 	 */
 	spin_lock_bh(&bond->mode_lock);
-	rcu_read_lock();
 
 	/* check if there are any slaves */
 	if (!bond_has_slaves(bond))
@@ -2537,7 +2536,6 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 			break;
 		}
 	}
-	rcu_read_unlock();
 	spin_unlock_bh(&bond->mode_lock);
 
 	if (update_slave_arr)
-- 
2.34.1

From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU
read-side critical section and the associated grace period. That means
that spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock(). Simplify the code and
remove the inner rcu_read_lock() invocation.

Cc: "Toke"
Cc: Jakub Kicinski
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 drivers/net/wireless/ath/ath9k/xmit.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 0ac9212e42f7..4a0f465aa2fe 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1993,7 +1993,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 	ieee80211_txq_schedule_start(hw, txq->mac80211_qnum);
 
 	spin_lock_bh(&sc->chan_lock);
-	rcu_read_lock();
 	if (sc->cur_chan->stopped)
 		goto out;
 
@@ -2011,7 +2010,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 	}
 
 out:
-	rcu_read_unlock();
 	spin_unlock_bh(&sc->chan_lock);
 	ieee80211_txq_schedule_end(hw, txq->mac80211_qnum);
 }
-- 
2.34.1