From: pengdonglin

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU read
section and the relevant grace period. That means that spin_lock(),
which implies rcu_read_lock_sched(), also implies rcu_read_lock().
There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock().

Simplify the code and remove the inner rcu_read_lock() invocation.

Cc: Kees Cook
Cc: Paul Moore
Cc: James Morris
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
---
 security/yama/yama_lsm.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 3d064dd4e03f..60d38deb181b 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -117,14 +117,12 @@ static void yama_relation_cleanup(struct work_struct *work)
 	struct ptrace_relation *relation;
 
 	spin_lock(&ptracer_relations_lock);
-	rcu_read_lock();
 	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
 		if (relation->invalid) {
 			list_del_rcu(&relation->node);
 			kfree_rcu(relation, rcu);
 		}
 	}
-	rcu_read_unlock();
 	spin_unlock(&ptracer_relations_lock);
 }
 
@@ -152,7 +150,6 @@ static int yama_ptracer_add(struct task_struct *tracer,
 	added->invalid = false;
 
 	spin_lock(&ptracer_relations_lock);
-	rcu_read_lock();
 	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
 		if (relation->invalid)
 			continue;
@@ -166,7 +163,6 @@ static int yama_ptracer_add(struct task_struct *tracer,
 	list_add_rcu(&added->node, &ptracer_relations);
 
 out:
-	rcu_read_unlock();
 	spin_unlock(&ptracer_relations_lock);
 	return 0;
 }
-- 
2.34.1
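
Not part of the patch: a minimal illustrative sketch of the pattern the
change relies on. The names (example_lock, example_list, struct
example_entry) are made up, and the snippet assumes the usual rculist,
spinlock and slab headers. A lockless reader still opens an explicit RCU
read-side section, while an updater that holds spin_lock() is already
inside one since the RCU flavor consolidation and needs no nested
rcu_read_lock()/rcu_read_unlock() pair.

	struct example_entry {
		int value;
		struct list_head node;
		struct rcu_head rcu;
	};

	static LIST_HEAD(example_list);
	static DEFINE_SPINLOCK(example_lock);

	/* Lockless reader: an explicit RCU read-side section is still needed. */
	static bool example_lookup(int value)
	{
		struct example_entry *e;
		bool found = false;

		rcu_read_lock();
		list_for_each_entry_rcu(e, &example_list, node) {
			if (e->value == value) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}

	/*
	 * Updater: since the RCU flavor consolidation, the region covered by
	 * spin_lock() already acts as an RCU read-side critical section, so
	 * the RCU list walk needs no additional rcu_read_lock().
	 */
	static void example_remove(int value)
	{
		struct example_entry *e;

		spin_lock(&example_lock);
		list_for_each_entry_rcu(e, &example_list, node) {
			if (e->value == value) {
				list_del_rcu(&e->node);
				kfree_rcu(e, rcu);
				break;
			}
		}
		spin_unlock(&example_lock);
	}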