Disable propagation and unwinding of the waiter queue in case the head
waiter detects a deadlock condition, but keep it enabled in case of the
timeout fallback.

Currently, when the head waiter experiences an AA deadlock, it will
signal all its successors in the queue to exit with an error. This is
not ideal for cases where the same lock is held in contexts which can
cause errors in an unrestricted fashion (e.g., BPF programs, or kernel
paths invoked through BPF programs), and core kernel logic which is
written in a correct fashion and does not expect deadlocks.

The same reasoning can be extended to ABBA situations. Depending on the
actual runtime schedule, one or both of the head waiters involved in an
ABBA situation can detect it and exit directly without terminating their
waiter queue. If the ABBA situation manifests again, the waiters will
keep exiting until progress can be made, or a timeout is triggered in
case of more complicated locking dependencies.

We still preserve the queue destruction in case of timeouts, as either
the locking dependencies are too complex to be captured by the AA and
ABBA heuristics, or the owner is perpetually stuck. As such, it would be
unwise to keep applying the timeout for each new head waiter without
terminating the queue, since we may end up waiting for more than 250 ms
in aggregate across all participants in the locking transaction.

The patch itself is fairly simple: we signal our successor to become the
next head waiter, and leave the queue without attempting to acquire the
lock.

With this change, the behavior of waiters whose predecessor experiences
a deadlock changes. It is guaranteed that call sites will no longer
receive errors if a predecessor encounters a deadlock and the successors
do not participate in one. This should lower the failure rate for
waiters that are not performing improper locking operations and were
merely unlucky enough to queue behind a misbehaving waiter. However,
timeouts are still a possibility and must be accounted for, so users
cannot rely on errors never occurring at all.

Suggested-by: Amery Hung
Suggested-by: Alexei Starovoitov
Signed-off-by: Kumar Kartikeya Dwivedi
---
 kernel/bpf/rqspinlock.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/kernel/bpf/rqspinlock.c b/kernel/bpf/rqspinlock.c
index 21be48108e96..b94e258bf2b9 100644
--- a/kernel/bpf/rqspinlock.c
+++ b/kernel/bpf/rqspinlock.c
@@ -572,6 +572,14 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val)
 	val = res_atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK) ||
 					   RES_CHECK_TIMEOUT(ts, ret, _Q_LOCKED_PENDING_MASK));

+	/* Disable queue destruction when we detect deadlocks. */
+	if (ret == -EDEADLK) {
+		if (!next)
+			next = smp_cond_load_relaxed(&node->next, (VAL));
+		arch_mcs_spin_unlock_contended(&next->locked);
+		goto err_release_node;
+	}
+
 waitq_timeout:
 	if (ret) {
 		/*
--
2.51.0
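A minimal caller-side sketch of what the behavior change above implies;
this is illustrative only and not part of the patch (update_shared_state()
and struct my_state are made-up names, and the rqspinlock declarations
are assumed to be in scope). Errors from a misbehaving predecessor no
longer propagate down the waiter queue, but raw_res_spin_lock_irqsave()
can still fail (e.g. with -EDEADLK for the waiter that itself detects a
deadlock, or with a timeout error), so the error check must stay:

/*
 * Illustrative sketch only: callers must still check the return value
 * of raw_res_spin_lock_irqsave(), since timeouts and self-detected
 * deadlocks can still surface as errors after this change.
 */
struct my_state {
        int counter;
};

static int update_shared_state(rqspinlock_t *lock, struct my_state *st)
{
        unsigned long flags;
        int ret;

        ret = raw_res_spin_lock_irqsave(lock, flags);
        if (ret)
                return ret;     /* e.g. -EDEADLK or a timeout error */

        st->counter++;          /* critical section */

        raw_res_spin_unlock_irqrestore(lock, flags);
        return 0;
}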
Introduce a new mode for the rqspinlock stress test that exercises a
deadlock that won't be detected by the AA and ABBA checks, so that we
always reliably trigger the timeout fallback. We need 4 CPUs for this
particular case, as CPU 0 is left untouched and three participant CPUs
are needed to trigger the ABBCCA case.

Refactor the lock acquisition paths in the module to better reflect the
three modes and choose the right lock depending on the context. Also,
drop the ABBA case from running by default as part of test_progs, since
the stress test can consume a significant amount of time.

Acked-by: Eduard Zingerman
Reviewed-by: Amery Hung
Signed-off-by: Kumar Kartikeya Dwivedi
---
 .../selftests/bpf/prog_tests/res_spin_lock.c  |  8 +-
 .../bpf/test_kmods/bpf_test_rqspinlock.c      | 85 ++++++++++++++-----
 2 files changed, 66 insertions(+), 27 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
index 8c6c2043a432..f0a8c828f8f1 100644
--- a/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
+++ b/tools/testing/selftests/bpf/prog_tests/res_spin_lock.c
@@ -110,8 +110,8 @@ void serial_test_res_spin_lock_stress(void)
 	ASSERT_OK(load_module("bpf_test_rqspinlock.ko", false), "load module AA");
 	sleep(5);
 	unload_module("bpf_test_rqspinlock", false);
-
-	ASSERT_OK(load_module_params("bpf_test_rqspinlock.ko", "test_ab=1", false), "load module ABBA");
-	sleep(5);
-	unload_module("bpf_test_rqspinlock", false);
+	/*
+	 * Insert bpf_test_rqspinlock.ko manually with test_mode=[1|2] to test
+	 * other cases (ABBA, ABBCCA).
+	 */
 }
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c b/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
index 769206fc70e4..4cced4bb8af1 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_test_rqspinlock.c
@@ -22,23 +22,61 @@ static struct perf_event_attr hw_attr = {

 static rqspinlock_t lock_a;
 static rqspinlock_t lock_b;
+static rqspinlock_t lock_c;
+
+enum rqsl_mode {
+	RQSL_MODE_AA = 0,
+	RQSL_MODE_ABBA,
+	RQSL_MODE_ABBCCA,
+};
+
+static int test_mode = RQSL_MODE_AA;
+module_param(test_mode, int, 0644);
+MODULE_PARM_DESC(test_mode,
+		 "rqspinlock test mode: 0 = AA, 1 = ABBA, 2 = ABBCCA");

 static struct perf_event **rqsl_evts;
 static int rqsl_nevts;

-static bool test_ab = false;
-module_param(test_ab, bool, 0644);
-MODULE_PARM_DESC(test_ab, "Test ABBA situations instead of AA situations");
-
 static struct task_struct **rqsl_threads;
 static int rqsl_nthreads;
 static atomic_t rqsl_ready_cpus = ATOMIC_INIT(0);

 static int pause = 0;

-static bool nmi_locks_a(int cpu)
+static const char *rqsl_mode_names[] = {
+	[RQSL_MODE_AA] = "AA",
+	[RQSL_MODE_ABBA] = "ABBA",
+	[RQSL_MODE_ABBCCA] = "ABBCCA",
+};
+
+struct rqsl_lock_pair {
+	rqspinlock_t *worker_lock;
+	rqspinlock_t *nmi_lock;
+};
+
+static struct rqsl_lock_pair rqsl_get_lock_pair(int cpu)
 {
-	return (cpu & 1) && test_ab;
+	int mode = READ_ONCE(test_mode);
+
+	switch (mode) {
+	default:
+	case RQSL_MODE_AA:
+		return (struct rqsl_lock_pair){ &lock_a, &lock_a };
+	case RQSL_MODE_ABBA:
+		if (cpu & 1)
+			return (struct rqsl_lock_pair){ &lock_b, &lock_a };
+		return (struct rqsl_lock_pair){ &lock_a, &lock_b };
+	case RQSL_MODE_ABBCCA:
+		switch (cpu % 3) {
+		case 0:
+			return (struct rqsl_lock_pair){ &lock_a, &lock_b };
+		case 1:
+			return (struct rqsl_lock_pair){ &lock_b, &lock_c };
+		default:
+			return (struct rqsl_lock_pair){ &lock_c, &lock_a };
+		}
+	}
 }

 static int rqspinlock_worker_fn(void *arg)
@@ -51,19 +89,17 @@ static int rqspinlock_worker_fn(void *arg)
 	atomic_inc(&rqsl_ready_cpus);
 	while (!kthread_should_stop()) {
+		struct rqsl_lock_pair locks = rqsl_get_lock_pair(cpu);
+		rqspinlock_t *worker_lock = locks.worker_lock;
+
 		if (READ_ONCE(pause)) {
 			msleep(1000);
 			continue;
 		}
-		if (nmi_locks_a(cpu))
-			ret = raw_res_spin_lock_irqsave(&lock_b, flags);
-		else
-			ret = raw_res_spin_lock_irqsave(&lock_a, flags);
+		ret = raw_res_spin_lock_irqsave(worker_lock, flags);
 		mdelay(20);
-		if (nmi_locks_a(cpu) && !ret)
-			raw_res_spin_unlock_irqrestore(&lock_b, flags);
-		else if (!ret)
-			raw_res_spin_unlock_irqrestore(&lock_a, flags);
+		if (!ret)
+			raw_res_spin_unlock_irqrestore(worker_lock, flags);
 		cpu_relax();
 	}
 	return 0;
 }
@@ -91,6 +127,7 @@ static int rqspinlock_worker_fn(void *arg)
 static void nmi_cb(struct perf_event *event, struct perf_sample_data *data,
 		   struct pt_regs *regs)
 {
+	struct rqsl_lock_pair locks;
 	int cpu = smp_processor_id();
 	unsigned long flags;
 	int ret;
@@ -98,17 +135,13 @@ static void nmi_cb(struct perf_event *event, struct perf_sample_data *data,
 	if (!cpu || READ_ONCE(pause))
 		return;

-	if (nmi_locks_a(cpu))
-		ret = raw_res_spin_lock_irqsave(&lock_a, flags);
-	else
-		ret = raw_res_spin_lock_irqsave(test_ab ? &lock_b : &lock_a, flags);
+	locks = rqsl_get_lock_pair(cpu);
+	ret = raw_res_spin_lock_irqsave(locks.nmi_lock, flags);

 	mdelay(10);

-	if (nmi_locks_a(cpu) && !ret)
-		raw_res_spin_unlock_irqrestore(&lock_a, flags);
-	else if (!ret)
-		raw_res_spin_unlock_irqrestore(test_ab ? &lock_b : &lock_a, flags);
+	if (!ret)
+		raw_res_spin_unlock_irqrestore(locks.nmi_lock, flags);
 }

 static void free_rqsl_threads(void)
@@ -142,13 +175,19 @@ static int bpf_test_rqspinlock_init(void)
 	int i, ret;
 	int ncpus = num_online_cpus();

-	pr_err("Mode = %s\n", test_ab ? "ABBA" : "AA");
+	if (test_mode < RQSL_MODE_AA || test_mode > RQSL_MODE_ABBCCA) {
+		pr_err("Invalid mode %d\n", test_mode);
+		return -EINVAL;
+	}
+
+	pr_err("Mode = %s\n", rqsl_mode_names[test_mode]);

 	if (ncpus < 3)
 		return -ENOTSUPP;

 	raw_res_spin_lock_init(&lock_a);
 	raw_res_spin_lock_init(&lock_b);
+	raw_res_spin_lock_init(&lock_c);

 	rqsl_evts = kcalloc(ncpus - 1, sizeof(*rqsl_evts), GFP_KERNEL);
 	if (!rqsl_evts)
--
2.51.0
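For reference, a minimal sketch of the dependency cycle that test_mode=2
creates; this is illustrative only and not part of the patch: abbcca_step()
is a made-up helper, and the second acquisition is nested inline here
instead of being taken from the NMI callback via perf events as the module
actually does.

/*
 * Illustrative sketch of the per-CPU lock dependency set up by
 * test_mode=2 (ABBCCA):
 *   cpu % 3 == 0: holds lock_a, wants lock_b
 *   cpu % 3 == 1: holds lock_b, wants lock_c
 *   cpu % 3 == 2: holds lock_c, wants lock_a
 * No single waiter ever observes an AA or two-lock ABBA pattern, so
 * only the timeout fallback can break the A -> B -> C -> A cycle.
 */
static void abbcca_step(int cpu)
{
        rqspinlock_t *ring[3] = { &lock_a, &lock_b, &lock_c };
        rqspinlock_t *held = ring[cpu % 3];
        rqspinlock_t *wanted = ring[(cpu + 1) % 3];
        unsigned long f1, f2;

        if (raw_res_spin_lock_irqsave(held, f1))
                return;
        /* The second acquisition closes the cycle; it may time out. */
        if (!raw_res_spin_lock_irqsave(wanted, f2))
                raw_res_spin_unlock_irqrestore(wanted, f2);
        raw_res_spin_unlock_irqrestore(held, f1);
}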