From: Jiayuan Chen

tx_work_handler calls tls_tx_records with flags=-1, which preserves each
record's original tx_flags but results in tcp_sendmsg_locked using an
infinite send timeout. When the peer is unresponsive and the send buffer
is full, tcp_sendmsg_locked blocks indefinitely in sk_stream_wait_memory.

This causes tls_sk_proto_close to hang in cancel_delayed_work_sync
waiting for tx_work_handler to finish, leading to a hung task:

  INFO: task ...: blocked for more than ... seconds.
  Call Trace:
   cancel_delayed_work_sync
   tls_sw_cancel_work_tx
   tls_sk_proto_close

A workqueue handler should never block indefinitely. Fix this by
introducing __tls_tx_records() with an extra_flags parameter that gets
OR'd into each record's tx_flags. tx_work_handler uses this to pass
MSG_DONTWAIT so tcp_sendmsg_locked returns -EAGAIN immediately when the
send buffer is full, without overwriting the original per-record flags
(MSG_MORE, MSG_NOSIGNAL, etc.). On -EAGAIN, the existing reschedule
mechanism retries after a short delay.

Also consolidate the two identical reschedule paths (lock contention and
-EAGAIN) into one.
Reported-by: syzbot+ca1345cca66556f3d79b@syzkaller.appspotmail.com
Fixes: a42055e8d2c3 ("net/tls: Add support for async encryption of records for performance")
Signed-off-by: Jiayuan Chen
---
 net/tls/tls_sw.c | 31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)

diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 9937d4c810f2..c9d3d44581da 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -404,7 +404,7 @@ static void tls_free_open_rec(struct sock *sk)
 	}
 }
 
-int tls_tx_records(struct sock *sk, int flags)
+static int __tls_tx_records(struct sock *sk, int flags, int extra_flags)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
@@ -417,9 +417,9 @@ int tls_tx_records(struct sock *sk, int flags)
 				       struct tls_rec, list);
 
 		if (flags == -1)
-			tx_flags = rec->tx_flags;
+			tx_flags = rec->tx_flags | extra_flags;
 		else
-			tx_flags = flags;
+			tx_flags = flags | extra_flags;
 
 		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
 		if (rc)
@@ -463,6 +463,11 @@ int tls_tx_records(struct sock *sk, int flags)
 	return rc;
 }
 
+int tls_tx_records(struct sock *sk, int flags)
+{
+	return __tls_tx_records(sk, flags, 0);
+}
+
 static void tls_encrypt_done(void *data, int err)
 {
 	struct tls_sw_context_tx *ctx;
@@ -2629,6 +2634,7 @@ static void tx_work_handler(struct work_struct *work)
 	struct sock *sk = tx_work->sk;
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_tx *ctx;
+	int rc;
 
 	if (unlikely(!tls_ctx))
 		return;
@@ -2642,16 +2648,21 @@ static void tx_work_handler(struct work_struct *work)
 
 	if (mutex_trylock(&tls_ctx->tx_lock)) {
 		lock_sock(sk);
-		tls_tx_records(sk, -1);
+		rc = __tls_tx_records(sk, -1, MSG_DONTWAIT);
 		release_sock(sk);
 		mutex_unlock(&tls_ctx->tx_lock);
-	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
-		/* Someone is holding the tx_lock, they will likely run Tx
-		 * and cancel the work on their way out of the lock section.
-		 * Schedule a long delay just in case.
-		 */
-		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
+		if (rc != -EAGAIN)
+			return;
 	}
+
+	/* Someone is holding the tx_lock, they will likely run Tx
+	 * and cancel the work on their way out of the lock section.
+	 * Schedule a long delay just in case.
+	 * Also reschedule on -EAGAIN when the send buffer is full
+	 * to avoid blocking the workqueue indefinitely.
+	 */
+	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
 }
 
 static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
-- 
2.43.0