From: Melbin K Mathew

The credit calculation in virtio_transport_get_credit() uses unsigned
arithmetic:

	ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);

If the peer shrinks its advertised buffer (peer_buf_alloc) while bytes
are in flight, the subtraction can underflow and produce a large
positive value, potentially allowing more data to be queued than the
peer can handle.

Reuse virtio_transport_has_space(), which already handles this case,
and add a comment to make it clear why we are doing that.

Fixes: 06a8fc78367d ("VSOCK: Introduce virtio_vsock_common.ko")
Suggested-by: Stefano Garzarella
Signed-off-by: Melbin K Mathew
[Stefano: use virtio_transport_has_space() instead of duplicating the code]
[Stefano: tweak the commit message]
Signed-off-by: Stefano Garzarella
---
 net/vmw_vsock/virtio_transport_common.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index dcc8a1d5851e..2fe341be6ce2 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -28,6 +28,7 @@
 static void virtio_transport_cancel_close_work(struct vsock_sock *vsk,
 					       bool cancel_timeout);
+static s64 virtio_transport_has_space(struct virtio_vsock_sock *vvs);
 
 static const struct virtio_transport *
 virtio_transport_get_ops(struct vsock_sock *vsk)
 {
@@ -499,9 +500,7 @@ u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 credit)
 		return 0;
 
 	spin_lock_bh(&vvs->tx_lock);
-	ret = vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
-	if (ret > credit)
-		ret = credit;
+	ret = min_t(u32, credit, virtio_transport_has_space(vvs));
 	vvs->tx_cnt += ret;
 	vvs->bytes_unsent += ret;
 	spin_unlock_bh(&vvs->tx_lock);
@@ -877,11 +876,14 @@ u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk)
 }
 EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_has_data);
 
-static s64 virtio_transport_has_space(struct vsock_sock *vsk)
+static s64 virtio_transport_has_space(struct virtio_vsock_sock *vvs)
 {
-	struct virtio_vsock_sock *vvs = vsk->trans;
 	s64 bytes;
 
+	/* Use s64 arithmetic so if the peer shrinks peer_buf_alloc while
+	 * we have bytes in flight (tx_cnt - peer_fwd_cnt), the subtraction
+	 * does not underflow.
+	 */
 	bytes = (s64)vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
 	if (bytes < 0)
 		bytes = 0;
@@ -895,7 +897,7 @@ s64 virtio_transport_stream_has_space(struct vsock_sock *vsk)
 	s64 bytes;
 
 	spin_lock_bh(&vvs->tx_lock);
-	bytes = virtio_transport_has_space(vsk);
+	bytes = virtio_transport_has_space(vvs);
 	spin_unlock_bh(&vvs->tx_lock);
 
 	return bytes;
@@ -1490,7 +1492,7 @@ static bool virtio_transport_space_update(struct sock *sk,
 	spin_lock_bh(&vvs->tx_lock);
 	vvs->peer_buf_alloc = le32_to_cpu(hdr->buf_alloc);
 	vvs->peer_fwd_cnt = le32_to_cpu(hdr->fwd_cnt);
-	space_available = virtio_transport_has_space(vsk);
+	space_available = virtio_transport_has_space(vvs);
 	spin_unlock_bh(&vvs->tx_lock);
 	return space_available;
 }
--
2.52.0

From: Stefano Garzarella

The test requires the sender (client) to send all messages before
waking up the receiver (server). Since virtio-vsock had a bug and did
not respect the size of the TX buffer, this test used to work; now that
we are going to fix the bug, the test hangs, because the sender fills
its TX buffer before waking up the receiver.

Set the buffer size in the sender (client) as well, as we already do
for the receiver (server).
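For reference, setsockopt_ull_check() is a checked wrapper from the
vsock test utilities; the raw calls it stands in for look roughly like
the sketch below (a hypothetical helper, assuming only the standard
AF_VSOCK socket options; SO_VM_SOCKETS_BUFFER_SIZE is clamped to the
current max, which is why the max is raised first):

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/socket.h>
	#include <linux/vm_sockets.h>

	/* Set both the hard cap and the current buffer size on an
	 * AF_VSOCK socket; both options take an unsigned long long.
	 */
	static void set_vsock_buf_size(int fd, unsigned long long size)
	{
		if (setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
			       &size, sizeof(size)) < 0 ||
		    setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
			       &size, sizeof(size)) < 0) {
			perror("setsockopt");
			exit(EXIT_FAILURE);
		}
	}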
Fixes: 5c338112e48a ("test/vsock: rework message bounds test")
Signed-off-by: Stefano Garzarella
---
 tools/testing/vsock/vsock_test.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index bbe3723babdc..ad1eea0f5ab8 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -351,6 +351,7 @@ static void test_stream_msg_peek_server(const struct test_opts *opts)
 
 static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
 {
+	unsigned long long sock_buf_size;
 	unsigned long curr_hash;
 	size_t max_msg_size;
 	int page_size;
@@ -363,6 +364,16 @@ static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
 		exit(EXIT_FAILURE);
 	}
 
+	sock_buf_size = SOCK_BUF_SIZE;
+
+	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
+			     sock_buf_size,
+			     "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
+
+	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
+			     sock_buf_size,
+			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
+
 	/* Wait, until receiver sets buffer size. */
 	control_expectln("SRVREADY");
--
2.52.0

From: Melbin K Mathew

The virtio transport derives its TX credit directly from
peer_buf_alloc, which is set from the remote endpoint's
SO_VM_SOCKETS_BUFFER_SIZE value. On the host side this means that the
amount of data we are willing to queue for a connection is scaled by a
guest-chosen buffer size, rather than by the host's own vsock
configuration.

A malicious guest can advertise a large buffer and read slowly, causing
the host to allocate a correspondingly large amount of sk_buff memory.
The same thing would happen in the guest with a malicious host, since
virtio transports share the same code base.

Introduce a small helper, virtio_transport_tx_buf_size(), that returns
min(peer_buf_alloc, buf_alloc), and use it wherever we consume
peer_buf_alloc. This ensures the effective TX window is bounded by both
the peer's advertised buffer and our own buf_alloc (already clamped to
buffer_max_size via SO_VM_SOCKETS_BUFFER_MAX_SIZE), so a remote peer
cannot force the other endpoint to queue more data than allowed by its
own vsock settings.

On an unpatched Ubuntu 22.04 host (~64 GiB RAM), running a PoC with 32
guest vsock connections advertising 2 GiB each and reading slowly drove
Slab/SUnreclaim from ~0.5 GiB to ~57 GiB; the system only recovered
after killing the QEMU process. That said, if QEMU's memory is limited
with cgroups, the maximum memory used is bounded accordingly.

With this patch applied:

Before:
  MemFree:    ~61.6 GiB
  Slab:       ~142 MiB
  SUnreclaim: ~117 MiB

After 32 high-credit connections:
  MemFree:    ~61.5 GiB
  Slab:       ~178 MiB
  SUnreclaim: ~152 MiB

Only ~35 MiB increase in Slab/SUnreclaim, no host OOM, and the guest
remains responsive.

Compatibility with non-virtio transports:

- VMCI uses the AF_VSOCK buffer knobs to size its queue pairs per
  socket based on the local vsk->buffer_* values; the remote side
  cannot enlarge those queues beyond what the local endpoint
  configured.

- Hyper-V's vsock transport uses fixed-size VMBus ring buffers and an
  MTU bound; there is no peer-controlled credit field comparable to
  peer_buf_alloc, and the remote endpoint cannot drive in-flight kernel
  memory above those ring sizes.

- The loopback path reuses virtio_transport_common.c, so it naturally
  follows the same semantics as the virtio transport.
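Returning to the virtio clamp itself: a self-contained userspace model
of the resulting credit computation, with hypothetical values (it
mirrors virtio_transport_has_space() plus the new helper; it is a
sketch, not the kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical snapshot of one connection's credit state. */
	struct credit_state {
		uint32_t peer_buf_alloc; /* peer-advertised, untrusted */
		uint32_t buf_alloc;      /* our local buffer size */
		uint32_t tx_cnt;         /* bytes sent so far */
		uint32_t peer_fwd_cnt;   /* bytes the peer consumed */
	};

	static int64_t tx_space(const struct credit_state *s)
	{
		/* Window bounded by both sides, as in the new helper. */
		uint32_t win = s->peer_buf_alloc < s->buf_alloc ?
			       s->peer_buf_alloc : s->buf_alloc;

		/* 64-bit signed arithmetic: cannot underflow. */
		int64_t bytes = (int64_t)win - (s->tx_cnt - s->peer_fwd_cnt);

		return bytes < 0 ? 0 : bytes;
	}

	int main(void)
	{
		/* Peer advertises 2 GiB, local policy allows 256 KiB,
		 * 128 KiB already in flight: credit is 128 KiB, not ~2 GiB.
		 */
		struct credit_state s = {
			.peer_buf_alloc = 2U * 1024 * 1024 * 1024,
			.buf_alloc = 256 * 1024,
			.tx_cnt = 128 * 1024,
			.peer_fwd_cnt = 0,
		};

		printf("tx space: %lld bytes\n", (long long)tx_space(&s));
		return 0;
	}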
This change is limited to virtio_transport_common.c and thus affects
virtio-vsock, vhost-vsock, and loopback, bringing them in line with the
"remote window intersected with local policy" behaviour that VMCI and
Hyper-V already effectively have.

Fixes: 06a8fc78367d ("VSOCK: Introduce virtio_vsock_common.ko")
Suggested-by: Stefano Garzarella
Signed-off-by: Melbin K Mathew
[Stefano: small adjustments after changing the previous patch]
[Stefano: tweak the commit message]
Signed-off-by: Stefano Garzarella
---
 net/vmw_vsock/virtio_transport_common.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 2fe341be6ce2..00f4cf86beac 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -821,6 +821,15 @@ virtio_transport_seqpacket_dequeue(struct vsock_sock *vsk,
 }
 EXPORT_SYMBOL_GPL(virtio_transport_seqpacket_dequeue);
 
+static u32 virtio_transport_tx_buf_size(struct virtio_vsock_sock *vvs)
+{
+	/* The peer advertises its receive buffer via peer_buf_alloc, but we
+	 * cap it to our local buf_alloc so a remote peer cannot force us to
+	 * queue more data than our own buffer configuration allows.
+	 */
+	return min(vvs->peer_buf_alloc, vvs->buf_alloc);
+}
+
 int
 virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
 				   struct msghdr *msg,
@@ -830,7 +839,7 @@ virtio_transport_seqpacket_enqueue(struct vsock_sock *vsk,
 
 	spin_lock_bh(&vvs->tx_lock);
 
-	if (len > vvs->peer_buf_alloc) {
+	if (len > virtio_transport_tx_buf_size(vvs)) {
 		spin_unlock_bh(&vvs->tx_lock);
 		return -EMSGSIZE;
 	}
@@ -884,7 +893,8 @@ static s64 virtio_transport_has_space(struct virtio_vsock_sock *vvs)
 	 * we have bytes in flight (tx_cnt - peer_fwd_cnt), the subtraction
 	 * does not underflow.
 	 */
-	bytes = (s64)vvs->peer_buf_alloc - (vvs->tx_cnt - vvs->peer_fwd_cnt);
+	bytes = (s64)virtio_transport_tx_buf_size(vvs) -
+		(vvs->tx_cnt - vvs->peer_fwd_cnt);
 	if (bytes < 0)
 		bytes = 0;
--
2.52.0

From: Melbin K Mathew

Add a regression test for the TX credit bounds fix. The test verifies
that a sender with a small local buffer size cannot queue excessive
data even when the peer advertises a large receive buffer.

The client:
- Sets a small buffer size (64 KiB)
- Connects to the server (which advertises a 2 MiB buffer)
- Sends in non-blocking mode until EAGAIN
- Verifies that the total queued data is bounded

This guards against the original vulnerability, where a remote peer
could cause unbounded kernel memory allocation by advertising a large
buffer and reading slowly.
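The heart of the client is the usual non-blocking send-until-EAGAIN
idiom; a condensed sketch of the pattern (the complete version, with
the control-channel handshake and error reporting, is in the diff
below):

	#include <errno.h>
	#include <fcntl.h>
	#include <sys/socket.h>

	/* Queue data until the kernel refuses more; with bounded TX
	 * credit, the returned total stays within the local buffer size.
	 */
	static size_t fill_tx_window(int fd, const char *buf, size_t len)
	{
		size_t total = 0;

		/* Non-blocking, so a full window fails fast with EAGAIN. */
		fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);

		for (;;) {
			ssize_t sent = send(fd, buf, len, 0);

			if (sent > 0) {
				total += sent;
				continue;
			}
			if (sent < 0 && errno == EINTR)
				continue;
			break;	/* EAGAIN/EWOULDBLOCK, EOF, or error */
		}
		return total;
	}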
Suggested-by: Stefano Garzarella
Signed-off-by: Melbin K Mathew
[Stefano: use sock_buf_size to check the bytes sent + small fixes]
Signed-off-by: Stefano Garzarella
---
 tools/testing/vsock/vsock_test.c | 101 +++++++++++++++++++++++++++++++
 1 file changed, 101 insertions(+)

diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index ad1eea0f5ab8..6933f986ef2a 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -347,6 +347,7 @@ static void test_stream_msg_peek_server(const struct test_opts *opts)
 }
 
 #define SOCK_BUF_SIZE (2 * 1024 * 1024)
+#define SOCK_BUF_SIZE_SMALL (64 * 1024)
 #define MAX_MSG_PAGES 4
 
 static void test_seqpacket_msg_bounds_client(const struct test_opts *opts)
@@ -2230,6 +2231,101 @@ static void test_stream_accepted_setsockopt_server(const struct test_opts *opts)
 	close(fd);
 }
 
+static void test_stream_tx_credit_bounds_client(const struct test_opts *opts)
+{
+	unsigned long long sock_buf_size;
+	size_t total = 0;
+	char buf[4096];
+	int fd;
+
+	memset(buf, 'A', sizeof(buf));
+
+	fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);
+	if (fd < 0) {
+		perror("connect");
+		exit(EXIT_FAILURE);
+	}
+
+	sock_buf_size = SOCK_BUF_SIZE_SMALL;
+
+	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
+			     sock_buf_size,
+			     "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
+
+	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
+			     sock_buf_size,
+			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
+
+	if (fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK) < 0) {
+		perror("fcntl(F_SETFL)");
+		exit(EXIT_FAILURE);
+	}
+
+	control_expectln("SRVREADY");
+
+	for (;;) {
+		ssize_t sent = send(fd, buf, sizeof(buf), 0);
+
+		if (sent == 0) {
+			fprintf(stderr, "unexpected EOF while sending bytes\n");
+			exit(EXIT_FAILURE);
+		}
+
+		if (sent < 0) {
+			if (errno == EINTR)
+				continue;
+
+			if (errno == EAGAIN || errno == EWOULDBLOCK)
+				break;
+
+			perror("send");
+			exit(EXIT_FAILURE);
+		}
+
+		total += sent;
+	}
+
+	control_writeln("CLIDONE");
+	close(fd);
+
+	/* We should not be able to send more bytes than the value set as
+	 * local buffer size.
+	 */
+	if (total > sock_buf_size) {
+		fprintf(stderr,
+			"TX credit too large: queued %zu bytes (expected <= %llu)\n",
+			total, sock_buf_size);
+		exit(EXIT_FAILURE);
+	}
+}
+
+static void test_stream_tx_credit_bounds_server(const struct test_opts *opts)
+{
+	unsigned long long sock_buf_size;
+	int fd;
+
+	fd = vsock_stream_accept(VMADDR_CID_ANY, opts->peer_port, NULL);
+	if (fd < 0) {
+		perror("accept");
+		exit(EXIT_FAILURE);
+	}
+
+	sock_buf_size = SOCK_BUF_SIZE;
+
+	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_MAX_SIZE,
+			     sock_buf_size,
+			     "setsockopt(SO_VM_SOCKETS_BUFFER_MAX_SIZE)");
+
+	setsockopt_ull_check(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE,
+			     sock_buf_size,
+			     "setsockopt(SO_VM_SOCKETS_BUFFER_SIZE)");
+
+	control_writeln("SRVREADY");
+	control_expectln("CLIDONE");
+
+	close(fd);
+}
+
 static struct test_case test_cases[] = {
 	{
 		.name = "SOCK_STREAM connection reset",
@@ -2414,6 +2510,11 @@ static struct test_case test_cases[] = {
 		.run_client = test_stream_accepted_setsockopt_client,
 		.run_server = test_stream_accepted_setsockopt_server,
 	},
+	{
+		.name = "SOCK_STREAM TX credit bounds",
+		.run_client = test_stream_tx_credit_bounds_client,
+		.run_server = test_stream_tx_credit_bounds_server,
+	},
 	{},
 };
--
2.52.0
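As a closing note, the unsigned wraparound that motivates the first
patch in this series is easy to reproduce in isolation; a standalone
userspace sketch with hypothetical values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Peer shrank its advertised buffer to 4 KiB while 64 KiB
		 * of our data was still in flight.
		 */
		uint32_t peer_buf_alloc = 4096;
		uint32_t tx_cnt = 64 * 1024, peer_fwd_cnt = 0;

		/* Unsigned arithmetic wraps to ~4 GiB of bogus credit... */
		uint32_t bad = peer_buf_alloc - (tx_cnt - peer_fwd_cnt);

		/* ...while 64-bit signed arithmetic, as in the patched
		 * virtio_transport_has_space(), clamps to zero.
		 */
		int64_t good = (int64_t)peer_buf_alloc -
			       (tx_cnt - peer_fwd_cnt);
		if (good < 0)
			good = 0;

		printf("u32 credit: %u\ns64 credit: %lld\n",
		       bad, (long long)good);
		return 0;
	}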