vhost_vsock_alloc_skb() returns NULL for packets advertising a length
larger than VIRTIO_VSOCK_MAX_PKT_BUF_SIZE in the packet header. However,
this is only checked once the SKB has been allocated and, if the length
in the packet header is zero, the SKB may not be freed immediately.

Hoist the size check before the SKB allocation so that an iovec larger
than VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + the header size is rejected
outright. The subsequent check on the length field in the header can
then simply check that the allocated SKB is indeed large enough to hold
the packet.

Cc:
Fixes: 71dc9ec9ac7d ("virtio/vsock: replace virtio_vsock_pkt with sk_buff")
Signed-off-by: Will Deacon
---
 drivers/vhost/vsock.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 802153e23073..66a0f060770e 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -344,6 +344,9 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
 
         len = iov_length(vq->iov, out);
 
+        if (len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM)
+                return NULL;
+
         /* len contains both payload and hdr */
         skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
         if (!skb)
@@ -367,8 +370,7 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
                 return skb;
 
         /* The pkt is too big or the length in the header is invalid */
-        if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
-            payload_len + sizeof(*hdr) > len) {
+        if (payload_len + sizeof(*hdr) > len) {
                 kfree_skb(skb);
                 return NULL;
         }
-- 
2.50.0.727.gbf7dc18ff4-goog

When receiving a vsock packet in the guest, only the virtqueue buffer
size is validated prior to virtio_vsock_skb_rx_put(). Unfortunately,
virtio_vsock_skb_rx_put() uses the length from the packet header as the
length argument to skb_put(), potentially resulting in SKB overflow if
the host has gone wonky.

Validate the length as advertised by the packet header before calling
virtio_vsock_skb_rx_put().

Cc:
Fixes: 71dc9ec9ac7d ("virtio/vsock: replace virtio_vsock_pkt with sk_buff")
Signed-off-by: Will Deacon
---
 net/vmw_vsock/virtio_transport.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index f0e48e6911fc..bd2c6aaa1a93 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -624,8 +624,9 @@ static void virtio_transport_rx_work(struct work_struct *work)
         do {
                 virtqueue_disable_cb(vq);
                 for (;;) {
+                        unsigned int len, payload_len;
+                        struct virtio_vsock_hdr *hdr;
                         struct sk_buff *skb;
-                        unsigned int len;
 
                         if (!virtio_transport_more_replies(vsock)) {
                                 /* Stop rx until the device processes already
@@ -642,12 +643,19 @@ static void virtio_transport_rx_work(struct work_struct *work)
                         vsock->rx_buf_nr--;
 
                         /* Drop short/long packets */
-                        if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
+                        if (unlikely(len < sizeof(*hdr) ||
                                      len > virtio_vsock_skb_len(skb))) {
                                 kfree_skb(skb);
                                 continue;
                         }
 
+                        hdr = virtio_vsock_hdr(skb);
+                        payload_len = le32_to_cpu(hdr->len);
+                        if (payload_len > len - sizeof(*hdr)) {
+                                kfree_skb(skb);
+                                continue;
+                        }
+
                         virtio_vsock_skb_rx_put(skb);
                         virtio_transport_deliver_tap_pkt(skb);
                         virtio_transport_recv_pkt(&virtio_transport, skb);
-- 
2.50.0.727.gbf7dc18ff4-goog
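
Taken together, the two fixes above enforce the same invariant from both
ends: the buffer handed over by the peer must be large enough for the
packet header, and the payload length advertised in that header must fit
in whatever space remains. The standalone sketch below is not kernel
code; the 44-byte header size, the 64KiB VIRTIO_VSOCK_MAX_PKT_BUF_SIZE
and the helper names are assumptions taken from the commit messages:

/* Illustrative sketch (not kernel code) of the bounds enforced on the
 * receive paths once both patches are applied.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE                44u             /* assumed header size */
#define MAX_PKT_BUF_SIZE        (64u * 1024)    /* assumed max payload */

/* vhost side: reject over-sized iovecs before any allocation happens. */
static bool tx_buf_len_ok(uint32_t len)
{
        return len <= MAX_PKT_BUF_SIZE + HDR_SIZE;
}

/* virtio side: the header-advertised payload must fit in the buffer
 * length actually reported by the device.
 */
static bool rx_payload_len_ok(uint32_t used_len, uint32_t hdr_payload_len)
{
        if (used_len < HDR_SIZE)
                return false;
        return hdr_payload_len <= used_len - HDR_SIZE;
}

int main(void)
{
        printf("64KiB+44 iovec accepted: %d\n",
               tx_buf_len_ok(MAX_PKT_BUF_SIZE + HDR_SIZE));
        printf("64KiB+45 iovec accepted: %d\n",
               tx_buf_len_ok(MAX_PKT_BUF_SIZE + HDR_SIZE + 1));
        printf("hdr->len 100 in 144-byte buffer ok: %d\n",
               rx_payload_len_ok(144, 100));
        printf("hdr->len 101 in 144-byte buffer ok: %d\n",
               rx_payload_len_ok(144, 101));
        return 0;
}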
virtio_vsock_skb_rx_put() only calls skb_put() if the length in the
packet header is not zero, even though skb_put() handles this case
gracefully.

Remove the functionally redundant check from virtio_vsock_skb_rx_put()
and, on the assumption that this is a worthwhile optimisation for
handling credit messages, augment the existing length checks in
virtio_transport_rx_work() to elide the call for zero-length payloads.

Note that the vhost code already has similar logic in
vhost_vsock_alloc_skb().

Signed-off-by: Will Deacon
---
 include/linux/virtio_vsock.h     | 4 +---
 net/vmw_vsock/virtio_transport.c | 4 +++-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 36fb3edfa403..eb6980aa19fd 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -52,9 +52,7 @@ static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
         u32 len;
 
         len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
-
-        if (len > 0)
-                skb_put(skb, len);
+        skb_put(skb, len);
 }
 
 static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index bd2c6aaa1a93..488e6ddc6ffa 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -656,7 +656,9 @@ static void virtio_transport_rx_work(struct work_struct *work)
                                 continue;
                         }
 
-                        virtio_vsock_skb_rx_put(skb);
+                        if (payload_len)
+                                virtio_vsock_skb_rx_put(skb);
+
                         virtio_transport_deliver_tap_pkt(skb);
                         virtio_transport_recv_pkt(&virtio_transport, skb);
                 }
-- 
2.50.0.727.gbf7dc18ff4-goog

When allocating receive buffers for the vsock virtio RX virtqueue, an
SKB is allocated with a 4140-byte data payload (the 44-byte packet
header + VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE). Even when factoring in the
SKB overhead, the rounding in kmalloc_reserve() results in a wasteful
8KiB allocation (~3700 unusable bytes) and a higher-order page
allocation for the sake of a few hundred bytes of packet data.

Limit the vsock virtio RX buffers to a page per SKB, resulting in much
better memory utilisation and removing the need to allocate
higher-order pages entirely.

Signed-off-by: Will Deacon
---
 include/linux/virtio_vsock.h     | 1 -
 net/vmw_vsock/virtio_transport.c | 7 ++++++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index eb6980aa19fd..1b5731186095 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -109,7 +109,6 @@ static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
         return (size_t)(skb_end_pointer(skb) - skb->head);
 }
 
-#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE        (1024 * 4)
 #define VIRTIO_VSOCK_MAX_BUF_SIZE               0xFFFFFFFFUL
 #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE           (1024 * 64)
 
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 488e6ddc6ffa..3daba06ed499 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -307,7 +307,12 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
 
 static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
 {
-        int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
+        /* Dimension the SKB so that the entire thing fits exactly into
+         * a single page. This avoids wasting memory due to alloc_skb()
+         * rounding up to the next page order and also means that we
+         * don't leave higher-order pages sitting around in the RX queue.
+         */
+        int total_len = SKB_WITH_OVERHEAD(PAGE_SIZE);
         struct scatterlist pkt, *p;
         struct virtqueue *vq;
         struct sk_buff *skb;
-- 
2.50.0.727.gbf7dc18ff4-goog
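
As a rough cross-check of the numbers quoted above, the arithmetic can
be reproduced outside the kernel. This is only a sketch under
assumptions (4KiB pages and roughly 320 bytes of cache-line-aligned
skb_shared_info on a typical 64-bit build); the real kernel derives the
figure via SKB_WITH_OVERHEAD() and SKB_DATA_ALIGN():

/* Sketch of the RX buffer sizing arithmetic. The skb_shared_info size
 * and cache-line alignment are assumptions, not kernel constants.
 */
#include <stdio.h>

#define PAGE_SIZE       4096u
#define OLD_RX_REQ      4140u   /* 44-byte header + 4KiB default buffer */
#define SHINFO_SIZE     320u    /* assumed sizeof(struct skb_shared_info) */
#define CACHE_ALIGN(x)  (((x) + 63u) & ~63u)

int main(void)
{
        /* Old scheme: 4140 bytes of head plus the shared info no longer
         * fits in 4KiB, so kmalloc rounds the backing buffer up to 8KiB.
         */
        unsigned int old_alloc = CACHE_ALIGN(OLD_RX_REQ) + SHINFO_SIZE;

        /* New scheme: ask for SKB_WITH_OVERHEAD(PAGE_SIZE) so that head
         * plus shared info land exactly in one page.
         */
        unsigned int new_req = PAGE_SIZE - CACHE_ALIGN(SHINFO_SIZE);

        printf("old request: %u bytes -> 8KiB slab, ~%u bytes unusable\n",
               OLD_RX_REQ, 8192u - old_alloc);
        printf("new request: %u bytes of head + %u bytes of overhead = one 4KiB page\n",
               new_req, CACHE_ALIGN(SHINFO_SIZE));
        return 0;
}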
In preparation for nonlinear allocations for large SKBs, introduce a
new virtio_vsock_alloc_linear_skb() helper to return linear SKBs
unconditionally and switch all callers over to this new interface for
now.

No functional change.

Signed-off-by: Will Deacon
---
 drivers/vhost/vsock.c                   | 2 +-
 include/linux/virtio_vsock.h            | 6 ++++++
 net/vmw_vsock/virtio_transport.c        | 2 +-
 net/vmw_vsock/virtio_transport_common.c | 2 +-
 4 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 66a0f060770e..b13f6be452ba 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -348,7 +348,7 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
                 return NULL;
 
         /* len contains both payload and hdr */
-        skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
+        skb = virtio_vsock_alloc_linear_skb(len, GFP_KERNEL);
         if (!skb)
                 return NULL;
 
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 1b5731186095..6d4a933c895a 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -70,6 +70,12 @@ static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t ma
         return skb;
 }
 
+static inline struct sk_buff *
+virtio_vsock_alloc_linear_skb(unsigned int size, gfp_t mask)
+{
+        return virtio_vsock_alloc_skb(size, mask);
+}
+
 static inline void virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
 {
         spin_lock_bh(&list->lock);
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 3daba06ed499..2959db0404ed 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -321,7 +321,7 @@ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
         vq = vsock->vqs[VSOCK_VQ_RX];
 
         do {
-                skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
+                skb = virtio_vsock_alloc_linear_skb(total_len, GFP_KERNEL);
                 if (!skb)
                         break;
 
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 1b5d9896edae..c9eb7f7ac00d 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -261,7 +261,7 @@ static struct sk_buff *virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *
         if (!zcopy)
                 skb_len += payload_len;
 
-        skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
+        skb = virtio_vsock_alloc_linear_skb(skb_len, GFP_KERNEL);
         if (!skb)
                 return NULL;
 
-- 
2.50.0.727.gbf7dc18ff4-goog

When receiving a packet from a guest, vhost_vsock_handle_tx_kick()
calls vhost_vsock_alloc_skb() to allocate and fill an SKB with the
receive data. Unfortunately, these are always linear allocations and
can therefore result in significant pressure on kmalloc() considering
that the maximum packet size (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE +
VIRTIO_VSOCK_SKB_HEADROOM) is a little over 64KiB, resulting in a
128KiB allocation for each packet.

Rework the vsock SKB allocation so that, for sizes with page order
greater than PAGE_ALLOC_COSTLY_ORDER, a nonlinear SKB is allocated
instead with the packet header in the SKB and the receive data in the
fragments. Move the VIRTIO_VSOCK_SKB_HEADROOM check out of the
allocation function and into the single caller that needs it, and add
a debug warning if virtio_vsock_skb_rx_put() is ever called on an SKB
with a non-zero length, as this would be destructive for the nonlinear
case.

Signed-off-by: Will Deacon
---
 drivers/vhost/vsock.c        | 11 +++++------
 include/linux/virtio_vsock.h | 32 +++++++++++++++++++++++++-------
 2 files changed, 30 insertions(+), 13 deletions(-)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index b13f6be452ba..f3c2ea1d0ae7 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -344,11 +344,12 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
 
         len = iov_length(vq->iov, out);
 
-        if (len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM)
+        if (len < VIRTIO_VSOCK_SKB_HEADROOM ||
+            len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM)
                 return NULL;
 
         /* len contains both payload and hdr */
-        skb = virtio_vsock_alloc_linear_skb(len, GFP_KERNEL);
+        skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
         if (!skb)
                 return NULL;
 
@@ -377,10 +378,8 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
 
         virtio_vsock_skb_rx_put(skb);
 
-        nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
-        if (nbytes != payload_len) {
-                vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
-                       payload_len, nbytes);
+        if (skb_copy_datagram_from_iter(skb, 0, &iov_iter, payload_len)) {
+                vq_err(vq, "Failed to copy %zu byte payload\n", payload_len);
                 kfree_skb(skb);
                 return NULL;
         }
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 6d4a933c895a..ad69668f6b91 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -51,29 +51,47 @@ static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
 {
         u32 len;
 
+        DEBUG_NET_WARN_ON_ONCE(skb->len);
         len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
-        skb_put(skb, len);
+
+        if (skb_is_nonlinear(skb))
+                skb->len = len;
+        else
+                skb_put(skb, len);
 }
 
-static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
+static inline struct sk_buff *
+__virtio_vsock_alloc_skb_with_frags(unsigned int header_len,
+                                    unsigned int data_len,
+                                    gfp_t mask)
 {
         struct sk_buff *skb;
+        int err;
 
-        if (size < VIRTIO_VSOCK_SKB_HEADROOM)
-                return NULL;
-
-        skb = alloc_skb(size, mask);
+        skb = alloc_skb_with_frags(header_len, data_len,
+                                   PAGE_ALLOC_COSTLY_ORDER, &err, mask);
         if (!skb)
                 return NULL;
 
         skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
+        skb->data_len = data_len;
         return skb;
 }
 
 static inline struct sk_buff *
 virtio_vsock_alloc_linear_skb(unsigned int size, gfp_t mask)
 {
-        return virtio_vsock_alloc_skb(size, mask);
+        return __virtio_vsock_alloc_skb_with_frags(size, 0, mask);
+}
+
+static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
+{
+        if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+                return virtio_vsock_alloc_linear_skb(size, mask);
+
+        size -= VIRTIO_VSOCK_SKB_HEADROOM;
+        return __virtio_vsock_alloc_skb_with_frags(VIRTIO_VSOCK_SKB_HEADROOM,
+                                                   size, mask);
 }
 
 static inline void virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
-- 
2.50.0.727.gbf7dc18ff4-goog
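
For reference, the allocation policy introduced above boils down to a
single threshold. The sketch below is not the kernel implementation:
the 4KiB page size, a PAGE_ALLOC_COSTLY_ORDER of 3, the 44-byte
headroom and the skb_shared_info overhead are assumptions, and
plan_alloc() is a made-up helper that only models the linear/nonlinear
decision:

/* Sketch of the linear/nonlinear split in the new allocator. */
#include <stdio.h>

#define PAGE_SIZE               4096u
#define PAGE_ALLOC_COSTLY_ORDER 3u
#define HDR_SIZE                44u     /* assumed VIRTIO_VSOCK_SKB_HEADROOM */
#define SHINFO_OVERHEAD         320u    /* assumed skb_shared_info footprint */

/* Roughly SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) */
#define LINEAR_LIMIT    ((PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) - SHINFO_OVERHEAD)

struct alloc_plan {
        unsigned int linear_len;        /* bytes in the SKB head */
        unsigned int frag_len;          /* bytes spread over page frags */
};

static struct alloc_plan plan_alloc(unsigned int size)
{
        struct alloc_plan p;

        if (size <= LINEAR_LIMIT) {
                p.linear_len = size;            /* small packet: stay linear */
                p.frag_len = 0;
        } else {
                p.linear_len = HDR_SIZE;        /* header in the head ... */
                p.frag_len = size - HDR_SIZE;   /* ... payload in the frags */
        }
        return p;
}

int main(void)
{
        unsigned int sizes[] = { 4096, 16384, 64 * 1024 + HDR_SIZE };

        for (unsigned int i = 0; i < 3; i++) {
                struct alloc_plan p = plan_alloc(sizes[i]);

                printf("size %6u -> linear %5u, frags %5u\n",
                       sizes[i], p.linear_len, p.frag_len);
        }
        return 0;
}

With these assumptions, anything up to roughly 32KiB keeps the existing
linear behaviour, so only the large transfers close to 64KiB take the
fragment path.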
In preparation for using virtio_vsock_skb_rx_put() when populating SKBs
on the vsock TX path, rename virtio_vsock_skb_rx_put() to
virtio_vsock_skb_put().

No functional change.

Reviewed-by: Stefano Garzarella
Signed-off-by: Will Deacon
---
 drivers/vhost/vsock.c            | 2 +-
 include/linux/virtio_vsock.h     | 2 +-
 net/vmw_vsock/virtio_transport.c | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index f3c2ea1d0ae7..a6cd72a32f63 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -376,7 +376,7 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
                 return NULL;
         }
 
-        virtio_vsock_skb_rx_put(skb);
+        virtio_vsock_skb_put(skb);
 
         if (skb_copy_datagram_from_iter(skb, 0, &iov_iter, payload_len)) {
                 vq_err(vq, "Failed to copy %zu byte payload\n", payload_len);
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index ad69668f6b91..ed5eab46e3dc 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -47,7 +47,7 @@ static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
         VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
 }
 
-static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
+static inline void virtio_vsock_skb_put(struct sk_buff *skb)
 {
         u32 len;
 
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 2959db0404ed..44751cf8dfca 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -662,7 +662,7 @@ static void virtio_transport_rx_work(struct work_struct *work)
                         }
 
                         if (payload_len)
-                                virtio_vsock_skb_rx_put(skb);
+                                virtio_vsock_skb_put(skb);
 
                         virtio_transport_deliver_tap_pkt(skb);
                         virtio_transport_recv_pkt(&virtio_transport, skb);
-- 
2.50.0.727.gbf7dc18ff4-goog
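
Since the renamed helper is about to serve both the RX and TX fill
paths, it may be worth restating the length bookkeeping it relies on.
The toy model below is not the sk_buff implementation; the struct,
fields and functions are simplified stand-ins invented purely to show
why skb_put() is only used in the linear case, while a nonlinear SKB
just records the total length (its fragment bytes having already been
accounted in data_len at allocation time):

/* Toy model of the length bookkeeping behind virtio_vsock_skb_put().
 * The struct below is a simplified stand-in for sk_buff, not the real
 * thing: it only tracks the fields needed to show the two cases.
 */
#include <stdio.h>

struct toy_skb {
        unsigned int tail;      /* end of valid data in the head buffer */
        unsigned int len;       /* total payload bytes (head + frags)   */
        unsigned int data_len;  /* payload bytes held in frags only     */
};

static int toy_is_nonlinear(const struct toy_skb *skb)
{
        return skb->data_len != 0;
}

/* Rough equivalent of virtio_vsock_skb_put(): trust the header length. */
static void toy_put(struct toy_skb *skb, unsigned int hdr_len)
{
        if (toy_is_nonlinear(skb)) {
                /* Payload already lives in frags; only record the total. */
                skb->len = hdr_len;
        } else {
                /* Linear case: extend the head buffer, as skb_put() does. */
                skb->tail += hdr_len;
                skb->len += hdr_len;
        }
}

int main(void)
{
        struct toy_skb linear = { 0 };
        struct toy_skb nonlinear = { .data_len = 65536 };

        toy_put(&linear, 1024);
        toy_put(&nonlinear, 65536);
        printf("linear:    len=%u tail=%u\n", linear.len, linear.tail);
        printf("nonlinear: len=%u data_len=%u\n", nonlinear.len, nonlinear.data_len);
        return 0;
}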
When transmitting a vsock packet, virtio_transport_send_pkt_info()
calls virtio_transport_alloc_skb() to allocate and fill SKBs with the
transmit data. Unfortunately, these are always linear allocations and
can therefore result in significant pressure on kmalloc() considering
that the maximum packet size (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE +
VIRTIO_VSOCK_SKB_HEADROOM) is a little over 64KiB, resulting in a
128KiB allocation for each packet.

Rework the vsock SKB allocation so that, for sizes with page order
greater than PAGE_ALLOC_COSTLY_ORDER, a nonlinear SKB is allocated
instead with the packet header in the SKB and the transmit data in the
fragments. Note that this affects both the vhost and virtio transports.

Signed-off-by: Will Deacon
---
 net/vmw_vsock/virtio_transport_common.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index c9eb7f7ac00d..f74677c3511e 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -109,7 +109,8 @@ static int virtio_transport_fill_skb(struct sk_buff *skb,
                 return __zerocopy_sg_from_iter(info->msg, NULL, skb,
                                                &info->msg->msg_iter, len, NULL);
 
-        return memcpy_from_msg(skb_put(skb, len), info->msg, len);
+        virtio_vsock_skb_put(skb);
+        return skb_copy_datagram_from_iter(skb, 0, &info->msg->msg_iter, len);
 }
 
 static void virtio_transport_init_hdr(struct sk_buff *skb,
@@ -261,7 +262,7 @@ static struct sk_buff *virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *
         if (!zcopy)
                 skb_len += payload_len;
 
-        skb = virtio_vsock_alloc_linear_skb(skb_len, GFP_KERNEL);
+        skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
         if (!skb)
                 return NULL;
 
-- 
2.50.0.727.gbf7dc18ff4-goog
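
One practical consequence shown in the first hunk above: once the
payload can sit in page frags, a single memcpy into skb->data no longer
reaches all of it, which is why the copy goes through the frag-aware
skb_copy_datagram_from_iter(). The sketch below is an invented,
self-contained illustration of the same idea; the split_buf layout and
split_copy() helper are not kernel interfaces:

/* Sketch of why a frag-aware copy is needed once payloads can be
 * nonlinear. The "split buffer" is a stand-in for an SKB whose data is
 * spread over a small head and several page frags.
 */
#include <stdio.h>
#include <string.h>

#define NFRAGS  8
#define FRAG_SZ 8

struct split_buf {
        char head[8];                   /* tiny linear area */
        char frags[NFRAGS][FRAG_SZ];    /* payload "pages" */
};

/* Copy src into the buffer, walking the head first and then each frag,
 * the way a datagram-iterator copy walks an SKB's frag list.
 */
static void split_copy(struct split_buf *buf, const char *src, size_t len)
{
        size_t chunk = len < sizeof(buf->head) ? len : sizeof(buf->head);
        size_t off = chunk;

        memcpy(buf->head, src, chunk);
        for (int i = 0; i < NFRAGS && off < len; i++) {
                chunk = (len - off) < FRAG_SZ ? (len - off) : FRAG_SZ;
                memcpy(buf->frags[i], src + off, chunk);
                off += chunk;
        }
}

int main(void)
{
        struct split_buf buf = { 0 };
        const char msg[] = "payload spanning the head and several frags";

        split_copy(&buf, msg, sizeof(msg));
        printf("head:  %.8s\n", buf.head);
        printf("frag0: %.8s\n", buf.frags[0]);
        return 0;
}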