This patch moves the check for available free space for a new entry into a separate function. As a result, __ptr_ring_produce() remains logically unchanged, while the new helper allows callers to determine in advance whether subsequent __ptr_ring_produce() calls will succeed. This information can, for example, be used to temporarily stop producing until __ptr_ring_peek() indicates that space is available again. Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Signed-off-by: Simon Schippers --- include/linux/ptr_ring.h | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 534531807d95..a5a3fa4916d3 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -96,6 +96,14 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r) return ret; } +static inline int __ptr_ring_produce_peek(struct ptr_ring *r) +{ + if (unlikely(!r->size) || r->queue[r->producer]) + return -ENOSPC; + + return 0; +} + /* Note: callers invoking this in a loop must use a compiler barrier, * for example cpu_relax(). Callers must hold producer_lock. * Callers are responsible for making sure pointer that is being queued @@ -103,8 +111,10 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r) */ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) { - if (unlikely(!r->size) || r->queue[r->producer]) - return -ENOSPC; + int p = __ptr_ring_produce_peek(r); + + if (p) + return p; /* Make sure the pointer we are storing points to a valid data. */ /* Pairs with the dependency ordering in __ptr_ring_consume. */ -- 2.43.0 This proposed function checks whether __ptr_ring_zero_tail() was invoked within the last n calls to __ptr_ring_consume(), which indicates that new free space was created. 
Since __ptr_ring_zero_tail() moves the tail to the head - and no other function modifies either the head or the tail, aside from the wrap-around case described below - detecting such a movement is sufficient to detect the invocation of __ptr_ring_zero_tail(). The implementation detects this movement by checking whether the tail is at most n positions behind the head. If this condition holds, the shift of the tail to its current position must have occurred within the last n calls to __ptr_ring_consume(), indicating that __ptr_ring_zero_tail() was invoked and that new free space was created. This logic also correctly handles the wrap-around case in which __ptr_ring_zero_tail() is invoked and the head and the tail are reset to 0. Since this reset likewise moves the tail to the head, the same detection logic applies. Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Signed-off-by: Simon Schippers --- include/linux/ptr_ring.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index a5a3fa4916d3..7cdae6d1d400 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -438,6 +438,19 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, return ret; } +/* Returns true if the consume of the last n elements has created space + * in the ring buffer (i.e., a new element can be produced). + * + * Note: Because of batching, a successful call to __ptr_ring_consume() / + * __ptr_ring_consume_batched() does not guarantee that the next call to + * __ptr_ring_produce() will succeed. + */ +static inline bool __ptr_ring_consume_created_space(struct ptr_ring *r, + int n) +{ + return r->consumer_head - r->consumer_tail < n; +} + /* Cast to structure type and call a function without discarding from FIFO. * Function must return a value. * Callers must take consumer_lock. 
-- 2.43.0 Introduce {tun,tap}_ring_consume() helpers that wrap __ptr_ring_consume() and wake the corresponding netdev subqueue when consuming an entry frees space in the underlying ptr_ring. Stopping of the netdev queue when the ptr_ring is full will be introduced in an upcoming commit. Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Signed-off-by: Simon Schippers --- drivers/net/tap.c | 23 ++++++++++++++++++++++- drivers/net/tun.c | 25 +++++++++++++++++++++++-- 2 files changed, 45 insertions(+), 3 deletions(-) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 1197f245e873..2442cf7ac385 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -753,6 +753,27 @@ static ssize_t tap_put_user(struct tap_queue *q, return ret ? ret : total; } +static void *tap_ring_consume(struct tap_queue *q) +{ + struct ptr_ring *ring = &q->ring; + struct net_device *dev; + void *ptr; + + spin_lock(&ring->consumer_lock); + + ptr = __ptr_ring_consume(ring); + if (unlikely(ptr && __ptr_ring_consume_created_space(ring, 1))) { + rcu_read_lock(); + dev = rcu_dereference(q->tap)->dev; + netif_wake_subqueue(dev, q->queue_index); + rcu_read_unlock(); + } + + spin_unlock(&ring->consumer_lock); + + return ptr; +} + static ssize_t tap_do_read(struct tap_queue *q, struct iov_iter *to, int noblock, struct sk_buff *skb) @@ -774,7 +795,7 @@ static ssize_t tap_do_read(struct tap_queue *q, TASK_INTERRUPTIBLE); /* Read frames from the queue */ - skb = ptr_ring_consume(&q->ring); + skb = tap_ring_consume(q); if (skb) break; if (noblock) { diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 8192740357a0..7148f9a844a4 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2113,13 +2113,34 @@ static ssize_t tun_put_user(struct tun_struct *tun, return total; } +static void *tun_ring_consume(struct tun_file *tfile) +{ + struct ptr_ring *ring = &tfile->tx_ring; + struct net_device *dev; + void *ptr; + + spin_lock(&ring->consumer_lock); + + ptr = __ptr_ring_consume(ring); + if 
(unlikely(ptr && __ptr_ring_consume_created_space(ring, 1))) { + rcu_read_lock(); + dev = rcu_dereference(tfile->tun)->dev; + netif_wake_subqueue(dev, tfile->queue_index); + rcu_read_unlock(); + } + + spin_unlock(&ring->consumer_lock); + + return ptr; +} + static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) { DECLARE_WAITQUEUE(wait, current); void *ptr = NULL; int error = 0; - ptr = ptr_ring_consume(&tfile->tx_ring); + ptr = tun_ring_consume(tfile); if (ptr) goto out; if (noblock) { @@ -2131,7 +2152,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) while (1) { set_current_state(TASK_INTERRUPTIBLE); - ptr = ptr_ring_consume(&tfile->tx_ring); + ptr = tun_ring_consume(tfile); if (ptr) break; if (signal_pending(current)) { -- 2.43.0 Add {tun,tap}_ring_consume_batched() that wrap __ptr_ring_consume_batched() and wake the corresponding netdev subqueue when consuming the entries frees space in the ptr_ring. These wrappers are supposed to be used by vhost-net in an upcoming commit. 
Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Signed-off-by: Simon Schippers --- drivers/net/tap.c | 23 +++++++++++++++++++++++ drivers/net/tun.c | 23 +++++++++++++++++++++++ include/linux/if_tap.h | 6 ++++++ include/linux/if_tun.h | 7 +++++++ 4 files changed, 59 insertions(+) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 2442cf7ac385..7e3b4eed797c 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -774,6 +774,29 @@ static void *tap_ring_consume(struct tap_queue *q) return ptr; } +int tap_ring_consume_batched(struct file *file, void **array, int n) +{ + struct tap_queue *q = file->private_data; + struct ptr_ring *ring = &q->ring; + struct net_device *dev; + int i; + + spin_lock(&ring->consumer_lock); + + i = __ptr_ring_consume_batched(ring, array, n); + if (__ptr_ring_consume_created_space(ring, i)) { + rcu_read_lock(); + dev = rcu_dereference(q->tap)->dev; + netif_wake_subqueue(dev, q->queue_index); + rcu_read_unlock(); + } + + spin_unlock(&ring->consumer_lock); + + return i; +} +EXPORT_SYMBOL_GPL(tap_ring_consume_batched); + static ssize_t tap_do_read(struct tap_queue *q, struct iov_iter *to, int noblock, struct sk_buff *skb) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7148f9a844a4..db3b72025cfb 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -3736,6 +3736,29 @@ struct socket *tun_get_socket(struct file *file) } EXPORT_SYMBOL_GPL(tun_get_socket); +int tun_ring_consume_batched(struct file *file, void **array, int n) +{ + struct tun_file *tfile = file->private_data; + struct ptr_ring *ring = &tfile->tx_ring; + struct net_device *dev; + int i; + + spin_lock(&ring->consumer_lock); + + i = __ptr_ring_consume_batched(ring, array, n); + if (__ptr_ring_consume_created_space(ring, i)) { + rcu_read_lock(); + dev = rcu_dereference(tfile->tun)->dev; + netif_wake_subqueue(dev, tfile->queue_index); + rcu_read_unlock(); + } + + spin_unlock(&ring->consumer_lock); + + return i; +} +EXPORT_SYMBOL_GPL(tun_ring_consume_batched); + 
struct ptr_ring *tun_get_tx_ring(struct file *file) { struct tun_file *tfile; diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 553552fa635c..cf8b90320b8d 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -11,6 +11,7 @@ struct socket; #if IS_ENABLED(CONFIG_TAP) struct socket *tap_get_socket(struct file *); struct ptr_ring *tap_get_ptr_ring(struct file *file); +int tap_ring_consume_batched(struct file *file, void **array, int n); #else #include #include @@ -22,6 +23,11 @@ static inline struct ptr_ring *tap_get_ptr_ring(struct file *f) { return ERR_PTR(-EINVAL); } +static inline int tap_ring_consume_batched(struct file *f, + void **array, int n) +{ + return 0; +} #endif /* CONFIG_TAP */ /* diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 80166eb62f41..444dda75a372 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -22,6 +22,7 @@ struct tun_msg_ctl { #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); struct ptr_ring *tun_get_tx_ring(struct file *file); +int tun_ring_consume_batched(struct file *file, void **array, int n); static inline bool tun_is_xdp_frame(void *ptr) { @@ -55,6 +56,12 @@ static inline struct ptr_ring *tun_get_tx_ring(struct file *f) return ERR_PTR(-EINVAL); } +static inline int tun_ring_consume_batched(struct file *file, + void **array, int n) +{ + return 0; +} + static inline bool tun_is_xdp_frame(void *ptr) { return false; -- 2.43.0 Add {tun,tap}_ring_unconsume() wrappers to allow external modules (e.g. vhost-net) to return previously consumed entries back to the ptr_ring. The functions delegate to ptr_ring_unconsume() and take a destroy callback for entries that cannot be returned to the ring. 
Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Co-developed-by: Jon Kohler Signed-off-by: Jon Kohler Signed-off-by: Simon Schippers --- drivers/net/tap.c | 10 ++++++++++ drivers/net/tun.c | 10 ++++++++++ include/linux/if_tap.h | 4 ++++ include/linux/if_tun.h | 5 +++++ 4 files changed, 29 insertions(+) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 7e3b4eed797c..4ffe4e95b5a6 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -797,6 +797,16 @@ int tap_ring_consume_batched(struct file *file, void **array, int n) } EXPORT_SYMBOL_GPL(tap_ring_consume_batched); +void tap_ring_unconsume(struct file *file, void **batch, int n, + void (*destroy)(void *)) +{ + struct tap_queue *q = file->private_data; + struct ptr_ring *ring = &q->ring; + + ptr_ring_unconsume(ring, batch, n, destroy); +} +EXPORT_SYMBOL_GPL(tap_ring_unconsume); + static ssize_t tap_do_read(struct tap_queue *q, struct iov_iter *to, int noblock, struct sk_buff *skb) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index db3b72025cfb..d44d206c65e8 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -3759,6 +3759,16 @@ int tun_ring_consume_batched(struct file *file, void **array, int n) } EXPORT_SYMBOL_GPL(tun_ring_consume_batched); +void tun_ring_unconsume(struct file *file, void **batch, int n, + void (*destroy)(void *)) +{ + struct tun_file *tfile = file->private_data; + struct ptr_ring *ring = &tfile->tx_ring; + + ptr_ring_unconsume(ring, batch, n, destroy); +} +EXPORT_SYMBOL_GPL(tun_ring_unconsume); + struct ptr_ring *tun_get_tx_ring(struct file *file) { struct tun_file *tfile; diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index cf8b90320b8d..28326a69745a 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -12,6 +12,8 @@ struct socket; struct socket *tap_get_socket(struct file *); struct ptr_ring *tap_get_ptr_ring(struct file *file); int tap_ring_consume_batched(struct file *file, void **array, int n); +void tap_ring_unconsume(struct file
*file, void **batch, int n, + void (*destroy)(void *)); #else #include #include @@ -28,6 +30,8 @@ static inline int tap_ring_consume_batched(struct file *f, { return 0; } +static inline void tap_ring_unconsume(struct file *file, void **batch, + int n, void (*destroy)(void *)) {} #endif /* CONFIG_TAP */ /* diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 444dda75a372..1274c6b34eb6 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -23,6 +23,8 @@ struct tun_msg_ctl { struct socket *tun_get_socket(struct file *); struct ptr_ring *tun_get_tx_ring(struct file *file); int tun_ring_consume_batched(struct file *file, void **array, int n); +void tun_ring_unconsume(struct file *file, void **batch, int n, + void (*destroy)(void *)); static inline bool tun_is_xdp_frame(void *ptr) { @@ -62,6 +64,9 @@ static inline int tun_ring_consume_batched(struct file *file, return 0; } +static inline void tun_ring_unconsume(struct file *file, void **batch, + int n, void (*destroy)(void *)) {} + static inline bool tun_is_xdp_frame(void *ptr) { return false; -- 2.43.0 Add tun_is_tun_file() and tap_is_tap_file() helper functions to check if a file is a TUN or TAP file, which will be utilized by vhost-net. 
Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Co-developed-by: Jon Kohler Signed-off-by: Jon Kohler Signed-off-by: Simon Schippers --- drivers/net/tap.c | 13 +++++++++++++ drivers/net/tun.c | 13 +++++++++++++ include/linux/if_tap.h | 5 +++++ include/linux/if_tun.h | 6 ++++++ 4 files changed, 37 insertions(+) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 4ffe4e95b5a6..cf19d7181c2f 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1243,6 +1243,19 @@ struct ptr_ring *tap_get_ptr_ring(struct file *file) } EXPORT_SYMBOL_GPL(tap_get_ptr_ring); +bool tap_is_tap_file(struct file *file) +{ + struct tap_queue *q; + + if (file->f_op != &tap_fops) + return false; + q = file->private_data; + if (!q) + return false; + return true; +} +EXPORT_SYMBOL_GPL(tap_is_tap_file); + int tap_queue_resize(struct tap_dev *tap) { struct net_device *dev = tap->dev; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index d44d206c65e8..9d6f98e00661 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -3782,6 +3782,19 @@ struct ptr_ring *tun_get_tx_ring(struct file *file) } EXPORT_SYMBOL_GPL(tun_get_tx_ring); +bool tun_is_tun_file(struct file *file) +{ + struct tun_file *tfile; + + if (file->f_op != &tun_fops) + return false; + tfile = file->private_data; + if (!tfile) + return false; + return true; +} +EXPORT_SYMBOL_GPL(tun_is_tun_file); + module_init(tun_init); module_exit(tun_cleanup); MODULE_DESCRIPTION(DRV_DESCRIPTION); diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 28326a69745a..14194342b784 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -14,6 +14,7 @@ struct ptr_ring *tap_get_ptr_ring(struct file *file); int tap_ring_consume_batched(struct file *file, void **array, int n); void tap_ring_unconsume(struct file *file, void **batch, int n, void (*destroy)(void *)); +bool tap_is_tap_file(struct file *file); #else #include #include @@ -32,6 +33,10 @@ static inline int tap_ring_consume_batched(struct file *f, { return 0; } static
inline void tap_ring_unconsume(struct file *file, void **batch, int n, void (*destroy)(void *)) {} +static inline bool tap_is_tap_file(struct file *f) +{ + return false; +} #endif /* CONFIG_TAP */ /* diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 1274c6b34eb6..0910c6dbac20 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -25,6 +25,7 @@ struct ptr_ring *tun_get_tx_ring(struct file *file); int tun_ring_consume_batched(struct file *file, void **array, int n); void tun_ring_unconsume(struct file *file, void **batch, int n, void (*destroy)(void *)); +bool tun_is_tun_file(struct file *file); static inline bool tun_is_xdp_frame(void *ptr) { @@ -67,6 +68,11 @@ static inline int tun_ring_consume_batched(struct file *file, static inline void tun_ring_unconsume(struct file *file, void **batch, int n, void (*destroy)(void *)) {} +static inline bool tun_is_tun_file(struct file *f) +{ + return false; +} + static inline bool tun_is_xdp_frame(void *ptr) { return false; -- 2.43.0 Replace the direct use of ptr_ring in the vhost-net virtqueue with tun/tap ring wrapper helpers. Instead of storing an rx_ring pointer, the virtqueue now stores the interface type (IF_TUN, IF_TAP, or IF_NONE) and dispatches to the corresponding tun/tap helpers for ring produce, consume, and unconsume operations. Routing ring operations through the tun/tap helpers enables netdev queue wakeups, which are required for upcoming netdev queue flow control support shared by tun/tap and vhost-net. No functional change is intended beyond switching to the wrapper helpers. 
Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Co-developed-by: Jon Kohler Signed-off-by: Jon Kohler Signed-off-by: Simon Schippers --- drivers/vhost/net.c | 92 +++++++++++++++++++++++++++++---------------- 1 file changed, 60 insertions(+), 32 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 7f886d3dba7d..215556f7cd40 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -90,6 +90,12 @@ enum { VHOST_NET_VQ_MAX = 2, }; +enum if_type { + IF_NONE = 0, + IF_TUN = 1, + IF_TAP = 2, +}; + struct vhost_net_ubuf_ref { /* refcount follows semantics similar to kref: * 0: object is released @@ -127,10 +133,11 @@ struct vhost_net_virtqueue { /* Reference counting for outstanding ubufs. * Protected by vq mutex. Writers must also take device mutex. */ struct vhost_net_ubuf_ref *ubufs; - struct ptr_ring *rx_ring; struct vhost_net_buf rxq; /* Batched XDP buffs */ struct xdp_buff *xdp; + /* Interface type */ + enum if_type type; }; struct vhost_net { @@ -176,24 +183,50 @@ static void *vhost_net_buf_consume(struct vhost_net_buf *rxq) return ret; } -static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq) +static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq, + struct sock *sk) { + struct file *file = sk->sk_socket->file; struct vhost_net_buf *rxq = &nvq->rxq; rxq->head = 0; - rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue, - VHOST_NET_BATCH); + switch (nvq->type) { + case IF_TUN: + rxq->tail = tun_ring_consume_batched(file, rxq->queue, + VHOST_NET_BATCH); + break; + case IF_TAP: + rxq->tail = tap_ring_consume_batched(file, rxq->queue, + VHOST_NET_BATCH); + break; + case IF_NONE: + return 0; + } return rxq->tail; } -static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq) +static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq, + struct socket *sk) { struct vhost_net_buf *rxq = &nvq->rxq; - - if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) { - ptr_ring_unconsume(nvq->rx_ring,
rxq->queue + rxq->head, - vhost_net_buf_get_size(rxq), - tun_ptr_free); + struct file *file; + + if (sk && !vhost_net_buf_is_empty(rxq)) { + file = sk->file; + switch (nvq->type) { + case IF_TUN: + tun_ring_unconsume(file, rxq->queue + rxq->head, + vhost_net_buf_get_size(rxq), + tun_ptr_free); + break; + case IF_TAP: + tap_ring_unconsume(file, rxq->queue + rxq->head, + vhost_net_buf_get_size(rxq), + tun_ptr_free); + break; + case IF_NONE: + return; + } rxq->head = rxq->tail = 0; } } @@ -209,14 +242,15 @@ static int vhost_net_buf_peek_len(void *ptr) return __skb_array_len_with_tag(ptr); } -static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq) +static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq, + struct sock *sk) { struct vhost_net_buf *rxq = &nvq->rxq; if (!vhost_net_buf_is_empty(rxq)) goto out; - if (!vhost_net_buf_produce(nvq)) + if (!vhost_net_buf_produce(nvq, sk)) return 0; out: @@ -996,8 +1030,8 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk) int len = 0; unsigned long flags; - if (rvq->rx_ring) - return vhost_net_buf_peek(rvq); + if (rvq->type) + return vhost_net_buf_peek(rvq, sk); spin_lock_irqsave(&sk->sk_receive_queue.lock, flags); head = skb_peek(&sk->sk_receive_queue); @@ -1212,7 +1246,7 @@ static void handle_rx(struct vhost_net *net) goto out; } busyloop_intr = false; - if (nvq->rx_ring) + if (nvq->type) msg.msg_control = vhost_net_buf_consume(&nvq->rxq); /* On overrun, truncate and discard */ if (unlikely(headcount > UIO_MAXIOV)) { @@ -1368,7 +1402,6 @@ static int vhost_net_open(struct inode *inode, struct file *f) n->vqs[i].batched_xdp = 0; n->vqs[i].vhost_hlen = 0; n->vqs[i].sock_hlen = 0; - n->vqs[i].rx_ring = NULL; vhost_net_buf_init(&n->vqs[i].rxq); } vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX, @@ -1398,8 +1431,8 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n, sock = vhost_vq_get_backend(vq); vhost_net_disable_vq(n, vq); vhost_vq_set_backend(vq, NULL); - vhost_net_buf_unproduce(nvq); 
- nvq->rx_ring = NULL; + vhost_net_buf_unproduce(nvq, sock); + nvq->type = IF_NONE; mutex_unlock(&vq->mutex); return sock; } @@ -1479,18 +1512,13 @@ static struct socket *get_raw_socket(int fd) return ERR_PTR(r); } -static struct ptr_ring *get_tap_ptr_ring(struct file *file) +static enum if_type get_if_type(struct file *file) { - struct ptr_ring *ring; - ring = tun_get_tx_ring(file); - if (!IS_ERR(ring)) - goto out; - ring = tap_get_ptr_ring(file); - if (!IS_ERR(ring)) - goto out; - ring = NULL; -out: - return ring; + if (tap_is_tap_file(file)) + return IF_TAP; + if (tun_is_tun_file(file)) + return IF_TUN; + return IF_NONE; } static struct socket *get_tap_socket(int fd) @@ -1572,7 +1600,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) vhost_net_disable_vq(n, vq); vhost_vq_set_backend(vq, sock); - vhost_net_buf_unproduce(nvq); + vhost_net_buf_unproduce(nvq, sock); r = vhost_vq_init_access(vq); if (r) goto err_used; @@ -1581,9 +1609,9 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) goto err_used; if (index == VHOST_NET_VQ_RX) { if (sock) - nvq->rx_ring = get_tap_ptr_ring(sock->file); + nvq->type = get_if_type(sock->file); else - nvq->rx_ring = NULL; + nvq->type = IF_NONE; } oldubufs = nvq->ubufs; -- 2.43.0 tun_get_tx_ring and tap_get_ptr_ring no longer have in-tree consumers and can be dropped. 
Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Co-developed-by: Jon Kohler Signed-off-by: Jon Kohler Signed-off-by: Simon Schippers --- drivers/net/tap.c | 13 ------------- drivers/net/tun.c | 13 ------------- include/linux/if_tap.h | 5 ----- include/linux/if_tun.h | 6 ------ 4 files changed, 37 deletions(-) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index cf19d7181c2f..8821f26d0baa 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1230,19 +1230,6 @@ struct socket *tap_get_socket(struct file *file) } EXPORT_SYMBOL_GPL(tap_get_socket); -struct ptr_ring *tap_get_ptr_ring(struct file *file) -{ - struct tap_queue *q; - - if (file->f_op != &tap_fops) - return ERR_PTR(-EINVAL); - q = file->private_data; - if (!q) - return ERR_PTR(-EBADFD); - return &q->ring; -} -EXPORT_SYMBOL_GPL(tap_get_ptr_ring); - bool tap_is_tap_file(struct file *file) { struct tap_queue *q; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 9d6f98e00661..71b6981d07d7 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -3769,19 +3769,6 @@ void tun_ring_unconsume(struct file *file, void **batch, int n, } EXPORT_SYMBOL_GPL(tun_ring_unconsume); -struct ptr_ring *tun_get_tx_ring(struct file *file) -{ - struct tun_file *tfile; - - if (file->f_op != &tun_fops) - return ERR_PTR(-EINVAL); - tfile = file->private_data; - if (!tfile) - return ERR_PTR(-EBADFD); - return &tfile->tx_ring; -} -EXPORT_SYMBOL_GPL(tun_get_tx_ring); - bool tun_is_tun_file(struct file *file) { struct tun_file *tfile; diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 14194342b784..0e427b979c11 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -10,7 +10,6 @@ struct socket; #if IS_ENABLED(CONFIG_TAP) struct socket *tap_get_socket(struct file *); -struct ptr_ring *tap_get_ptr_ring(struct file *file); int tap_ring_consume_batched(struct file *file, void **array, int n); void tap_ring_unconsume(struct file *file, void **batch, int n, void (*destroy)(void *)); @@ -22,10
+21,6 @@ static inline struct socket *tap_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } -static inline struct ptr_ring *tap_get_ptr_ring(struct file *f) -{ - return ERR_PTR(-EINVAL); -} static inline int tap_ring_consume_batched(struct file *f, void **array, int n) { diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 0910c6dbac20..80b734173a80 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -21,7 +21,6 @@ struct tun_msg_ctl { #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); -struct ptr_ring *tun_get_tx_ring(struct file *file); int tun_ring_consume_batched(struct file *file, void **array, int n); void tun_ring_unconsume(struct file *file, void **batch, int n, void (*destroy)(void *)); @@ -54,11 +53,6 @@ static inline struct socket *tun_get_socket(struct file *f) return ERR_PTR(-EINVAL); } -static inline struct ptr_ring *tun_get_tx_ring(struct file *f) -{ - return ERR_PTR(-EINVAL); -} - static inline int tun_ring_consume_batched(struct file *file, void **array, int n) { -- 2.43.0 This commit prevents tail-drop when a qdisc is present and the ptr_ring becomes full. Once an entry is successfully produced and the ptr_ring reaches capacity, the netdev queue is stopped instead of dropping subsequent packets. If producing an entry fails anyways, the tun_net_xmit returns NETDEV_TX_BUSY, again avoiding a drop. Such failures are expected because LLTX is enabled and the transmit path operates without the usual locking. As a result, concurrent calls to tun_net_xmit() are not prevented. The existing __{tun,tap}_ring_consume functions free space in the ptr_ring and wake the netdev queue. Races between this wakeup and the queue-stop logic could leave the queue stopped indefinitely. To prevent this, a memory barrier is enforced (as discussed in a similar implementation in [1]), followed by a recheck that wakes the queue if space is already available. 
If no qdisc is present, the previous tail-drop behavior is preserved. +-------------------------+-----------+---------------+----------------+ | pktgen benchmarks to | Stock | Patched with | Patched with | | Debian VM, i5 6300HQ, | | noqueue qdisc | fq_codel qdisc | | 10M packets | | | | +-----------+-------------+-----------+---------------+----------------+ | TAP | Transmitted | 196 Kpps | 195 Kpps | 185 Kpps | | +-------------+-----------+---------------+----------------+ | | Lost | 1618 Kpps | 1556 Kpps | 0 | +-----------+-------------+-----------+---------------+----------------+ | TAP | Transmitted | 577 Kpps | 582 Kpps | 578 Kpps | | + +-------------+-----------+---------------+----------------+ | vhost-net | Lost | 1170 Kpps | 1109 Kpps | 0 | +-----------+-------------+-----------+---------------+----------------+ [1] Link: https://lore.kernel.org/all/20250424085358.75d817ae@kernel.org/ Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Signed-off-by: Simon Schippers --- drivers/net/tun.c | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 71b6981d07d7..74d7fd09e9ba 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1008,6 +1008,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) struct netdev_queue *queue; struct tun_file *tfile; int len = skb->len; + bool qdisc_present; + int ret; rcu_read_lock(); tfile = rcu_dereference(tun->tfiles[txq]); @@ -1060,13 +1062,38 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) nf_reset_ct(skb); - if (ptr_ring_produce(&tfile->tx_ring, skb)) { + queue = netdev_get_tx_queue(dev, txq); + qdisc_present = !qdisc_txq_has_no_queue(queue); + + spin_lock(&tfile->tx_ring.producer_lock); + ret = __ptr_ring_produce(&tfile->tx_ring, skb); + if (__ptr_ring_produce_peek(&tfile->tx_ring) && qdisc_present) { + netif_tx_stop_queue(queue); + /* Avoid races with queue wake-up in + * 
__{tun,tap}_ring_consume by waking if space is + * available in a re-check. + * The barrier makes sure that the stop is visible before + * we re-check. + */ + smp_mb__after_atomic(); + if (!__ptr_ring_produce_peek(&tfile->tx_ring)) + netif_tx_wake_queue(queue); + } + spin_unlock(&tfile->tx_ring.producer_lock); + + if (ret) { + /* If a qdisc is attached to our virtual device, + * returning NETDEV_TX_BUSY is allowed. + */ + if (qdisc_present) { + rcu_read_unlock(); + return NETDEV_TX_BUSY; + } drop_reason = SKB_DROP_REASON_FULL_RING; goto drop; } /* dev->lltx requires to do our own update of trans_start */ - queue = netdev_get_tx_queue(dev, txq); txq_trans_cond_update(queue); /* Notify and wake up reader process */ -- 2.43.0