__ptr_ring_full_next is useful if the caller would like to act before the ptr_ring becomes full due to the next __ptr_ring_produce call. Because __ptr_ring_produce issues an smp_wmb(), any action taken beforehand is guaranteed to be visible to the consumer once it sees the newly produced entry. Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Signed-off-by: Simon Schippers --- include/linux/ptr_ring.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 551329220e4f..c45e95071d7e 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -96,6 +96,28 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r) return ret; } +/* Returns true if the ptr_ring will be full after inserting the next ptr. + */ +static inline bool __ptr_ring_full_next(struct ptr_ring *r) +{ + int p; + + /* Since __ptr_ring_discard_one invalidates in reverse order, the + * next producer entry might be NULL even though the current one + * is not. Therefore, also check the current producer entry with + * __ptr_ring_full. + */ + if (unlikely(r->size <= 1 || __ptr_ring_full(r))) + return true; + + p = r->producer + 1; + + if (unlikely(p >= r->size)) + p = 0; + + return r->queue[p]; +} + /* Note: callers invoking this in a loop must use a compiler barrier, * for example cpu_relax(). Callers must hold producer_lock. * Callers are responsible for making sure pointer that is being queued -- 2.43.0 __ptr_ring_will_invalidate is useful if the caller would like to act before entries of the ptr_ring get invalidated by __ptr_ring_discard_one. __ptr_ring_consume calls the new helper and passes the result to __ptr_ring_discard_one, preserving the pre-patch logic. Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Signed-off-by: Simon Schippers --- include/linux/ptr_ring.h | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index c45e95071d7e..78fb3efedc7a 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -266,7 +266,22 @@ static inline bool ptr_ring_empty_bh(struct ptr_ring *r) } /* Must only be called after __ptr_ring_peek returned !NULL */ -static inline void __ptr_ring_discard_one(struct ptr_ring *r) +static inline bool __ptr_ring_will_invalidate(struct ptr_ring *r) +{ + /* Once we have processed enough entries invalidate them in + * the ring all at once so producer can reuse their space in the ring. + * We also do this when we reach end of the ring - not mandatory + * but helps keep the implementation simple. + */ + int consumer_head = r->consumer_head + 1; + + return consumer_head - r->consumer_tail >= r->batch || + consumer_head >= r->size; +} + +/* Must only be called after __ptr_ring_peek returned !NULL */ +static inline void __ptr_ring_discard_one(struct ptr_ring *r, + bool invalidate) { /* Fundamentally, what we want to do is update consumer * index and zero out the entry so producer can reuse it. @@ -286,13 +301,7 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r) int consumer_head = r->consumer_head; int head = consumer_head++; - /* Once we have processed enough entries invalidate them in - * the ring all at once so producer can reuse their space in the ring. - * We also do this when we reach end of the ring - not mandatory - * but helps keep the implementation simple.
- */ - if (unlikely(consumer_head - r->consumer_tail >= r->batch || - consumer_head >= r->size)) { + if (unlikely(invalidate)) { /* Zero out entries in the reverse order: this way we touch the * cache line that producer might currently be reading the last; * producer won't make progress and touch other cache lines @@ -312,6 +321,7 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r) static inline void *__ptr_ring_consume(struct ptr_ring *r) { + bool invalidate; void *ptr; /* The READ_ONCE in __ptr_ring_peek guarantees that anyone @@ -319,8 +329,10 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) * with smp_wmb in __ptr_ring_produce. */ ptr = __ptr_ring_peek(r); - if (ptr) - __ptr_ring_discard_one(r); + if (ptr) { + invalidate = __ptr_ring_will_invalidate(r); + __ptr_ring_discard_one(r, invalidate); + } return ptr; } -- 2.43.0 Stop the netdev queue ahead of __ptr_ring_produce when __ptr_ring_full_next signals that the ring is about to fill. Due to the smp_wmb() in __ptr_ring_produce, the consumer is guaranteed to observe the stopped netdev queue once it sees the new ptr_ring entry. As both __ptr_ring_full_next and __ptr_ring_produce need the producer_lock, the lock is held across both calls. dev->lltx is disabled to ensure that tun_net_xmit is not called while the netdev queue is stopped (which happened in my testing, resulting in rare packet drops). Consequently, the update of trans_start in tun_net_xmit is also removed. Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Signed-off-by: Simon Schippers --- drivers/net/tun.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 86a9e927d0ff..c6b22af9bae8 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -931,7 +931,7 @@ static int tun_net_init(struct net_device *dev) dev->vlan_features = dev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); - dev->lltx = true; + dev->lltx = false; tun->flags = (tun->flags & ~TUN_FEATURES) | (ifr->ifr_flags & TUN_FEATURES); @@ -1060,14 +1060,18 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) nf_reset_ct(skb); - if (ptr_ring_produce(&tfile->tx_ring, skb)) { + queue = netdev_get_tx_queue(dev, txq); + + spin_lock(&tfile->tx_ring.producer_lock); + if (__ptr_ring_full_next(&tfile->tx_ring)) + netif_tx_stop_queue(queue); + + if (unlikely(__ptr_ring_produce(&tfile->tx_ring, skb))) { + spin_unlock(&tfile->tx_ring.producer_lock); drop_reason = SKB_DROP_REASON_FULL_RING; goto drop; } - - /* dev->lltx requires to do our own update of trans_start */ - queue = netdev_get_tx_queue(dev, txq); - txq_trans_cond_update(queue); + spin_unlock(&tfile->tx_ring.producer_lock); /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) -- 2.43.0 The new wrappers tun_ring_consume/tap_ring_consume deal with consuming an entry of the ptr_ring and then waking the netdev queue when entries get invalidated and become available to the producer again. To avoid waking the netdev queue while the ptr_ring is full, the stopped state of the netdev queue is checked before entries are invalidated. That way the netdev queue can be safely woken after the invalidation. The READ_ONCE in __ptr_ring_peek, paired with the smp_wmb() in __ptr_ring_produce within tun_net_xmit, guarantees that the information about the netdev queue being stopped is visible after __ptr_ring_peek is called. The netdev queue is also woken after resizing the ptr_ring.
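For illustration only, below is a self-contained userspace sketch of the stop/wake ordering that this patch and the previous one rely on: the producer stops the "queue" before publishing the entry that makes the ring full, and a consumer that observes that entry is guaranteed to also observe the stopped state before it frees a slot and wakes the producer side. C11 release/acquire atomics stand in for the smp_wmb()/READ_ONCE() pairing, an atomic flag stands in for the netdev queue state, and the batch-invalidation logic is left out; all names here (toy_produce, toy_consume, queue_stopped) are invented for the sketch and are not part of the patches.

/* Toy userspace model of the stop-before-produce / peek-then-wake handshake.
 * Not kernel code; single producer, single consumer assumed.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 4

static void *_Atomic ring[RING_SIZE];
static atomic_bool queue_stopped;
static int producer_idx;
static int consumer_idx;

/* Producer (models tun_net_xmit): stop the "queue" first, then publish. */
static bool toy_produce(void *ptr)
{
	int next = (producer_idx + 1) % RING_SIZE;

	if (atomic_load_explicit(&ring[producer_idx], memory_order_relaxed))
		return false;	/* ring already full: would be a drop */
	if (atomic_load_explicit(&ring[next], memory_order_relaxed))
		atomic_store_explicit(&queue_stopped, true, memory_order_relaxed);
	/* release pairs with the consumer's acquire, like smp_wmb()/READ_ONCE() */
	atomic_store_explicit(&ring[producer_idx], ptr, memory_order_release);
	producer_idx = next;
	return true;
}

/* Consumer (models tun_ring_consume): peek, sample stopped, free slot, wake. */
static void *toy_consume(void)
{
	void *ptr = atomic_load_explicit(&ring[consumer_idx], memory_order_acquire);
	bool stopped;

	if (!ptr)
		return NULL;
	/* Seeing the entry implies also seeing queue_stopped if it was set
	 * before the entry was published.
	 */
	stopped = atomic_load_explicit(&queue_stopped, memory_order_relaxed);
	atomic_store_explicit(&ring[consumer_idx], NULL, memory_order_release);
	consumer_idx = (consumer_idx + 1) % RING_SIZE;
	if (stopped)	/* "wake" the producer side again */
		atomic_store_explicit(&queue_stopped, false, memory_order_relaxed);
	return ptr;
}

int main(void)
{
	int values[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	for (int i = 0; i < 8; i++) {
		bool ok = toy_produce(&values[i]);

		printf("produce %d: %s, stopped=%d\n", values[i],
		       ok ? "ok" : "drop", atomic_load(&queue_stopped));
	}
	while (toy_consume())
		printf("consume, stopped=%d\n", atomic_load(&queue_stopped));
	return 0;
}

In the real code the wake only happens when __ptr_ring_discard_one actually invalidates a batch of entries; the sketch wakes on every consume purely to keep it short.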
Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Signed-off-by: Simon Schippers --- drivers/net/tap.c | 44 +++++++++++++++++++++++++++++++++++++++++++- drivers/net/tun.c | 47 +++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 88 insertions(+), 3 deletions(-) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 1197f245e873..f8292721a9d6 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -753,6 +753,46 @@ static ssize_t tap_put_user(struct tap_queue *q, return ret ? ret : total; } +static struct sk_buff *tap_ring_consume(struct tap_queue *q) +{ + struct netdev_queue *txq; + struct net_device *dev; + bool will_invalidate; + bool stopped; + void *ptr; + + spin_lock(&q->ring.consumer_lock); + ptr = __ptr_ring_peek(&q->ring); + if (!ptr) { + spin_unlock(&q->ring.consumer_lock); + return ptr; + } + + /* Check if the queue stopped before zeroing out, so no ptr get + * produced in the meantime, because this could result in waking + * even though the ptr_ring is full. The order of the operations + * is ensured by barrier(). + */ + will_invalidate = __ptr_ring_will_invalidate(&q->ring); + if (unlikely(will_invalidate)) { + rcu_read_lock(); + dev = rcu_dereference(q->tap)->dev; + txq = netdev_get_tx_queue(dev, q->queue_index); + stopped = netif_tx_queue_stopped(txq); + } + barrier(); + __ptr_ring_discard_one(&q->ring, will_invalidate); + + if (unlikely(will_invalidate)) { + if (stopped) + netif_tx_wake_queue(txq); + rcu_read_unlock(); + } + spin_unlock(&q->ring.consumer_lock); + + return ptr; +} + static ssize_t tap_do_read(struct tap_queue *q, struct iov_iter *to, int noblock, struct sk_buff *skb) @@ -774,7 +814,7 @@ static ssize_t tap_do_read(struct tap_queue *q, TASK_INTERRUPTIBLE); /* Read frames from the queue */ - skb = ptr_ring_consume(&q->ring); + skb = tap_ring_consume(q); if (skb) break; if (noblock) { @@ -1207,6 +1247,8 @@ int tap_queue_resize(struct tap_dev *tap) ret = ptr_ring_resize_multiple_bh(rings, n, dev->tx_queue_len, GFP_KERNEL, __skb_array_destroy_skb); + if (netif_running(dev)) + netif_tx_wake_all_queues(dev); kfree(rings); return ret; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index c6b22af9bae8..682df8157b55 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2114,13 +2114,53 @@ static ssize_t tun_put_user(struct tun_struct *tun, return total; } +static void *tun_ring_consume(struct tun_file *tfile) +{ + struct netdev_queue *txq; + struct net_device *dev; + bool will_invalidate; + bool stopped; + void *ptr; + + spin_lock(&tfile->tx_ring.consumer_lock); + ptr = __ptr_ring_peek(&tfile->tx_ring); + if (!ptr) { + spin_unlock(&tfile->tx_ring.consumer_lock); + return ptr; + } + + /* Check if the queue stopped before zeroing out, so no ptr get + * produced in the meantime, because this could result in waking + * even though the ptr_ring is full. The order of the operations + * is ensured by barrier(). 
+ */ + will_invalidate = __ptr_ring_will_invalidate(&tfile->tx_ring); + if (unlikely(will_invalidate)) { + rcu_read_lock(); + dev = rcu_dereference(tfile->tun)->dev; + txq = netdev_get_tx_queue(dev, tfile->queue_index); + stopped = netif_tx_queue_stopped(txq); + } + barrier(); + __ptr_ring_discard_one(&tfile->tx_ring, will_invalidate); + + if (unlikely(will_invalidate)) { + if (stopped) + netif_tx_wake_queue(txq); + rcu_read_unlock(); + } + spin_unlock(&tfile->tx_ring.consumer_lock); + + return ptr; +} + static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) { DECLARE_WAITQUEUE(wait, current); void *ptr = NULL; int error = 0; - ptr = ptr_ring_consume(&tfile->tx_ring); + ptr = tun_ring_consume(tfile); if (ptr) goto out; if (noblock) { @@ -2132,7 +2172,7 @@ static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) while (1) { set_current_state(TASK_INTERRUPTIBLE); - ptr = ptr_ring_consume(&tfile->tx_ring); + ptr = tun_ring_consume(tfile); if (ptr) break; if (signal_pending(current)) { @@ -3621,6 +3661,9 @@ static int tun_queue_resize(struct tun_struct *tun) dev->tx_queue_len, GFP_KERNEL, tun_ptr_free); + if (netif_running(dev)) + netif_tx_wake_all_queues(dev); + kfree(rings); return ret; } -- 2.43.0 The wrappers tun_ring_consume_batched/tap_ring_consume_batched are similar to the wrappers tun_ring_consume/tap_ring_consume. They deal with consuming a batch of entries of the ptr_ring and then waking the netdev queue whenever entries get invalidated to be used again by the producer. To avoid waking the netdev queue when the ptr_ring is full, it is checked if the netdev queue is stopped before invalidating entries. Like that the netdev queue can be safely woken after invalidating entries. The READ_ONCE in __ptr_ring_peek, paired with the smp_wmb() in __ptr_ring_produce within tun_net_xmit guarantees that the information about the netdev queue being stopped is visible after __ptr_ring_peek is called. Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Signed-off-by: Simon Schippers --- drivers/net/tap.c | 52 ++++++++++++++++++++++++++++++++++++++++ drivers/net/tun.c | 54 ++++++++++++++++++++++++++++++++++++++++++ include/linux/if_tap.h | 6 +++++ include/linux/if_tun.h | 7 ++++++ 4 files changed, 119 insertions(+) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index f8292721a9d6..651d48612329 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1216,6 +1216,58 @@ struct socket *tap_get_socket(struct file *file) } EXPORT_SYMBOL_GPL(tap_get_socket); +int tap_ring_consume_batched(struct file *file, + void **array, int n) +{ + struct tap_queue *q = file->private_data; + struct netdev_queue *txq; + struct net_device *dev; + bool will_invalidate; + bool stopped; + void *ptr; + int i; + + spin_lock(&q->ring.consumer_lock); + ptr = __ptr_ring_peek(&q->ring); + + if (!ptr) { + spin_unlock(&q->ring.consumer_lock); + return 0; + } + + i = 0; + do { + /* Check if the queue stopped before zeroing out, so no + * ptr get produced in the meantime, because this could + * result in waking even though the ptr_ring is full. + * The order of the operations is ensured by barrier(). 
+ */ + will_invalidate = __ptr_ring_will_invalidate(&q->ring); + if (unlikely(will_invalidate)) { + rcu_read_lock(); + dev = rcu_dereference(q->tap)->dev; + txq = netdev_get_tx_queue(dev, q->queue_index); + stopped = netif_tx_queue_stopped(txq); + } + barrier(); + __ptr_ring_discard_one(&q->ring, will_invalidate); + + if (unlikely(will_invalidate)) { + if (stopped) + netif_tx_wake_queue(txq); + rcu_read_unlock(); + } + + array[i++] = ptr; + if (i >= n) + break; + } while ((ptr = __ptr_ring_peek(&q->ring))); + spin_unlock(&q->ring.consumer_lock); + + return i; +} +EXPORT_SYMBOL_GPL(tap_ring_consume_batched); + struct ptr_ring *tap_get_ptr_ring(struct file *file) { struct tap_queue *q; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 682df8157b55..7566b22780fb 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -3759,6 +3759,60 @@ struct socket *tun_get_socket(struct file *file) } EXPORT_SYMBOL_GPL(tun_get_socket); +int tun_ring_consume_batched(struct file *file, + void **array, int n) +{ + struct tun_file *tfile = file->private_data; + struct netdev_queue *txq; + struct net_device *dev; + bool will_invalidate; + bool stopped; + void *ptr; + int i; + + spin_lock(&tfile->tx_ring.consumer_lock); + ptr = __ptr_ring_peek(&tfile->tx_ring); + + if (!ptr) { + spin_unlock(&tfile->tx_ring.consumer_lock); + return 0; + } + + i = 0; + do { + /* Check if the queue stopped before zeroing out, so no + * ptr get produced in the meantime, because this could + * result in waking even though the ptr_ring is full. + * The order of the operations is ensured by barrier(). + */ + will_invalidate = + __ptr_ring_will_invalidate(&tfile->tx_ring); + if (unlikely(will_invalidate)) { + rcu_read_lock(); + dev = rcu_dereference(tfile->tun)->dev; + txq = netdev_get_tx_queue(dev, + tfile->queue_index); + stopped = netif_tx_queue_stopped(txq); + } + barrier(); + __ptr_ring_discard_one(&tfile->tx_ring, will_invalidate); + + if (unlikely(will_invalidate)) { + if (stopped) + netif_tx_wake_queue(txq); + rcu_read_unlock(); + } + + array[i++] = ptr; + if (i >= n) + break; + } while ((ptr = __ptr_ring_peek(&tfile->tx_ring))); + spin_unlock(&tfile->tx_ring.consumer_lock); + + return i; +} +EXPORT_SYMBOL_GPL(tun_ring_consume_batched); + struct ptr_ring *tun_get_tx_ring(struct file *file) { struct tun_file *tfile; diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 553552fa635c..2e5542d6aef4 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -11,6 +11,7 @@ struct socket; #if IS_ENABLED(CONFIG_TAP) struct socket *tap_get_socket(struct file *); struct ptr_ring *tap_get_ptr_ring(struct file *file); +int tap_ring_consume_batched(struct file *file, void **array, int n); #else #include #include @@ -22,6 +23,11 @@ static inline struct ptr_ring *tap_get_ptr_ring(struct file *f) { return ERR_PTR(-EINVAL); } +static inline int tap_ring_consume_batched(struct file *f, + void **array, int n) +{ + return 0; +} #endif /* CONFIG_TAP */ /* diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 80166eb62f41..5b41525ac007 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -22,6 +22,7 @@ struct tun_msg_ctl { #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); struct ptr_ring *tun_get_tx_ring(struct file *file); +int tun_ring_consume_batched(struct file *file, void **array, int n); static inline bool tun_is_xdp_frame(void *ptr) { @@ -55,6 +56,12 @@ static inline struct ptr_ring *tun_get_tx_ring(struct file *f) return 
ERR_PTR(-EINVAL); } +static inline int tun_ring_consume_batched(struct file *file, + void **array, int n) +{ + return 0; +} + static inline bool tun_is_xdp_frame(void *ptr) { return false; -- 2.43.0 Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Signed-off-by: Simon Schippers --- drivers/net/tap.c | 9 +++++++++ drivers/net/tun.c | 9 +++++++++ include/linux/if_tap.h | 4 ++++ include/linux/if_tun.h | 5 +++++ 4 files changed, 27 insertions(+) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 651d48612329..9720481f6d41 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1268,6 +1268,15 @@ int tap_ring_consume_batched(struct file *file, } EXPORT_SYMBOL_GPL(tap_ring_consume_batched); +void tap_ring_unconsume(struct file *file, void **batch, int n, + void (*destroy)(void *)) +{ + struct tap_queue *q = file->private_data; + + ptr_ring_unconsume(&q->ring, batch, n, destroy); +} +EXPORT_SYMBOL_GPL(tap_ring_unconsume); + struct ptr_ring *tap_get_ptr_ring(struct file *file) { struct tap_queue *q; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7566b22780fb..25b170e903e1 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -3813,6 +3813,15 @@ int tun_ring_consume_batched(struct file *file, } EXPORT_SYMBOL_GPL(tun_ring_consume_batched); +void tun_ring_unconsume(struct file *file, void **batch, int n, + void (*destroy)(void *)) +{ + struct tun_file *tfile = file->private_data; + + ptr_ring_unconsume(&tfile->tx_ring, batch, n, destroy); +} +EXPORT_SYMBOL_GPL(tun_ring_unconsume); + struct ptr_ring *tun_get_tx_ring(struct file *file) { struct tun_file *tfile; diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 2e5542d6aef4..0cf8cf200d52 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -12,6 +12,8 @@ struct socket; struct socket *tap_get_socket(struct file *); struct ptr_ring *tap_get_ptr_ring(struct file *file); int tap_ring_consume_batched(struct file *file, void **array, int n); +void tap_ring_unconsume(struct file *file, void **batch, int n, + void (*destroy)(void *)); #else #include #include @@ -28,6 +30,8 @@ static inline int tap_ring_consume_batched(struct file *f, { return 0; } +static inline void tap_ring_unconsume(struct file *file, void **batch, + int n, void (*destroy)(void *)) {} #endif /* CONFIG_TAP */ /* diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index 5b41525ac007..bd954bb117e8 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -23,6 +23,8 @@ struct tun_msg_ctl { struct socket *tun_get_socket(struct file *); struct ptr_ring *tun_get_tx_ring(struct file *file); int tun_ring_consume_batched(struct file *file, void **array, int n); +void tun_ring_unconsume(struct file *file, void **batch, int n, + void (*destroy)(void *)); static inline bool tun_is_xdp_frame(void *ptr) { @@ -62,6 +64,9 @@ static inline int tun_ring_consume_batched(struct file *file, return 0; } +static inline void tun_ring_unconsume(struct file *file, void **batch, + int n, void (*destroy)(void *)) {} + static inline bool tun_is_xdp_frame(void *ptr) { return false; -- 2.43.0 The new helpers is_tun_file/is_tap_file are inspired by tun_get_tx_ring/tap_get_ptr_ring and replace them.
Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Signed-off-by: Simon Schippers --- drivers/net/tap.c | 10 +++++----- drivers/net/tun.c | 10 +++++----- include/linux/if_tap.h | 5 +++++ include/linux/if_tun.h | 6 ++++++ 4 files changed, 21 insertions(+), 10 deletions(-) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 9720481f6d41..8d3e74330309 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1277,18 +1277,18 @@ void tap_ring_unconsume(struct file *file, void **batch, int n, } EXPORT_SYMBOL_GPL(tap_ring_unconsume); -struct ptr_ring *tap_get_ptr_ring(struct file *file) +bool is_tap_file(struct file *file) { struct tap_queue *q; if (file->f_op != &tap_fops) - return ERR_PTR(-EINVAL); + return false; q = file->private_data; if (!q) - return ERR_PTR(-EBADFD); - return &q->ring; + return false; + return true; } -EXPORT_SYMBOL_GPL(tap_get_ptr_ring); +EXPORT_SYMBOL_GPL(is_tap_file); int tap_queue_resize(struct tap_dev *tap) { diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 25b170e903e1..b0193b06fedc 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -3822,18 +3822,18 @@ void tun_ring_unconsume(struct file *file, void **batch, int n, } EXPORT_SYMBOL_GPL(tun_ring_unconsume); -struct ptr_ring *tun_get_tx_ring(struct file *file) +bool is_tun_file(struct file *file) { struct tun_file *tfile; if (file->f_op != &tun_fops) - return ERR_PTR(-EINVAL); + return false; tfile = file->private_data; if (!tfile) - return ERR_PTR(-EBADFD); - return &tfile->tx_ring; + return false; + return true; } -EXPORT_SYMBOL_GPL(tun_get_tx_ring); +EXPORT_SYMBOL_GPL(is_tun_file); module_init(tun_init); module_exit(tun_cleanup); diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 0cf8cf200d52..5bbcc8611bf5 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -14,6 +14,7 @@ struct ptr_ring *tap_get_ptr_ring(struct file *file); int tap_ring_consume_batched(struct file *file, void **array, int n); void tap_ring_unconsume(struct file *file, void **batch, int n, void (*destroy)(void *)); +bool is_tap_file(struct file *file); #else #include #include @@ -32,6 +33,10 @@ static inline int tap_ring_consume_batched(struct file *f, } static inline void tap_ring_unconsume(struct file *file, void **batch, int n, void (*destroy)(void *)) {} +static inline bool is_tap_file(struct file *f) +{ + return false; +} #endif /* CONFIG_TAP */ /* diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index bd954bb117e8..869d61898e60 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -25,6 +25,7 @@ struct ptr_ring *tun_get_tx_ring(struct file *file); int tun_ring_consume_batched(struct file *file, void **array, int n); void tun_ring_unconsume(struct file *file, void **batch, int n, void (*destroy)(void *)); +bool is_tun_file(struct file *file); static inline bool tun_is_xdp_frame(void *ptr) { @@ -67,6 +68,11 @@ static inline int tun_ring_consume_batched(struct file *file, static inline void tun_ring_unconsume(struct file *file, void **batch, int n, void (*destroy)(void *)) {} +static inline bool is_tun_file(struct file *f) +{ + return false; +} + static inline bool tun_is_xdp_frame(void *ptr) { return false; -- 2.43.0 Instead of the rx_ring, the virtqueue saves the interface type TUN, TAP (or IF_NONE) to call TUN/TAP wrappers. 
Co-developed-by: Tim Gebauer Signed-off-by: Tim Gebauer Signed-off-by: Simon Schippers --- drivers/vhost/net.c | 90 +++++++++++++++++++++++++++++---------------- 1 file changed, 58 insertions(+), 32 deletions(-) diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index c6508fe0d5c8..6be17b53cc6c 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -127,10 +127,10 @@ struct vhost_net_virtqueue { /* Reference counting for outstanding ubufs. * Protected by vq mutex. Writers must also take device mutex. */ struct vhost_net_ubuf_ref *ubufs; - struct ptr_ring *rx_ring; struct vhost_net_buf rxq; /* Batched XDP buffs */ struct xdp_buff *xdp; + enum if_type {IF_NONE = 0, TUN, TAP} type; }; struct vhost_net { @@ -176,24 +176,54 @@ static void *vhost_net_buf_consume(struct vhost_net_buf *rxq) return ret; } -static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq) +static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq, + struct sock *sk) { + struct file *file = sk->sk_socket->file; struct vhost_net_buf *rxq = &nvq->rxq; rxq->head = 0; - rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue, - VHOST_NET_BATCH); + + switch (nvq->type) { + case TUN: + rxq->tail = tun_ring_consume_batched(file, + rxq->queue, VHOST_NET_BATCH); + break; + case TAP: + rxq->tail = tap_ring_consume_batched(file, + rxq->queue, VHOST_NET_BATCH); + break; + case IF_NONE: + WARN_ON_ONCE(1); + } + return rxq->tail; } -static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq) +static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq, + struct socket *sk) { struct vhost_net_buf *rxq = &nvq->rxq; - - if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) { - ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head, - vhost_net_buf_get_size(rxq), - tun_ptr_free); + struct file *file; + + if (sk && !vhost_net_buf_is_empty(rxq)) { + file = sk->file; + switch (nvq->type) { + case TUN: + tun_ring_unconsume(file, + rxq->queue + rxq->head, + vhost_net_buf_get_size(rxq), + tun_ptr_free); + break; + case TAP: + tap_ring_unconsume(file, + rxq->queue + rxq->head, + vhost_net_buf_get_size(rxq), + tun_ptr_free); + break; + case IF_NONE: + return; + } rxq->head = rxq->tail = 0; } } @@ -209,14 +239,15 @@ static int vhost_net_buf_peek_len(void *ptr) return __skb_array_len_with_tag(ptr); } -static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq) +static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq, + struct sock *sk) { struct vhost_net_buf *rxq = &nvq->rxq; if (!vhost_net_buf_is_empty(rxq)) goto out; - if (!vhost_net_buf_produce(nvq)) + if (!vhost_net_buf_produce(nvq, sk)) return 0; out: @@ -998,8 +1029,8 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk) int len = 0; unsigned long flags; - if (rvq->rx_ring) - return vhost_net_buf_peek(rvq); + if (rvq->type) + return vhost_net_buf_peek(rvq, sk); spin_lock_irqsave(&sk->sk_receive_queue.lock, flags); head = skb_peek(&sk->sk_receive_queue); @@ -1207,7 +1238,7 @@ static void handle_rx(struct vhost_net *net) goto out; } busyloop_intr = false; - if (nvq->rx_ring) + if (nvq->type) msg.msg_control = vhost_net_buf_consume(&nvq->rxq); /* On overrun, truncate and discard */ if (unlikely(headcount > UIO_MAXIOV)) { @@ -1363,7 +1394,7 @@ static int vhost_net_open(struct inode *inode, struct file *f) n->vqs[i].batched_xdp = 0; n->vqs[i].vhost_hlen = 0; n->vqs[i].sock_hlen = 0; - n->vqs[i].rx_ring = NULL; + n->vqs[i].type = IF_NONE; vhost_net_buf_init(&n->vqs[i].rxq); } vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX, @@ 
-1393,8 +1424,8 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n, sock = vhost_vq_get_backend(vq); vhost_net_disable_vq(n, vq); vhost_vq_set_backend(vq, NULL); - vhost_net_buf_unproduce(nvq); - nvq->rx_ring = NULL; + vhost_net_buf_unproduce(nvq, sock); + nvq->type = IF_NONE; mutex_unlock(&vq->mutex); return sock; } @@ -1474,18 +1505,13 @@ static struct socket *get_raw_socket(int fd) return ERR_PTR(r); } -static struct ptr_ring *get_tap_ptr_ring(struct file *file) +static enum if_type get_if_type(struct file *file) { - struct ptr_ring *ring; - ring = tun_get_tx_ring(file); - if (!IS_ERR(ring)) - goto out; - ring = tap_get_ptr_ring(file); - if (!IS_ERR(ring)) - goto out; - ring = NULL; -out: - return ring; + if (is_tap_file(file)) + return TAP; + if (is_tun_file(file)) + return TUN; + return IF_NONE; } static struct socket *get_tap_socket(int fd) @@ -1567,7 +1593,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) vhost_net_disable_vq(n, vq); vhost_vq_set_backend(vq, sock); - vhost_net_buf_unproduce(nvq); + vhost_net_buf_unproduce(nvq, sock); r = vhost_vq_init_access(vq); if (r) goto err_used; @@ -1576,9 +1602,9 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) goto err_used; if (index == VHOST_NET_VQ_RX) { if (sock) - nvq->rx_ring = get_tap_ptr_ring(sock->file); + nvq->type = get_if_type(sock->file); else - nvq->rx_ring = NULL; + nvq->type = IF_NONE; } oldubufs = nvq->ubufs; -- 2.43.0
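As a closing illustration (not part of the series), the following userspace sketch walks one pass over a toy ring using the same condition as the __ptr_ring_will_invalidate helper introduced earlier, to show how rarely the consumer actually invalidates entries, and therefore how rarely the wake path in the tun/tap consumers can run: at most once per batch of consumed entries or at the end of the ring. RING_SIZE, BATCH and will_invalidate() below are assumptions made for the example; the real values live in struct ptr_ring (r->size, r->batch).

#include <stdbool.h>
#include <stdio.h>

/* Toy parameters; the real values come from struct ptr_ring. */
#define RING_SIZE 16
#define BATCH 4

/* Same condition as __ptr_ring_will_invalidate above. */
static bool will_invalidate(int consumer_head, int consumer_tail)
{
	int next_head = consumer_head + 1;

	return next_head - consumer_tail >= BATCH || next_head >= RING_SIZE;
}

int main(void)
{
	int consumer_head = 0, consumer_tail = 0;

	/* Walk one full pass over the ring and show where invalidation
	 * (and hence a potential netif_tx_wake_queue) would happen.
	 */
	for (int consumed = 0; consumed < RING_SIZE; consumed++) {
		bool inval = will_invalidate(consumer_head, consumer_tail);

		printf("consume #%2d: head=%2d tail=%2d -> %s\n",
		       consumed, consumer_head, consumer_tail,
		       inval ? "invalidate (may wake producer)" : "defer");

		consumer_head++;
		if (inval) {
			consumer_tail = consumer_head;
			if (consumer_head >= RING_SIZE)
				consumer_head = consumer_tail = 0;
		}
	}
	return 0;
}

With RING_SIZE=16 and BATCH=4 the output shows four invalidation points per pass, i.e. the consumer considers waking the producer at most four times per traversal of the ring.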