Currently, the non-zc xsk rx path for the multi-buffer case checks twice
whether the xsk rx queue has enough space for producing descriptors:

1. the up-front check for the whole batch:

	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
		xs->rx_queue_full++;
		return -ENOBUFS;
	}

2. the per-descriptor check buried in the producing routine:

	__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
	-> err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
	   -> if (xskq_prod_is_full(q))

The second check is redundant, as in 1. we have already peeked at the rx
queue and verified that there is enough space to produce the given
number of descriptors. Provide helper functions that skip it and
therefore optimize the code.

Signed-off-by: Maciej Fijalkowski
---
 net/xdp/xsk.c       | 14 +++++++++++++-
 net/xdp/xsk_queue.h | 16 +++++++++++-----
 2 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index f093c3453f64..aaadc13649e1 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -160,6 +160,17 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
 	return 0;
 }
 
+static void __xsk_rcv_zc_safe(struct xdp_sock *xs, struct xdp_buff_xsk *xskb,
+			      u32 len, u32 flags)
+{
+	u64 addr;
+
+	addr = xp_get_handle(xskb, xskb->pool);
+	__xskq_prod_reserve_desc(xs->rx, addr, len, flags);
+
+	xp_release(xskb);
+}
+
 static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
 	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
@@ -292,7 +303,8 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 		rem -= copied;
 
 		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
-		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
+		__xsk_rcv_zc_safe(xs, xskb, copied - meta_len,
+				  rem ? XDP_PKT_CONTD : 0);
 		meta_len = 0;
 	} while (rem);
 
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 1eb8d9f8b104..4f764b5748d2 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -440,20 +440,26 @@ static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_de
 	q->cached_prod = cached_prod;
 }
 
-static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
-					 u64 addr, u32 len, u32 flags)
+static inline void __xskq_prod_reserve_desc(struct xsk_queue *q,
+					    u64 addr, u32 len, u32 flags)
 {
 	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
 	u32 idx;
 
-	if (xskq_prod_is_full(q))
-		return -ENOBUFS;
-
 	/* A, matches D */
 	idx = q->cached_prod++ & q->ring_mask;
 	ring->desc[idx].addr = addr;
 	ring->desc[idx].len = len;
 	ring->desc[idx].options = flags;
+}
+
+static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
+					 u64 addr, u32 len, u32 flags)
+{
+	if (xskq_prod_is_full(q))
+		return -ENOBUFS;
+
+	__xskq_prod_reserve_desc(q, addr, len, flags);
 	return 0;
 }
-- 
2.43.0
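
For readers who want to see the scheme in isolation: below is a minimal,
self-contained userspace sketch of the "check capacity once for the whole
frame, then produce each descriptor unchecked" pattern this patch adopts.
It is not the kernel code; every name here (demo_ring, demo_prod_nb_free,
__demo_prod_reserve, demo_rcv_frame) is invented for illustration, and the
real xskq_prod_nb_free() additionally refreshes the cached consumer pointer
from the shared ring, which this model omits.

	/* Simplified userspace model -- NOT kernel code. */
	#include <stdint.h>
	#include <stdio.h>
	#include <errno.h>

	#define RING_SIZE 8	/* power of two, like the real rings */

	struct demo_desc {
		uint64_t addr;
		uint32_t len;
		uint32_t options;
	};

	struct demo_ring {
		uint32_t cached_prod;
		uint32_t cached_cons;	/* stand-in for the consumer snapshot */
		uint32_t ring_mask;
		struct demo_desc desc[RING_SIZE];
	};

	/* One up-front capacity check; returns min(free entries, max). */
	static uint32_t demo_prod_nb_free(struct demo_ring *q, uint32_t max)
	{
		uint32_t free = q->cached_cons + RING_SIZE - q->cached_prod;

		return free >= max ? max : free;
	}

	/* Unchecked write: caller must have verified capacity already. */
	static void __demo_prod_reserve(struct demo_ring *q, uint64_t addr,
					uint32_t len, uint32_t options)
	{
		uint32_t idx = q->cached_prod++ & q->ring_mask;

		q->desc[idx].addr = addr;
		q->desc[idx].len = len;
		q->desc[idx].options = options;
	}

	/* Produce num_desc fragments of one frame, checking space once. */
	static int demo_rcv_frame(struct demo_ring *q, uint32_t num_desc)
	{
		if (demo_prod_nb_free(q, num_desc) < num_desc)
			return -ENOBUFS;	/* rx_queue_full path */

		for (uint32_t i = 0; i < num_desc; i++)
			__demo_prod_reserve(q, 0x1000 * i, 256,
					    i + 1 < num_desc ? 1 : 0);
		return 0;
	}

	int main(void)
	{
		struct demo_ring q = { .ring_mask = RING_SIZE - 1 };

		printf("3-frag frame: %d\n", demo_rcv_frame(&q, 3)); /* 0 */
		printf("6-frag frame: %d\n", demo_rcv_frame(&q, 6)); /* -ENOBUFS */
		return 0;
	}

The split mirrors the patch: demo_rcv_frame() performs the single
xskq_prod_nb_free()-style check for all fragments of a frame, which is
what lets the double-underscore per-descriptor helper drop its own
full-queue test.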