From: Michal Kubiak <michal.kubiak@intel.com>

Implement loading and removing an XDP program using the .ndo_bpf
callback in the split queue mode. Reconfigure and restart the queues
if needed (i.e. when !!old_prog != !!new_prog); otherwise, just update
the pointers.

Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
 drivers/net/ethernet/intel/idpf/idpf_txrx.h |  4 +-
 drivers/net/ethernet/intel/idpf/xdp.h       |  7 ++
 drivers/net/ethernet/intel/idpf/idpf_lib.c  |  1 +
 drivers/net/ethernet/intel/idpf/idpf_txrx.c |  4 +
 drivers/net/ethernet/intel/idpf/xdp.c       | 98 +++++++++++++++++++++
 5 files changed, 113 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 6bc204b68d9e..f898a9c8de1d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -467,6 +467,7 @@ struct idpf_tx_queue_stats {
  * @desc_ring: virtual descriptor ring address
  * @bufq_sets: Pointer to the array of buffer queues in splitq mode
  * @napi: NAPI instance corresponding to this queue (splitq)
+ * @xdp_prog: attached XDP program
  * @rx_buf: See struct &libeth_fqe
  * @pp: Page pool pointer in singleq mode
  * @tail: Tail offset. Used for both queue models single and split.
@@ -508,13 +509,14 @@ struct idpf_rx_queue {
 		struct {
 			struct idpf_bufq_set *bufq_sets;
 			struct napi_struct *napi;
+			struct bpf_prog __rcu *xdp_prog;
 		};
 		struct {
 			struct libeth_fqe *rx_buf;
 			struct page_pool *pp;
+			void __iomem *tail;
 		};
 	};
-	void __iomem *tail;
 
 	DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
 	u16 idx;
diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
index cf6823b24ba5..47553ce5f81a 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.h
+++ b/drivers/net/ethernet/intel/idpf/xdp.h
@@ -6,12 +6,19 @@
 
 #include
 
+struct bpf_prog;
 struct idpf_vport;
+struct net_device;
+struct netdev_bpf;
 
 int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
 void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+			       struct bpf_prog *xdp_prog);
 
 int idpf_xdpsqs_get(const struct idpf_vport *vport);
 void idpf_xdpsqs_put(const struct idpf_vport *vport);
 
+int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+
 #endif /* _IDPF_XDP_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 21caa3e2ce6f..1d96947e4091 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -2519,4 +2519,5 @@ static const struct net_device_ops idpf_netdev_ops = {
 	.ndo_tx_timeout = idpf_tx_timeout,
 	.ndo_hwtstamp_get = idpf_hwtstamp_get,
 	.ndo_hwtstamp_set = idpf_hwtstamp_set,
+	.ndo_bpf = idpf_xdp,
 };
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 18294fc93176..7224f92624cb 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -741,6 +741,8 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
 	bool split = idpf_is_queue_model_split(vport->rxq_model);
 	int i, j, err;
 
+	idpf_xdp_copy_prog_to_rqs(vport, vport->xdp_prog);
+
 	for (i = 0; i < vport->num_rxq_grp; i++) {
 		struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
 		u32 truesize = 0;
@@ -1019,6 +1021,8 @@ static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
  */
 void idpf_vport_queues_rel(struct idpf_vport *vport)
 {
+	idpf_xdp_copy_prog_to_rqs(vport, NULL);
+
 	idpf_tx_desc_rel_all(vport);
 	idpf_rx_desc_rel_all(vport);
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
index 08d63462dca4..09e84fe80d4e 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.c
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -4,6 +4,7 @@
 #include
 
 #include "idpf.h"
+#include "idpf_virtchnl.h"
 #include "xdp.h"
 
 static int idpf_rxq_for_each(const struct idpf_vport *vport,
@@ -91,6 +92,28 @@ void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
 			  (void *)(size_t)vport->rxq_model);
 }
 
+static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
+{
+	struct mutex *lock = &rxq->q_vector->vport->adapter->vport_ctrl_lock;
+	struct bpf_prog *prog = arg;
+	struct bpf_prog *old;
+
+	if (prog)
+		bpf_prog_inc(prog);
+
+	old = rcu_replace_pointer(rxq->xdp_prog, prog, lockdep_is_held(lock));
+	if (old)
+		bpf_prog_put(old);
+
+	return 0;
+}
+
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+			       struct bpf_prog *xdp_prog)
+{
+	idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
+}
+
 int idpf_xdpsqs_get(const struct idpf_vport *vport)
 {
 	struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
@@ -163,3 +186,78 @@ void idpf_xdpsqs_put(const struct idpf_vport *vport)
 		idpf_queue_clear(NOIRQ, xdpsq);
 	}
 }
+
+static int idpf_xdp_setup_prog(struct idpf_vport *vport,
+			       const struct netdev_bpf *xdp)
+{
+	const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+	struct bpf_prog *old, *prog = xdp->prog;
+	struct idpf_vport_config *cfg;
+	int ret;
+
+	cfg = vport->adapter->vport_config[vport->idx];
+
+	if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
+	    !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
+	    !!vport->xdp_prog == !!prog) {
+		if (np->state == __IDPF_VPORT_UP)
+			idpf_xdp_copy_prog_to_rqs(vport, prog);
+
+		old = xchg(&vport->xdp_prog, prog);
+		if (old)
+			bpf_prog_put(old);
+
+		cfg->user_config.xdp_prog = prog;
+
+		return 0;
+	}
+
+	if (!vport->num_xdp_txq && vport->num_txq == cfg->max_q.max_txq) {
+		NL_SET_ERR_MSG_MOD(xdp->extack,
+				   "No Tx queues available for XDP, please decrease the number of regular SQs");
+		return -ENOSPC;
+	}
+
+	old = cfg->user_config.xdp_prog;
+	cfg->user_config.xdp_prog = prog;
+
+	ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
+	if (ret) {
+		NL_SET_ERR_MSG_MOD(xdp->extack,
+				   "Could not reopen the vport after XDP setup");
+
+		cfg->user_config.xdp_prog = old;
+		old = prog;
+	}
+
+	if (old)
+		bpf_prog_put(old);
+
+	return ret;
+}
+
+int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+	struct idpf_vport *vport;
+	int ret;
+
+	idpf_vport_ctrl_lock(dev);
+	vport = idpf_netdev_to_vport(dev);
+
+	if (!idpf_is_queue_model_split(vport->txq_model))
+		goto notsupp;
+
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		ret = idpf_xdp_setup_prog(vport, xdp);
+		break;
+	default:
+notsupp:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	idpf_vport_ctrl_unlock(dev);
+
+	return ret;
+}
-- 
2.50.1
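
For context on the __rcu annotation and the rcu_replace_pointer() writer
above: the per-queue program is written under the vport control mutex, and
the Rx hot path is expected to pick it up inside the RCU read-side section
that NAPI polling already provides. A minimal illustrative sketch of such a
reader (the helper name is hypothetical and this function is not part of
this patch):

static u32 idpf_run_xdp_sketch(struct idpf_rx_queue *rxq,
			       struct xdp_buff *xdp)
{
	const struct bpf_prog *prog;

	/* NAPI poll runs under rcu_read_lock() (BH context), so a plain
	 * rcu_dereference() here pairs with the rcu_replace_pointer()
	 * done in idpf_xdp_rxq_assign_prog() under vport_ctrl_lock.
	 */
	prog = rcu_dereference(rxq->xdp_prog);
	if (!prog)
		return XDP_PASS;

	return bpf_prog_run_xdp(prog, xdp);
}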
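
To exercise the new callback from userspace, here is a minimal libbpf
sketch (the object file, program name, and interface name are placeholders;
assumes libbpf >= 0.8 for bpf_xdp_attach()):

#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex;

	ifindex = if_nametoindex("ens801f0");	/* the idpf netdev */
	if (!ifindex)
		return 1;

	obj = bpf_object__open_file("xdp_pass.bpf.o", NULL);
	if (!obj || bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "xdp_pass");
	if (!prog)
		return 1;

	/* Attach: !!old_prog != !!new_prog, so the vport is soft-reset
	 * and the queues are reconfigured for XDP before the program
	 * pointers are copied to the Rx queues.
	 */
	if (bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			   XDP_FLAGS_DRV_MODE, NULL))
		return 1;

	/* Detach: NULL program, queues are reconfigured back. */
	bpf_xdp_detach(ifindex, XDP_FLAGS_DRV_MODE, NULL);
	bpf_object__close(obj);

	return 0;
}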