Implement following ethtool callback function: .get_ringparam .set_ringparam These callbacks allow users to utilize ethtool for detailed queue depth configuration and monitoring. Co-developed-by: Zhu Yikai Signed-off-by: Zhu Yikai Signed-off-by: Fan Gong --- .../ethernet/huawei/hinic3/hinic3_ethtool.c | 101 +++++++++++++++++ .../net/ethernet/huawei/hinic3/hinic3_irq.c | 10 +- .../net/ethernet/huawei/hinic3/hinic3_main.c | 11 ++ .../huawei/hinic3/hinic3_netdev_ops.c | 103 +++++++++++++++++- .../ethernet/huawei/hinic3/hinic3_nic_dev.h | 16 +++ .../ethernet/huawei/hinic3/hinic3_nic_io.h | 4 + 6 files changed, 240 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c index 90fc16288de9..e47c3f43e7b9 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c @@ -409,6 +409,105 @@ hinic3_get_link_ksettings(struct net_device *netdev, return 0; } +static void hinic3_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + ring->rx_max_pending = HINIC3_MAX_RX_QUEUE_DEPTH; + ring->tx_max_pending = HINIC3_MAX_TX_QUEUE_DEPTH; + ring->rx_pending = nic_dev->rxqs[0].q_depth; + ring->tx_pending = nic_dev->txqs[0].q_depth; +} + +static void hinic3_update_qp_depth(struct net_device *netdev, + u32 sq_depth, u32 rq_depth) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u16 i; + + nic_dev->q_params.sq_depth = sq_depth; + nic_dev->q_params.rq_depth = rq_depth; + for (i = 0; i < nic_dev->max_qps; i++) { + nic_dev->txqs[i].q_depth = sq_depth; + nic_dev->txqs[i].q_mask = sq_depth - 1; + nic_dev->rxqs[i].q_depth = rq_depth; + nic_dev->rxqs[i].q_mask = rq_depth - 1; + } +} + +static int hinic3_check_ringparam_valid(struct net_device *netdev, + const struct 
ethtool_ringparam *ring) +{ + if (ring->rx_jumbo_pending || ring->rx_mini_pending) { + netdev_err(netdev, "Unsupported rx_jumbo_pending/rx_mini_pending\n"); + return -EINVAL; + } + + if (ring->tx_pending > HINIC3_MAX_TX_QUEUE_DEPTH || + ring->tx_pending < HINIC3_MIN_QUEUE_DEPTH || + ring->rx_pending > HINIC3_MAX_RX_QUEUE_DEPTH || + ring->rx_pending < HINIC3_MIN_QUEUE_DEPTH) { + netdev_err(netdev, + "Queue depth out of range tx[%d-%d] rx[%d-%d]\n", + HINIC3_MIN_QUEUE_DEPTH, HINIC3_MAX_TX_QUEUE_DEPTH, + HINIC3_MIN_QUEUE_DEPTH, HINIC3_MAX_RX_QUEUE_DEPTH); + return -EINVAL; + } + + return 0; +} + +static int hinic3_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_dyna_txrxq_params q_params = {}; + u32 new_sq_depth, new_rq_depth; + int err; + + err = hinic3_check_ringparam_valid(netdev, ring); + if (err) + return err; + + new_sq_depth = 1U << ilog2(ring->tx_pending); + new_rq_depth = 1U << ilog2(ring->rx_pending); + if (new_sq_depth == nic_dev->q_params.sq_depth && + new_rq_depth == nic_dev->q_params.rq_depth) + return 0; + + if (new_sq_depth != ring->tx_pending) + netdev_info(netdev, "Requested Tx depth trimmed to %d\n", + new_sq_depth); + if (new_rq_depth != ring->rx_pending) + netdev_info(netdev, "Requested Rx depth trimmed to %d\n", + new_rq_depth); + + netdev_info(netdev, "Change Tx/Rx ring depth from %u/%u to %u/%u\n", + nic_dev->q_params.sq_depth, nic_dev->q_params.rq_depth, + new_sq_depth, new_rq_depth); + + if (!netif_running(netdev)) { + hinic3_update_qp_depth(netdev, new_sq_depth, new_rq_depth); + } else { + q_params = nic_dev->q_params; + q_params.sq_depth = new_sq_depth; + q_params.rq_depth = new_rq_depth; + + err = hinic3_change_channel_settings(netdev, &q_params); + if (err) { + netdev_err(netdev, "Failed to change channel settings\n"); + return err; + } + } + + return 0; +} 
+ static const struct ethtool_ops hinic3_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_PKT_RATE_RX_USECS, @@ -417,6 +516,8 @@ static const struct ethtool_ops hinic3_ethtool_ops = { .get_msglevel = hinic3_get_msglevel, .set_msglevel = hinic3_set_msglevel, .get_link = ethtool_op_get_link, + .get_ringparam = hinic3_get_ringparam, + .set_ringparam = hinic3_set_ringparam, }; void hinic3_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c index e7d6c2033b45..d3b3927b5408 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c @@ -135,10 +135,16 @@ static int hinic3_set_interrupt_moder(struct net_device *netdev, u16 q_id, { struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); struct hinic3_interrupt_info info = {}; + unsigned long flags; int err; - if (q_id >= nic_dev->q_params.num_qps) + spin_lock_irqsave(&nic_dev->channel_res_lock, flags); + + if (!HINIC3_CHANNEL_RES_VALID(nic_dev) || + q_id >= nic_dev->q_params.num_qps) { + spin_unlock_irqrestore(&nic_dev->channel_res_lock, flags); return 0; + } info.interrupt_coalesc_set = 1; info.coalesc_timer_cfg = coalesc_timer_cfg; @@ -147,6 +153,8 @@ static int hinic3_set_interrupt_moder(struct net_device *netdev, u16 q_id, info.resend_timer_cfg = nic_dev->intr_coalesce[q_id].resend_timer_cfg; + spin_unlock_irqrestore(&nic_dev->channel_res_lock, flags); + err = hinic3_set_interrupt_cfg(nic_dev->hwdev, info); if (err) { netdev_err(netdev, diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c index 0a888fe4c975..3b470978714a 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c @@ -179,6 +179,8 @@ static int hinic3_sw_init(struct net_device *netdev) int err; mutex_init(&nic_dev->port_state_mutex); + 
mutex_init(&nic_dev->channel_cfg_lock); + spin_lock_init(&nic_dev->channel_res_lock); nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH; nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH; @@ -314,6 +316,15 @@ static void hinic3_link_status_change(struct net_device *netdev, bool link_status_up) { struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + unsigned long flags; + bool valid; + + spin_lock_irqsave(&nic_dev->channel_res_lock, flags); + valid = HINIC3_CHANNEL_RES_VALID(nic_dev); + spin_unlock_irqrestore(&nic_dev->channel_res_lock, flags); + + if (!valid) + return; if (link_status_up) { if (netif_carrier_ok(netdev)) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c index da73811641a9..cec501a9dd43 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c @@ -428,6 +428,84 @@ static void hinic3_vport_down(struct net_device *netdev) } } +int +hinic3_change_channel_settings(struct net_device *netdev, + struct hinic3_dyna_txrxq_params *trxq_params) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_dyna_txrxq_params old_qp_params = {}; + struct hinic3_dyna_qp_params new_qp_params = {}; + struct hinic3_dyna_qp_params cur_qp_params = {}; + bool need_teardown = false; + unsigned long flags; + int err; + + mutex_lock(&nic_dev->channel_cfg_lock); + + hinic3_config_num_qps(netdev, trxq_params); + + err = hinic3_alloc_channel_resources(netdev, &new_qp_params, + trxq_params); + if (err) { + netdev_err(netdev, "Failed to alloc channel resources\n"); + mutex_unlock(&nic_dev->channel_cfg_lock); + return err; + } + + spin_lock_irqsave(&nic_dev->channel_res_lock, flags); + if (!test_and_set_bit(HINIC3_CHANGE_RES_INVALID, &nic_dev->flags)) + need_teardown = true; + spin_unlock_irqrestore(&nic_dev->channel_res_lock, flags); + + if (need_teardown) { + hinic3_vport_down(netdev); + hinic3_close_channel(netdev); + 
hinic3_uninit_qps(nic_dev, &cur_qp_params); + hinic3_free_channel_resources(netdev, &cur_qp_params, + &nic_dev->q_params); + } + + if (nic_dev->num_qp_irq > trxq_params->num_qps) + hinic3_qp_irq_change(netdev, trxq_params->num_qps); + + spin_lock_irqsave(&nic_dev->channel_res_lock, flags); + old_qp_params = nic_dev->q_params; + nic_dev->q_params = *trxq_params; + spin_unlock_irqrestore(&nic_dev->channel_res_lock, flags); + + hinic3_init_qps(nic_dev, &new_qp_params); + + err = hinic3_open_channel(netdev); + if (err) + goto err_uninit_qps; + + err = hinic3_vport_up(netdev); + if (err) + goto err_close_channel; + + spin_lock_irqsave(&nic_dev->channel_res_lock, flags); + clear_bit(HINIC3_CHANGE_RES_INVALID, &nic_dev->flags); + spin_unlock_irqrestore(&nic_dev->channel_res_lock, flags); + + mutex_unlock(&nic_dev->channel_cfg_lock); + + return 0; + +err_close_channel: + hinic3_close_channel(netdev); +err_uninit_qps: + spin_lock_irqsave(&nic_dev->channel_res_lock, flags); + nic_dev->q_params = old_qp_params; + spin_unlock_irqrestore(&nic_dev->channel_res_lock, flags); + + hinic3_uninit_qps(nic_dev, &new_qp_params); + hinic3_free_channel_resources(netdev, &new_qp_params, trxq_params); + + mutex_unlock(&nic_dev->channel_cfg_lock); + + return err; +} + static int hinic3_open(struct net_device *netdev) { struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); @@ -487,16 +565,33 @@ static int hinic3_close(struct net_device *netdev) { struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); struct hinic3_dyna_qp_params qp_params; + bool need_teardown = false; + unsigned long flags; if (!test_and_clear_bit(HINIC3_INTF_UP, &nic_dev->flags)) { netdev_dbg(netdev, "Netdev already close, do nothing\n"); return 0; } - hinic3_vport_down(netdev); - hinic3_close_channel(netdev); - hinic3_uninit_qps(nic_dev, &qp_params); - hinic3_free_channel_resources(netdev, &qp_params, &nic_dev->q_params); + mutex_lock(&nic_dev->channel_cfg_lock); + + spin_lock_irqsave(&nic_dev->channel_res_lock, flags); + 
if (!test_and_set_bit(HINIC3_CHANGE_RES_INVALID, &nic_dev->flags)) + need_teardown = true; + spin_unlock_irqrestore(&nic_dev->channel_res_lock, flags); + + if (need_teardown) { + hinic3_vport_down(netdev); + hinic3_close_channel(netdev); + hinic3_uninit_qps(nic_dev, &qp_params); + hinic3_free_channel_resources(netdev, &qp_params, + &nic_dev->q_params); + } + + hinic3_free_nicio_res(nic_dev); + hinic3_destroy_num_qps(netdev); + + mutex_unlock(&nic_dev->channel_cfg_lock); return 0; } diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h index 9502293ff710..55b280888ad8 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h @@ -10,6 +10,9 @@ #include "hinic3_hw_cfg.h" #include "hinic3_hwdev.h" #include "hinic3_mgmt_interface.h" +#include "hinic3_nic_io.h" +#include "hinic3_tx.h" +#include "hinic3_rx.h" #define HINIC3_VLAN_BITMAP_BYTE_SIZE(nic_dev) (sizeof(*(nic_dev)->vlan_bitmap)) #define HINIC3_VLAN_BITMAP_SIZE(nic_dev) \ @@ -20,8 +23,13 @@ enum hinic3_flags { HINIC3_MAC_FILTER_CHANGED, HINIC3_RSS_ENABLE, HINIC3_UPDATE_MAC_FILTER, + HINIC3_CHANGE_RES_INVALID, }; +#define HINIC3_CHANNEL_RES_VALID(nic_dev) \ + (test_bit(HINIC3_INTF_UP, &(nic_dev)->flags) && \ + !test_bit(HINIC3_CHANGE_RES_INVALID, &(nic_dev)->flags)) + enum hinic3_event_work_flags { HINIC3_EVENT_WORK_TX_TIMEOUT, }; @@ -129,6 +137,10 @@ struct hinic3_nic_dev { struct work_struct rx_mode_work; /* lock for enable/disable port */ struct mutex port_state_mutex; + /* lock for channel configuration */ + struct mutex channel_cfg_lock; + /* lock for channel resources */ + spinlock_t channel_res_lock; struct list_head uc_filter_list; struct list_head mc_filter_list; @@ -143,6 +155,10 @@ struct hinic3_nic_dev { void hinic3_set_netdev_ops(struct net_device *netdev); int hinic3_set_hw_features(struct net_device *netdev); +int +hinic3_change_channel_settings(struct net_device *netdev, 
+ struct hinic3_dyna_txrxq_params *trxq_params); + int hinic3_qps_irq_init(struct net_device *netdev); void hinic3_qps_irq_uninit(struct net_device *netdev); diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h index 12eefabcf1db..3791b9bc865b 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h @@ -14,6 +14,10 @@ struct hinic3_nic_dev; #define HINIC3_RQ_WQEBB_SHIFT 3 #define HINIC3_SQ_WQEBB_SIZE BIT(HINIC3_SQ_WQEBB_SHIFT) +#define HINIC3_MAX_TX_QUEUE_DEPTH 65536 +#define HINIC3_MAX_RX_QUEUE_DEPTH 16384 +#define HINIC3_MIN_QUEUE_DEPTH 128 + /* ******************** RQ_CTRL ******************** */ enum hinic3_rq_wqe_type { HINIC3_NORMAL_RQ_WQE = 1, -- 2.43.0 Add PF/VF statistics functions in TX and RX processing. Implement following ethtool callback function: .get_sset_count .get_ethtool_stats .get_strings .get_eth_phy_stats .get_eth_mac_stats .get_eth_ctrl_stats .get_rmon_stats .get_pause_stats These callbacks allow users to utilize ethtool for detailed TX and RX netdev stats monitoring. 
Co-developed-by: Zhu Yikai Signed-off-by: Zhu Yikai Signed-off-by: Fan Gong --- .../ethernet/huawei/hinic3/hinic3_ethtool.c | 485 ++++++++++++++++++ .../ethernet/huawei/hinic3/hinic3_hw_intf.h | 13 +- .../huawei/hinic3/hinic3_mgmt_interface.h | 37 ++ .../ethernet/huawei/hinic3/hinic3_nic_cfg.c | 64 +++ .../ethernet/huawei/hinic3/hinic3_nic_cfg.h | 109 ++++ .../net/ethernet/huawei/hinic3/hinic3_rx.c | 59 ++- .../net/ethernet/huawei/hinic3/hinic3_rx.h | 15 +- .../net/ethernet/huawei/hinic3/hinic3_tx.c | 71 ++- .../net/ethernet/huawei/hinic3/hinic3_tx.h | 2 + 9 files changed, 845 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c index e47c3f43e7b9..be26698fc658 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c @@ -508,6 +508,483 @@ static int hinic3_set_ringparam(struct net_device *netdev, return 0; } +struct hinic3_stats { + char name[ETH_GSTRING_LEN]; + u32 size; + int offset; +}; + +#define HINIC3_RXQ_STAT(_stat_item) { \ + .name = "rxq%d_"#_stat_item, \ + .size = sizeof_field(struct hinic3_rxq_stats, _stat_item), \ + .offset = offsetof(struct hinic3_rxq_stats, _stat_item) \ +} + +#define HINIC3_TXQ_STAT(_stat_item) { \ + .name = "txq%d_"#_stat_item, \ + .size = sizeof_field(struct hinic3_txq_stats, _stat_item), \ + .offset = offsetof(struct hinic3_txq_stats, _stat_item) \ +} + +static struct hinic3_stats hinic3_rx_queue_stats[] = { + HINIC3_RXQ_STAT(csum_errors), + HINIC3_RXQ_STAT(other_errors), + HINIC3_RXQ_STAT(rx_buf_empty), + HINIC3_RXQ_STAT(alloc_skb_err), + HINIC3_RXQ_STAT(alloc_rx_buf_err), +}; + +static struct hinic3_stats hinic3_tx_queue_stats[] = { + HINIC3_TXQ_STAT(busy), + HINIC3_TXQ_STAT(skb_pad_err), + HINIC3_TXQ_STAT(frag_len_overflow), + HINIC3_TXQ_STAT(offload_cow_skb_err), + HINIC3_TXQ_STAT(map_frag_err), + HINIC3_TXQ_STAT(unknown_tunnel_pkt), + HINIC3_TXQ_STAT(frag_size_err), 
+}; + +#define HINIC3_FUNC_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = sizeof_field(struct l2nic_vport_stats, _stat_item), \ + .offset = offsetof(struct l2nic_vport_stats, _stat_item) \ +} + +static struct hinic3_stats hinic3_function_stats[] = { + HINIC3_FUNC_STAT(tx_unicast_pkts_vport), + HINIC3_FUNC_STAT(tx_unicast_bytes_vport), + HINIC3_FUNC_STAT(tx_multicast_pkts_vport), + HINIC3_FUNC_STAT(tx_multicast_bytes_vport), + HINIC3_FUNC_STAT(tx_broadcast_pkts_vport), + HINIC3_FUNC_STAT(tx_broadcast_bytes_vport), + + HINIC3_FUNC_STAT(rx_unicast_pkts_vport), + HINIC3_FUNC_STAT(rx_unicast_bytes_vport), + HINIC3_FUNC_STAT(rx_multicast_pkts_vport), + HINIC3_FUNC_STAT(rx_multicast_bytes_vport), + HINIC3_FUNC_STAT(rx_broadcast_pkts_vport), + HINIC3_FUNC_STAT(rx_broadcast_bytes_vport), + + HINIC3_FUNC_STAT(tx_discard_vport), + HINIC3_FUNC_STAT(rx_discard_vport), + HINIC3_FUNC_STAT(tx_err_vport), + HINIC3_FUNC_STAT(rx_err_vport), +}; + +#define HINIC3_PORT_STAT(_stat_item) { \ + .name = #_stat_item, \ + .size = sizeof_field(struct mag_cmd_port_stats, _stat_item), \ + .offset = offsetof(struct mag_cmd_port_stats, _stat_item) \ +} + +static struct hinic3_stats hinic3_port_stats[] = { + HINIC3_PORT_STAT(mac_tx_fragment_pkt_num), + HINIC3_PORT_STAT(mac_tx_undersize_pkt_num), + HINIC3_PORT_STAT(mac_tx_undermin_pkt_num), + HINIC3_PORT_STAT(mac_tx_1519_max_bad_pkt_num), + HINIC3_PORT_STAT(mac_tx_1519_max_good_pkt_num), + HINIC3_PORT_STAT(mac_tx_oversize_pkt_num), + HINIC3_PORT_STAT(mac_tx_jabber_pkt_num), + HINIC3_PORT_STAT(mac_tx_bad_pkt_num), + HINIC3_PORT_STAT(mac_tx_bad_oct_num), + HINIC3_PORT_STAT(mac_tx_good_oct_num), + HINIC3_PORT_STAT(mac_tx_total_pkt_num), + HINIC3_PORT_STAT(mac_tx_uni_pkt_num), + HINIC3_PORT_STAT(mac_tx_pfc_pkt_num), + HINIC3_PORT_STAT(mac_tx_pfc_pri0_pkt_num), + HINIC3_PORT_STAT(mac_tx_pfc_pri1_pkt_num), + HINIC3_PORT_STAT(mac_tx_pfc_pri2_pkt_num), + HINIC3_PORT_STAT(mac_tx_pfc_pri3_pkt_num), + HINIC3_PORT_STAT(mac_tx_pfc_pri4_pkt_num), + 
HINIC3_PORT_STAT(mac_tx_pfc_pri5_pkt_num), + HINIC3_PORT_STAT(mac_tx_pfc_pri6_pkt_num), + HINIC3_PORT_STAT(mac_tx_pfc_pri7_pkt_num), + HINIC3_PORT_STAT(mac_tx_err_all_pkt_num), + HINIC3_PORT_STAT(mac_tx_from_app_good_pkt_num), + HINIC3_PORT_STAT(mac_tx_from_app_bad_pkt_num), + + HINIC3_PORT_STAT(mac_rx_undermin_pkt_num), + HINIC3_PORT_STAT(mac_rx_1519_max_bad_pkt_num), + HINIC3_PORT_STAT(mac_rx_1519_max_good_pkt_num), + HINIC3_PORT_STAT(mac_rx_bad_pkt_num), + HINIC3_PORT_STAT(mac_rx_bad_oct_num), + HINIC3_PORT_STAT(mac_rx_good_oct_num), + HINIC3_PORT_STAT(mac_rx_total_pkt_num), + HINIC3_PORT_STAT(mac_rx_uni_pkt_num), + HINIC3_PORT_STAT(mac_rx_pfc_pkt_num), + HINIC3_PORT_STAT(mac_rx_pfc_pri0_pkt_num), + HINIC3_PORT_STAT(mac_rx_pfc_pri1_pkt_num), + HINIC3_PORT_STAT(mac_rx_pfc_pri2_pkt_num), + HINIC3_PORT_STAT(mac_rx_pfc_pri3_pkt_num), + HINIC3_PORT_STAT(mac_rx_pfc_pri4_pkt_num), + HINIC3_PORT_STAT(mac_rx_pfc_pri5_pkt_num), + HINIC3_PORT_STAT(mac_rx_pfc_pri6_pkt_num), + HINIC3_PORT_STAT(mac_rx_pfc_pri7_pkt_num), + HINIC3_PORT_STAT(mac_rx_send_app_good_pkt_num), + HINIC3_PORT_STAT(mac_rx_send_app_bad_pkt_num), + HINIC3_PORT_STAT(mac_rx_unfilter_pkt_num), +}; + +static int hinic3_get_sset_count(struct net_device *netdev, int sset) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + int count, q_num; + + switch (sset) { + case ETH_SS_STATS: + q_num = nic_dev->q_params.num_qps; + count = ARRAY_SIZE(hinic3_function_stats) + + (ARRAY_SIZE(hinic3_tx_queue_stats) + + ARRAY_SIZE(hinic3_rx_queue_stats)) * + q_num; + + if (!HINIC3_IS_VF(nic_dev->hwdev)) + count += ARRAY_SIZE(hinic3_port_stats); + + return count; + default: + return -EOPNOTSUPP; + } +} + +static u64 get_val_of_ptr(u32 size, const void *ptr) +{ + u64 ret = size == sizeof(u64) ? *(u64 *)ptr : + size == sizeof(u32) ? *(u32 *)ptr : + size == sizeof(u16) ? 
*(u16 *)ptr : + *(u8 *)ptr; + + return ret; +} + +static void hinic3_get_drv_queue_stats(struct net_device *netdev, u64 *data) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_txq_stats txq_stats = {}; + struct hinic3_rxq_stats rxq_stats = {}; + u16 i = 0, j, qid; + char *p; + + u64_stats_init(&txq_stats.syncp); + u64_stats_init(&rxq_stats.syncp); + + for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { + if (!nic_dev->txqs) + break; + + hinic3_txq_get_stats(&nic_dev->txqs[qid], &txq_stats); + for (j = 0; j < ARRAY_SIZE(hinic3_tx_queue_stats); j++, i++) { + p = (char *)&txq_stats + + hinic3_tx_queue_stats[j].offset; + data[i] = get_val_of_ptr(hinic3_tx_queue_stats[j].size, + p); + } + } + + for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { + if (!nic_dev->rxqs) + break; + + hinic3_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats); + for (j = 0; j < ARRAY_SIZE(hinic3_rx_queue_stats); j++, i++) { + p = (char *)&rxq_stats + + hinic3_rx_queue_stats[j].offset; + data[i] = get_val_of_ptr(hinic3_rx_queue_stats[j].size, + p); + } + } +} + +static u16 hinic3_get_ethtool_port_stats(struct net_device *netdev, u64 *data) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct mag_cmd_port_stats *ps; + u16 i = 0, j; + char *p; + int err; + + ps = kmalloc_obj(*ps); + if (!ps) + goto err_zero_stats; + + err = hinic3_get_phy_port_stats(nic_dev->hwdev, ps); + if (err) { + kfree(ps); + netdev_err(netdev, "Failed to get port stats from fw\n"); + goto err_zero_stats; + } + + for (j = 0; j < ARRAY_SIZE(hinic3_port_stats); j++, i++) { + p = (char *)ps + hinic3_port_stats[j].offset; + data[i] = get_val_of_ptr(hinic3_port_stats[j].size, p); + } + + kfree(ps); + + return i; + +err_zero_stats: + memset(&data[i], 0, ARRAY_SIZE(hinic3_port_stats) * sizeof(*data)); + + return i + ARRAY_SIZE(hinic3_port_stats); +} + +static void hinic3_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct hinic3_nic_dev 
*nic_dev = netdev_priv(netdev); + struct l2nic_vport_stats vport_stats = {}; + u16 i = 0, j; + char *p; + int err; + + err = hinic3_get_vport_stats(nic_dev->hwdev, + hinic3_global_func_id(nic_dev->hwdev), + &vport_stats); + if (err) + netdev_err(netdev, "Failed to get function stats from fw\n"); + + for (j = 0; j < ARRAY_SIZE(hinic3_function_stats); j++, i++) { + p = (char *)&vport_stats + hinic3_function_stats[j].offset; + data[i] = get_val_of_ptr(hinic3_function_stats[j].size, p); + } + + if (!HINIC3_IS_VF(nic_dev->hwdev)) + i += hinic3_get_ethtool_port_stats(netdev, data + i); + + hinic3_get_drv_queue_stats(netdev, data + i); +} + +static u16 hinic3_get_hw_stats_strings(struct net_device *netdev, char *p) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u16 i, cnt = 0; + + for (i = 0; i < ARRAY_SIZE(hinic3_function_stats); i++) { + memcpy(p, hinic3_function_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + cnt++; + } + + if (!HINIC3_IS_VF(nic_dev->hwdev)) { + for (i = 0; i < ARRAY_SIZE(hinic3_port_stats); i++) { + memcpy(p, hinic3_port_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + cnt++; + } + } + + return cnt; +} + +static void hinic3_get_qp_stats_strings(struct net_device *netdev, char *p) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u8 *data = p; + u16 i, j; + + for (i = 0; i < nic_dev->q_params.num_qps; i++) { + for (j = 0; j < ARRAY_SIZE(hinic3_tx_queue_stats); j++) + ethtool_sprintf(&data, + hinic3_tx_queue_stats[j].name, i); + } + + for (i = 0; i < nic_dev->q_params.num_qps; i++) { + for (j = 0; j < ARRAY_SIZE(hinic3_rx_queue_stats); j++) + ethtool_sprintf(&data, + hinic3_rx_queue_stats[j].name, i); + } +} + +static void hinic3_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + char *p = (char *)data; + u16 offset; + + switch (stringset) { + case ETH_SS_STATS: + offset = hinic3_get_hw_stats_strings(netdev, p); + hinic3_get_qp_stats_strings(netdev, + p + offset * ETH_GSTRING_LEN); + + 
return; + default: + netdev_err(netdev, "Invalid string set %u.\n", stringset); + return; + } +} + +static void hinic3_get_eth_phy_stats(struct net_device *netdev, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct mag_cmd_port_stats *ps; + int err; + + ps = kmalloc_obj(*ps); + if (!ps) + return; + + err = hinic3_get_phy_port_stats(nic_dev->hwdev, ps); + if (err) { + kfree(ps); + netdev_err(netdev, "Failed to get eth phy stats from fw\n"); + return; + } + + phy_stats->SymbolErrorDuringCarrier = ps->mac_rx_sym_err_pkt_num; + + kfree(ps); +} + +static void hinic3_get_eth_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct mag_cmd_port_stats *ps; + int err; + + ps = kmalloc_obj(*ps); + if (!ps) + return; + + err = hinic3_get_phy_port_stats(nic_dev->hwdev, ps); + if (err) { + kfree(ps); + netdev_err(netdev, "Failed to get eth mac stats from fw\n"); + return; + } + + mac_stats->FramesTransmittedOK = ps->mac_tx_good_pkt_num; + mac_stats->FramesReceivedOK = ps->mac_rx_good_pkt_num; + mac_stats->FrameCheckSequenceErrors = ps->mac_rx_fcs_err_pkt_num; + mac_stats->OctetsTransmittedOK = ps->mac_tx_total_oct_num; + mac_stats->OctetsReceivedOK = ps->mac_rx_total_oct_num; + mac_stats->MulticastFramesXmittedOK = ps->mac_tx_multi_pkt_num; + mac_stats->BroadcastFramesXmittedOK = ps->mac_tx_broad_pkt_num; + mac_stats->MulticastFramesReceivedOK = ps->mac_rx_multi_pkt_num; + mac_stats->BroadcastFramesReceivedOK = ps->mac_rx_broad_pkt_num; + + kfree(ps); +} + +static void hinic3_get_eth_ctrl_stats(struct net_device *netdev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct mag_cmd_port_stats *ps; + int err; + + ps = kmalloc_obj(*ps); + if (!ps) + return; + + err = hinic3_get_phy_port_stats(nic_dev->hwdev, ps); + if (err) { + kfree(ps); + netdev_err(netdev, "Failed to get 
eth ctrl stats from fw\n"); + return; + } + + ctrl_stats->MACControlFramesTransmitted = ps->mac_tx_control_pkt_num; + ctrl_stats->MACControlFramesReceived = ps->mac_rx_control_pkt_num; + + kfree(ps); +} + +static const struct ethtool_rmon_hist_range hinic3_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 2047 }, + { 2048, 4095 }, + { 4096, 8191 }, + { 8192, 9216 }, + { 9217, 12287 }, + {} +}; + +static void hinic3_get_rmon_stats(struct net_device *netdev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct mag_cmd_port_stats *ps; + int err; + + ps = kmalloc_obj(*ps); + if (!ps) + return; + + err = hinic3_get_phy_port_stats(nic_dev->hwdev, ps); + if (err) { + kfree(ps); + netdev_err(netdev, "Failed to get eth rmon stats from fw\n"); + return; + } + + rmon_stats->undersize_pkts = ps->mac_rx_undersize_pkt_num; + rmon_stats->oversize_pkts = ps->mac_rx_oversize_pkt_num; + rmon_stats->fragments = ps->mac_rx_fragment_pkt_num; + rmon_stats->jabbers = ps->mac_rx_jabber_pkt_num; + + rmon_stats->hist[0] = ps->mac_rx_64_oct_pkt_num; + rmon_stats->hist[1] = ps->mac_rx_65_127_oct_pkt_num; + rmon_stats->hist[2] = ps->mac_rx_128_255_oct_pkt_num; + rmon_stats->hist[3] = ps->mac_rx_256_511_oct_pkt_num; + rmon_stats->hist[4] = ps->mac_rx_512_1023_oct_pkt_num; + rmon_stats->hist[5] = ps->mac_rx_1024_1518_oct_pkt_num; + rmon_stats->hist[6] = ps->mac_rx_1519_2047_oct_pkt_num; + rmon_stats->hist[7] = ps->mac_rx_2048_4095_oct_pkt_num; + rmon_stats->hist[8] = ps->mac_rx_4096_8191_oct_pkt_num; + rmon_stats->hist[9] = ps->mac_rx_8192_9216_oct_pkt_num; + rmon_stats->hist[10] = ps->mac_rx_9217_12287_oct_pkt_num; + + rmon_stats->hist_tx[0] = ps->mac_tx_64_oct_pkt_num; + rmon_stats->hist_tx[1] = ps->mac_tx_65_127_oct_pkt_num; + rmon_stats->hist_tx[2] = ps->mac_tx_128_255_oct_pkt_num; + rmon_stats->hist_tx[3] = 
ps->mac_tx_256_511_oct_pkt_num; + rmon_stats->hist_tx[4] = ps->mac_tx_512_1023_oct_pkt_num; + rmon_stats->hist_tx[5] = ps->mac_tx_1024_1518_oct_pkt_num; + rmon_stats->hist_tx[6] = ps->mac_tx_1519_2047_oct_pkt_num; + rmon_stats->hist_tx[7] = ps->mac_tx_2048_4095_oct_pkt_num; + rmon_stats->hist_tx[8] = ps->mac_tx_4096_8191_oct_pkt_num; + rmon_stats->hist_tx[9] = ps->mac_tx_8192_9216_oct_pkt_num; + rmon_stats->hist_tx[10] = ps->mac_tx_9217_12287_oct_pkt_num; + + *ranges = hinic3_rmon_ranges; + + kfree(ps); +} + +static void hinic3_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *pause_stats) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct mag_cmd_port_stats *ps; + int err; + + ps = kmalloc_obj(*ps); + if (!ps) + return; + + err = hinic3_get_phy_port_stats(nic_dev->hwdev, ps); + if (err) { + kfree(ps); + netdev_err(netdev, "Failed to get eth pause stats from fw\n"); + return; + } + + pause_stats->tx_pause_frames = ps->mac_tx_pause_num; + pause_stats->rx_pause_frames = ps->mac_rx_pause_num; + + kfree(ps); +} + static const struct ethtool_ops hinic3_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_PKT_RATE_RX_USECS, @@ -518,6 +995,14 @@ static const struct ethtool_ops hinic3_ethtool_ops = { .get_link = ethtool_op_get_link, .get_ringparam = hinic3_get_ringparam, .set_ringparam = hinic3_set_ringparam, + .get_sset_count = hinic3_get_sset_count, + .get_ethtool_stats = hinic3_get_ethtool_stats, + .get_strings = hinic3_get_strings, + .get_eth_phy_stats = hinic3_get_eth_phy_stats, + .get_eth_mac_stats = hinic3_get_eth_mac_stats, + .get_eth_ctrl_stats = hinic3_get_eth_ctrl_stats, + .get_rmon_stats = hinic3_get_rmon_stats, + .get_pause_stats = hinic3_get_pause_stats, }; void hinic3_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h index cfc9daa3034f..0b2ebef04c02 100644 --- 
a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h @@ -51,7 +51,18 @@ static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_para msg_params->in_size = buf_size; msg_params->expected_out_size = buf_size; msg_params->timeout_ms = 0; -} +}; + +static inline void +mgmt_msg_params_init_in_out(struct mgmt_msg_params *msg_params, void *in_buf, + void *out_buf, u32 in_buf_size, u32 out_buf_size) +{ + msg_params->buf_in = in_buf; + msg_params->buf_out = out_buf; + msg_params->in_size = in_buf_size; + msg_params->expected_out_size = out_buf_size; + msg_params->timeout_ms = 0; +}; enum cfg_cmd { CFG_CMD_GET_DEV_CAP = 0, diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h index c5bca3c4af96..76c691f82703 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h @@ -143,6 +143,41 @@ struct l2nic_cmd_set_dcb_state { u8 rsvd[7]; }; +struct l2nic_port_stats_info { + struct mgmt_msg_head msg_head; + u16 func_id; + u16 rsvd1; +}; + +struct l2nic_vport_stats { + u64 tx_unicast_pkts_vport; + u64 tx_unicast_bytes_vport; + u64 tx_multicast_pkts_vport; + u64 tx_multicast_bytes_vport; + u64 tx_broadcast_pkts_vport; + u64 tx_broadcast_bytes_vport; + + u64 rx_unicast_pkts_vport; + u64 rx_unicast_bytes_vport; + u64 rx_multicast_pkts_vport; + u64 rx_multicast_bytes_vport; + u64 rx_broadcast_pkts_vport; + u64 rx_broadcast_bytes_vport; + + u64 tx_discard_vport; + u64 rx_discard_vport; + u64 tx_err_vport; + u64 rx_err_vport; +}; + +struct l2nic_cmd_vport_stats { + struct mgmt_msg_head msg_head; + u32 stats_size; + u32 rsvd1; + struct l2nic_vport_stats stats; + u64 rsvd2[6]; +}; + struct l2nic_cmd_lro_config { struct mgmt_msg_head msg_head; u16 func_id; @@ -234,6 +269,7 @@ enum l2nic_cmd { L2NIC_CMD_SET_VPORT_ENABLE = 6, L2NIC_CMD_SET_RX_MODE = 7, 
L2NIC_CMD_SET_SQ_CI_ATTR = 8, + L2NIC_CMD_GET_VPORT_STAT = 9, L2NIC_CMD_CLEAR_QP_RESOURCE = 11, L2NIC_CMD_CFG_RX_LRO = 13, L2NIC_CMD_CFG_LRO_TIMER = 14, @@ -272,6 +308,7 @@ enum mag_cmd { MAG_CMD_SET_PORT_ENABLE = 6, MAG_CMD_GET_LINK_STATUS = 7, + MAG_CMD_GET_PORT_STAT = 151, MAG_CMD_GET_PORT_INFO = 153, }; diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c index de5a7984d2cb..1b14dc824ce1 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c @@ -639,6 +639,42 @@ int hinic3_get_link_status(struct hinic3_hwdev *hwdev, bool *link_status_up) return 0; } +int hinic3_get_phy_port_stats(struct hinic3_hwdev *hwdev, + struct mag_cmd_port_stats *stats) +{ + struct mag_cmd_port_stats_info stats_info = {}; + struct mag_cmd_get_port_stat *ps; + struct mgmt_msg_params msg_params = {}; + int err; + + ps = kzalloc_obj(*ps); + if (!ps) + return -ENOMEM; + + stats_info.port_id = hinic3_physical_port_id(hwdev); + + mgmt_msg_params_init_in_out(&msg_params, &stats_info, ps, + sizeof(stats_info), sizeof(*ps)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_HILINK, + MAG_CMD_GET_PORT_STAT, &msg_params); + + if (err || ps->head.status) { + dev_err(hwdev->dev, + "Failed to get port statistics, err: %d, status: 0x%x\n", + err, ps->head.status); + err = -EFAULT; + goto out; + } + + memcpy(stats, &ps->counter, sizeof(*stats)); + +out: + kfree(ps); + + return err; +} + int hinic3_get_port_info(struct hinic3_hwdev *hwdev, struct hinic3_nic_port_info *port_info) { @@ -738,3 +774,31 @@ int hinic3_get_pause_info(struct hinic3_nic_dev *nic_dev, return hinic3_cfg_hw_pause(nic_dev->hwdev, MGMT_MSG_CMD_OP_GET, nic_pause); } + +int hinic3_get_vport_stats(struct hinic3_hwdev *hwdev, u16 func_id, + struct l2nic_vport_stats *stats) +{ + struct l2nic_cmd_vport_stats vport_stats = {}; + struct l2nic_port_stats_info stats_info = {}; + struct mgmt_msg_params msg_params 
= {}; + int err; + + stats_info.func_id = func_id; + + mgmt_msg_params_init_in_out(&msg_params, &stats_info, &vport_stats, + sizeof(stats_info), sizeof(vport_stats)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_GET_VPORT_STAT, &msg_params); + + if (err || vport_stats.msg_head.status) { + dev_err(hwdev->dev, + "Failed to get function statistics, err: %d, status: 0x%x\n", + err, vport_stats.msg_head.status); + return -EFAULT; + } + + memcpy(stats, &vport_stats.stats, sizeof(*stats)); + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h index 5d52202a8d4e..80573c121539 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h @@ -129,6 +129,110 @@ struct mag_cmd_get_xsfp_present { u8 rsvd[2]; }; +struct mag_cmd_port_stats { + u64 mac_tx_fragment_pkt_num; + u64 mac_tx_undersize_pkt_num; + u64 mac_tx_undermin_pkt_num; + u64 mac_tx_64_oct_pkt_num; + u64 mac_tx_65_127_oct_pkt_num; + u64 mac_tx_128_255_oct_pkt_num; + u64 mac_tx_256_511_oct_pkt_num; + u64 mac_tx_512_1023_oct_pkt_num; + u64 mac_tx_1024_1518_oct_pkt_num; + u64 mac_tx_1519_2047_oct_pkt_num; + u64 mac_tx_2048_4095_oct_pkt_num; + u64 mac_tx_4096_8191_oct_pkt_num; + u64 mac_tx_8192_9216_oct_pkt_num; + u64 mac_tx_9217_12287_oct_pkt_num; + u64 mac_tx_12288_16383_oct_pkt_num; + u64 mac_tx_1519_max_bad_pkt_num; + u64 mac_tx_1519_max_good_pkt_num; + u64 mac_tx_oversize_pkt_num; + u64 mac_tx_jabber_pkt_num; + u64 mac_tx_bad_pkt_num; + u64 mac_tx_bad_oct_num; + u64 mac_tx_good_pkt_num; + u64 mac_tx_good_oct_num; + u64 mac_tx_total_pkt_num; + u64 mac_tx_total_oct_num; + u64 mac_tx_uni_pkt_num; + u64 mac_tx_multi_pkt_num; + u64 mac_tx_broad_pkt_num; + u64 mac_tx_pause_num; + u64 mac_tx_pfc_pkt_num; + u64 mac_tx_pfc_pri0_pkt_num; + u64 mac_tx_pfc_pri1_pkt_num; + u64 mac_tx_pfc_pri2_pkt_num; + u64 mac_tx_pfc_pri3_pkt_num; + u64 mac_tx_pfc_pri4_pkt_num; 
+ u64 mac_tx_pfc_pri5_pkt_num; + u64 mac_tx_pfc_pri6_pkt_num; + u64 mac_tx_pfc_pri7_pkt_num; + u64 mac_tx_control_pkt_num; + u64 mac_tx_err_all_pkt_num; + u64 mac_tx_from_app_good_pkt_num; + u64 mac_tx_from_app_bad_pkt_num; + + u64 mac_rx_fragment_pkt_num; + u64 mac_rx_undersize_pkt_num; + u64 mac_rx_undermin_pkt_num; + u64 mac_rx_64_oct_pkt_num; + u64 mac_rx_65_127_oct_pkt_num; + u64 mac_rx_128_255_oct_pkt_num; + u64 mac_rx_256_511_oct_pkt_num; + u64 mac_rx_512_1023_oct_pkt_num; + u64 mac_rx_1024_1518_oct_pkt_num; + u64 mac_rx_1519_2047_oct_pkt_num; + u64 mac_rx_2048_4095_oct_pkt_num; + u64 mac_rx_4096_8191_oct_pkt_num; + u64 mac_rx_8192_9216_oct_pkt_num; + u64 mac_rx_9217_12287_oct_pkt_num; + u64 mac_rx_12288_16383_oct_pkt_num; + u64 mac_rx_1519_max_bad_pkt_num; + u64 mac_rx_1519_max_good_pkt_num; + u64 mac_rx_oversize_pkt_num; + u64 mac_rx_jabber_pkt_num; + u64 mac_rx_bad_pkt_num; + u64 mac_rx_bad_oct_num; + u64 mac_rx_good_pkt_num; + u64 mac_rx_good_oct_num; + u64 mac_rx_total_pkt_num; + u64 mac_rx_total_oct_num; + u64 mac_rx_uni_pkt_num; + u64 mac_rx_multi_pkt_num; + u64 mac_rx_broad_pkt_num; + u64 mac_rx_pause_num; + u64 mac_rx_pfc_pkt_num; + u64 mac_rx_pfc_pri0_pkt_num; + u64 mac_rx_pfc_pri1_pkt_num; + u64 mac_rx_pfc_pri2_pkt_num; + u64 mac_rx_pfc_pri3_pkt_num; + u64 mac_rx_pfc_pri4_pkt_num; + u64 mac_rx_pfc_pri5_pkt_num; + u64 mac_rx_pfc_pri6_pkt_num; + u64 mac_rx_pfc_pri7_pkt_num; + u64 mac_rx_control_pkt_num; + u64 mac_rx_sym_err_pkt_num; + u64 mac_rx_fcs_err_pkt_num; + u64 mac_rx_send_app_good_pkt_num; + u64 mac_rx_send_app_bad_pkt_num; + u64 mac_rx_unfilter_pkt_num; +}; + +struct mag_cmd_port_stats_info { + struct mgmt_msg_head head; + + u8 port_id; + u8 rsvd0[3]; +}; + +struct mag_cmd_get_port_stat { + struct mgmt_msg_head head; + + struct mag_cmd_port_stats counter; + u64 rsvd1[15]; +}; + enum link_err_type { LINK_ERR_MODULE_UNRECOGENIZED, LINK_ERR_NUM, @@ -209,6 +313,11 @@ int hinic3_get_port_info(struct hinic3_hwdev *hwdev, struct 
hinic3_nic_port_info *port_info); int hinic3_set_vport_enable(struct hinic3_hwdev *hwdev, u16 func_id, bool enable); +int hinic3_get_phy_port_stats(struct hinic3_hwdev *hwdev, + struct mag_cmd_port_stats *stats); +int hinic3_get_vport_stats(struct hinic3_hwdev *hwdev, u16 func_id, + struct l2nic_vport_stats *stats); + int hinic3_add_vlan(struct hinic3_hwdev *hwdev, u16 vlan_id, u16 func_id); int hinic3_del_vlan(struct hinic3_hwdev *hwdev, u16 vlan_id, u16 func_id); diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c index 309ab5901379..7fadb88ff722 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c @@ -29,7 +29,7 @@ #define HINIC3_LRO_PKT_HDR_LEN_IPV4 66 #define HINIC3_LRO_PKT_HDR_LEN_IPV6 86 #define HINIC3_LRO_PKT_HDR_LEN(cqe) \ - (RQ_CQE_OFFOLAD_TYPE_GET((cqe)->offload_type, IP_TYPE) == \ + (RQ_CQE_OFFOLAD_TYPE_GET(le32_to_cpu((cqe)->offload_type), IP_TYPE) == \ HINIC3_RX_IPV6_PKT ? 
HINIC3_LRO_PKT_HDR_LEN_IPV6 : \ HINIC3_LRO_PKT_HDR_LEN_IPV4) @@ -46,7 +46,6 @@ static void hinic3_rxq_clean_stats(struct hinic3_rxq_stats *rxq_stats) rxq_stats->alloc_skb_err = 0; rxq_stats->alloc_rx_buf_err = 0; - rxq_stats->restore_drop_sge = 0; u64_stats_update_end(&rxq_stats->syncp); } @@ -155,8 +154,12 @@ static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq) err = rx_alloc_mapped_page(rxq->page_pool, rx_info, rxq->buf_len); - if (unlikely(err)) + if (unlikely(err)) { + u64_stats_update_begin(&rxq->rxq_stats.syncp); + rxq->rxq_stats.alloc_rx_buf_err++; + u64_stats_update_end(&rxq->rxq_stats.syncp); break; + } dma_addr = page_pool_get_dma_addr(rx_info->page) + rx_info->page_offset; @@ -170,6 +173,10 @@ static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq) rxq->next_to_update << HINIC3_NORMAL_RQ_WQE); rxq->delta -= i; rxq->next_to_alloc = rxq->next_to_update; + } else if (free_wqebbs == rxq->q_depth - 1) { + u64_stats_update_begin(&rxq->rxq_stats.syncp); + rxq->rxq_stats.rx_buf_empty++; + u64_stats_update_end(&rxq->rxq_stats.syncp); } return i; @@ -330,11 +337,23 @@ static void hinic3_rx_csum(struct hinic3_rxq *rxq, u32 offload_type, struct net_device *netdev = rxq->netdev; bool l2_tunnel; + if (unlikely(csum_err == HINIC3_RX_CSUM_IPSU_OTHER_ERR)) { + u64_stats_update_begin(&rxq->rxq_stats.syncp); + rxq->rxq_stats.other_errors++; + u64_stats_update_end(&rxq->rxq_stats.syncp); + } + if (!(netdev->features & NETIF_F_RXCSUM)) return; if (unlikely(csum_err)) { /* pkt type is recognized by HW, and csum is wrong */ + if (!(csum_err & (HINIC3_RX_CSUM_HW_CHECK_NONE | + HINIC3_RX_CSUM_IPSU_OTHER_ERR))) { + u64_stats_update_begin(&rxq->rxq_stats.syncp); + rxq->rxq_stats.csum_errors++; + u64_stats_update_end(&rxq->rxq_stats.syncp); + } skb->ip_summed = CHECKSUM_NONE; return; } @@ -387,8 +406,12 @@ static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe, u16 num_lro; skb = hinic3_fetch_rx_buffer(rxq, pkt_len); - if (unlikely(!skb)) + if 
(unlikely(!skb)) { + u64_stats_update_begin(&rxq->rxq_stats.syncp); + rxq->rxq_stats.alloc_skb_err++; + u64_stats_update_end(&rxq->rxq_stats.syncp); return -ENOMEM; + } /* place header in linear portion of buffer */ if (skb_is_nonlinear(skb)) @@ -550,11 +573,28 @@ int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq, return 0; } +void hinic3_rxq_get_stats(struct hinic3_rxq *rxq, + struct hinic3_rxq_stats *stats) +{ + struct hinic3_rxq_stats *rxq_stats = &rxq->rxq_stats; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&rxq_stats->syncp); + stats->csum_errors = rxq_stats->csum_errors; + stats->other_errors = rxq_stats->other_errors; + stats->rx_buf_empty = rxq_stats->rx_buf_empty; + stats->alloc_skb_err = rxq_stats->alloc_skb_err; + stats->alloc_rx_buf_err = rxq_stats->alloc_rx_buf_err; + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); +} + int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget) { struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev); u32 sw_ci, status, pkt_len, vlan_len; struct hinic3_rq_cqe *rx_cqe; + u64 rx_bytes = 0; u32 num_wqe = 0; int nr_pkts = 0; u16 num_lro; @@ -574,10 +614,14 @@ int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget) if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status)) break; + rx_bytes += pkt_len; nr_pkts++; num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO); - if (num_lro) + if (num_lro) { + rx_bytes += (num_lro - 1) * + HINIC3_LRO_PKT_HDR_LEN(rx_cqe); num_wqe += hinic3_get_sge_num(rxq, pkt_len); + } rx_cqe->status = 0; @@ -588,5 +632,10 @@ int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget) if (rxq->delta >= HINIC3_RX_BUFFER_WRITE) hinic3_rx_fill_buffers(rxq); + u64_stats_update_begin(&rxq->rxq_stats.syncp); + rxq->rxq_stats.packets += (u64)nr_pkts; + rxq->rxq_stats.bytes += rx_bytes; + u64_stats_update_end(&rxq->rxq_stats.syncp); + return nr_pkts; } diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h index 
06d1b3299e7c..c11d080408a7 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h @@ -8,6 +8,17 @@ #include #include +/* rx cqe checksum err */ +#define HINIC3_RX_CSUM_IP_CSUM_ERR BIT(0) +#define HINIC3_RX_CSUM_TCP_CSUM_ERR BIT(1) +#define HINIC3_RX_CSUM_UDP_CSUM_ERR BIT(2) +#define HINIC3_RX_CSUM_IGMP_CSUM_ERR BIT(3) +#define HINIC3_RX_CSUM_ICMPV4_CSUM_ERR BIT(4) +#define HINIC3_RX_CSUM_ICMPV6_CSUM_ERR BIT(5) +#define HINIC3_RX_CSUM_SCTP_CRC_ERR BIT(6) +#define HINIC3_RX_CSUM_HW_CHECK_NONE BIT(7) +#define HINIC3_RX_CSUM_IPSU_OTHER_ERR BIT(8) + #define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK GENMASK(4, 0) #define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK GENMASK(6, 5) #define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK GENMASK(11, 8) @@ -39,7 +50,6 @@ struct hinic3_rxq_stats { u64 rx_buf_empty; u64 alloc_skb_err; u64 alloc_rx_buf_err; - u64 restore_drop_sge; struct u64_stats_sync syncp; }; @@ -123,6 +133,9 @@ void hinic3_free_rxqs_res(struct net_device *netdev, u16 num_rq, u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res); int hinic3_configure_rxqs(struct net_device *netdev, u16 num_rq, u32 rq_depth, struct hinic3_dyna_rxq_res *rxqs_res); + +void hinic3_rxq_get_stats(struct hinic3_rxq *rxq, + struct hinic3_rxq_stats *stats); int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget); #endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c index 9306bf0020ca..3fbbfa5d96b6 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c @@ -97,8 +97,12 @@ static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb, dma_info[0].dma = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, dma_info[0].dma)) + if (dma_mapping_error(&pdev->dev, dma_info[0].dma)) { + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.map_frag_err++; + 
u64_stats_update_end(&txq->txq_stats.syncp); return -EFAULT; + } dma_info[0].len = skb_headlen(skb); @@ -117,6 +121,9 @@ static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb, skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(&pdev->dev, dma_info[idx].dma)) { + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.map_frag_err++; + u64_stats_update_end(&txq->txq_stats.syncp); err = -EFAULT; goto err_unmap_page; } @@ -260,6 +267,9 @@ static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task, if (l4_proto != IPPROTO_UDP || ((struct udphdr *)skb_transport_header(skb))->dest != VXLAN_OFFLOAD_PORT_LE) { + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.unknown_tunnel_pkt++; + u64_stats_update_end(&txq->txq_stats.syncp); /* Unsupported tunnel packet, disable csum offload */ skb_checksum_help(skb); return 0; @@ -433,6 +443,27 @@ static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task, return offload; } +static void hinic3_get_pkt_stats(struct hinic3_txq *txq, struct sk_buff *skb) +{ + u32 hdr_len, tx_bytes; + unsigned short pkts; + + if (skb_is_gso(skb)) { + hdr_len = (skb_shinfo(skb)->gso_segs - 1) * + skb_tcp_all_headers(skb); + tx_bytes = skb->len + hdr_len; + pkts = skb_shinfo(skb)->gso_segs; + } else { + tx_bytes = skb->len > ETH_ZLEN ? 
skb->len : ETH_ZLEN; + pkts = 1; + } + + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.bytes += tx_bytes; + txq->txq_stats.packets += pkts; + u64_stats_update_end(&txq->txq_stats.syncp); +} + static u16 hinic3_get_and_update_sq_owner(struct hinic3_io_queue *sq, u16 curr_pi, u16 wqebb_cnt) { @@ -539,8 +570,12 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb, int err; if (unlikely(skb->len < MIN_SKB_LEN)) { - if (skb_pad(skb, MIN_SKB_LEN - skb->len)) + if (skb_pad(skb, MIN_SKB_LEN - skb->len)) { + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.skb_pad_err++; + u64_stats_update_end(&txq->txq_stats.syncp); goto err_out; + } skb->len = MIN_SKB_LEN; } @@ -595,6 +630,7 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb, txq->tx_stop_thrs, txq->tx_start_thrs); + hinic3_get_pkt_stats(txq, skb); hinic3_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner); hinic3_write_db(txq->sq, 0, DB_CFLAG_DP_SQ, hinic3_get_sq_local_pi(txq->sq)); @@ -604,6 +640,10 @@ static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb, err_drop_pkt: dev_kfree_skb_any(skb); err_out: + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.dropped++; + u64_stats_update_end(&txq->txq_stats.syncp); + return NETDEV_TX_OK; } @@ -611,12 +651,19 @@ netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); u16 q_id = skb_get_queue_mapping(skb); + struct hinic3_txq *txq; if (unlikely(!netif_carrier_ok(netdev))) goto err_drop_pkt; - if (unlikely(q_id >= nic_dev->q_params.num_qps)) + if (unlikely(q_id >= nic_dev->q_params.num_qps)) { + txq = &nic_dev->txqs[0]; + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.dropped++; + u64_stats_update_end(&txq->txq_stats.syncp); + goto err_drop_pkt; + } return hinic3_send_one_skb(skb, netdev, &nic_dev->txqs[q_id]); @@ -754,6 +801,24 @@ int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq, return 0; } 
+void hinic3_txq_get_stats(struct hinic3_txq *txq, + struct hinic3_txq_stats *stats) +{ + struct hinic3_txq_stats *txq_stats = &txq->txq_stats; + unsigned int start; + + do { + start = u64_stats_fetch_begin(&txq_stats->syncp); + stats->busy = txq_stats->busy; + stats->skb_pad_err = txq_stats->skb_pad_err; + stats->frag_len_overflow = txq_stats->frag_len_overflow; + stats->offload_cow_skb_err = txq_stats->offload_cow_skb_err; + stats->map_frag_err = txq_stats->map_frag_err; + stats->unknown_tunnel_pkt = txq_stats->unknown_tunnel_pkt; + stats->frag_size_err = txq_stats->frag_size_err; + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); +} + bool hinic3_tx_poll(struct hinic3_txq *txq, int budget) { struct net_device *netdev = txq->netdev; diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h index 00194f2a1bcc..0a21c423618f 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h @@ -157,6 +157,8 @@ int hinic3_configure_txqs(struct net_device *netdev, u16 num_sq, u32 sq_depth, struct hinic3_dyna_txq_res *txqs_res); netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +void hinic3_txq_get_stats(struct hinic3_txq *txq, + struct hinic3_txq_stats *stats); bool hinic3_tx_poll(struct hinic3_txq *txq, int budget); void hinic3_flush_txqs(struct net_device *netdev); -- 2.43.0 Implement following ethtool callback function: .get_coalesce .set_coalesce These callbacks allow users to utilize ethtool for detailed RX coalesce configuration and monitoring. 
Co-developed-by: Zhu Yikai Signed-off-by: Zhu Yikai Signed-off-by: Fan Gong --- .../ethernet/huawei/hinic3/hinic3_ethtool.c | 232 +++++++++++++++++- 1 file changed, 230 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c index be26698fc658..a4b2d5ba81f8 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c @@ -17,6 +17,11 @@ #include "hinic3_nic_cfg.h" #define HINIC3_MGMT_VERSION_MAX_LEN 32 +/* Coalesce time properties in microseconds */ +#define COALESCE_PENDING_LIMIT_UNIT 8 +#define COALESCE_TIMER_CFG_UNIT 5 +#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT) +#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT) static void hinic3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) @@ -985,9 +990,230 @@ static void hinic3_get_pause_stats(struct net_device *netdev, kfree(ps); } +static int hinic3_set_queue_coalesce(struct net_device *netdev, u16 q_id, + struct hinic3_intr_coal_info *coal) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_intr_coal_info *intr_coal; + struct hinic3_interrupt_info info = {}; + int err; + + intr_coal = &nic_dev->intr_coalesce[q_id]; + + intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg; + intr_coal->pending_limit = coal->pending_limit; + intr_coal->rx_pending_limit_low = coal->rx_pending_limit_low; + intr_coal->rx_pending_limit_high = coal->rx_pending_limit_high; + + if (!test_bit(HINIC3_INTF_UP, &nic_dev->flags) || + q_id >= nic_dev->q_params.num_qps || nic_dev->adaptive_rx_coal) + return 0; + + info.msix_index = nic_dev->q_params.irq_cfg[q_id].msix_entry_idx; + info.interrupt_coalesc_set = 1; + info.coalesc_timer_cfg = intr_coal->coalesce_timer_cfg; + info.pending_limit = intr_coal->pending_limit; + info.resend_timer_cfg = intr_coal->resend_timer_cfg; + err = 
hinic3_set_interrupt_cfg(nic_dev->hwdev, info); + if (err) { + netdev_warn(netdev, "Failed to set queue%u coalesce\n", q_id); + return err; + } + + return 0; +} + +static int is_coalesce_exceed_limit(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + const struct { + const char *name; + u32 value; + u32 limit; + } coalesce_limits[] = { + {"rx_coalesce_usecs", + coal->rx_coalesce_usecs, + COALESCE_MAX_TIMER_CFG}, + {"rx_max_coalesced_frames", + coal->rx_max_coalesced_frames, + COALESCE_MAX_PENDING_LIMIT}, + {"rx_max_coalesced_frames_low", + coal->rx_max_coalesced_frames_low, + COALESCE_MAX_PENDING_LIMIT}, + {"rx_max_coalesced_frames_high", + coal->rx_max_coalesced_frames_high, + COALESCE_MAX_PENDING_LIMIT}, + }; + + for (int i = 0; i < ARRAY_SIZE(coalesce_limits); i++) { + if (coalesce_limits[i].value > coalesce_limits[i].limit) { + netdev_err(netdev, "%s out of range %d-%d\n", + coalesce_limits[i].name, 0, + coalesce_limits[i].limit); + return -ERANGE; + } + } + return 0; +} + +static int is_coalesce_legal(struct net_device *netdev, + const struct ethtool_coalesce *coal) +{ + int err; + + err = is_coalesce_exceed_limit(netdev, coal); + if (err) + return err; + + if (coal->rx_max_coalesced_frames_low > + coal->rx_max_coalesced_frames_high) { + netdev_err(netdev, "invalid coalesce frame high %u, low %u, unit %d\n", + coal->rx_max_coalesced_frames_high, + coal->rx_max_coalesced_frames_low, + COALESCE_PENDING_LIMIT_UNIT); + return -EINVAL; + } + + return 0; +} + +static void check_coalesce_align(struct net_device *netdev, + u32 item, u32 unit, const char *str) +{ + if (item % unit) + netdev_warn(netdev, "%s in %d units, change to %u\n", + str, unit, item - item % unit); +} + +#define CHECK_COALESCE_ALIGN(member, unit) \ + check_coalesce_align(netdev, member, unit, #member) + +static void check_coalesce_changed(struct net_device *netdev, + u32 item, u32 unit, u32 ori_val, + const char *obj_str, const char *str) +{ + if ((item / unit) != ori_val) + 
netdev_dbg(netdev, "Change %s from %d to %u %s\n", + str, ori_val * unit, item - item % unit, obj_str); +} + +#define CHECK_COALESCE_CHANGED(member, unit, ori_val, obj_str) \ + check_coalesce_changed(netdev, member, unit, ori_val, obj_str, #member) + +static int hinic3_set_hw_coal_param(struct net_device *netdev, + struct hinic3_intr_coal_info *intr_coal) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + int err; + u16 i; + + for (i = 0; i < nic_dev->max_qps; i++) { + err = hinic3_set_queue_coalesce(netdev, i, intr_coal); + if (err) + return err; + } + + return 0; +} + +static int hinic3_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_intr_coal_info *interrupt_info; + + interrupt_info = &nic_dev->intr_coalesce[0]; + + /* TX/RX uses the same interrupt. + * So we only declare RX ethtool_coalesce parameters. 
+ */ + coal->rx_coalesce_usecs = interrupt_info->coalesce_timer_cfg * + COALESCE_TIMER_CFG_UNIT; + coal->rx_max_coalesced_frames = interrupt_info->pending_limit * + COALESCE_PENDING_LIMIT_UNIT; + + coal->use_adaptive_rx_coalesce = nic_dev->adaptive_rx_coal; + + coal->rx_max_coalesced_frames_high = + interrupt_info->rx_pending_limit_high * + COALESCE_PENDING_LIMIT_UNIT; + + coal->rx_max_coalesced_frames_low = + interrupt_info->rx_pending_limit_low * + COALESCE_PENDING_LIMIT_UNIT; + + return 0; +} + +static int hinic3_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_intr_coal_info *ori_intr_coal; + struct hinic3_intr_coal_info intr_coal = {}; + char obj_str[32]; + int err; + + err = is_coalesce_legal(netdev, coal); + if (err) + return err; + + CHECK_COALESCE_ALIGN(coal->rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT); + CHECK_COALESCE_ALIGN(coal->rx_max_coalesced_frames, + COALESCE_PENDING_LIMIT_UNIT); + CHECK_COALESCE_ALIGN(coal->rx_max_coalesced_frames_high, + COALESCE_PENDING_LIMIT_UNIT); + CHECK_COALESCE_ALIGN(coal->rx_max_coalesced_frames_low, + COALESCE_PENDING_LIMIT_UNIT); + + ori_intr_coal = &nic_dev->intr_coalesce[0]; + snprintf(obj_str, sizeof(obj_str), "for netdev"); + + CHECK_COALESCE_CHANGED(coal->rx_coalesce_usecs, COALESCE_TIMER_CFG_UNIT, + ori_intr_coal->coalesce_timer_cfg, obj_str); + CHECK_COALESCE_CHANGED(coal->rx_max_coalesced_frames, + COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->pending_limit, obj_str); + CHECK_COALESCE_CHANGED(coal->rx_max_coalesced_frames_high, + COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->rx_pending_limit_high, obj_str); + CHECK_COALESCE_CHANGED(coal->rx_max_coalesced_frames_low, + COALESCE_PENDING_LIMIT_UNIT, + ori_intr_coal->rx_pending_limit_low, obj_str); + + intr_coal.coalesce_timer_cfg = + (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); 
+ intr_coal.pending_limit = (u8)(coal->rx_max_coalesced_frames / + COALESCE_PENDING_LIMIT_UNIT); + + nic_dev->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; + + intr_coal.rx_pending_limit_high = + (u8)(coal->rx_max_coalesced_frames_high / + COALESCE_PENDING_LIMIT_UNIT); + + intr_coal.rx_pending_limit_low = + (u8)(coal->rx_max_coalesced_frames_low / + COALESCE_PENDING_LIMIT_UNIT); + + /* coalesce timer or pending set to zero will disable coalesce */ + if (!nic_dev->adaptive_rx_coal && + (!intr_coal.coalesce_timer_cfg || !intr_coal.pending_limit)) + netdev_warn(netdev, "Coalesce will be disabled\n"); + + return hinic3_set_hw_coal_param(netdev, &intr_coal); +} + static const struct ethtool_ops hinic3_ethtool_ops = { - .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_PKT_RATE_RX_USECS, + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_RX_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX | + ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | + ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH, .get_link_ksettings = hinic3_get_link_ksettings, .get_drvinfo = hinic3_get_drvinfo, .get_msglevel = hinic3_get_msglevel, @@ -1003,6 +1229,8 @@ static const struct ethtool_ops hinic3_ethtool_ops = { .get_eth_ctrl_stats = hinic3_get_eth_ctrl_stats, .get_rmon_stats = hinic3_get_rmon_stats, .get_pause_stats = hinic3_get_pause_stats, + .get_coalesce = hinic3_get_coalesce, + .set_coalesce = hinic3_set_coalesce, }; void hinic3_set_ethtool_ops(struct net_device *netdev) -- 2.43.0 Implement following ethtool callback function: .get_rxnfc .set_rxnfc .get_channels .set_channels .get_rxfh_indir_size .get_rxfh_key_size .get_rxfh .set_rxfh These callbacks allow users to utilize ethtool for detailed RSS parameters configuration and monitoring. 
Co-developed-by: Zhu Yikai Signed-off-by: Zhu Yikai Signed-off-by: Fan Gong --- .../ethernet/huawei/hinic3/hinic3_ethtool.c | 9 + .../huawei/hinic3/hinic3_mgmt_interface.h | 2 + .../net/ethernet/huawei/hinic3/hinic3_rss.c | 487 +++++++++++++++++- .../net/ethernet/huawei/hinic3/hinic3_rss.h | 19 + 4 files changed, 515 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c index a4b2d5ba81f8..8cd7dd9da67b 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_ethtool.c @@ -15,6 +15,7 @@ #include "hinic3_hw_comm.h" #include "hinic3_nic_dev.h" #include "hinic3_nic_cfg.h" +#include "hinic3_rss.h" #define HINIC3_MGMT_VERSION_MAX_LEN 32 /* Coalesce time properties in microseconds */ @@ -1231,6 +1232,14 @@ static const struct ethtool_ops hinic3_ethtool_ops = { .get_pause_stats = hinic3_get_pause_stats, .get_coalesce = hinic3_get_coalesce, .set_coalesce = hinic3_set_coalesce, + .get_rxnfc = hinic3_get_rxnfc, + .set_rxnfc = hinic3_set_rxnfc, + .get_channels = hinic3_get_channels, + .set_channels = hinic3_set_channels, + .get_rxfh_indir_size = hinic3_get_rxfh_indir_size, + .get_rxfh_key_size = hinic3_get_rxfh_key_size, + .get_rxfh = hinic3_get_rxfh, + .set_rxfh = hinic3_set_rxfh, }; void hinic3_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h index 76c691f82703..3c1263ff99ff 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h @@ -282,6 +282,7 @@ enum l2nic_cmd { L2NIC_CMD_SET_VLAN_FILTER_EN = 26, L2NIC_CMD_SET_RX_VLAN_OFFLOAD = 27, L2NIC_CMD_CFG_RSS = 60, + L2NIC_CMD_GET_RSS_CTX_TBL = 62, L2NIC_CMD_CFG_RSS_HASH_KEY = 63, L2NIC_CMD_CFG_RSS_HASH_ENGINE = 64, L2NIC_CMD_SET_RSS_CTX_TBL = 65, @@ -301,6 +302,7 @@ enum 
l2nic_ucode_cmd { L2NIC_UCODE_CMD_MODIFY_QUEUE_CTX = 0, L2NIC_UCODE_CMD_CLEAN_QUEUE_CTX = 1, L2NIC_UCODE_CMD_SET_RSS_INDIR_TBL = 4, + L2NIC_UCODE_CMD_GET_RSS_INDIR_TBL = 6, }; /* hilink mac group command */ diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c index 25db74d8c7dd..b40d5fa885c2 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.c @@ -155,7 +155,7 @@ static int hinic3_set_rss_type(struct hinic3_hwdev *hwdev, L2NIC_CMD_SET_RSS_CTX_TBL, &msg_params); if (ctx_tbl.msg_head.status == MGMT_STATUS_CMD_UNSUPPORTED) { - return MGMT_STATUS_CMD_UNSUPPORTED; + return -EOPNOTSUPP; } else if (err || ctx_tbl.msg_head.status) { dev_err(hwdev->dev, "mgmt Failed to set rss context offload, err: %d, status: 0x%x\n", err, ctx_tbl.msg_head.status); @@ -165,6 +165,39 @@ static int hinic3_set_rss_type(struct hinic3_hwdev *hwdev, return 0; } +static int hinic3_get_rss_type(struct hinic3_hwdev *hwdev, + struct hinic3_rss_type *rss_type) +{ + struct l2nic_cmd_rss_ctx_tbl ctx_tbl = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + ctx_tbl.func_id = hinic3_global_func_id(hwdev); + + mgmt_msg_params_init_default(&msg_params, &ctx_tbl, sizeof(ctx_tbl)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_GET_RSS_CTX_TBL, + &msg_params); + if (err || ctx_tbl.msg_head.status) { + dev_err(hwdev->dev, "Failed to get hash type, err: %d, status: 0x%x\n", + err, ctx_tbl.msg_head.status); + return -EINVAL; + } + + rss_type->ipv4 = L2NIC_RSS_TYPE_GET(ctx_tbl.context, IPV4); + rss_type->ipv6 = L2NIC_RSS_TYPE_GET(ctx_tbl.context, IPV6); + rss_type->ipv6_ext = L2NIC_RSS_TYPE_GET(ctx_tbl.context, IPV6_EXT); + rss_type->tcp_ipv4 = L2NIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV4); + rss_type->tcp_ipv6 = L2NIC_RSS_TYPE_GET(ctx_tbl.context, TCP_IPV6); + rss_type->tcp_ipv6_ext = L2NIC_RSS_TYPE_GET(ctx_tbl.context, + TCP_IPV6_EXT); + rss_type->udp_ipv4 = 
L2NIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV4); + rss_type->udp_ipv6 = L2NIC_RSS_TYPE_GET(ctx_tbl.context, UDP_IPV6); + + return 0; +} + static int hinic3_rss_cfg_hash_type(struct hinic3_hwdev *hwdev, u8 opcode, enum hinic3_rss_hash_type *type) { @@ -264,7 +297,8 @@ static int hinic3_set_hw_rss_parameters(struct net_device *netdev, u8 rss_en) if (err) return err; - hinic3_fillout_indir_tbl(netdev, nic_dev->rss_indir); + if (!netif_is_rxfh_configured(netdev)) + hinic3_fillout_indir_tbl(netdev, nic_dev->rss_indir); err = hinic3_config_rss_hw_resource(netdev, nic_dev->rss_indir); if (err) @@ -334,3 +368,452 @@ void hinic3_try_to_enable_rss(struct net_device *netdev) clear_bit(HINIC3_RSS_ENABLE, &nic_dev->flags); nic_dev->q_params.num_qps = nic_dev->max_qps; } + +static int hinic3_set_l4_rss_hash_ops(const struct ethtool_rxnfc *cmd, + struct hinic3_rss_type *rss_type) +{ + u8 rss_l4_en; + + switch (cmd->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + case 0: + rss_l4_en = 0; + break; + case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + rss_l4_en = 1; + break; + default: + return -EINVAL; + } + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + rss_type->tcp_ipv4 = rss_l4_en; + break; + case TCP_V6_FLOW: + rss_type->tcp_ipv6 = rss_l4_en; + break; + case UDP_V4_FLOW: + rss_type->udp_ipv4 = rss_l4_en; + break; + case UDP_V6_FLOW: + rss_type->udp_ipv6 = rss_l4_en; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int hinic3_update_rss_hash_opts(struct net_device *netdev, + struct ethtool_rxnfc *cmd, + struct hinic3_rss_type *rss_type) +{ + int err; + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V4_FLOW: + case UDP_V6_FLOW: + err = hinic3_set_l4_rss_hash_ops(cmd, rss_type); + if (err) + return err; + + break; + case IPV4_FLOW: + rss_type->ipv4 = 1; + break; + case IPV6_FLOW: + rss_type->ipv6 = 1; + break; + default: + netdev_err(netdev, "Unsupported flow type\n"); + return -EINVAL; + } + + return 0; +} + +static int 
hinic3_set_rss_hash_opts(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_rss_type rss_type; + int err; + + if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) { + cmd->data = 0; + netdev_err(netdev, "RSS is disable, not support to set flow-hash\n"); + return -EOPNOTSUPP; + } + + /* RSS only supports hashing of IP addresses and L4 ports */ + if (cmd->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + /* Both IP addresses must be part of the hash tuple */ + if (!(cmd->data & RXH_IP_SRC) || !(cmd->data & RXH_IP_DST)) + return -EINVAL; + + err = hinic3_get_rss_type(nic_dev->hwdev, &rss_type); + if (err) { + netdev_err(netdev, "Failed to get rss type\n"); + return err; + } + + err = hinic3_update_rss_hash_opts(netdev, cmd, &rss_type); + if (err) + return err; + + err = hinic3_set_rss_type(nic_dev->hwdev, rss_type); + if (err) { + netdev_err(netdev, "Failed to set rss type\n"); + return err; + } + + nic_dev->rss_type = rss_type; + + return 0; +} + +static void convert_rss_type(u8 rss_opt, struct ethtool_rxnfc *cmd) +{ + if (rss_opt) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +} + +static int hinic3_convert_rss_type(struct net_device *netdev, + struct hinic3_rss_type *rss_type, + struct ethtool_rxnfc *cmd) +{ + cmd->data = RXH_IP_SRC | RXH_IP_DST; + switch (cmd->flow_type) { + case TCP_V4_FLOW: + convert_rss_type(rss_type->tcp_ipv4, cmd); + break; + case TCP_V6_FLOW: + convert_rss_type(rss_type->tcp_ipv6, cmd); + break; + case UDP_V4_FLOW: + convert_rss_type(rss_type->udp_ipv4, cmd); + break; + case UDP_V6_FLOW: + convert_rss_type(rss_type->udp_ipv6, cmd); + break; + case IPV4_FLOW: + case IPV6_FLOW: + break; + default: + netdev_err(netdev, "Unsupported flow type\n"); + cmd->data = 0; + return -EINVAL; + } + + return 0; +} + +static int hinic3_get_rss_hash_opts(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct hinic3_nic_dev *nic_dev = 
netdev_priv(netdev); + struct hinic3_rss_type rss_type; + int err; + + cmd->data = 0; + + if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) + return 0; + + err = hinic3_get_rss_type(nic_dev->hwdev, &rss_type); + if (err) { + netdev_err(netdev, "Failed to get rss type\n"); + return err; + } + + return hinic3_convert_rss_type(netdev, &rss_type, cmd); +} + +int hinic3_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = nic_dev->q_params.num_qps; + break; + case ETHTOOL_GRXFH: + err = hinic3_get_rss_hash_opts(netdev, cmd); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +int hinic3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + int err; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + err = hinic3_set_rss_hash_opts(netdev, cmd); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static u16 hinic3_max_channels(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u8 tcs = netdev_get_num_tc(netdev); + + return tcs ? nic_dev->max_qps / tcs : nic_dev->max_qps; +} + +static u16 hinic3_curr_channels(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + if (netif_running(netdev)) + return nic_dev->q_params.num_qps ? 
+ nic_dev->q_params.num_qps : 1; + else + return min_t(u16, hinic3_max_channels(netdev), + nic_dev->q_params.num_qps); +} + +void hinic3_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + channels->max_rx = 0; + channels->max_tx = 0; + channels->max_other = 0; + /* report maximum channels */ + channels->max_combined = hinic3_max_channels(netdev); + channels->rx_count = 0; + channels->tx_count = 0; + channels->other_count = 0; + /* report flow director queues as maximum channels */ + channels->combined_count = hinic3_curr_channels(netdev); +} + +static int +hinic3_validate_channel_parameter(struct net_device *netdev, + const struct ethtool_channels *channels) +{ + u16 max_channel = hinic3_max_channels(netdev); + unsigned int count = channels->combined_count; + + if (!count) { + netdev_err(netdev, "Unsupported combined_count=0\n"); + return -EINVAL; + } + + if (channels->tx_count || channels->rx_count || channels->other_count) { + netdev_err(netdev, "Setting rx/tx/other count not supported\n"); + return -EINVAL; + } + + if (count > max_channel) { + netdev_err(netdev, "Combined count %u exceed limit %u\n", count, + max_channel); + return -EINVAL; + } + + return 0; +} + +int hinic3_set_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + unsigned int count = channels->combined_count; + struct hinic3_dyna_txrxq_params q_params; + int err; + + if (hinic3_validate_channel_parameter(netdev, channels)) + return -EINVAL; + + if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) { + netdev_err(netdev, "This function doesn't support RSS, only support 1 queue pair\n"); + return -EOPNOTSUPP; + } + + netdev_dbg(netdev, "Set max combined queue number from %u to %u\n", + nic_dev->q_params.num_qps, count); + + if (netif_running(netdev)) { + q_params = nic_dev->q_params; + q_params.num_qps = (u16)count; + q_params.txqs_res = NULL; + q_params.rxqs_res = NULL; + q_params.irq_cfg 
= NULL; + + err = hinic3_change_channel_settings(netdev, &q_params); + if (err) { + netdev_err(netdev, "Failed to change channel settings\n"); + return err; + } + } else { + nic_dev->q_params.num_qps = (u16)count; + } + + return 0; +} + +u32 hinic3_get_rxfh_indir_size(struct net_device *netdev) +{ + return L2NIC_RSS_INDIR_SIZE; +} + +static int hinic3_set_rss_rxfh(struct net_device *netdev, + const u32 *indir, u8 *key) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + int err; + u32 i; + + if (indir) { + for (i = 0; i < L2NIC_RSS_INDIR_SIZE; i++) + nic_dev->rss_indir[i] = (u16)indir[i]; + + err = hinic3_rss_set_indir_tbl(nic_dev->hwdev, + nic_dev->rss_indir); + if (err) { + netdev_err(netdev, "Failed to set rss indir table\n"); + return err; + } + } + + if (key) { + err = hinic3_rss_set_hash_key(nic_dev->hwdev, key); + if (err) { + netdev_err(netdev, "Failed to set rss key\n"); + return err; + } + + memcpy(nic_dev->rss_hkey, key, L2NIC_RSS_KEY_SIZE); + } + + return 0; +} + +u32 hinic3_get_rxfh_key_size(struct net_device *netdev) +{ + return L2NIC_RSS_KEY_SIZE; +} + +static int hinic3_rss_get_indir_tbl(struct hinic3_hwdev *hwdev, + u32 *indir_table) +{ + struct hinic3_cmd_buf_pair pair; + __le16 *indir_tbl = NULL; + int err, i; + + err = hinic3_cmd_buf_pair_init(hwdev, &pair); + if (err) { + dev_err(hwdev->dev, "Failed to allocate cmd_buf.\n"); + return err; + } + + err = hinic3_cmdq_detail_resp(hwdev, MGMT_MOD_L2NIC, + L2NIC_UCODE_CMD_GET_RSS_INDIR_TBL, + pair.in, pair.out, NULL); + if (err) { + dev_err(hwdev->dev, "Failed to get rss indir table\n"); + goto err_get_indir_tbl; + } + + indir_tbl = (__le16 *)pair.out->buf; + for (i = 0; i < L2NIC_RSS_INDIR_SIZE; i++) + indir_table[i] = le16_to_cpu(*(indir_tbl + i)); + +err_get_indir_tbl: + hinic3_cmd_buf_pair_uninit(hwdev, &pair); + + return err; +} + +int hinic3_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + 
+ if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) { + netdev_err(netdev, "Rss is disabled\n"); + return -EOPNOTSUPP; + } + + rxfh->hfunc = + nic_dev->rss_hash_type == HINIC3_RSS_HASH_ENGINE_TYPE_XOR ? + ETH_RSS_HASH_XOR : ETH_RSS_HASH_TOP; + + if (rxfh->indir) { + err = hinic3_rss_get_indir_tbl(nic_dev->hwdev, rxfh->indir); + if (err) + return err; + } + + if (rxfh->key) + memcpy(rxfh->key, nic_dev->rss_hkey, L2NIC_RSS_KEY_SIZE); + + return err; +} + +static int hinic3_update_hash_func_type(struct net_device *netdev, u8 hfunc) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + enum hinic3_rss_hash_type new_rss_hash_type; + + switch (hfunc) { + case ETH_RSS_HASH_NO_CHANGE: + return 0; + case ETH_RSS_HASH_XOR: + new_rss_hash_type = HINIC3_RSS_HASH_ENGINE_TYPE_XOR; + break; + case ETH_RSS_HASH_TOP: + new_rss_hash_type = HINIC3_RSS_HASH_ENGINE_TYPE_TOEP; + break; + default: + netdev_err(netdev, "Unsupported hash func %u\n", hfunc); + return -EOPNOTSUPP; + } + + if (new_rss_hash_type == nic_dev->rss_hash_type) + return 0; + + nic_dev->rss_hash_type = new_rss_hash_type; + return hinic3_rss_set_hash_type(nic_dev->hwdev, nic_dev->rss_hash_type); +} + +int hinic3_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + int err; + + if (!test_bit(HINIC3_RSS_ENABLE, &nic_dev->flags)) { + netdev_err(netdev, "Not support to set rss parameters when rss is disable\n"); + return -EOPNOTSUPP; + } + + err = hinic3_update_hash_func_type(netdev, rxfh->hfunc); + if (err) + return err; + + err = hinic3_set_rss_rxfh(netdev, rxfh->indir, rxfh->key); + + return err; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h index 78d82c2aca06..9f1b77780cd4 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rss.h @@ -5,10 +5,29 @@ #define _HINIC3_RSS_H_ 
#include <linux/netdevice.h> +#include <linux/ethtool.h> int hinic3_rss_init(struct net_device *netdev); void hinic3_rss_uninit(struct net_device *netdev); void hinic3_try_to_enable_rss(struct net_device *netdev); void hinic3_clear_rss_config(struct net_device *netdev); +int hinic3_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, u32 *rule_locs); +int hinic3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd); + +void hinic3_get_channels(struct net_device *netdev, + struct ethtool_channels *channels); +int hinic3_set_channels(struct net_device *netdev, + struct ethtool_channels *channels); + +u32 hinic3_get_rxfh_indir_size(struct net_device *netdev); +u32 hinic3_get_rxfh_key_size(struct net_device *netdev); + +int hinic3_get_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh); +int hinic3_set_rxfh(struct net_device *netdev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack); + #endif -- 2.43.0 Configure netdev watchdog timeout to improve transmission reliability. 
Co-developed-by: Zhu Yikai Signed-off-by: Zhu Yikai Signed-off-by: Fan Gong --- drivers/net/ethernet/huawei/hinic3/hinic3_main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c index 3b470978714a..7e09b4b2da9f 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c @@ -33,6 +33,8 @@ #define HINIC3_RX_PENDING_LIMIT_LOW 2 #define HINIC3_RX_PENDING_LIMIT_HIGH 8 +#define HINIC3_WATCHDOG_TIMEOUT 5 + static void init_intr_coal_param(struct net_device *netdev) { struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); @@ -246,6 +248,8 @@ static void hinic3_assign_netdev_ops(struct net_device *netdev) { hinic3_set_netdev_ops(netdev); hinic3_set_ethtool_ops(netdev); + + netdev->watchdog_timeo = HINIC3_WATCHDOG_TIMEOUT * HZ; } static void netdev_feature_init(struct net_device *netdev) -- 2.43.0 Remove unneeded coalesce parameters in irq handling. 
Co-developed-by: Zhu Yikai Signed-off-by: Zhu Yikai Signed-off-by: Fan Gong --- drivers/net/ethernet/huawei/hinic3/hinic3_irq.c | 6 +----- drivers/net/ethernet/huawei/hinic3/hinic3_rx.h | 3 --- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c index d3b3927b5408..42464c007174 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c @@ -156,13 +156,9 @@ static int hinic3_set_interrupt_moder(struct net_device *netdev, u16 q_id, spin_unlock_irqrestore(&nic_dev->channel_res_lock, flags); err = hinic3_set_interrupt_cfg(nic_dev->hwdev, info); - if (err) { + if (err) netdev_err(netdev, "Failed to modify moderation for Queue: %u\n", q_id); - } else { - nic_dev->rxqs[q_id].last_coalesc_timer_cfg = coalesc_timer_cfg; - nic_dev->rxqs[q_id].last_pending_limit = pending_limit; - } return err; } diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h index c11d080408a7..2ab691ed11a9 100644 --- a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h @@ -111,9 +111,6 @@ struct hinic3_rxq { dma_addr_t cqe_start_paddr; struct dim dim; - - u8 last_coalesc_timer_cfg; - u8 last_pending_limit; } ____cacheline_aligned; struct hinic3_dyna_rxq_res { -- 2.43.0