Add build options and doc for mucse. Initialize pci device access for MUCSE devices. Signed-off-by: Dong Yibo --- .../device_drivers/ethernet/index.rst | 1 + .../device_drivers/ethernet/mucse/rnpgbe.rst | 21 ++ MAINTAINERS | 8 + drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/mucse/Kconfig | 34 +++ drivers/net/ethernet/mucse/Makefile | 7 + drivers/net/ethernet/mucse/rnpgbe/Makefile | 9 + drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 33 +++ .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 226 ++++++++++++++++++ 10 files changed, 341 insertions(+) create mode 100644 Documentation/networking/device_drivers/ethernet/mucse/rnpgbe.rst create mode 100644 drivers/net/ethernet/mucse/Kconfig create mode 100644 drivers/net/ethernet/mucse/Makefile create mode 100644 drivers/net/ethernet/mucse/rnpgbe/Makefile create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst index 40ac552641a3..0e03c5c10d30 100644 --- a/Documentation/networking/device_drivers/ethernet/index.rst +++ b/Documentation/networking/device_drivers/ethernet/index.rst @@ -61,6 +61,7 @@ Contents: wangxun/txgbevf wangxun/ngbe wangxun/ngbevf + mucse/rnpgbe .. only:: subproject and html diff --git a/Documentation/networking/device_drivers/ethernet/mucse/rnpgbe.rst b/Documentation/networking/device_drivers/ethernet/mucse/rnpgbe.rst new file mode 100644 index 000000000000..7562fb6b8f61 --- /dev/null +++ b/Documentation/networking/device_drivers/ethernet/mucse/rnpgbe.rst @@ -0,0 +1,21 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=========================================================== +Linux Base Driver for MUCSE(R) Gigabit PCI Express Adapters +=========================================================== + +MUCSE Gigabit Linux driver. +Copyright (c) 2020 - 2025 MUCSE Co.,Ltd. 
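+
+The driver builds as the rnpgbe module (Kconfig symbol MGBE).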
+ +Identifying Your Adapter +======================== +The driver is compatible with devices based on the following: + + * MUCSE(R) Ethernet Controller N500 series + * MUCSE(R) Ethernet Controller N210 series + +Support +======= + If you have problems with the software or hardware, please contact our + customer support team via email at techsupport@mucse.com or check our + website at https://www.mucse.com/en/ diff --git a/MAINTAINERS b/MAINTAINERS index 1bc1698bc5ae..da0d12e77ddc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17033,6 +17033,14 @@ T: git git://linuxtv.org/media.git F: Documentation/devicetree/bindings/media/i2c/aptina,mt9v111.yaml F: drivers/media/i2c/mt9v111.c +MUCSE ETHERNET DRIVER +M: Yibo Dong +L: netdev@vger.kernel.org +S: Maintained +W: https://www.mucse.com/en/ +F: Documentation/networking/device_drivers/ethernet/mucse/* +F: drivers/net/ethernet/mucse/* + MULTIFUNCTION DEVICES (MFD) M: Lee Jones S: Maintained diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index f86d4557d8d7..77c55fa11942 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -202,5 +202,6 @@ source "drivers/net/ethernet/wangxun/Kconfig" source "drivers/net/ethernet/wiznet/Kconfig" source "drivers/net/ethernet/xilinx/Kconfig" source "drivers/net/ethernet/xircom/Kconfig" +source "drivers/net/ethernet/mucse/Kconfig" endif # ETHERNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 67182339469a..696825bd1211 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -107,3 +107,4 @@ obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/ obj-$(CONFIG_NET_VENDOR_PENSANDO) += pensando/ obj-$(CONFIG_OA_TC6) += oa_tc6.o +obj-$(CONFIG_NET_VENDOR_MUCSE) += mucse/ diff --git a/drivers/net/ethernet/mucse/Kconfig b/drivers/net/ethernet/mucse/Kconfig new file mode 100644 index 000000000000..be0fdf268484 --- /dev/null +++ b/drivers/net/ethernet/mucse/Kconfig @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Mucse network device configuration +# + +config NET_VENDOR_MUCSE + bool "Mucse devices" + default y + help + If you have a network (Ethernet) card from Mucse(R), say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Mucse(R) cards. If you say Y, you will + be asked for your specific card in the following questions. + +if NET_VENDOR_MUCSE + +config MGBE + tristate "Mucse(R) 1GbE PCI Express adapters support" + depends on PCI + select PAGE_POOL + help + This driver supports Mucse(R) 1GbE PCI Express family of + adapters. + + More specific information on configuring the driver is in + . + + To compile this driver as a module, choose M here. The module + will be called rnpgbe. + +endif # NET_VENDOR_MUCSE + diff --git a/drivers/net/ethernet/mucse/Makefile b/drivers/net/ethernet/mucse/Makefile new file mode 100644 index 000000000000..f0bd79882488 --- /dev/null +++ b/drivers/net/ethernet/mucse/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Mucse(R) network device drivers. +# + +obj-$(CONFIG_MGBE) += rnpgbe/ + diff --git a/drivers/net/ethernet/mucse/rnpgbe/Makefile b/drivers/net/ethernet/mucse/rnpgbe/Makefile new file mode 100644 index 000000000000..0942e27f5913 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2020 - 2025 MUCSE Corporation. 
+# +# Makefile for the MUCSE(R) 1GbE PCI Express ethernet driver +# + +obj-$(CONFIG_MGBE) += rnpgbe.o + +rnpgbe-objs := rnpgbe_main.o diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h new file mode 100644 index 000000000000..224e395d6be3 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2025 Mucse Corporation. */ + +#ifndef _RNPGBE_H +#define _RNPGBE_H + +enum rnpgbe_boards { + board_n500, + board_n210, + board_n210L, +}; + +struct mucse { + struct net_device *netdev; + struct pci_dev *pdev; + /* board number */ + u16 bd_number; + + char name[60]; +}; + +/* Device IDs */ +#ifndef PCI_VENDOR_ID_MUCSE +#define PCI_VENDOR_ID_MUCSE 0x8848 +#endif /* PCI_VENDOR_ID_MUCSE */ + +#define PCI_DEVICE_ID_N500_QUAD_PORT 0x8308 +#define PCI_DEVICE_ID_N500_DUAL_PORT 0x8318 +#define PCI_DEVICE_ID_N500_VF 0x8309 +#define PCI_DEVICE_ID_N210 0x8208 +#define PCI_DEVICE_ID_N210L 0x820a + +#endif /* _RNPGBE_H */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c new file mode 100644 index 000000000000..13b49875006b --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2025 Mucse Corporation. */ + +#include +#include +#include +#include +#include +#include + +#include "rnpgbe.h" + +char rnpgbe_driver_name[] = "rnpgbe"; + +/* rnpgbe_pci_tbl - PCI Device ID Table + * + * { PCI_DEVICE(Vendor ID, Device ID), + * driver_data (used for different hw chip) } + */ +static struct pci_device_id rnpgbe_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N500_QUAD_PORT), + .driver_data = board_n500}, + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N500_DUAL_PORT), + .driver_data = board_n500}, + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N210), + .driver_data = board_n210}, + { PCI_DEVICE(PCI_VENDOR_ID_MUCSE, PCI_DEVICE_ID_N210L), + .driver_data = board_n210L}, + /* required last entry */ + {0, }, +}; + +/** + * rnpgbe_add_adapter - add netdev for this pci_dev + * @pdev: PCI device information structure + * + * rnpgbe_add_adapter initializes a netdev for this pci_dev + * structure. Initializes Bar map, private structure, and a + * hardware reset occur. + * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_add_adapter(struct pci_dev *pdev) +{ + struct mucse *mucse = NULL; + struct net_device *netdev; + static int bd_number; + + netdev = alloc_etherdev_mq(sizeof(struct mucse), 1); + if (!netdev) + return -ENOMEM; + + mucse = netdev_priv(netdev); + mucse->netdev = netdev; + mucse->pdev = pdev; + mucse->bd_number = bd_number++; + snprintf(mucse->name, sizeof(netdev->name), "%s%d", + rnpgbe_driver_name, mucse->bd_number); + pci_set_drvdata(pdev, mucse); + + return 0; +} + +/** + * rnpgbe_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @id: entry in rnpgbe_pci_tbl + * + * rnpgbe_probe initializes a PF adapter identified by a pci_dev + * structure. The OS initialization, then call rnpgbe_add_adapter + * to initializes netdev. 
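+ * PCI-level setup (device enable, DMA mask, BAR regions, bus
+ * mastering) is done here before the netdev is created.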
+ * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int err; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + /* hw only support 56-bits dma mask */ + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(56)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + + err = pci_request_mem_regions(pdev, rnpgbe_driver_name); + if (err) { + dev_err(&pdev->dev, + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_req; + } + + pci_set_master(pdev); + pci_save_state(pdev); + err = rnpgbe_add_adapter(pdev); + if (err) + goto err_regions; + + return 0; +err_regions: + pci_release_mem_regions(pdev); +err_dma: +err_pci_req: + pci_disable_device(pdev); + return err; +} + +/** + * rnpgbe_rm_adapter - remove netdev for this mucse structure + * @mucse: pointer to private structure + * + * rnpgbe_rm_adapter remove a netdev for this mucse structure + **/ +static void rnpgbe_rm_adapter(struct mucse *mucse) +{ + struct net_device *netdev; + + netdev = mucse->netdev; + free_netdev(netdev); +} + +/** + * rnpgbe_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * rnpgbe_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void rnpgbe_remove(struct pci_dev *pdev) +{ + struct mucse *mucse = pci_get_drvdata(pdev); + + if (!mucse) + return; + + rnpgbe_rm_adapter(mucse); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +/** + * rnpgbe_dev_shutdown - Device Shutdown Routine + * @pdev: PCI device information struct + * @enable_wake: wakeup status + **/ +static void rnpgbe_dev_shutdown(struct pci_dev *pdev, + bool *enable_wake) +{ + struct mucse *mucse = pci_get_drvdata(pdev); + struct net_device *netdev = mucse->netdev; + + *enable_wake = false; + netif_device_detach(netdev); + pci_disable_device(pdev); +} + +/** + * rnpgbe_shutdown - Device Shutdown Routine + * @pdev: PCI device information struct + * + * rnpgbe_shutdown is called by the PCI subsystem to alert the driver + * that os shutdown. Device should setup wakeup state here. 
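+ * For now wakeup stays disabled and only a D3hot transition is
+ * performed on system power-off.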
+ **/ +static void rnpgbe_shutdown(struct pci_dev *pdev) +{ + bool wake = false; + + rnpgbe_dev_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +static struct pci_driver rnpgbe_driver = { + .name = rnpgbe_driver_name, + .id_table = rnpgbe_pci_tbl, + .probe = rnpgbe_probe, + .remove = rnpgbe_remove, + .shutdown = rnpgbe_shutdown, +}; + +/** + * rnpgbe_init_module - driver init routine + * + * rnpgbe_init_module is called when driver insmod + * + * @return: 0 on success, negative on failure + **/ +static int __init rnpgbe_init_module(void) +{ + int ret; + + ret = pci_register_driver(&rnpgbe_driver); + if (ret) + return ret; + + return 0; +} + +module_init(rnpgbe_init_module); + +/** + * rnpgbe_exit_module - driver remove routine + * + * rnpgbe_exit_module is called when driver is removed + **/ +static void __exit rnpgbe_exit_module(void) +{ + pci_unregister_driver(&rnpgbe_driver); +} + +module_exit(rnpgbe_exit_module); + +MODULE_DEVICE_TABLE(pci, rnpgbe_pci_tbl); +MODULE_AUTHOR("Mucse Corporation, "); +MODULE_DESCRIPTION("Mucse(R) 1 Gigabit PCI Express Network Driver"); +MODULE_LICENSE("GPL"); -- 2.25.1 Initialize n500/n210 chip bar resource map and dma, eth, mbx ... info for future use. Signed-off-by: Dong Yibo --- drivers/net/ethernet/mucse/rnpgbe/Makefile | 4 +- drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 138 ++++++++++++++++++ .../net/ethernet/mucse/rnpgbe/rnpgbe_chip.c | 138 ++++++++++++++++++ drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h | 27 ++++ .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 68 ++++++++- 5 files changed, 370 insertions(+), 5 deletions(-) create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h diff --git a/drivers/net/ethernet/mucse/rnpgbe/Makefile b/drivers/net/ethernet/mucse/rnpgbe/Makefile index 0942e27f5913..42c359f459d9 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/Makefile +++ b/drivers/net/ethernet/mucse/rnpgbe/Makefile @@ -5,5 +5,5 @@ # obj-$(CONFIG_MGBE) += rnpgbe.o - -rnpgbe-objs := rnpgbe_main.o +rnpgbe-objs := rnpgbe_main.o\ + rnpgbe_chip.o diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h index 224e395d6be3..2ae836fc8951 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h @@ -4,21 +4,156 @@ #ifndef _RNPGBE_H #define _RNPGBE_H +#include +#include + +extern const struct rnpgbe_info rnpgbe_n500_info; +extern const struct rnpgbe_info rnpgbe_n210_info; +extern const struct rnpgbe_info rnpgbe_n210L_info; + enum rnpgbe_boards { board_n500, board_n210, board_n210L, }; +enum rnpgbe_hw_type { + rnpgbe_hw_n500 = 0, + rnpgbe_hw_n210, + rnpgbe_hw_n210L, + rnpgbe_hw_unknow +}; + +struct mucse_dma_info { + u8 __iomem *dma_base_addr; + u8 __iomem *dma_ring_addr; + void *back; + u32 max_tx_queues; + u32 max_rx_queues; + u32 dma_version; +}; + +#define RNPGBE_MAX_MTA 128 +struct mucse_eth_info { + u8 __iomem *eth_base_addr; + void *back; + u32 mta_shadow[RNPGBE_MAX_MTA]; + int mc_filter_type; + u32 mcft_size; + u32 vft_size; + u32 num_rar_entries; +}; + +struct mii_regs { + unsigned int addr; /* MII Address */ + unsigned int data; /* MII Data */ + unsigned int addr_shift; /* MII address shift */ + unsigned int reg_shift; /* MII reg shift */ + unsigned int addr_mask; /* MII address mask */ + unsigned int reg_mask; /* MII reg mask */ + unsigned int clk_csr_shift; + unsigned int clk_csr_mask; +}; + 
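+/* Illustrative sketch only (not used yet by this series): the shift/mask
+ * pairs above are expected to compose an MII address register value for
+ * an MDIO access roughly like this, where "phy" and "reg" are
+ * placeholder names:
+ *
+ *	u32 val;
+ *
+ *	val = (phy << mii->addr_shift) & mii->addr_mask;
+ *	val |= (reg << mii->reg_shift) & mii->reg_mask;
+ *	val |= (clk_csr << mii->clk_csr_shift) & mii->clk_csr_mask;
+ *	m_wr_reg(mac->mac_addr + mii->addr, val);
+ */
+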
+struct mucse_mac_info { + u8 __iomem *mac_addr; + void *back; + struct mii_regs mii; + int phy_addr; + int clk_csr; +}; + +#define MAX_VF_NUM (8) + +struct mucse_mbx_info { + u32 timeout; + u32 usec_delay; + u32 v2p_mailbox; + u16 size; + u16 vf_req[MAX_VF_NUM]; + u16 vf_ack[MAX_VF_NUM]; + u16 fw_req; + u16 fw_ack; + /* lock for only one use mbx */ + struct mutex lock; + bool irq_enabled; + int mbx_size; + int mbx_mem_size; +#define MBX_FEATURE_NO_ZERO BIT(0) +#define MBX_FEATURE_WRITE_DELAY BIT(1) + u32 mbx_feature; + /* fw <--> pf mbx */ + u32 fw_pf_shm_base; + u32 pf2fw_mbox_ctrl; + u32 pf2fw_mbox_mask; + u32 fw_pf_mbox_mask; + u32 fw2pf_mbox_vec; + /* pf <--> vf mbx */ + u32 pf_vf_shm_base; + u32 pf2vf_mbox_ctrl_base; + u32 pf_vf_mbox_mask_lo; + u32 pf_vf_mbox_mask_hi; + u32 pf2vf_mbox_vec_base; + u32 vf2pf_mbox_vec_base; + u32 fw_vf_share_ram; + int share_size; +}; + +struct mucse_hw { + void *back; + u8 pfvfnum; + u8 pfvfnum_system; + u8 __iomem *hw_addr; + u8 __iomem *ring_msix_base; + struct pci_dev *pdev; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + enum rnpgbe_hw_type hw_type; + struct mucse_dma_info dma; + struct mucse_eth_info eth; + struct mucse_mac_info mac; + struct mucse_mbx_info mbx; +#define M_NET_FEATURE_SG BIT(0) +#define M_NET_FEATURE_TX_CHECKSUM BIT(1) +#define M_NET_FEATURE_RX_CHECKSUM BIT(2) +#define M_NET_FEATURE_TSO BIT(3) +#define M_NET_FEATURE_TX_UDP_TUNNEL BIT(4) +#define M_NET_FEATURE_VLAN_FILTER BIT(5) +#define M_NET_FEATURE_VLAN_OFFLOAD BIT(6) +#define M_NET_FEATURE_RX_NTUPLE_FILTER BIT(7) +#define M_NET_FEATURE_TCAM BIT(8) +#define M_NET_FEATURE_RX_HASH BIT(9) +#define M_NET_FEATURE_RX_FCS BIT(10) +#define M_NET_FEATURE_HW_TC BIT(11) +#define M_NET_FEATURE_USO BIT(12) +#define M_NET_FEATURE_STAG_FILTER BIT(13) +#define M_NET_FEATURE_STAG_OFFLOAD BIT(14) +#define M_NET_FEATURE_VF_FIXED BIT(15) +#define M_VEB_VLAN_MASK_EN BIT(16) +#define M_HW_FEATURE_EEE BIT(17) +#define M_HW_SOFT_MASK_OTHER_IRQ BIT(18) + u32 feature_flags; + u16 usecstocount; +}; + struct mucse { struct net_device *netdev; struct pci_dev *pdev; + struct mucse_hw hw; /* board number */ u16 bd_number; char name[60]; }; +struct rnpgbe_info { + int total_queue_pair_cnts; + enum rnpgbe_hw_type hw_type; + void (*get_invariants)(struct mucse_hw *hw); +}; + /* Device IDs */ #ifndef PCI_VENDOR_ID_MUCSE #define PCI_VENDOR_ID_MUCSE 0x8848 @@ -30,4 +165,7 @@ struct mucse { #define PCI_DEVICE_ID_N210 0x8208 #define PCI_DEVICE_ID_N210L 0x820a +#define m_rd_reg(reg) readl(reg) +#define m_wr_reg(reg, val) writel((val), reg) + #endif /* _RNPGBE_H */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c new file mode 100644 index 000000000000..38c094965db9 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2025 Mucse Corporation. 
*/ + +#include +#include + +#include "rnpgbe.h" +#include "rnpgbe_hw.h" + +/** + * rnpgbe_get_invariants_n500 - setup for hw info + * @hw: hw information structure + * + * rnpgbe_get_invariants_n500 initializes all private + * structure, such as dma, eth, mac and mbx base on + * hw->addr for n500 + **/ +static void rnpgbe_get_invariants_n500(struct mucse_hw *hw) +{ + struct mucse_dma_info *dma = &hw->dma; + struct mucse_eth_info *eth = &hw->eth; + struct mucse_mac_info *mac = &hw->mac; + struct mucse_mbx_info *mbx = &hw->mbx; + + /* setup msix base */ + hw->ring_msix_base = hw->hw_addr + 0x28700; + /* setup dma info */ + dma->dma_base_addr = hw->hw_addr; + dma->dma_ring_addr = hw->hw_addr + RNPGBE_RING_BASE; + dma->max_tx_queues = RNPGBE_MAX_QUEUES; + dma->max_rx_queues = RNPGBE_MAX_QUEUES; + dma->back = hw; + /* setup eth info */ + eth->eth_base_addr = hw->hw_addr + RNPGBE_ETH_BASE; + eth->back = hw; + eth->mc_filter_type = 0; + eth->mcft_size = RNPGBE_MC_TBL_SIZE; + eth->vft_size = RNPGBE_VFT_TBL_SIZE; + eth->num_rar_entries = RNPGBE_RAR_ENTRIES; + /* setup mac info */ + mac->mac_addr = hw->hw_addr + RNPGBE_MAC_BASE; + mac->back = hw; + /* set mac->mii */ + mac->mii.addr = RNPGBE_MII_ADDR; + mac->mii.data = RNPGBE_MII_DATA; + mac->mii.addr_shift = 11; + mac->mii.addr_mask = 0x0000F800; + mac->mii.reg_shift = 6; + mac->mii.reg_mask = 0x000007C0; + mac->mii.clk_csr_shift = 2; + mac->mii.clk_csr_mask = GENMASK(5, 2); + mac->clk_csr = 0x02; /* csr 25M */ + /* hw fixed phy_addr */ + mac->phy_addr = 0x11; + + mbx->mbx_feature |= MBX_FEATURE_NO_ZERO; + /* mbx offset */ + mbx->vf2pf_mbox_vec_base = 0x28900; + mbx->fw2pf_mbox_vec = 0x28b00; + mbx->pf_vf_shm_base = 0x29000; + mbx->mbx_mem_size = 64; + mbx->pf2vf_mbox_ctrl_base = 0x2a100; + mbx->pf_vf_mbox_mask_lo = 0x2a200; + mbx->pf_vf_mbox_mask_hi = 0; + mbx->fw_pf_shm_base = 0x2d000; + mbx->pf2fw_mbox_ctrl = 0x2e000; + mbx->fw_pf_mbox_mask = 0x2e200; + mbx->fw_vf_share_ram = 0x2b000; + mbx->share_size = 512; + + /* setup net feature here */ + hw->feature_flags |= M_NET_FEATURE_SG | + M_NET_FEATURE_TX_CHECKSUM | + M_NET_FEATURE_RX_CHECKSUM | + M_NET_FEATURE_TSO | + M_NET_FEATURE_VLAN_FILTER | + M_NET_FEATURE_VLAN_OFFLOAD | + M_NET_FEATURE_RX_NTUPLE_FILTER | + M_NET_FEATURE_RX_HASH | + M_NET_FEATURE_USO | + M_NET_FEATURE_RX_FCS | + M_NET_FEATURE_STAG_FILTER | + M_NET_FEATURE_STAG_OFFLOAD; + /* start the default ahz, update later */ + hw->usecstocount = 125; +} + +/** + * rnpgbe_get_invariants_n210 - setup for hw info + * @hw: hw information structure + * + * rnpgbe_get_invariants_n210 initializes all private + * structure, such as dma, eth, mac and mbx base on + * hw->addr for n210 + **/ +static void rnpgbe_get_invariants_n210(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + /* get invariants based from n500 */ + rnpgbe_get_invariants_n500(hw); + + /* update msix base */ + hw->ring_msix_base = hw->hw_addr + 0x29000; + /* update mbx offset */ + mbx->vf2pf_mbox_vec_base = 0x29200; + mbx->fw2pf_mbox_vec = 0x29400; + mbx->pf_vf_shm_base = 0x29900; + mbx->mbx_mem_size = 64; + mbx->pf2vf_mbox_ctrl_base = 0x2aa00; + mbx->pf_vf_mbox_mask_lo = 0x2ab00; + mbx->pf_vf_mbox_mask_hi = 0; + mbx->fw_pf_shm_base = 0x2d900; + mbx->pf2fw_mbox_ctrl = 0x2e900; + mbx->fw_pf_mbox_mask = 0x2eb00; + mbx->fw_vf_share_ram = 0x2b900; + mbx->share_size = 512; + /* update hw feature */ + hw->feature_flags |= M_HW_FEATURE_EEE; + hw->usecstocount = 62; +} + +const struct rnpgbe_info rnpgbe_n500_info = { + .total_queue_pair_cnts = RNPGBE_MAX_QUEUES, + .hw_type 
= rnpgbe_hw_n500, + .get_invariants = &rnpgbe_get_invariants_n500, +}; + +const struct rnpgbe_info rnpgbe_n210_info = { + .total_queue_pair_cnts = RNPGBE_MAX_QUEUES, + .hw_type = rnpgbe_hw_n210, + .get_invariants = &rnpgbe_get_invariants_n210, +}; + +const struct rnpgbe_info rnpgbe_n210L_info = { + .total_queue_pair_cnts = RNPGBE_MAX_QUEUES, + .hw_type = rnpgbe_hw_n210L, + .get_invariants = &rnpgbe_get_invariants_n210, +}; diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h new file mode 100644 index 000000000000..2c7372a5e88d --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2025 Mucse Corporation. */ + +#ifndef _RNPGBE_HW_H +#define _RNPGBE_HW_H +/* BAR */ +/* ----------------------------------------- */ +/* module | size | start | end */ +/* DMA | 32KB | 0_0000H | 0_7FFFH */ +/* ETH | 64KB | 1_0000H | 1_FFFFH */ +/* MAC | 32KB | 2_0000H | 2_7FFFH */ +/* MSIX | 32KB | 2_8000H | 2_FFFFH */ + +#define RNPGBE_RING_BASE (0x1000) +#define RNPGBE_MAC_BASE (0x20000) +#define RNPGBE_ETH_BASE (0x10000) +/* chip resourse */ +#define RNPGBE_MAX_QUEUES (8) +/* multicast control table */ +#define RNPGBE_MC_TBL_SIZE (128) +/* vlan filter table */ +#define RNPGBE_VFT_TBL_SIZE (128) +#define RNPGBE_RAR_ENTRIES (32) + +#define RNPGBE_MII_ADDR 0x00000010 /* MII Address */ +#define RNPGBE_MII_DATA 0x00000014 /* MII Data */ +#endif /* _RNPGBE_HW_H */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c index 13b49875006b..08f773199e9b 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c @@ -11,6 +11,11 @@ #include "rnpgbe.h" char rnpgbe_driver_name[] = "rnpgbe"; +static const struct rnpgbe_info *rnpgbe_info_tbl[] = { + [board_n500] = &rnpgbe_n500_info, + [board_n210] = &rnpgbe_n210_info, + [board_n210L] = &rnpgbe_n210L_info, +}; /* rnpgbe_pci_tbl - PCI Device ID Table * @@ -33,6 +38,7 @@ static struct pci_device_id rnpgbe_pci_tbl[] = { /** * rnpgbe_add_adapter - add netdev for this pci_dev * @pdev: PCI device information structure + * @ii: chip info structure * * rnpgbe_add_adapter initializes a netdev for this pci_dev * structure. 
Initializes Bar map, private structure, and a
@@ -40,16 +46,24 @@ static struct pci_device_id rnpgbe_pci_tbl[] = {
  *
  * @return: 0 on success, negative on failure
  **/
-static int rnpgbe_add_adapter(struct pci_dev *pdev)
+static int rnpgbe_add_adapter(struct pci_dev *pdev,
+			      const struct rnpgbe_info *ii)
 {
 	struct mucse *mucse = NULL;
+	struct mucse_hw *hw = NULL;
+	u8 __iomem *hw_addr = NULL;
 	struct net_device *netdev;
 	static int bd_number;
+	u32 dma_version = 0;
+	int err = 0;
+	u32 queues;
 
-	netdev = alloc_etherdev_mq(sizeof(struct mucse), 1);
+	queues = ii->total_queue_pair_cnts;
+	netdev = alloc_etherdev_mq(sizeof(struct mucse), queues);
 	if (!netdev)
 		return -ENOMEM;
 
+	SET_NETDEV_DEV(netdev, &pdev->dev);
 	mucse = netdev_priv(netdev);
 	mucse->netdev = netdev;
 	mucse->pdev = pdev;
@@ -58,7 +72,58 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev)
 		 rnpgbe_driver_name, mucse->bd_number);
 	pci_set_drvdata(pdev, mucse);
 
+	hw = &mucse->hw;
+	hw->back = mucse;
+	hw->hw_type = ii->hw_type;
+
+	switch (hw->hw_type) {
+	case rnpgbe_hw_n500:
+		/* n500 uses bar2 */
+		hw_addr = devm_ioremap(&pdev->dev,
+				       pci_resource_start(pdev, 2),
+				       pci_resource_len(pdev, 2));
+		if (!hw_addr) {
+			dev_err(&pdev->dev, "map bar2 failed!\n");
+			err = -EIO;
+			goto err_free_net;
+		}
+
+		/* get dma version */
+		dma_version = m_rd_reg(hw_addr);
+		break;
+	case rnpgbe_hw_n210:
+	case rnpgbe_hw_n210L:
+		/* check bar0 to load firmware */
+		if (pci_resource_len(pdev, 0) == 0x100000) {
+			err = -EIO;
+			goto err_free_net;
+		}
+		/* n210 uses bar2 */
+		hw_addr = devm_ioremap(&pdev->dev,
+				       pci_resource_start(pdev, 2),
+				       pci_resource_len(pdev, 2));
+		if (!hw_addr) {
+			dev_err(&pdev->dev, "map bar2 failed!\n");
+			err = -EIO;
+			goto err_free_net;
+		}
+
+		/* get dma version */
+		dma_version = m_rd_reg(hw_addr);
+		break;
+	default:
+		err = -EIO;
+		goto err_free_net;
+	}
+	hw->hw_addr = hw_addr;
+	hw->dma.dma_version = dma_version;
+	ii->get_invariants(hw);
 
 	return 0;
+
+err_free_net:
+	free_netdev(netdev);
+	return err;
 }
 
 /**
@@ -74,6 +139,7 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev)
  **/
 static int rnpgbe_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+	const struct rnpgbe_info *ii = rnpgbe_info_tbl[id->driver_data];
 	int err;
@@ -97,7 +163,7 @@ static int rnpgbe_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	pci_set_master(pdev);
 	pci_save_state(pdev);
 
-	err = rnpgbe_add_adapter(pdev);
+	err = rnpgbe_add_adapter(pdev, ii);
 	if (err)
 		goto err_regions;
 
-- 
2.25.1

Initialize basic mbx function.
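
Each PF<->VF/FW mailbox is a block of shared memory holding a pair of
16-bit req/ack counters (low/high halves of one dword) followed by a
data area of up to MUCSE_VFMAILBOX_SIZE (14) dwords; a per-mailbox
control register carries the MBOX_PF_HOLD ownership bit and the
MBOX_CTRL_REQ bit used to interrupt the peer.
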
Signed-off-by: Dong Yibo --- drivers/net/ethernet/mucse/rnpgbe/Makefile | 5 +- drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 46 ++ .../net/ethernet/mucse/rnpgbe/rnpgbe_chip.c | 5 +- drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h | 2 + .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 1 + .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c | 623 ++++++++++++++++++ .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h | 48 ++ 7 files changed, 727 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h diff --git a/drivers/net/ethernet/mucse/rnpgbe/Makefile b/drivers/net/ethernet/mucse/rnpgbe/Makefile index 42c359f459d9..41177103b50c 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/Makefile +++ b/drivers/net/ethernet/mucse/rnpgbe/Makefile @@ -5,5 +5,6 @@ # obj-$(CONFIG_MGBE) += rnpgbe.o -rnpgbe-objs := rnpgbe_main.o\ - rnpgbe_chip.o +rnpgbe-objs := rnpgbe_main.o \ + rnpgbe_chip.o \ + rnpgbe_mbx.o diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h index 2ae836fc8951..46e2bb2fe71e 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h @@ -63,9 +63,51 @@ struct mucse_mac_info { int clk_csr; }; +struct mucse_hw; + +enum MBX_ID { + MBX_VF0 = 0, + MBX_VF1, + MBX_VF2, + MBX_VF3, + MBX_VF4, + MBX_VF5, + MBX_VF6, + MBX_VF7, + MBX_CM3CPU, + MBX_FW = MBX_CM3CPU, + MBX_VFCNT +}; + +struct mucse_mbx_operations { + void (*init_params)(struct mucse_hw *hw); + int (*read)(struct mucse_hw *hw, u32 *msg, + u16 size, enum MBX_ID id); + int (*write)(struct mucse_hw *hw, u32 *msg, + u16 size, enum MBX_ID id); + int (*read_posted)(struct mucse_hw *hw, u32 *msg, + u16 size, enum MBX_ID id); + int (*write_posted)(struct mucse_hw *hw, u32 *msg, + u16 size, enum MBX_ID id); + int (*check_for_msg)(struct mucse_hw *hw, enum MBX_ID id); + int (*check_for_ack)(struct mucse_hw *hw, enum MBX_ID id); + void (*configure)(struct mucse_hw *hw, int num_vec, + bool enable); +}; + +struct mucse_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + u32 acks; + u32 reqs; + u32 rsts; +}; + #define MAX_VF_NUM (8) struct mucse_mbx_info { + struct mucse_mbx_operations ops; + struct mucse_mbx_stats stats; u32 timeout; u32 usec_delay; u32 v2p_mailbox; @@ -99,6 +141,8 @@ struct mucse_mbx_info { int share_size; }; +#include "rnpgbe_mbx.h" + struct mucse_hw { void *back; u8 pfvfnum; @@ -110,6 +154,8 @@ struct mucse_hw { u16 vendor_id; u16 subsystem_device_id; u16 subsystem_vendor_id; + int max_vfs; + int max_vfs_noari; enum rnpgbe_hw_type hw_type; struct mucse_dma_info dma; struct mucse_eth_info eth; diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c index 38c094965db9..b0e5fda632f3 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c @@ -6,6 +6,7 @@ #include "rnpgbe.h" #include "rnpgbe_hw.h" +#include "rnpgbe_mbx.h" /** * rnpgbe_get_invariants_n500 - setup for hw info @@ -67,7 +68,7 @@ static void rnpgbe_get_invariants_n500(struct mucse_hw *hw) mbx->fw_pf_mbox_mask = 0x2e200; mbx->fw_vf_share_ram = 0x2b000; mbx->share_size = 512; - + memcpy(&hw->mbx.ops, &mucse_mbx_ops_generic, sizeof(hw->mbx.ops)); /* setup net feature here */ hw->feature_flags |= M_NET_FEATURE_SG | M_NET_FEATURE_TX_CHECKSUM | @@ -83,6 +84,7 @@ static void rnpgbe_get_invariants_n500(struct mucse_hw *hw) M_NET_FEATURE_STAG_OFFLOAD; /* start the default ahz, update later */ hw->usecstocount 
= 125; + hw->max_vfs = 7; } /** @@ -117,6 +119,7 @@ static void rnpgbe_get_invariants_n210(struct mucse_hw *hw) /* update hw feature */ hw->feature_flags |= M_HW_FEATURE_EEE; hw->usecstocount = 62; + hw->max_vfs_noari = 7; } const struct rnpgbe_info rnpgbe_n500_info = { diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h index 2c7372a5e88d..ff7bd9b21550 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h @@ -14,6 +14,8 @@ #define RNPGBE_RING_BASE (0x1000) #define RNPGBE_MAC_BASE (0x20000) #define RNPGBE_ETH_BASE (0x10000) + +#define RNPGBE_DMA_DUMY (0x000c) /* chip resourse */ #define RNPGBE_MAX_QUEUES (8) /* multicast control table */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c index 08f773199e9b..1e8360cae560 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c @@ -114,6 +114,7 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev, hw->hw_addr = hw_addr; hw->dma.dma_version = dma_version; ii->get_invariants(hw); + hw->mbx.ops.init_params(hw); return 0; diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c new file mode 100644 index 000000000000..56ace3057fea --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c @@ -0,0 +1,623 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2025 Mucse Corporation. */ + +#include +#include +#include +#include +#include "rnpgbe.h" +#include "rnpgbe_mbx.h" +#include "rnpgbe_hw.h" + +/** + * mucse_read_mbx - Reads a message from the mailbox + * @hw: Pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: Id of vf/fw to read + * + * @return: 0 on success, negative on failure + **/ +int mucse_read_mbx(struct mucse_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (!mbx->ops.read) + return -EIO; + + return mbx->ops.read(hw, msg, size, mbx_id); +} + +/** + * mucse_write_mbx - Write a message to the mailbox + * @hw: Pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: Id of vf/fw to write + * + * @return: 0 on success, negative on failure + **/ +int mucse_write_mbx(struct mucse_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + + if (size > mbx->size) + return -EINVAL; + + if (!mbx->ops.write) + return -EIO; + + return mbx->ops.write(hw, msg, size, mbx_id); +} + +/** + * mucse_mbx_get_req - Read req from reg + * @hw: Pointer to the HW structure + * @reg: Register to read + * + * @return: the req value + **/ +static u16 mucse_mbx_get_req(struct mucse_hw *hw, int reg) +{ + /* force memory barrier */ + mb(); + return ioread32(hw->hw_addr + reg) & GENMASK(15, 0); +} + +/** + * mucse_mbx_get_ack - Read ack from reg + * @hw: Pointer to the HW structure + * @reg: Register to read + * + * @return: the ack value + **/ +static u16 mucse_mbx_get_ack(struct mucse_hw *hw, int reg) +{ + /* force memory barrier */ + mb(); + return (mbx_rd32(hw, reg) >> 16); +} + +/** + * mucse_mbx_inc_pf_req - Increase req + * @hw: Pointer to the HW structure + * @mbx_id: Id of vf/fw to read + * + * mucse_mbx_inc_pf_req read pf_req from hw, then write + * new value back after increase + **/ +static void 
mucse_mbx_inc_pf_req(struct mucse_hw *hw, + enum MBX_ID mbx_id) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + u32 reg, v; + u16 req; + + reg = (mbx_id == MBX_FW) ? PF2FW_COUNTER(mbx) : + PF2VF_COUNTER(mbx, mbx_id); + v = mbx_rd32(hw, reg); + req = (v & GENMASK(15, 0)); + req++; + v &= GENMASK(31, 16); + v |= req; + /* force before write to hw */ + mb(); + mbx_wr32(hw, reg, v); + /* update stats */ + hw->mbx.stats.msgs_tx++; +} + +/** + * mucse_mbx_inc_pf_ack - Increase ack + * @hw: Pointer to the HW structure + * @mbx_id: Id of vf/fw to read + * + * mucse_mbx_inc_pf_ack read pf_ack from hw, then write + * new value back after increase + **/ +static void mucse_mbx_inc_pf_ack(struct mucse_hw *hw, + enum MBX_ID mbx_id) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + u32 reg, v; + u16 ack; + + reg = (mbx_id == MBX_FW) ? PF2FW_COUNTER(mbx) : + PF2VF_COUNTER(mbx, mbx_id); + v = mbx_rd32(hw, reg); + ack = (v >> 16) & GENMASK(15, 0); + ack++; + v &= GENMASK(15, 0); + v |= (ack << 16); + /* force before write to hw */ + mb(); + mbx_wr32(hw, reg, v); + /* update stats */ + hw->mbx.stats.msgs_rx++; +} + +/** + * mucse_check_for_msg - Checks to see if vf/fw sent us mail + * @hw: Pointer to the HW structure + * @mbx_id: Id of vf/fw to check + * + * @return: 0 on success, negative on failure + **/ +int mucse_check_for_msg(struct mucse_hw *hw, enum MBX_ID mbx_id) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + + if (!mbx->ops.check_for_msg) + return -EIO; + + return mbx->ops.check_for_msg(hw, mbx_id); +} + +/** + * mucse_check_for_ack - Checks to see if vf/fw sent us ACK + * @hw: Pointer to the HW structure + * @mbx_id: Id of vf/fw to check + * + * @return: 0 on success, negative on failure + **/ +int mucse_check_for_ack(struct mucse_hw *hw, enum MBX_ID mbx_id) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + + if (!mbx->ops.check_for_ack) + return -EIO; + return mbx->ops.check_for_ack(hw, mbx_id); +} + +/** + * mucse_poll_for_msg - Wait for message notification + * @hw: Pointer to the HW structure + * @mbx_id: Id of vf/fw to poll + * + * @return: 0 on success, negative on failure + **/ +static int mucse_poll_for_msg(struct mucse_hw *hw, enum MBX_ID mbx_id) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + int val; + + if (!countdown || !mbx->ops.check_for_msg) + return -EIO; + + return read_poll_timeout(mbx->ops.check_for_msg, + val, val == 0, mbx->usec_delay, + countdown * mbx->usec_delay, + false, hw, mbx_id); +} + +/** + * mucse_poll_for_ack - Wait for message acknowledgment + * @hw: Pointer to the HW structure + * @mbx_id: Id of vf/fw to poll + * + * @return: 0 if it successfully received a message acknowledgment + **/ +static int mucse_poll_for_ack(struct mucse_hw *hw, enum MBX_ID mbx_id) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + int val; + + if (!countdown || !mbx->ops.check_for_ack) + return -EIO; + + return read_poll_timeout(mbx->ops.check_for_ack, + val, val == 0, mbx->usec_delay, + countdown * mbx->usec_delay, + false, hw, mbx_id); +} + +/** + * mucse_read_posted_mbx - Wait for message notification and receive message + * @hw: Pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: Id of vf/fw to read + * + * @return: 0 if it successfully received a message notification and + * copied it into the receive buffer. 
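+ * Otherwise the poll helper's error (e.g. -ETIMEDOUT) is returned.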
+ **/ +static int mucse_read_posted_mbx(struct mucse_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + int ret_val; + + if (!mbx->ops.read) + return -EIO; + + ret_val = mucse_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * mucse_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: Pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: Id of vf/fw to write + * + * @return: 0 if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static int mucse_write_posted_mbx(struct mucse_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + int ret_val; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + return -EIO; + + /* send msg and hold buffer lock */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = mucse_poll_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * mucse_check_for_msg_pf - checks to see if the vf/fw has sent mail + * @hw: Pointer to the HW structure + * @mbx_id: Id of vf/fw to check + * + * @return: 0 if the vf/fw has set the Status bit or else + * -EIO + **/ +static int mucse_check_for_msg_pf(struct mucse_hw *hw, + enum MBX_ID mbx_id) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + u16 hw_req_count = 0; + int ret_val = -EIO; + + if (mbx_id == MBX_FW) { + hw_req_count = mucse_mbx_get_req(hw, FW2PF_COUNTER(mbx)); + /* reg in hw should avoid 0 check */ + if (mbx->mbx_feature & MBX_FEATURE_NO_ZERO) { + if (hw_req_count != 0 && + hw_req_count != hw->mbx.fw_req) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + } else { + if (hw_req_count != hw->mbx.fw_req) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + } + } else { + if (mucse_mbx_get_req(hw, VF2PF_COUNTER(mbx, mbx_id)) != + hw->mbx.vf_req[mbx_id]) { + ret_val = 0; + hw->mbx.stats.reqs++; + } + } + + return ret_val; +} + +/** + * mucse_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: Pointer to the HW structure + * @mbx_id: Id of vf/fw to check + * + * @return: 0 if the vf/fw has set the Status bit or else + * -EIO + **/ +static int mucse_check_for_ack_pf(struct mucse_hw *hw, enum MBX_ID mbx_id) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + int ret_val = -EIO; + u16 hw_fw_ack; + + if (mbx_id == MBX_FW) { + hw_fw_ack = mucse_mbx_get_ack(hw, FW2PF_COUNTER(mbx)); + if (hw_fw_ack != 0 && + hw_fw_ack != hw->mbx.fw_ack) { + ret_val = 0; + hw->mbx.stats.acks++; + } + } else { + if (mucse_mbx_get_ack(hw, VF2PF_COUNTER(mbx, mbx_id)) != + hw->mbx.vf_ack[mbx_id]) { + ret_val = 0; + hw->mbx.stats.acks++; + } + } + + return ret_val; +} + +/** + * mucse_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @mbx_id: Id of vf/fw to obtain + * + * This function maybe used in an irq handler. + * + * @return: 0 if we obtained the mailbox lock + **/ +static int mucse_obtain_mbx_lock_pf(struct mucse_hw *hw, enum MBX_ID mbx_id) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + int try_cnt = 5000, ret; + u32 reg; + + reg = (mbx_id == MBX_FW) ? 
PF2FW_MBOX_CTRL(mbx) :
+	      PF2VF_MBOX_CTRL(mbx, mbx_id);
+	while (try_cnt-- > 0) {
+		/* Take ownership of the buffer */
+		mbx_wr32(hw, reg, MBOX_PF_HOLD);
+		/* force write back before check */
+		wmb();
+		if (mbx_rd32(hw, reg) & MBOX_PF_HOLD)
+			return 0;
+		udelay(100);
+	}
+	/* all tries failed: return an error, not the uninitialized ret */
+	ret = -EIO;
+	return ret;
+}
+
+/**
+ * mucse_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: Id of vf/fw to write
+ *
+ * This function may be used in an IRQ handler.
+ *
+ * @return: 0 if it successfully copied message into the buffer
+ **/
+static int mucse_write_mbx_pf(struct mucse_hw *hw, u32 *msg, u16 size,
+			      enum MBX_ID mbx_id)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+	u32 data_reg, ctrl_reg;
+	int ret_val = 0;
+	u16 i;
+
+	data_reg = (mbx_id == MBX_FW) ? FW_PF_SHM_DATA(mbx) :
+		   PF_VF_SHM_DATA(mbx, mbx_id);
+	ctrl_reg = (mbx_id == MBX_FW) ? PF2FW_MBOX_CTRL(mbx) :
+		   PF2VF_MBOX_CTRL(mbx, mbx_id);
+	if (size > MUCSE_VFMAILBOX_SIZE)
+		return -EINVAL;
+
+	/* lock the mailbox to prevent pf/vf/fw race condition */
+	ret_val = mucse_obtain_mbx_lock_pf(hw, mbx_id);
+	if (ret_val)
+		goto out_no_write;
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		mbx_wr32(hw, data_reg + i * 4, msg[i]);
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	if (mbx_id == MBX_FW) {
+		hw->mbx.fw_ack = mucse_mbx_get_ack(hw, FW2PF_COUNTER(mbx));
+	} else {
+		hw->mbx.vf_ack[mbx_id] =
+			mucse_mbx_get_ack(hw, VF2PF_COUNTER(mbx, mbx_id));
+	}
+	mucse_mbx_inc_pf_req(hw, mbx_id);
+
+	/* Interrupt VF/FW to tell it a message
+	 * has been sent and release buffer
+	 */
+	if (mbx->mbx_feature & MBX_FEATURE_WRITE_DELAY)
+		udelay(300);
+	mbx_wr32(hw, ctrl_reg, MBOX_CTRL_REQ);
+
+out_no_write:
+	return ret_val;
+}
+
+/**
+ * mucse_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: Id of vf/fw to read
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a vf/fw request so no polling for message is needed.
+ *
+ * @return: 0 on success, negative on failure
+ **/
+static int mucse_read_mbx_pf(struct mucse_hw *hw, u32 *msg, u16 size,
+			     enum MBX_ID mbx_id)
+{
+	struct mucse_mbx_info *mbx = &hw->mbx;
+	u32 data_reg, ctrl_reg;
+	int ret_val;
+	u32 i;
+
+	data_reg = (mbx_id == MBX_FW) ? FW_PF_SHM_DATA(mbx) :
+		   PF_VF_SHM_DATA(mbx, mbx_id);
+	ctrl_reg = (mbx_id == MBX_FW) ?
PF2FW_MBOX_CTRL(mbx) : + PF2VF_MBOX_CTRL(mbx, mbx_id); + + if (size > MUCSE_VFMAILBOX_SIZE) + return -EINVAL; + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = mucse_obtain_mbx_lock_pf(hw, mbx_id); + if (ret_val) + goto out_no_read; + + /* we need this */ + mb(); + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = mbx_rd32(hw, data_reg + 4 * i); + mbx_wr32(hw, data_reg, 0); + + /* update req */ + if (mbx_id == MBX_FW) { + hw->mbx.fw_req = mucse_mbx_get_req(hw, FW2PF_COUNTER(mbx)); + } else { + hw->mbx.vf_req[mbx_id] = + mucse_mbx_get_req(hw, VF2PF_COUNTER(mbx, mbx_id)); + } + /* Acknowledge receipt and release mailbox, then we're done */ + mucse_mbx_inc_pf_ack(hw, mbx_id); + /* free ownership of the buffer */ + mbx_wr32(hw, ctrl_reg, 0); + +out_no_read: + return ret_val; +} + +/** + * mucse_mbx_reset - reset mbx info, sync info from regs + * @hw: Pointer to the HW structure + * + * This function reset all mbx variables to default. + **/ +static void mucse_mbx_reset(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + int idx, v; + + for (idx = 0; idx < hw->max_vfs; idx++) { + v = mbx_rd32(hw, VF2PF_COUNTER(mbx, idx)); + hw->mbx.vf_req[idx] = v & GENMASK(15, 0); + hw->mbx.vf_ack[idx] = (v >> 16) & GENMASK(15, 0); + mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0); + } + v = mbx_rd32(hw, FW2PF_COUNTER(mbx)); + hw->mbx.fw_req = v & GENMASK(15, 0); + hw->mbx.fw_ack = (v >> 16) & GENMASK(15, 0); + + mbx_wr32(hw, PF2FW_MBOX_CTRL(mbx), 0); + + if (PF_VF_MBOX_MASK_LO(mbx)) + mbx_wr32(hw, PF_VF_MBOX_MASK_LO(mbx), 0); + if (PF_VF_MBOX_MASK_HI(mbx)) + mbx_wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0); + + mbx_wr32(hw, FW_PF_MBOX_MASK(mbx), GENMASK(31, 16)); +} + +/** + * mucse_mbx_configure_pf - configure mbx to use nr_vec interrupt + * @hw: Pointer to the HW structure + * @nr_vec: Vector number for mbx + * @enable: TRUE for enable, FALSE for disable + * + * This function configure mbx to use interrupt nr_vec. 
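+ * When enable is false, the vf->pf and fw->pf mailbox interrupts are
+ * masked and the mailbox control registers are cleared instead.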
+ **/ +static void mucse_mbx_configure_pf(struct mucse_hw *hw, int nr_vec, + bool enable) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + int idx = 0; + u32 v; + + if (enable) { + for (idx = 0; idx < hw->max_vfs; idx++) { + v = mbx_rd32(hw, VF2PF_COUNTER(mbx, idx)); + hw->mbx.vf_req[idx] = v & GENMASK(15, 0); + hw->mbx.vf_ack[idx] = (v >> 16) & GENMASK(15, 0); + + mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0); + } + v = mbx_rd32(hw, FW2PF_COUNTER(mbx)); + hw->mbx.fw_req = v & GENMASK(15, 0); + hw->mbx.fw_ack = (v >> 16) & GENMASK(15, 0); + mbx_wr32(hw, PF2FW_MBOX_CTRL(mbx), 0); + + for (idx = 0; idx < hw->max_vfs; idx++) { + /* vf to pf req interrupt */ + mbx_wr32(hw, VF2PF_MBOX_VEC(mbx, idx), + nr_vec); + } + + if (PF_VF_MBOX_MASK_LO(mbx)) + mbx_wr32(hw, PF_VF_MBOX_MASK_LO(mbx), 0); + /* allow vf to vectors */ + + if (PF_VF_MBOX_MASK_HI(mbx)) + mbx_wr32(hw, PF_VF_MBOX_MASK_HI(mbx), 0); + /* enable irq */ + /* bind fw mbx to irq */ + mbx_wr32(hw, FW2PF_MBOX_VEC(mbx), nr_vec); + /* allow CM3FW to PF MBX IRQ */ + mbx_wr32(hw, FW_PF_MBOX_MASK(mbx), GENMASK(31, 16)); + } else { + if (PF_VF_MBOX_MASK_LO(mbx)) + mbx_wr32(hw, PF_VF_MBOX_MASK_LO(mbx), + GENMASK(31, 0)); + /* disable irq */ + if (PF_VF_MBOX_MASK_HI(mbx)) + mbx_wr32(hw, PF_VF_MBOX_MASK_HI(mbx), + GENMASK(31, 0)); + + /* disable CM3FW to PF MBX IRQ */ + mbx_wr32(hw, FW_PF_MBOX_MASK(mbx), 0xfffffffe); + + /* reset vf->pf status/ctrl */ + for (idx = 0; idx < hw->max_vfs; idx++) + mbx_wr32(hw, PF2VF_MBOX_CTRL(mbx, idx), 0); + /* reset pf->cm3 ctrl */ + mbx_wr32(hw, PF2FW_MBOX_CTRL(mbx), 0); + /* used to sync link status */ + mbx_wr32(hw, RNPGBE_DMA_DUMY, 0); + } +} + +/** + * mucse_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +static void mucse_init_mbx_params_pf(struct mucse_hw *hw) +{ + struct mucse_mbx_info *mbx = &hw->mbx; + + mbx->usec_delay = 100; + mbx->timeout = (4 * 1000 * 1000) / mbx->usec_delay; + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + mbx->size = MUCSE_VFMAILBOX_SIZE; + + mutex_init(&mbx->lock); + mucse_mbx_reset(hw); +} + +struct mucse_mbx_operations mucse_mbx_ops_generic = { + .init_params = mucse_init_mbx_params_pf, + .read = mucse_read_mbx_pf, + .write = mucse_write_mbx_pf, + .read_posted = mucse_read_posted_mbx, + .write_posted = mucse_write_posted_mbx, + .check_for_msg = mucse_check_for_msg_pf, + .check_for_ack = mucse_check_for_ack_pf, + .configure = mucse_mbx_configure_pf, +}; diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h new file mode 100644 index 000000000000..0b4183e53e61 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2025 Mucse Corporation. 
*/ + +#ifndef _RNPGBE_MBX_H +#define _RNPGBE_MBX_H + +#include "rnpgbe.h" + +/* 14 words */ +#define MUCSE_VFMAILBOX_SIZE 14 +/* ================ PF <--> VF mailbox ================ */ +#define SHARE_MEM_BYTES 64 +static inline u32 PF_VF_SHM(struct mucse_mbx_info *mbx, int vf) +{ + return mbx->pf_vf_shm_base + mbx->mbx_mem_size * vf; +} + +#define PF2VF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 0) +#define VF2PF_COUNTER(mbx, vf) (PF_VF_SHM(mbx, vf) + 4) +#define PF_VF_SHM_DATA(mbx, vf) (PF_VF_SHM(mbx, vf) + 8) +#define VF2PF_MBOX_VEC(mbx, vf) ((mbx)->vf2pf_mbox_vec_base + 4 * (vf)) +#define PF2VF_MBOX_CTRL(mbx, vf) ((mbx)->pf2vf_mbox_ctrl_base + 4 * (vf)) +#define PF_VF_MBOX_MASK_LO(mbx) ((mbx)->pf_vf_mbox_mask_lo) +#define PF_VF_MBOX_MASK_HI(mbx) ((mbx)->pf_vf_mbox_mask_hi) +/* ================ PF <--> FW mailbox ================ */ +#define FW_PF_SHM(mbx) ((mbx)->fw_pf_shm_base) +#define FW2PF_COUNTER(mbx) (FW_PF_SHM(mbx) + 0) +#define PF2FW_COUNTER(mbx) (FW_PF_SHM(mbx) + 4) +#define FW_PF_SHM_DATA(mbx) (FW_PF_SHM(mbx) + 8) +#define FW2PF_MBOX_VEC(mbx) ((mbx)->fw2pf_mbox_vec) +#define PF2FW_MBOX_CTRL(mbx) ((mbx)->pf2fw_mbox_ctrl) +#define FW_PF_MBOX_MASK(mbx) ((mbx)->fw_pf_mbox_mask) +#define MBOX_CTRL_REQ BIT(0) /* WO */ +#define MBOX_PF_HOLD (BIT(3)) /* VF:RO, PF:WR */ +#define MBOX_IRQ_EN 0 +#define MBOX_IRQ_DISABLE 1 +#define mbx_rd32(hw, reg) m_rd_reg((hw)->hw_addr + (reg)) +#define mbx_wr32(hw, reg, val) m_wr_reg((hw)->hw_addr + (reg), (val)) + +extern struct mucse_mbx_operations mucse_mbx_ops_generic; + +int mucse_read_mbx(struct mucse_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id); +int mucse_write_mbx(struct mucse_hw *hw, u32 *msg, u16 size, + enum MBX_ID mbx_id); +int mucse_check_for_msg(struct mucse_hw *hw, enum MBX_ID mbx_id); +int mucse_check_for_ack(struct mucse_hw *hw, enum MBX_ID mbx_id); +#endif /* _RNPGBE_MBX_H */ -- 2.25.1 Initialize get hw capability from mbx_fw ops. 
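
Query the firmware's hw_abilities block at probe time and cache
nic_mode (ncsi_en), pf number, firmware version, AXI frequency, board
UID, WoL status and, for firmware >= 0.1.5.0, the force-down flag in
struct mucse_hw. Probe now fails if the firmware does not answer.
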
Signed-off-by: Dong Yibo --- drivers/net/ethernet/mucse/rnpgbe/Makefile | 3 +- drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 8 + .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 8 + .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c | 140 +++++ .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h | 568 ++++++++++++++++++ 5 files changed, 726 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h diff --git a/drivers/net/ethernet/mucse/rnpgbe/Makefile b/drivers/net/ethernet/mucse/rnpgbe/Makefile index 41177103b50c..fd455cb111a9 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/Makefile +++ b/drivers/net/ethernet/mucse/rnpgbe/Makefile @@ -7,4 +7,5 @@ obj-$(CONFIG_MGBE) += rnpgbe.o rnpgbe-objs := rnpgbe_main.o \ rnpgbe_chip.o \ - rnpgbe_mbx.o + rnpgbe_mbx.o \ + rnpgbe_mbx_fw.o diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h index 46e2bb2fe71e..4514bc1223c1 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h @@ -154,6 +154,14 @@ struct mucse_hw { u16 vendor_id; u16 subsystem_device_id; u16 subsystem_vendor_id; + u32 wol; + u32 wol_en; + u32 fw_version; + u32 axi_mhz; + u32 bd_uid; + int ncsi_en; + int force_en; + int force_cap; int max_vfs; int max_vfs_noari; enum rnpgbe_hw_type hw_type; diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c index 1e8360cae560..aeb560145c47 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c @@ -9,6 +9,7 @@ #include #include "rnpgbe.h" +#include "rnpgbe_mbx_fw.h" char rnpgbe_driver_name[] = "rnpgbe"; static const struct rnpgbe_info *rnpgbe_info_tbl[] = { @@ -116,6 +117,13 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev, ii->get_invariants(hw); hw->mbx.ops.init_params(hw); + err = mucse_mbx_get_capability(hw); + if (err) { + dev_err(&pdev->dev, + "mucse_mbx_get_capability failed!\n"); + goto err_free_net; + } + return 0; err_free_net: diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c new file mode 100644 index 000000000000..1674229fcd43 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2025 Mucse Corporation. */ + +#include + +#include "rnpgbe_mbx_fw.h" + +/** + * mucse_fw_send_cmd_wait - Send cmd req and wait for response + * @hw: Pointer to the HW structure + * @req: Pointer to the cmd req structure + * @reply: Pointer to the fw reply structure + * + * mucse_fw_send_cmd_wait sends req to pf-fw mailbox and wait + * reply from fw. 
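+ * Replies whose opcode does not match the request are discarded and
+ * the read is retried a bounded number of times.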
+ *
+ * @return: 0 on success, negative on failure
+ **/
+static int mucse_fw_send_cmd_wait(struct mucse_hw *hw,
+				  struct mbx_fw_cmd_req *req,
+				  struct mbx_fw_cmd_reply *reply)
+{
+	int len = le16_to_cpu(req->datalen) + MBX_REQ_HDR_LEN;
+	int retry_cnt = 3;
+	int err;
+
+	err = mutex_lock_interruptible(&hw->mbx.lock);
+	if (err)
+		return err;
+
+	err = hw->mbx.ops.write_posted(hw, (u32 *)req,
+				       L_WD(len),
+				       MBX_FW);
+	if (err) {
+		mutex_unlock(&hw->mbx.lock);
+		return err;
+	}
+
+retry:
+	retry_cnt--;
+	if (retry_cnt < 0) {
+		/* drop the lock on the timeout path too */
+		mutex_unlock(&hw->mbx.lock);
+		return -EIO;
+	}
+
+	err = hw->mbx.ops.read_posted(hw, (u32 *)reply,
+				      L_WD(sizeof(*reply)),
+				      MBX_FW);
+	if (err) {
+		mutex_unlock(&hw->mbx.lock);
+		return err;
+	}
+
+	if (reply->opcode != req->opcode)
+		goto retry;
+
+	mutex_unlock(&hw->mbx.lock);
+
+	if (reply->error_code)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * mucse_fw_get_capability - Get hw abilities from fw
+ * @hw: Pointer to the HW structure
+ * @abil: Pointer to the hw_abilities structure
+ *
+ * mucse_fw_get_capability tries to get hw abilities from
+ * fw.
+ *
+ * @return: 0 on success, negative on failure
+ **/
+static int mucse_fw_get_capability(struct mucse_hw *hw,
+				   struct hw_abilities *abil)
+{
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	int err = 0;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+	build_phy_abalities_req(&req, &req);
+	err = mucse_fw_send_cmd_wait(hw, &req, &reply);
+	if (err == 0)
+		memcpy(abil, &reply.hw_abilities, sizeof(*abil));
+
+	return err;
+}
+
+/**
+ * mucse_mbx_get_capability - Get hw abilities from fw
+ * @hw: Pointer to the HW structure
+ *
+ * mucse_mbx_get_capability tries to read the hw capabilities from
+ * fw, retrying several times on failure.
+ *
+ * @return: 0 on success, negative on failure
+ **/
+int mucse_mbx_get_capability(struct mucse_hw *hw)
+{
+	struct hw_abilities ability;
+	int try_cnt = 3;
+	int err = 0;
+
+	memset(&ability, 0, sizeof(ability));
+
+	while (try_cnt--) {
+		err = mucse_fw_get_capability(hw, &ability);
+		if (err == 0) {
+			u16 nic_mode = le16_to_cpu(ability.nic_mode);
+			u32 wol = le32_to_cpu(ability.wol_status);
+
+			hw->ncsi_en = (nic_mode & 0x4) ? 1 : 0;
+			hw->pfvfnum = le16_to_cpu(ability.pfnum);
+			hw->fw_version = le32_to_cpu(ability.fw_version);
+			hw->axi_mhz = le32_to_cpu(ability.axi_mhz);
+			hw->bd_uid = le32_to_cpu(ability.bd_uid);
+
+			if (hw->fw_version >= 0x0001012C) {
+				/* this version can get wol_en from hw */
+				hw->wol = wol & 0xff;
+				hw->wol_en = wol & 0x100;
+			} else {
+				/* other version only pf0 or ncsi can wol */
+				hw->wol = wol & 0xff;
+				if (hw->ncsi_en || !hw->pfvfnum)
+					hw->wol_en = 1;
+			}
+			/* 0.1.5.0 can get force status from fw */
+			if (hw->fw_version >= 0x00010500) {
+				ability_update_host_endian(&ability);
+				hw->force_en = ability.e_host.force_down_en;
+				hw->force_cap = 1;
+			}
+			return 0;
+		}
+	}
+
+	return err;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
new file mode 100644
index 000000000000..a24c5d4e0075
--- /dev/null
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
@@ -0,0 +1,568 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2020 - 2025 Mucse Corporation.
*/ + +#ifndef _RNPGBE_MBX_FW_H +#define _RNPGBE_MBX_FW_H + +#include +#include +#include + +#include "rnpgbe.h" + +#define MBX_REQ_HDR_LEN 24 +#define L_WD(x) ((x) / 4) + +struct mbx_fw_cmd_reply; +typedef void (*cookie_cb)(struct mbx_fw_cmd_reply *reply, void *priv); + +struct mbx_req_cookie { + int magic; +#define COOKIE_MAGIC 0xCE + cookie_cb cb; + int timeout_jiffes; + int errcode; + wait_queue_head_t wait; + int done; + int priv_len; + char priv[]; +}; + +enum MUCSE_FW_CMD { + GET_VERSION = 0x0001, + READ_REG = 0xFF03, + WRITE_REG = 0xFF04, + MODIFY_REG = 0xFF07, + IFUP_DOWN = 0x0800, + SEND_TO_PF = 0x0801, + SEND_TO_VF = 0x0802, + DRIVER_INSMOD = 0x0803, + SYSTEM_SUSPUSE = 0x0804, + SYSTEM_FORCE = 0x0805, + GET_PHY_ABALITY = 0x0601, + GET_MAC_ADDRES = 0x0602, + RESET_PHY = 0x0603, + LED_SET = 0x0604, + GET_LINK_STATUS = 0x0607, + LINK_STATUS_EVENT = 0x0608, + SET_LANE_FUN = 0x0609, + GET_LANE_STATUS = 0x0610, + SFP_SPEED_CHANGED_EVENT = 0x0611, + SET_EVENT_MASK = 0x0613, + SET_LOOPBACK_MODE = 0x0618, + SET_PHY_REG = 0x0628, + GET_PHY_REG = 0x0629, + PHY_LINK_SET = 0x0630, + GET_PHY_STATISTICS = 0x0631, + PHY_PAUSE_SET = 0x0632, + PHY_PAUSE_GET = 0x0633, + PHY_EEE_SET = 0x0636, + PHY_EEE_GET = 0x0637, + SFP_MODULE_READ = 0x0900, + SFP_MODULE_WRITE = 0x0901, + FW_UPDATE = 0x0700, + FW_MAINTAIN = 0x0701, + FW_UPDATE_GBE = 0x0702, + WOL_EN = 0x0910, + GET_DUMP = 0x0a00, + SET_DUMP = 0x0a10, + GET_TEMP = 0x0a11, + SET_WOL = 0x0a12, + SET_TEST_MODE = 0x0a13, + SHOW_TX_STAMP = 0x0a14, + LLDP_TX_CTRL = 0x0a15, +}; + +struct hw_abilities { + u8 link_stat; + u8 lane_mask; + __le32 speed; + __le16 phy_type; + __le16 nic_mode; + __le16 pfnum; + __le32 fw_version; + __le32 axi_mhz; + union { + u8 port_id[4]; + __le32 port_ids; + }; + __le32 bd_uid; + __le32 phy_id; + __le32 wol_status; + union { + __le32 ext_ability; + struct { + __le32 valid : 1; /* 0 */ + __le32 wol_en : 1; /* 1 */ + __le32 pci_preset_runtime_en : 1; /* 2 */ + __le32 smbus_en : 1; /* 3 */ + __le32 ncsi_en : 1; /* 4 */ + __le32 rpu_en : 1; /* 5 */ + __le32 v2 : 1; /* 6 */ + __le32 pxe_en : 1; /* 7 */ + __le32 mctp_en : 1; /* 8 */ + __le32 yt8614 : 1; /* 9 */ + __le32 pci_ext_reset : 1; /* 10 */ + __le32 rpu_availble : 1; /* 11 */ + __le32 fw_lldp_ability : 1; /* 12 */ + __le32 lldp_enabled : 1; /* 13 */ + __le32 only_1g : 1; /* 14 */ + __le32 force_down_en: 1; /* 15 */ + } e; + struct { + u32 valid : 1; /* 0 */ + u32 wol_en : 1; /* 1 */ + u32 pci_preset_runtime_en : 1; /* 2 */ + u32 smbus_en : 1; /* 3 */ + u32 ncsi_en : 1; /* 4 */ + u32 rpu_en : 1; /* 5 */ + u32 v2 : 1; /* 6 */ + u32 pxe_en : 1; /* 7 */ + u32 mctp_en : 1; /* 8 */ + u32 yt8614 : 1; /* 9 */ + u32 pci_ext_reset : 1; /* 10 */ + u32 rpu_availble : 1; /* 11 */ + u32 fw_lldp_ability : 1; /* 12 */ + u32 lldp_enabled : 1; /* 13 */ + u32 only_1g : 1; /* 14 */ + u32 force_down_en: 1; /* 15 */ + } e_host; + }; +} __packed; + +static inline void ability_update_host_endian(struct hw_abilities *abi) +{ + u32 host_val = le32_to_cpu(abi->ext_ability); + + abi->e_host = *(typeof(abi->e_host) *)&host_val; +} + +struct phy_pause_data { + u32 pause_mode; +}; + +struct lane_stat_data { + u8 nr_lane; + u8 pci_gen : 4; + u8 pci_lanes : 4; + u8 pma_type; + u8 phy_type; + __le16 linkup : 1; + __le16 duplex : 1; + __le16 autoneg : 1; + __le16 fec : 1; + __le16 an : 1; + __le16 link_traing : 1; + __le16 media_availble : 1; + __le16 is_sgmii : 1; + __le16 link_fault : 4; +#define LINK_LINK_FAULT BIT(0) +#define LINK_TX_FAULT BIT(1) +#define LINK_RX_FAULT BIT(2) +#define LINK_REMOTE_FAULT 
BIT(3) + __le16 is_backplane : 1; + __le16 tp_mdx : 2; + union { + u8 phy_addr; + struct { + u8 mod_abs : 1; + u8 fault : 1; + u8 tx_dis : 1; + u8 los : 1; + } sfp; + }; + u8 sfp_connector; + __le32 speed; + __le32 si_main; + __le32 si_pre; + __le32 si_post; + __le32 si_tx_boost; + __le32 supported_link; + __le32 phy_id; + __le32 advertised_link; +} __packed; + +struct yt_phy_statistics { + __le32 pkg_ib_valid; /* rx crc good and length 64-1518 */ + __le32 pkg_ib_os_good; /* rx crc good and length >1518 */ + __le32 pkg_ib_us_good; /* rx crc good and length <64 */ + __le16 pkg_ib_err; /* rx crc wrong and length 64-1518 */ + __le16 pkg_ib_os_bad; /* rx crc wrong and length >1518 */ + __le16 pkg_ib_frag; /* rx crc wrong and length <64 */ + __le16 pkg_ib_nosfd; /* rx sfd missed */ + __le32 pkg_ob_valid; /* tx crc good and length 64-1518 */ + __le32 pkg_ob_os_good; /* tx crc good and length >1518 */ + __le32 pkg_ob_us_good; /* tx crc good and length <64 */ + __le16 pkg_ob_err; /* tx crc wrong and length 64-1518 */ + __le16 pkg_ob_os_bad; /* tx crc wrong and length >1518 */ + __le16 pkg_ob_frag; /* tx crc wrong and length <64 */ + __le16 pkg_ob_nosfd; /* tx sfd missed */ +} __packed; + +struct phy_statistics { + union { + struct yt_phy_statistics yt; + }; +} __packed; + +struct port_stat { + u8 phyid; + u8 duplex : 1; + u8 autoneg : 1; + u8 fec : 1; + __le16 speed; + union { + __le16 stat; + struct { + __le16 pause : 4; + __le16 local_eee : 3; + __le16 partner_eee : 3; + __le16 tp_mdx : 2; + __le16 lldp_status : 1; + __le16 revs : 3; + } v; + struct { + u16 pause : 4; + u16 local_eee : 3; + u16 partner_eee : 3; + u16 tp_mdx : 2; + u16 lldp_status : 1; + u16 revs : 3; + } v_host; + }; +} __packed; + +#define FLAGS_DD BIT(0) /* driver clear 0, FW must set 1 */ +/* driver clear 0, FW must set only if it reporting an error */ +#define FLAGS_ERR BIT(2) + +/* req is little endian. 
bigendian should be conserened */ +struct mbx_fw_cmd_req { + __le16 flags; /* 0-1 */ + __le16 opcode; /* 2-3 enum GENERIC_CMD */ + __le16 datalen; /* 4-5 */ + __le16 ret_value; /* 6-7 */ + union { + struct { + __le32 cookie_lo; /* 8-11 */ + __le32 cookie_hi; /* 12-15 */ + }; + + void *cookie; + }; + __le32 reply_lo; /* 16-19 5dw */ + __le32 reply_hi; /* 20-23 */ + union { + u8 data[32]; + struct { + __le32 addr; + __le32 bytes; + } r_reg; + + struct { + __le32 addr; + __le32 bytes; + __le32 data[4]; + } w_reg; + + struct { + __le32 lanes; + } ptp; + + struct { + __le32 lane; + __le32 up; + } ifup; + + struct { + __le32 sec; + __le32 nanosec; + + } tstamps; + + struct { + __le32 lane; + __le32 status; + } ifinsmod; + + struct { + __le32 lane; + __le32 status; + } ifforce; + + struct { + __le32 lane; + __le32 status; + } ifsuspuse; + + struct { + __le32 nr_lane; + } get_lane_st; + + struct { + __le32 nr_lane; + __le32 func; +#define LANE_FUN_AN 0 +#define LANE_FUN_LINK_TRAING 1 +#define LANE_FUN_FEC 2 +#define LANE_FUN_SI 3 +#define LANE_FUN_SFP_TX_DISABLE 4 +#define LANE_FUN_PCI_LANE 5 +#define LANE_FUN_PRBS 6 +#define LANE_FUN_SPEED_CHANGE 7 + __le32 value0; + __le32 value1; + __le32 value2; + __le32 value3; + } set_lane_fun; + + struct { + __le32 flag; + __le32 nr_lane; + } set_dump; + + struct { + __le32 lane; + __le32 enable; + } wol; + + struct { + __le32 lane; + __le32 mode; + } gephy_test; + + struct { + __le32 lane; + __le32 op; + __le32 enable; + __le32 inteval; + } lldp_tx; + + struct { + __le32 bytes; + __le32 nr_lane; + __le32 bin_offset; + __le32 no_use; + } get_dump; + + struct { + __le32 nr_lane; + __le32 value; +#define LED_IDENTIFY_INACTIVE 0 +#define LED_IDENTIFY_ACTIVE 1 +#define LED_IDENTIFY_ON 2 +#define LED_IDENTIFY_OFF 3 + } led_set; + + struct { + __le32 addr; + __le32 data; + __le32 mask; + } modify_reg; + + struct { + __le32 adv_speed_mask; + __le32 autoneg; + __le32 speed; + __le32 duplex; + __le32 nr_lane; + __le32 tp_mdix_ctrl; + } phy_link_set; + + struct { + __le32 pause_mode; + __le32 nr_lane; + } phy_pause_set; + + struct { + __le32 pause_mode; + __le32 nr_lane; + } phy_pause_get; + + struct { + __le32 local_eee; + __le32 tx_lpi_timer; + __le32 nr_lane; + } phy_eee_set; + + struct { + __le32 nr_lane; + __le32 sfp_adr; /* 0xa0 or 0xa2 */ + __le32 reg; + __le32 cnt; + } sfp_read; + + struct { + __le32 nr_lane; + __le32 sfp_adr; /* 0xa0 or 0xa2 */ + __le32 reg; + __le32 val; + } sfp_write; + + struct { + __le32 nr_lane; /* 0-3 */ + } get_linkstat; + + struct { + __le16 changed_lanes; + __le16 lane_status; + __le32 port_st_magic; +#define SPEED_VALID_MAGIC 0xa4a6a8a9 + struct port_stat st[4]; + } link_stat; /* FW->RC */ + + struct { + __le16 enable_stat; + __le16 event_mask; + } stat_event_mask; + + struct { + __le32 cmd; + __le32 arg0; + __le32 req_bytes; + __le32 reply_bytes; + __le32 ddr_lo; + __le32 ddr_hi; + } maintain; + + struct { /* set phy register */ + u8 phy_interface; + union { + u8 page_num; + u8 external_phy_addr; + }; + __le32 phy_reg_addr; + __le32 phy_w_data; + __le32 reg_addr; + __le32 w_data; + /* 1 = ignore page_num, use last QSFP */ + u8 recall_qsfp_page : 1; + /* page value */ + /* 0 = use page_num for QSFP */ + u8 nr_lane; + } set_phy_reg; + + struct { + __le32 lane_mask; + __le32 pfvf_num; + } get_mac_addr; + + struct { + u8 phy_interface; + union { + u8 page_num; + u8 external_phy_addr; + }; + __le32 phy_reg_addr; + u8 nr_lane; + } get_phy_reg; + + struct { + __le32 nr_lane; + } phy_statistics; + + struct { + u8 paration; + __le32 bytes; 
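+			/* likely the low/high words of the image buffer physical address */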
+ __le32 bin_phy_lo; + __le32 bin_phy_hi; + } fw_update; + }; +} __packed; + +#define EEE_1000BT BIT(2) +#define EEE_100BT BIT(1) + +struct rnpgbe_eee_cap { + __le32 local_capability; + __le32 local_eee; + __le32 partner_eee; +}; + +/* firmware -> driver */ +struct mbx_fw_cmd_reply { + /* fw must set: DD, CMP, Error(if error), copy value */ + __le16 flags; + /* from command: LB,RD,VFC,BUF,SI,EI,FE */ + __le16 opcode; /* 2-3: copy from req */ + __le16 error_code; /* 4-5: 0 if no error */ + __le16 datalen; /* 6-7: */ + union { + struct { + __le32 cookie_lo; /* 8-11: */ + __le32 cookie_hi; /* 12-15: */ + }; + void *cookie; + }; + /* ===== data ==== [16-64] */ + union { + u8 data[40]; + + struct version { + __le32 major; + __le32 sub; + __le32 modify; + } version; + + struct { + __le32 value[4]; + } r_reg; + + struct { + __le32 new_value; + } modify_reg; + + struct get_temp { + __le32 temp; + __le32 volatage; + } get_temp; + + struct { +#define MBX_SFP_READ_MAX_CNT 32 + u8 value[MBX_SFP_READ_MAX_CNT]; + } sfp_read; + + struct mac_addr { + __le32 lanes; + struct _addr { + /* + * for macaddr:01:02:03:04:05:06 + * mac-hi=0x01020304 mac-lo=0x05060000 + */ + u8 mac[8]; + } addrs[4]; + } mac_addr; + + struct get_dump_reply { + __le32 flags; + __le32 version; + __le32 bytes; + __le32 data[4]; + } get_dump; + + struct get_lldp_reply { + __le32 value; + __le32 inteval; + } get_lldp; + + struct rnpgbe_eee_cap phy_eee_abilities; + struct lane_stat_data lanestat; + struct hw_abilities hw_abilities; + struct phy_statistics phy_statistics; + }; +} __packed; + +static inline void build_phy_abalities_req(struct mbx_fw_cmd_req *req, + void *cookie) +{ + req->flags = 0; + req->opcode = cpu_to_le32(GET_PHY_ABALITY); + req->datalen = 0; + req->reply_lo = 0; + req->reply_hi = 0; + req->cookie = cookie; +} + +int mucse_mbx_get_capability(struct mucse_hw *hw); + +#endif /* _RNPGBE_MBX_FW_H */ -- 2.25.1 Initialize download fw function for n210 series. Signed-off-by: Dong Yibo --- drivers/net/ethernet/mucse/Kconfig | 1 + drivers/net/ethernet/mucse/rnpgbe/Makefile | 3 +- drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 4 + .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 18 +- .../net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c | 476 ++++++++++++++++++ .../net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h | 30 ++ 6 files changed, 529 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h diff --git a/drivers/net/ethernet/mucse/Kconfig b/drivers/net/ethernet/mucse/Kconfig index be0fdf268484..8a0262a70036 100644 --- a/drivers/net/ethernet/mucse/Kconfig +++ b/drivers/net/ethernet/mucse/Kconfig @@ -20,6 +20,7 @@ config MGBE tristate "Mucse(R) 1GbE PCI Express adapters support" depends on PCI select PAGE_POOL + select NET_DEVLINK help This driver supports Mucse(R) 1GbE PCI Express family of adapters. 
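For context, the request/reply pattern above generalizes to the other opcodes: a caller fills a request with a build_* helper, sends it, then reads the matching member of the reply union. The sketch below shows this for GET_TEMP; both build_get_temp_req() and example_read_temp() are hypothetical names modeled on build_phy_abalities_req(), are not part of this patch, and assume they live next to the static helpers in rnpgbe_mbx_fw.c:

	/* hypothetical builder, mirrors build_phy_abalities_req() */
	static inline void build_get_temp_req(struct mbx_fw_cmd_req *req,
					      void *cookie)
	{
		req->flags = 0;
		req->opcode = cpu_to_le32(GET_TEMP);
		req->datalen = 0;
		req->reply_lo = 0;
		req->reply_hi = 0;
		req->cookie = cookie;
	}

	/* hypothetical caller: blocking read of the chip temperature */
	static int example_read_temp(struct mucse_hw *hw, u32 *temp)
	{
		struct mbx_fw_cmd_reply reply;
		struct mbx_fw_cmd_req req;
		int err;

		memset(&req, 0, sizeof(req));
		memset(&reply, 0, sizeof(reply));
		/* no waiter yet, so point the cookie at the request itself */
		build_get_temp_req(&req, &req);
		err = mucse_fw_send_cmd_wait(hw, &req, &reply);
		if (!err)
			*temp = le32_to_cpu(reply.get_temp.temp);
		return err;
	}
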
diff --git a/drivers/net/ethernet/mucse/rnpgbe/Makefile b/drivers/net/ethernet/mucse/rnpgbe/Makefile index fd455cb111a9..db7d3a8140b2 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/Makefile +++ b/drivers/net/ethernet/mucse/rnpgbe/Makefile @@ -8,4 +8,5 @@ obj-$(CONFIG_MGBE) += rnpgbe.o rnpgbe-objs := rnpgbe_main.o \ rnpgbe_chip.o \ rnpgbe_mbx.o \ - rnpgbe_mbx_fw.o + rnpgbe_mbx_fw.o \ + rnpgbe_sfc.o diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h index 4514bc1223c1..ea28236669e3 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h @@ -6,6 +6,7 @@ #include #include +#include extern const struct rnpgbe_info rnpgbe_n500_info; extern const struct rnpgbe_info rnpgbe_n210_info; @@ -195,9 +196,12 @@ struct mucse_hw { struct mucse { struct net_device *netdev; struct pci_dev *pdev; + struct devlink *dl; struct mucse_hw hw; /* board number */ u16 bd_number; + u32 flags2; +#define M_FLAG2_NO_NET_REG BIT(0) char name[60]; }; diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c index aeb560145c47..61dd0d232d99 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c @@ -7,9 +7,11 @@ #include #include #include +#include #include "rnpgbe.h" #include "rnpgbe_mbx_fw.h" +#include "rnpgbe_sfc.h" char rnpgbe_driver_name[] = "rnpgbe"; static const struct rnpgbe_info *rnpgbe_info_tbl[] = { @@ -76,6 +78,7 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev, hw = &mucse->hw; hw->back = mucse; hw->hw_type = ii->hw_type; + hw->pdev = pdev; switch (hw->hw_type) { case rnpgbe_hw_n500: @@ -94,8 +97,18 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev, case rnpgbe_hw_n210: case rnpgbe_hw_n210L: /* check bar0 to load firmware */ - if (pci_resource_len(pdev, 0) == 0x100000) - return -EIO; + if (pci_resource_len(pdev, 0) == 0x100000) { + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + + if (!hw->hw_addr) { + dev_err(&pdev->dev, "map bar0 failed!\n"); + return -EIO; + } + rnpgbe_devlink_register(mucse); + mucse->flags2 |= M_FLAG2_NO_NET_REG; + return 0; + } /* n210 use bar2 */ hw_addr = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 2), @@ -191,6 +204,7 @@ static void rnpgbe_rm_adapter(struct mucse *mucse) { struct net_device *netdev; + rnpgbe_devlink_unregister(mucse); netdev = mucse->netdev; free_netdev(netdev); } diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c new file mode 100644 index 000000000000..91a637b3ac19 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.c @@ -0,0 +1,476 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2022 - 2025 Mucse Corporation. 
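 * Serial flash controller (SFC) helpers used to program the N210 firmware image.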
*/ + +#include +#include +#include + +#include "rnpgbe_sfc.h" +#include "rnpgbe.h" + +/** + * mucse_sfc_command - Write sfc cmd to hw and wait ok + * @hw_addr: bar addr for sfc controller + * @cmd: sfc command + * + * @return: 0 on success, negative on failure + **/ +static int mucse_sfc_command(u8 __iomem *hw_addr, u32 cmd) +{ + int val; + + iowrite32(cmd, (hw_addr + 0x8)); + iowrite32(1, (hw_addr + 0x0)); + + return read_poll_timeout(ioread32, val, !val, + 100, 10000, true, + hw_addr); +} + +/** + * mucse_sfc_flash_write_disable - Enable flash cmd protect + * @hw_addr: bar addr for sfc controller + * + * @return: 0 on success, negative on failure + **/ +static int mucse_sfc_flash_write_disable(u8 __iomem *hw_addr) +{ + iowrite32(CMD_CYCLE(8), (hw_addr + 0x10)); + iowrite32(WR_DATA_CYCLE(0), (hw_addr + 0x14)); + + return mucse_sfc_command(hw_addr, CMD_WRITE_DISABLE); +} + +/** + * mucse_sfc_flash_wait_idle - Wait sfc controller idle + * @hw_addr: bar addr for sfc controller + * + * @return: 0 on success, negative on failure + **/ +static int mucse_sfc_flash_wait_idle(u8 __iomem *hw_addr) +{ + int try_count = 100; + int err, val; + + iowrite32(CMD_CYCLE(8), (hw_addr + 0x10)); + iowrite32(RD_DATA_CYCLE(8), (hw_addr + 0x14)); + +try: + try_count--; + err = mucse_sfc_command(hw_addr, CMD_READ_STATUS); + if (err && try_count) + goto try; + err = read_poll_timeout(ioread32, val, !(val & 0x1), + 100, 1000, true, + hw_addr + 0x4); + if (err && try_count) + goto try; + + return err; +} + +/** + * mucse_sfc_flash_write_enable - Disable flash cmd protect + * @hw_addr: bar addr for sfc controller + * + * @return: 0 on success, negative on failure + **/ +static int mucse_sfc_flash_write_enable(u8 __iomem *hw_addr) +{ + iowrite32(CMD_CYCLE(8), (hw_addr + 0x10)); + iowrite32(0x1f, (hw_addr + 0x18)); + iowrite32(0x100000, (hw_addr + 0x14)); + + return mucse_sfc_command(hw_addr, CMD_WRITE_ENABLE); +} + +/** + * mucse_sfc_flash_erase_sector - Erase flash sector + * @hw_addr: bar addr for sfc controller + * @address: sector start address + * + * @return: 0 on success, negative on failure + **/ +static int mucse_sfc_flash_erase_sector(u8 __iomem *hw_addr, + u32 address) +{ + int err; + + if (address >= RSP_FLASH_HIGH_16M_OFFSET) + return -EINVAL; + + if (address % 4096) + return -EINVAL; + + err = mucse_sfc_flash_write_enable(hw_addr); + if (err) + return err; + iowrite32((CMD_CYCLE(8) | ADDR_CYCLE(24)), (hw_addr + 0x10)); + iowrite32((RD_DATA_CYCLE(0) | WR_DATA_CYCLE(0)), (hw_addr + 0x14)); + iowrite32(SFCADDR(address), (hw_addr + 0xc)); + err = mucse_sfc_command(hw_addr, CMD_SECTOR_ERASE); + if (err) + return err; + err = mucse_sfc_flash_wait_idle(hw_addr); + if (err) + return err; + err = mucse_sfc_flash_write_disable(hw_addr); + + return err; +} + +/** + * mucse_sfc_write_protect - set flash write protect off + * @hw: Pointer to the HW structure + * + * @return: 0 on success, negative on failure + **/ +static int mucse_sfc_write_protect(struct mucse_hw *hw) +{ + int err; + + err = mucse_sfc_flash_write_enable(hw->hw_addr); + if (err) + return err; + + iowrite32(CMD_CYCLE(8), (hw->hw_addr + 0x10)); + iowrite32(WR_DATA_CYCLE(8), (hw->hw_addr + 0x14)); + iowrite32(0, (hw->hw_addr + 0x04)); + err = mucse_sfc_command(hw->hw_addr, CMD_WRITE_STATUS); + + return err; +} + +/** + * mucse_sfc_flash_erase - Erase flash + * @hw: Pointer to the HW structure + * @size: Data length + * + * mucse_sfc_flash_erase tries to erase sfc_flash + * + * @return: 0 on success, negative on failure + **/ +static int 
mucse_sfc_flash_erase(struct mucse_hw *hw, u32 size) +{ + u32 addr = SFC_MEM_BASE; + u32 page_size = 0x1000; + u32 i = 0; + int err; + + size = ((size + (page_size - 1)) / page_size) * page_size; + addr = addr - SFC_MEM_BASE; + + if (size == 0) + return -EINVAL; + + if ((addr + size) > RSP_FLASH_HIGH_16M_OFFSET) + return -EINVAL; + + if (addr % page_size) + return -EINVAL; + + if (size % page_size) + return -EINVAL; + /* skip some info */ + for (i = 0; i < size; i += page_size) { + if (i >= 0x1f000 && i < 0x20000) + continue; + + err = mucse_sfc_flash_erase_sector(hw->hw_addr, (addr + i)); + if (err) + break; + } + + return err; +} + +/** + * mucse_download_firmware - Download data to chip + * @hw: Pointer to the HW structure + * @data: Data to use + * @file_size: Data length + * + * mucse_download_firmware tries to download data to white-chip + * by hw_addr regs. + * + * @return: 0 on success, negative on failure + **/ +static int mucse_download_firmware(struct mucse_hw *hw, const u8 *data, + int file_size) +{ + char *buf = kzalloc(0x1000, GFP_KERNEL); + loff_t end_pos = file_size; + u32 rd_len = 0x1000; + loff_t old_pos = 0; + u32 old_data = 0; + u32 new_data = 0; + int get_len = 0; + u32 fw_off = 0; + loff_t pos = 0; + u32 iter = 0; + int err = 0; + + if (!buf) + return -ENOMEM; + /* copy bin to bar */ + while (pos < end_pos) { + /* we must skip header 4k */ + if ((pos >= 0x1f000 && pos < 0x20000) || pos == 0) { + pos += rd_len; + continue; + } + + old_pos = pos; + if (end_pos - pos < rd_len) + get_len = end_pos - pos; + else + get_len = rd_len; + + memcpy(buf, data + pos, get_len); + if ((get_len < rd_len && ((old_pos + get_len) != end_pos)) || + get_len < 0) { + err = -EIO; + goto out; + } + + for (iter = 0; iter < get_len; iter += 4) { + old_data = *((u32 *)(buf + iter)); + fw_off = (u32)old_pos + iter + 0x1000; + iowrite32(old_data, (hw->hw_addr + fw_off)); + } + + if (pos == old_pos) + pos += get_len; + } + /* write first 4k header */ + pos = 0; + old_pos = pos; + get_len = rd_len; + memcpy(buf, data + pos, get_len); + + for (iter = 0; iter < get_len; iter += 4) { + old_data = *((u32 *)(buf + iter)); + fw_off = (u32)old_pos + iter + 0x1000; + iowrite32(old_data, (hw->hw_addr + fw_off)); + } + /* check */ + pos = 0x0; + while (pos < end_pos) { + if (pos >= 0x1f000 && pos < 0x20000) { + pos += rd_len; + continue; + } + + old_pos = pos; + if (end_pos - pos < rd_len) + get_len = end_pos - pos; + else + get_len = rd_len; + + memcpy(buf, data + pos, get_len); + if ((get_len < rd_len && ((old_pos + get_len) != end_pos)) || + get_len < 0) { + err = -EIO; + goto out; + } + + for (iter = 0; iter < get_len; iter += 4) { + old_data = *((u32 *)(buf + iter)); + fw_off = (u32)old_pos + iter + 0x1000; + new_data = ioread32(hw->hw_addr + fw_off); + if (old_data != new_data) + err = -EIO; + } + + if (pos == old_pos) + pos += get_len; + } +out: + kfree(buf); + return err; +} + +/** + * rnpgbe_check_fw_from_flash - Check chip-id and bin-id + * @hw: Pointer to the HW structure + * @data: data from bin files + * + * rnpgbe_check_fw_from_flash tries to match chip-id and bin-id + * + * @return: 0 on mactch, negative on failure + **/ +static int rnpgbe_check_fw_from_flash(struct mucse_hw *hw, const u8 *data) +{ + enum rnpgbe_hw_type hw_type = rnpgbe_hw_unknow; + u32 device_id; + u32 chip_data; + int ret = 0; + +#define RNPGBE_BIN_HEADER (0xa55aa55a) + if (*((u32 *)(data)) != RNPGBE_BIN_HEADER) + return -EINVAL; + + device_id = *((u16 *)data + 30); + + /* if no device_id no check */ + if (device_id == 0 || 
device_id == 0xffff) + return 0; + +#define CHIP_OFFSET (0x1f014 + 0x1000) +#define CHIP_N210_FLAG (0x11111111) + /* we should get hw_type from sfc-flash */ + chip_data = ioread32(hw->hw_addr + CHIP_OFFSET); + if (chip_data == CHIP_N210_FLAG) + hw_type = rnpgbe_hw_n210; + else if (chip_data == 0x0) + hw_type = rnpgbe_hw_n210L; + + switch (hw_type) { + case rnpgbe_hw_n210: + if (device_id != 0x8208) + ret = -EINVAL; + break; + case rnpgbe_hw_n210L: + if (device_id != 0x820a) + ret = -EINVAL; + break; + default: + ret = -EINVAL; + } + + return ret; +} + +/** + * init_firmware_for_n210 - download firmware + * @hw: Pointer to the HW structure + * @fw: pointer to the firmware + * + * init_firmware_for_n210 try to download firmware + * for n210, by bar0(hw->hw_addr). + * + * @return: 0 on success, negative on failure + **/ +static int init_firmware_for_n210(struct mucse_hw *hw, + const struct firmware *fw) +{ + struct pci_dev *pdev = hw->pdev; + int err = 0; + + if (rnpgbe_check_fw_from_flash(hw, fw->data)) { + dev_err(&pdev->dev, "firmware type error\n"); + return -EINVAL; + } + /* first protect off */ + err = mucse_sfc_write_protect(hw); + if (err) { + dev_err(&pdev->dev, "protect off command failed!"); + goto out; + } + + err = mucse_sfc_flash_erase(hw, fw->size); + if (err) { + dev_err(&pdev->dev, "erase flash failed!"); + goto out; + } + + err = mucse_download_firmware(hw, fw->data, fw->size); + if (err) { + dev_err(&pdev->dev, "init firmware failed!"); + goto out; + } + +out: + return err; +} + +/** + * rnpgbe_dl_info_get - return card fw info + * @dl: devlink structure + * @req: devlink info req + * @extack: extack info + * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_dl_info_get(struct devlink *dl, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + int err; + + err = devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + "NULL"); + + return err; +} + +/** + * rnpgbe_dl_flash_update - Update fw to chip flash + * @dl: devlink structure + * @params: flash update params + * @extack: extack info + * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_dl_flash_update(struct devlink *dl, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +{ + struct rnpgbe_devlink *rnpgbe_devlink = devlink_priv(dl); + struct mucse *mucse = rnpgbe_devlink->priv; + struct mucse_hw *hw = &mucse->hw; + int err; + + err = init_firmware_for_n210(hw, params->fw); + if (err) { + devlink_flash_update_status_notify(dl, + "Flash failed", + NULL, 0, 0); + } else { + devlink_flash_update_status_notify(dl, + "Flash done", + NULL, 0, 0); + } + return err; +} + +static const struct devlink_ops rnpgbe_dl_ops = { + .info_get = rnpgbe_dl_info_get, + .flash_update = rnpgbe_dl_flash_update, +}; + +/** + * rnpgbe_devlink_register - Regist devlink + * @mucse: pointer to private structure + * + * @return: 0 on success, negative on failure + **/ +int rnpgbe_devlink_register(struct mucse *mucse) +{ + struct device *dev = &mucse->pdev->dev; + struct rnpgbe_devlink *rnpgbe_devlink; + struct devlink *dl; + + dl = devlink_alloc(&rnpgbe_dl_ops, sizeof(struct rnpgbe_devlink), dev); + if (!dl) + return -EIO; + mucse->dl = dl; + rnpgbe_devlink = devlink_priv(dl); + rnpgbe_devlink->priv = mucse; + + devlink_register(dl); + return 0; +} + +/** + * rnpgbe_devlink_unregister - remove devlink + * @mucse: pointer to private structure + **/ +void rnpgbe_devlink_unregister(struct mucse *mucse) +{ + if (!mucse->dl) + return; + 
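/* unregister from the devlink core before devlink_free() releases the private area */ +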
devlink_unregister(mucse->dl); + devlink_free(mucse->dl); +} diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h new file mode 100644 index 000000000000..523a873d65c8 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_sfc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2022 - 2025 Mucse Corporation. */ + +#ifndef _RNPGBE_SFC_H +#define _RNPGBE_SFC_H + +#include "rnpgbe.h" + +/* Return value */ +#define RSP_FLASH_HIGH_16M_OFFSET 0x1000000 +#define SFC_MEM_BASE 0x28000000 +#define CMD_WRITE_DISABLE 0x04000000 +#define CMD_READ_STATUS 0x05000000 +#define CMD_WRITE_STATUS 0x01000000 +#define CMD_WRITE_ENABLE 0x06000000 +#define CMD_SECTOR_ERASE 0x20000000 +#define SFCADDR(a) ((a) << 8) +#define CMD_CYCLE(c) (((c) & 0xff) << 0) +#define RD_DATA_CYCLE(c) (((c) & 0xff) << 8) +#define WR_DATA_CYCLE(c) (((c) & 0xff) << 0) +#define ADDR_CYCLE(c) (((c) & 0xff) << 16) + +struct rnpgbe_devlink { + struct mucse *priv; +}; + +int rnpgbe_devlink_register(struct mucse *mucse); +void rnpgbe_devlink_unregister(struct mucse *mucse); + +#endif /* _RNPGBE_SFC_H */ -- 2.25.1 Initialize functions (init, reset ...) to control chip. Signed-off-by: Dong Yibo --- drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 35 +++ .../net/ethernet/mucse/rnpgbe/rnpgbe_chip.c | 107 ++++++++ drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h | 15 +- .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 28 ++ .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c | 255 ++++++++++++++++++ .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h | 74 +++++ 6 files changed, 513 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h index ea28236669e3..527091e6a680 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h @@ -144,6 +144,25 @@ struct mucse_mbx_info { #include "rnpgbe_mbx.h" +struct lldp_status { + int enable; + int inteval; +}; + +struct mucse_hw_operations { + int (*init_hw)(struct mucse_hw *hw); + int (*reset_hw)(struct mucse_hw *hw); + void (*start_hw)(struct mucse_hw *hw); + /* ops to fw */ + void (*driver_status)(struct mucse_hw *hw, bool enable, int mode); +}; + +enum { + mucse_driver_insmod, + mucse_driver_suspuse, + mucse_driver_force_control_phy, +}; + struct mucse_hw { void *back; u8 pfvfnum; @@ -166,6 +185,7 @@ struct mucse_hw { int max_vfs; int max_vfs_noari; enum rnpgbe_hw_type hw_type; + struct mucse_hw_operations ops; struct mucse_dma_info dma; struct mucse_eth_info eth; struct mucse_mac_info mac; @@ -190,7 +210,11 @@ struct mucse_hw { #define M_HW_FEATURE_EEE BIT(17) #define M_HW_SOFT_MASK_OTHER_IRQ BIT(18) u32 feature_flags; + u32 driver_version; u16 usecstocount; + int nr_lane; + struct lldp_status lldp_status; + int link; }; struct mucse { @@ -225,5 +249,16 @@ struct rnpgbe_info { #define m_rd_reg(reg) readl(reg) #define m_wr_reg(reg, val) writel((val), reg) +#define hw_wr32(hw, reg, val) m_wr_reg((hw)->hw_addr + (reg), (val)) +#define dma_wr32(dma, reg, val) m_wr_reg((dma)->dma_base_addr + (reg), (val)) +#define dma_rd32(dma, reg) m_rd_reg((dma)->dma_base_addr + (reg)) +#define eth_wr32(eth, reg, val) m_wr_reg((eth)->eth_base_addr + (reg), (val)) +#define eth_rd32(eth, reg) m_rd_reg((eth)->eth_base_addr + (reg)) + +#define mucse_err(mucse, fmt, arg...) \ + dev_err(&(mucse)->pdev->dev, fmt, ##arg) + +#define mucse_dbg(mucse, fmt, arg...) 
\ + dev_dbg(&(mucse)->pdev->dev, fmt, ##arg) #endif /* _RNPGBE_H */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c index b0e5fda632f3..7a162b844fe4 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c @@ -7,6 +7,111 @@ #include "rnpgbe.h" #include "rnpgbe_hw.h" #include "rnpgbe_mbx.h" +#include "rnpgbe_mbx_fw.h" + +/** + * rnpgbe_init_hw_ops_n500 - Init hardware + * @hw: hw information structure + * + * rnpgbe_init_hw_ops_n500 first do a hw reset, then + * tries to start hw + * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_init_hw_ops_n500(struct mucse_hw *hw) +{ + int status = 0; + /* Reset the hardware */ + status = hw->ops.reset_hw(hw); + if (status == 0) + hw->ops.start_hw(hw); + + return status; +} + +/** + * rnpgbe_reset_hw_ops_n500 - Do a hardware reset + * @hw: hw information structure + * + * rnpgbe_reset_hw_ops_n500 calls fw to do a hardware + * reset, and cleans some regs to default. + * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_reset_hw_ops_n500(struct mucse_hw *hw) +{ + struct mucse_dma_info *dma = &hw->dma; + struct mucse_eth_info *eth = &hw->eth; + int err; + int i; + /* Call hw to stop dma */ + dma_wr32(dma, RNPGBE_DMA_AXI_EN, 0); + err = mucse_mbx_fw_reset_phy(hw); + if (err) + return err; + eth_wr32(eth, RNPGBE_ETH_ERR_MASK_VECTOR, + RNPGBE_PKT_LEN_ERR | RNPGBE_HDR_LEN_ERR); + dma_wr32(dma, RNPGBE_DMA_RX_PROG_FULL_THRESH, 0xa); + for (i = 0; i < 12; i++) + m_wr_reg(hw->ring_msix_base + RING_VECTOR(i), 0); + + hw->link = 0; + + return 0; +} + +/** + * rnpgbe_start_hw_ops_n500 - Setup hw to start + * @hw: hw information structure + * + * rnpgbe_start_hw_ops_n500 initializes default + * hw status, ready to start. 
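+ * It enables the DMA AXI read/write paths and clears the eth bypass
+ * and default rx ring registers.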
+ * + **/ +static void rnpgbe_start_hw_ops_n500(struct mucse_hw *hw) +{ + struct mucse_eth_info *eth = &hw->eth; + struct mucse_dma_info *dma = &hw->dma; + u32 value; + + value = dma_rd32(dma, RNPGBE_DMA_DUMY); + value |= BIT(0); + dma_wr32(dma, RNPGBE_DMA_DUMY, value); + dma_wr32(dma, RNPGBE_DMA_CONFIG, DMA_VEB_BYPASS); + dma_wr32(dma, RNPGBE_DMA_AXI_EN, (RX_AXI_RW_EN | TX_AXI_RW_EN)); + eth_wr32(eth, RNPGBE_ETH_BYPASS, 0); + eth_wr32(eth, RNPGBE_ETH_DEFAULT_RX_RING, 0); +} + +/** + * rnpgbe_driver_status_hw_ops_n500 - Echo driver status to hw + * @hw: hw information structure + * @enable: true or false status + * @mode: status mode + **/ +static void rnpgbe_driver_status_hw_ops_n500(struct mucse_hw *hw, + bool enable, + int mode) +{ + switch (mode) { + case mucse_driver_insmod: + mucse_mbx_ifinsmod(hw, enable); + break; + case mucse_driver_suspuse: + mucse_mbx_ifsuspuse(hw, enable); + break; + case mucse_driver_force_control_phy: + mucse_mbx_ifforce_control_mac(hw, enable); + break; + } +} + +static struct mucse_hw_operations hw_ops_n500 = { + .init_hw = &rnpgbe_init_hw_ops_n500, + .reset_hw = &rnpgbe_reset_hw_ops_n500, + .start_hw = &rnpgbe_start_hw_ops_n500, + .driver_status = &rnpgbe_driver_status_hw_ops_n500, +}; /** * rnpgbe_get_invariants_n500 - setup for hw info @@ -84,7 +189,9 @@ static void rnpgbe_get_invariants_n500(struct mucse_hw *hw) M_NET_FEATURE_STAG_OFFLOAD; /* start the default ahz, update later */ hw->usecstocount = 125; + hw->max_vfs_noari = 1; hw->max_vfs = 7; + memcpy(&hw->ops, &hw_ops_n500, sizeof(hw->ops)); } /** diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h index ff7bd9b21550..35e3cb77a38b 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h @@ -14,8 +14,21 @@ #define RNPGBE_RING_BASE (0x1000) #define RNPGBE_MAC_BASE (0x20000) #define RNPGBE_ETH_BASE (0x10000) - +/* dma regs */ +#define DMA_VEB_BYPASS BIT(4) +#define RNPGBE_DMA_CONFIG (0x0004) #define RNPGBE_DMA_DUMY (0x000c) +#define RNPGBE_DMA_AXI_EN (0x0010) +#define RX_AXI_RW_EN (0x03 << 0) +#define TX_AXI_RW_EN (0x03 << 2) +#define RNPGBE_DMA_RX_PROG_FULL_THRESH (0x00a0) +#define RING_VECTOR(n) (0x04 * (n)) +/* eth regs */ +#define RNPGBE_ETH_BYPASS (0x8000) +#define RNPGBE_ETH_ERR_MASK_VECTOR (0x8060) +#define RNPGBE_ETH_DEFAULT_RX_RING (0x806c) +#define RNPGBE_PKT_LEN_ERR (2) +#define RNPGBE_HDR_LEN_ERR (1) /* chip resourse */ #define RNPGBE_MAX_QUEUES (8) /* multicast control table */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c index 61dd0d232d99..ba21e3858c0e 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c @@ -38,6 +38,17 @@ static struct pci_device_id rnpgbe_pci_tbl[] = { {0, }, }; +/** + * rnpgbe_sw_init - Init driver private status + * @mucse: pointer to private structure + * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_sw_init(struct mucse *mucse) +{ + return 0; +} + /** * rnpgbe_add_adapter - add netdev for this pci_dev * @pdev: PCI device information structure @@ -127,8 +138,12 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev, } hw->hw_addr = hw_addr; hw->dma.dma_version = dma_version; + hw->driver_version = 0x0002040f; + hw->nr_lane = 0; ii->get_invariants(hw); hw->mbx.ops.init_params(hw); + /* echo fw driver insmod */ + hw->ops.driver_status(hw, true, mucse_driver_insmod); err = mucse_mbx_get_capability(hw); if (err) 
{ @@ -137,6 +152,16 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev, goto err_free_net; } + err = rnpgbe_sw_init(mucse); + if (err) + goto err_free_net; + + err = hw->ops.reset_hw(hw); + if (err) { + dev_err(&pdev->dev, "Hw reset failed\n"); + goto err_free_net; + } + return 0; err_free_net: @@ -202,11 +227,14 @@ static int rnpgbe_probe(struct pci_dev *pdev, const struct pci_device_id *id) **/ static void rnpgbe_rm_adapter(struct mucse *mucse) { + struct mucse_hw *hw = &mucse->hw; struct net_device *netdev; rnpgbe_devlink_unregister(mucse); netdev = mucse->netdev; + hw->ops.driver_status(hw, false, mucse_driver_insmod); free_netdev(netdev); + mucse->netdev = NULL; } /** diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c index 1674229fcd43..18f57ef8b1ad 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c @@ -3,6 +3,7 @@ #include +#include "rnpgbe.h" #include "rnpgbe_mbx_fw.h" /** @@ -138,3 +139,257 @@ int mucse_mbx_get_capability(struct mucse_hw *hw) return err; } + +/** + * mbx_req_cookie - Alloc a cookie structure + * @priv_len: private length for this cookie + * + * @return: cookie structure on success + **/ +static struct mbx_req_cookie *mbx_cookie_zalloc(int priv_len) +{ + struct mbx_req_cookie *cookie; + + cookie = kzalloc(struct_size(cookie, priv, priv_len), GFP_KERNEL); + + if (cookie) { + cookie->timeout_jiffes = 30 * HZ; + cookie->magic = COOKIE_MAGIC; + cookie->priv_len = priv_len; + } + + return cookie; +} + +/** + * mucse_mbx_fw_post_req - Posts a mbx req to firmware and wait reply + * @hw: Pointer to the HW structure + * @req: Pointer to the cmd req structure + * @cookie: Pointer to the req cookie + * + * mucse_mbx_fw_post_req posts a mbx req to firmware and wait for the + * reply. cookie->wait will be set in irq handler. 
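+ * The caller owns @cookie and must free it once the request completes.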
+ *
+ * @return: 0 on success, negative on failure
+ **/
+static int mucse_mbx_fw_post_req(struct mucse_hw *hw,
+				 struct mbx_fw_cmd_req *req,
+				 struct mbx_req_cookie *cookie)
+{
+	int len = le32_to_cpu(req->datalen) + MBX_REQ_HDR_LEN;
+	int err = 0;
+
+	cookie->errcode = 0;
+	cookie->done = 0;
+	init_waitqueue_head(&cookie->wait);
+
+	err = mutex_lock_interruptible(&hw->mbx.lock);
+	if (err)
+		return err;
+
+	err = mucse_write_mbx(hw, (u32 *)req,
+			      L_WD(len),
+			      MBX_FW);
+	if (err) {
+		mutex_unlock(&hw->mbx.lock);
+		return err;
+	}
+
+	if (cookie->timeout_jiffes != 0) {
+retry:
+		err = wait_event_interruptible_timeout(cookie->wait,
+						       cookie->done == 1,
+						       cookie->timeout_jiffes);
+		if (err == -ERESTARTSYS)
+			goto retry;
+		if (err == 0)
+			err = -ETIME;
+		else
+			err = 0;
+	} else {
+retry_no_timeout:
+		err = wait_event_interruptible(cookie->wait, cookie->done == 1);
+		if (err == -ERESTARTSYS)
+			goto retry_no_timeout;
+	}
+
+	mutex_unlock(&hw->mbx.lock);
+
+	if (cookie->errcode)
+		err = cookie->errcode;
+
+	return err;
+}
+
+/**
+ * rnpgbe_mbx_lldp_get - Get lldp status from hw
+ * @hw: Pointer to the HW structure
+ *
+ * @return: 0 on success, negative on failure
+ **/
+int rnpgbe_mbx_lldp_get(struct mucse_hw *hw)
+{
+	struct mbx_req_cookie *cookie = NULL;
+	struct get_lldp_reply *get_lldp;
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	int err;
+
+	cookie = mbx_cookie_zalloc(sizeof(*get_lldp));
+	if (!cookie)
+		return -ENOMEM;
+
+	get_lldp = (struct get_lldp_reply *)cookie->priv;
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+	build_get_lldp_req(&req, cookie, hw->nr_lane);
+	if (hw->mbx.irq_enabled) {
+		err = mucse_mbx_fw_post_req(hw, &req, cookie);
+	} else {
+		err = mucse_fw_send_cmd_wait(hw, &req, &reply);
+		get_lldp = &reply.get_lldp;
+	}
+
+	if (err == 0) {
+		hw->lldp_status.enable = le32_to_cpu(get_lldp->value);
+		hw->lldp_status.inteval = le32_to_cpu(get_lldp->inteval);
+	}
+
+	kfree(cookie);
+
+	return err;
+}
+
+/**
+ * mucse_mbx_ifinsmod - Echo driver insmod status to hw
+ * @hw: Pointer to the HW structure
+ * @status: true for insmod, false for rmmod
+ *
+ * @return: 0 on success, negative on failure
+ **/
+int mucse_mbx_ifinsmod(struct mucse_hw *hw, int status)
+{
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	int len;
+	int err;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+	build_ifinsmod(&req, hw->driver_version, status);
+	len = le32_to_cpu(req.datalen) + MBX_REQ_HDR_LEN;
+	err = mutex_lock_interruptible(&hw->mbx.lock);
+	if (err)
+		return err;
+
+	if (status) {
+		err = hw->mbx.ops.write_posted(hw, (u32 *)&req,
+					       L_WD(len),
+					       MBX_FW);
+	} else {
+		err = hw->mbx.ops.write(hw, (u32 *)&req,
+					L_WD(len),
+					MBX_FW);
+	}
+
+	mutex_unlock(&hw->mbx.lock);
+	return err;
+}
+
+/**
+ * mucse_mbx_ifsuspuse - Echo driver suspend status to hw
+ * @hw: Pointer to the HW structure
+ * @status: true when suspending, false when resuming
+ *
+ * mucse_mbx_ifsuspuse echoes the driver suspend status to hw. The
+ * status tells the hw whether to enter its wol state.
+ * + * @return: 0 on success, negative on failure + **/ +int mucse_mbx_ifsuspuse(struct mucse_hw *hw, int status) +{ + struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + int len; + int err; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + build_ifsuspuse(&req, hw->nr_lane, status); + len = le32_to_cpu(req.datalen) + MBX_REQ_HDR_LEN; + err = mutex_lock_interruptible(&hw->mbx.lock); + if (err) + return err; + + err = hw->mbx.ops.write_posted(hw, (u32 *)&req, + L_WD(len), + MBX_FW); + mutex_unlock(&hw->mbx.lock); + + return err; +} + +/** + * mucse_mbx_ifforce_control_mac - Echo force mac control to hw + * @hw: Pointer to the HW structure + * @status: true for force control, false for not + * + * @return: 0 on success, negative on failure + **/ +int mucse_mbx_ifforce_control_mac(struct mucse_hw *hw, int status) +{ + struct mbx_fw_cmd_reply reply; + struct mbx_fw_cmd_req req; + int len; + int err; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + build_ifforce(&req, hw->nr_lane, status); + len = le32_to_cpu(req.datalen) + MBX_REQ_HDR_LEN; + err = mutex_lock_interruptible(&hw->mbx.lock); + if (err) + return err; + + err = hw->mbx.ops.write_posted(hw, (u32 *)&req, + L_WD(len), + MBX_FW); + mutex_unlock(&hw->mbx.lock); + + return err; +} + +/** + * mucse_mbx_fw_reset_phy - Posts a mbx req to reset hw + * @hw: Pointer to the HW structure + * + * mucse_mbx_fw_reset_phy posts a mbx req to firmware to reset hw. + * It uses mucse_fw_send_cmd_wait if no irq, and mucse_mbx_fw_post_req + * if other irq is registered. + * + * @return: 0 on success, negative on failure + **/ +int mucse_mbx_fw_reset_phy(struct mucse_hw *hw) +{ + struct mbx_fw_cmd_req req; + struct mbx_fw_cmd_reply reply; + int ret; + + memset(&req, 0, sizeof(req)); + memset(&reply, 0, sizeof(reply)); + if (hw->mbx.irq_enabled) { + struct mbx_req_cookie *cookie = mbx_cookie_zalloc(0); + + if (!cookie) + return -ENOMEM; + + build_reset_phy_req(&req, cookie); + ret = mucse_mbx_fw_post_req(hw, &req, cookie); + kfree(cookie); + return ret; + + } else { + build_reset_phy_req(&req, &req); + return mucse_fw_send_cmd_wait(hw, &req, &reply); + } +} diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h index a24c5d4e0075..9e07858f2733 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h @@ -563,6 +563,80 @@ static inline void build_phy_abalities_req(struct mbx_fw_cmd_req *req, req->cookie = cookie; } +static inline void build_get_lldp_req(struct mbx_fw_cmd_req *req, void *cookie, + int nr_lane) +{ +#define LLDP_TX_GET (1) + + req->flags = 0; + req->opcode = cpu_to_le32(LLDP_TX_CTRL); + req->datalen = cpu_to_le32(sizeof(req->lldp_tx)); + req->cookie = cookie; + req->reply_lo = 0; + req->reply_hi = 0; + req->lldp_tx.lane = cpu_to_le32(nr_lane); + req->lldp_tx.op = cpu_to_le32(LLDP_TX_GET); + req->lldp_tx.enable = 0; +} + +static inline void build_ifinsmod(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, + int status) +{ + req->flags = 0; + req->opcode = cpu_to_le32(DRIVER_INSMOD); + req->datalen = cpu_to_le32(sizeof(req->ifinsmod)); + req->cookie = NULL; + req->reply_lo = 0; + req->reply_hi = 0; + req->ifinsmod.lane = cpu_to_le32(nr_lane); + req->ifinsmod.status = cpu_to_le32(status); +} + +static inline void build_ifsuspuse(struct mbx_fw_cmd_req *req, + unsigned int nr_lane, + int status) +{ + req->flags = 0; + req->opcode = cpu_to_le32(SYSTEM_SUSPUSE); + req->datalen 
= cpu_to_le32(sizeof(req->ifsuspuse));
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->ifsuspuse.lane = cpu_to_le32(nr_lane);
+	req->ifsuspuse.status = cpu_to_le32(status);
+}
+
+static inline void build_ifforce(struct mbx_fw_cmd_req *req,
+				 unsigned int nr_lane,
+				 int status)
+{
+	req->flags = 0;
+	req->opcode = cpu_to_le32(SYSTEM_FORCE);
+	req->datalen = cpu_to_le32(sizeof(req->ifforce));
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->ifforce.lane = cpu_to_le32(nr_lane);
+	req->ifforce.status = cpu_to_le32(status);
+}
+
+static inline void build_reset_phy_req(struct mbx_fw_cmd_req *req,
+				       void *cookie)
+{
+	req->flags = 0;
+	req->opcode = cpu_to_le32(RESET_PHY);
+	req->datalen = 0;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->cookie = cookie;
+}
+
 int mucse_mbx_get_capability(struct mucse_hw *hw);
+int rnpgbe_mbx_lldp_get(struct mucse_hw *hw);
+int mucse_mbx_ifinsmod(struct mucse_hw *hw, int status);
+int mucse_mbx_ifsuspuse(struct mucse_hw *hw, int status);
+int mucse_mbx_ifforce_control_mac(struct mucse_hw *hw, int status);
+int mucse_mbx_fw_reset_phy(struct mucse_hw *hw);
 
 #endif /* _RNPGBE_MBX_FW_H */
-- 
2.25.1

Initialize get mac function for driver use.

Signed-off-by: Dong Yibo 
---
 drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h    |  61 ++++++-
 .../net/ethernet/mucse/rnpgbe/rnpgbe_chip.c   | 149 ++++++++++++++++++
 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h |   9 +-
 .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c   |  29 ++++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c |  63 ++++++++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h |  17 ++
 6 files changed, 326 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
index 527091e6a680..30b5400241c3 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
@@ -34,8 +34,18 @@ struct mucse_dma_info {
 	u32 dma_version;
 };
 
+struct mucse_eth_info;
+
+struct mucse_eth_operations {
+	int (*get_mac_addr)(struct mucse_eth_info *eth, u8 *addr);
+	int (*set_rar)(struct mucse_eth_info *eth, u32 index, u8 *addr);
+	int (*clear_rar)(struct mucse_eth_info *eth, u32 index);
+	void (*clr_mc_addr)(struct mucse_eth_info *eth);
+};
+
 #define RNPGBE_MAX_MTA 128
 struct mucse_eth_info {
+	struct mucse_eth_operations ops;
 	u8 __iomem *eth_base_addr;
 	void *back;
 	u32 mta_shadow[RNPGBE_MAX_MTA];
@@ -64,6 +74,13 @@ struct mucse_mac_info {
 	int clk_csr;
 };
 
+struct mucse_addr_filter_info {
+	u32 num_mc_addrs;
+	u32 rar_used_count;
+	u32 mta_in_use;
+	bool user_set_promisc;
+};
+
 struct mucse_hw;
 
 enum MBX_ID {
@@ -153,6 +170,7 @@ struct mucse_hw_operations {
 	int (*init_hw)(struct mucse_hw *hw);
 	int (*reset_hw)(struct mucse_hw *hw);
 	void (*start_hw)(struct mucse_hw *hw);
+	void (*init_rx_addrs)(struct mucse_hw *hw);
 	/* ops to fw */
 	void (*driver_status)(struct mucse_hw *hw, bool enable, int mode);
 };
@@ -176,6 +194,10 @@ struct mucse_hw {
 	u16 subsystem_vendor_id;
 	u32 wol;
 	u32 wol_en;
+	u16 min_len_cap;
+	u16 max_len_cap;
+	u16 min_len_cur;
+	u16 max_len_cur;
 	u32 fw_version;
 	u32 axi_mhz;
 	u32 bd_uid;
@@ -209,12 +231,29 @@ struct mucse_hw {
 #define M_VEB_VLAN_MASK_EN BIT(16)
 #define M_HW_FEATURE_EEE BIT(17)
 #define M_HW_SOFT_MASK_OTHER_IRQ BIT(18)
+	struct mucse_addr_filter_info addr_ctrl;
 	u32 feature_flags;
+	u32 flags;
+#define M_FLAGS_INIT_MAC_ADDRESS BIT(27)
 	u32 driver_version;
 	u16 usecstocount;
+	u16 max_msix_vectors;
 	int nr_lane;
 	struct lldp_status lldp_status;
 	int link;
+	u8 addr[ETH_ALEN];
+	u8 perm_addr[ETH_ALEN];
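+	/* perm_addr keeps the factory MAC; addr may be overridden by software */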
+}; + +enum mucse_state_t { + __MUCSE_DOWN, + __MUCSE_SERVICE_SCHED, + __MUCSE_PTP_TX_IN_PROGRESS, + __MUCSE_USE_VFINFI, + __MUCSE_IN_IRQ, + __MUCSE_REMOVE, + __MUCSE_SERVICE_CHECK, + __MUCSE_EEE_REMOVE, }; struct mucse { @@ -224,9 +263,20 @@ struct mucse { struct mucse_hw hw; /* board number */ u16 bd_number; + u16 tx_work_limit; u32 flags2; #define M_FLAG2_NO_NET_REG BIT(0) - + u32 priv_flags; +#define M_PRIV_FLAG_TX_COALESCE BIT(25) +#define M_PRIV_FLAG_RX_COALESCE BIT(26) + int tx_ring_item_count; + int rx_ring_item_count; + int napi_budge; + u16 rx_usecs; + u16 rx_frames; + u16 tx_frames; + u16 tx_usecs; + unsigned long state; char name[60]; }; @@ -247,6 +297,15 @@ struct rnpgbe_info { #define PCI_DEVICE_ID_N210 0x8208 #define PCI_DEVICE_ID_N210L 0x820a +#define M_DEFAULT_TXD (512) +#define M_DEFAULT_TX_WORK (128) +#define M_PKT_TIMEOUT_TX (200) +#define M_TX_PKT_POLL_BUDGET (0x30) + +#define M_DEFAULT_RXD (512) +#define M_PKT_TIMEOUT (30) +#define M_RX_PKT_POLL_BUDGET (64) + #define m_rd_reg(reg) readl(reg) #define m_wr_reg(reg, val) writel((val), reg) #define hw_wr32(hw, reg, val) m_wr_reg((hw)->hw_addr + (reg), (val)) diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c index 7a162b844fe4..fc179eb8c516 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c @@ -3,12 +3,94 @@ #include #include +#include #include "rnpgbe.h" #include "rnpgbe_hw.h" #include "rnpgbe_mbx.h" #include "rnpgbe_mbx_fw.h" +/** + * rnpgbe_eth_set_rar_n500 - Set Rx address register + * @eth: pointer to eth structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * + * rnpgbe_eth_set_rar_n500 puts an ethernet address + * into a receive address register. + * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_eth_set_rar_n500(struct mucse_eth_info *eth, + u32 index, u8 *addr) +{ + u32 rar_entries = eth->num_rar_entries; + u32 rar_low, rar_high = 0; + u32 mcstctrl; + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) + return -EINVAL; + + rar_low = ((u32)addr[5] | ((u32)addr[4] << 8) | + ((u32)addr[3] << 16) | + ((u32)addr[2] << 24)); + rar_high |= ((u32)addr[1] | ((u32)addr[0] << 8)); + rar_high |= RNPGBE_RAH_AV; + + eth_wr32(eth, RNPGBE_ETH_RAR_RL(index), rar_low); + eth_wr32(eth, RNPGBE_ETH_RAR_RH(index), rar_high); + /* open unicast filter */ + mcstctrl = eth_rd32(eth, RNPGBE_ETH_DMAC_MCSTCTRL); + mcstctrl |= RNPGBE_MCSTCTRL_UNICASE_TBL_EN; + eth_wr32(eth, RNPGBE_ETH_DMAC_MCSTCTRL, mcstctrl); + + return 0; +} + +/** + * rnpgbe_eth_clear_rar_n500 - Remove Rx address register + * @eth: pointer to eth structure + * @index: Receive address register to write + * + * rnpgbe_eth_clear_rar_n500 clears an ethernet address + * from a receive address register. 
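+ * Writing zeroes also clears the RNPGBE_RAH_AV valid bit for the entry.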
+ *
+ * @return: 0 on success, negative on failure
+ **/
+static int rnpgbe_eth_clear_rar_n500(struct mucse_eth_info *eth,
+				     u32 index)
+{
+	u32 rar_entries = eth->num_rar_entries;
+
+	/* Make sure we are using a valid rar index range */
+	if (index >= rar_entries)
+		return -EINVAL;
+
+	eth_wr32(eth, RNPGBE_ETH_RAR_RL(index), 0);
+	eth_wr32(eth, RNPGBE_ETH_RAR_RH(index), 0);
+
+	return 0;
+}
+
+/**
+ * rnpgbe_eth_clr_mc_addr_n500 - Clear the whole multicast table
+ * @eth: pointer to eth structure
+ **/
+static void rnpgbe_eth_clr_mc_addr_n500(struct mucse_eth_info *eth)
+{
+	int i;
+
+	for (i = 0; i < eth->mcft_size; i++)
+		eth_wr32(eth, RNPGBE_ETH_MUTICAST_HASH_TABLE(i), 0);
+}
+
+static struct mucse_eth_operations eth_ops_n500 = {
+	.set_rar = &rnpgbe_eth_set_rar_n500,
+	.clear_rar = &rnpgbe_eth_clear_rar_n500,
+	.clr_mc_addr = &rnpgbe_eth_clr_mc_addr_n500
+};
+
 /**
  * rnpgbe_init_hw_ops_n500 - Init hardware
@@ -29,6 +111,27 @@ static int rnpgbe_init_hw_ops_n500(struct mucse_hw *hw)
 	return status;
 }
 
+/**
+ * rnpgbe_get_permtion_mac - Get the permanent mac
+ * @hw: hw information structure
+ * @mac_addr: pointer to store mac
+ *
+ * rnpgbe_get_permtion_mac tries to get the permanent mac from
+ * hw. It falls back to a random address on failure.
+ **/
+static void rnpgbe_get_permtion_mac(struct mucse_hw *hw,
+				    u8 *mac_addr)
+{
+	if (mucse_fw_get_macaddr(hw, hw->pfvfnum, mac_addr, hw->nr_lane)) {
+		eth_random_addr(mac_addr);
+	} else {
+		if (!is_valid_ether_addr(mac_addr))
+			eth_random_addr(mac_addr);
+	}
+
+	hw->flags |= M_FLAGS_INIT_MAC_ADDRESS;
+}
+
 /**
  * rnpgbe_reset_hw_ops_n500 - Do a hardware reset
  * @hw: hw information structure
@@ -49,6 +152,13 @@ static int rnpgbe_reset_hw_ops_n500(struct mucse_hw *hw)
 	err = mucse_mbx_fw_reset_phy(hw);
 	if (err)
 		return err;
+	/* Store the permanent mac address */
+	if (!(hw->flags & M_FLAGS_INIT_MAC_ADDRESS)) {
+		rnpgbe_get_permtion_mac(hw, hw->perm_addr);
+		memcpy(hw->addr, hw->perm_addr, ETH_ALEN);
+	}
+
+	hw->ops.init_rx_addrs(hw);
 	eth_wr32(eth, RNPGBE_ETH_ERR_MASK_VECTOR,
 		 RNPGBE_PKT_LEN_ERR | RNPGBE_HDR_LEN_ERR);
 	dma_wr32(dma, RNPGBE_DMA_RX_PROG_FULL_THRESH, 0xa);
@@ -106,10 +216,46 @@ static void rnpgbe_driver_status_hw_ops_n500(struct mucse_hw *hw,
 	}
 }
 
+/**
+ * rnpgbe_init_rx_addrs_hw_ops_n500 - Init rx addr setup to hw
+ * @hw: hw information structure
+ *
+ * rnpgbe_init_rx_addrs_hw_ops_n500 programs hw->addr into the hw,
+ * falling back to hw->perm_addr when hw->addr is invalid.
+ **/
+static void rnpgbe_init_rx_addrs_hw_ops_n500(struct mucse_hw *hw)
+{
+	struct mucse_eth_info *eth = &hw->eth;
+	u32 rar_entries;
+	int i;
+	u32 v;
+
+	rar_entries = eth->num_rar_entries;
+	/* hw->addr may have been set by software */
+	if (!is_valid_ether_addr(hw->addr))
+		memcpy(hw->addr, hw->perm_addr, ETH_ALEN);
+
+	eth->ops.set_rar(eth, 0, hw->addr);
+
+	hw->addr_ctrl.rar_used_count = 1;
+	/* Clear other rar addresses.
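	 * Only entry 0 is in use at this point.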
*/ + for (i = 1; i < rar_entries; i++) + eth->ops.clear_rar(eth, i); + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + v = eth_rd32(eth, RNPGBE_ETH_DMAC_MCSTCTRL); + v &= (~0x3); + v |= eth->mc_filter_type; + eth_wr32(eth, RNPGBE_ETH_DMAC_MCSTCTRL, v); + eth->ops.clr_mc_addr(eth); +} + static struct mucse_hw_operations hw_ops_n500 = { .init_hw = &rnpgbe_init_hw_ops_n500, .reset_hw = &rnpgbe_reset_hw_ops_n500, .start_hw = &rnpgbe_start_hw_ops_n500, + .init_rx_addrs = &rnpgbe_init_rx_addrs_hw_ops_n500, .driver_status = &rnpgbe_driver_status_hw_ops_n500, }; @@ -137,6 +283,7 @@ static void rnpgbe_get_invariants_n500(struct mucse_hw *hw) dma->max_rx_queues = RNPGBE_MAX_QUEUES; dma->back = hw; /* setup eth info */ + memcpy(&hw->eth.ops, ð_ops_n500, sizeof(hw->eth.ops)); eth->eth_base_addr = hw->hw_addr + RNPGBE_ETH_BASE; eth->back = hw; eth->mc_filter_type = 0; @@ -191,6 +338,8 @@ static void rnpgbe_get_invariants_n500(struct mucse_hw *hw) hw->usecstocount = 125; hw->max_vfs_noari = 1; hw->max_vfs = 7; + hw->min_len_cap = RNPGBE_MIN_LEN; + hw->max_len_cap = RNPGBE_MAX_LEN; memcpy(&hw->ops, &hw_ops_n500, sizeof(hw->ops)); } diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h index 35e3cb77a38b..bcb4da45feac 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h @@ -29,6 +29,12 @@ #define RNPGBE_ETH_DEFAULT_RX_RING (0x806c) #define RNPGBE_PKT_LEN_ERR (2) #define RNPGBE_HDR_LEN_ERR (1) +#define RNPGBE_MCSTCTRL_UNICASE_TBL_EN BIT(3) +#define RNPGBE_ETH_DMAC_MCSTCTRL (0x9114) +#define RNPGBE_RAH_AV (0x80000000) +#define RNPGBE_ETH_RAR_RL(n) (0xa000 + 0x04 * (n)) +#define RNPGBE_ETH_RAR_RH(n) (0xa400 + 0x04 * (n)) +#define RNPGBE_ETH_MUTICAST_HASH_TABLE(n) (0xac00 + 0x04 * (n)) /* chip resourse */ #define RNPGBE_MAX_QUEUES (8) /* multicast control table */ @@ -36,7 +42,8 @@ /* vlan filter table */ #define RNPGBE_VFT_TBL_SIZE (128) #define RNPGBE_RAR_ENTRIES (32) - +#define RNPGBE_MIN_LEN (68) +#define RNPGBE_MAX_LEN (9722) #define RNPGBE_MII_ADDR 0x00000010 /* MII Address */ #define RNPGBE_MII_DATA 0x00000014 /* MII Data */ #endif /* _RNPGBE_HW_H */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c index ba21e3858c0e..1338ef01f545 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c @@ -46,6 +46,27 @@ static struct pci_device_id rnpgbe_pci_tbl[] = { **/ static int rnpgbe_sw_init(struct mucse *mucse) { + struct pci_dev *pdev = mucse->pdev; + struct mucse_hw *hw = &mucse->hw; + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + mucse->napi_budge = 64; + /* set default work limits */ + mucse->tx_work_limit = M_DEFAULT_TX_WORK; + mucse->tx_usecs = M_PKT_TIMEOUT_TX; + mucse->tx_frames = M_TX_PKT_POLL_BUDGET; + mucse->rx_usecs = M_PKT_TIMEOUT; + mucse->rx_frames = M_RX_PKT_POLL_BUDGET; + mucse->priv_flags &= ~M_PRIV_FLAG_RX_COALESCE; + mucse->priv_flags &= ~M_PRIV_FLAG_TX_COALESCE; + /* set default ring sizes */ + mucse->tx_ring_item_count = M_DEFAULT_TXD; + mucse->rx_ring_item_count = M_DEFAULT_RXD; + set_bit(__MUCSE_DOWN, &mucse->state); + return 0; } @@ -162,6 +183,14 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev, goto err_free_net; } + netdev->min_mtu = hw->min_len_cap; + netdev->max_mtu = hw->max_len_cap - (ETH_HLEN + 2 * ETH_FCS_LEN); + 
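	/* the advertised MTU excludes the L2 header and FCS overhead counted in max_len_cap */ +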
netdev->features |= NETIF_F_HIGHDMA;
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+	netdev->priv_flags |= IFF_SUPP_NOFCS;
+	eth_hw_addr_set(netdev, hw->perm_addr);
+	memcpy(netdev->perm_addr, hw->perm_addr, netdev->addr_len);
+
 	return 0;
 
 err_free_net:
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
index 18f57ef8b1ad..37ef75121898 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
@@ -393,3 +393,66 @@ int mucse_mbx_fw_reset_phy(struct mucse_hw *hw)
 		return mucse_fw_send_cmd_wait(hw, &req, &reply);
 	}
 }
+
+/**
+ * mucse_fw_get_macaddr - Posts a mbx req to request macaddr
+ * @hw: Pointer to the HW structure
+ * @pfvfnum: Index of pf/vf num
+ * @mac_addr: Pointer to store mac_addr
+ * @nr_lane: Lane index
+ *
+ * mucse_fw_get_macaddr posts a mbx req to firmware to get mac_addr.
+ * It uses mucse_mbx_fw_post_req when the mailbox irq is enabled, and
+ * polls with mucse_fw_send_cmd_wait otherwise.
+ *
+ * @return: 0 on success, negative on failure
+ **/
+int mucse_fw_get_macaddr(struct mucse_hw *hw, int pfvfnum,
+			 u8 *mac_addr,
+			 int nr_lane)
+{
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	int err = 0;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	if (!mac_addr)
+		return -EINVAL;
+
+	if (hw->mbx.irq_enabled) {
+		struct mbx_req_cookie *cookie =
+			mbx_cookie_zalloc(sizeof(reply.mac_addr));
+		struct mac_addr *mac;
+
+		if (!cookie)
+			return -ENOMEM;
+
+		mac = (struct mac_addr *)cookie->priv;
+		build_get_macaddress_req(&req, 1 << nr_lane, pfvfnum, cookie);
+		err = mucse_mbx_fw_post_req(hw, &req, cookie);
+		if (err) {
+			kfree(cookie);
+			goto out;
+		}
+
+		if ((1 << nr_lane) & mac->lanes)
+			memcpy(mac_addr, mac->addrs[nr_lane].mac, ETH_ALEN);
+		else
+			err = -ENODATA;
+
+		kfree(cookie);
+	} else {
+		build_get_macaddress_req(&req, 1 << nr_lane, pfvfnum, &req);
+		err = mucse_fw_send_cmd_wait(hw, &req, &reply);
+		if (err)
+			goto out;
+
+		if ((1 << nr_lane) & reply.mac_addr.lanes)
+			memcpy(mac_addr, reply.mac_addr.addrs[nr_lane].mac,
+			       ETH_ALEN);
+		else
+			err = -ENODATA;
+	}
+out:
+	return err;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
index 9e07858f2733..65a4f74c7090 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
@@ -632,11 +632,28 @@ static inline void build_reset_phy_req(struct mbx_fw_cmd_req *req,
 	req->cookie = cookie;
 }
 
+static inline void build_get_macaddress_req(struct mbx_fw_cmd_req *req,
+					    int lane_mask, int pfvfnum,
+					    void *cookie)
+{
+	req->flags = 0;
+	req->opcode = cpu_to_le32(GET_MAC_ADDRES);
+	req->datalen = cpu_to_le32(sizeof(req->get_mac_addr));
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+
+	req->get_mac_addr.lane_mask = cpu_to_le32(lane_mask);
+	req->get_mac_addr.pfvf_num = cpu_to_le32(pfvfnum);
+}
+
 int mucse_mbx_get_capability(struct mucse_hw *hw);
 int rnpgbe_mbx_lldp_get(struct mucse_hw *hw);
 int mucse_mbx_ifinsmod(struct mucse_hw *hw, int status);
 int mucse_mbx_ifsuspuse(struct mucse_hw *hw, int status);
 int mucse_mbx_ifforce_control_mac(struct mucse_hw *hw, int status);
 int mucse_mbx_fw_reset_phy(struct mucse_hw *hw);
+int mucse_fw_get_macaddr(struct mucse_hw *hw, int pfvfnum,
+			 u8 *mac_addr, int nr_lane);
 
 #endif /* _RNPGBE_MBX_FW_H */
-- 
2.25.1

Initialize irq functions for driver use.
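The new rnpgbe_lib.c carries queue-count selection, MSI-X/MSI vector setup and q_vector bookkeeping; the mucse structure grows the matching ring and vector state.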
Signed-off-by: Dong Yibo --- drivers/net/ethernet/mucse/rnpgbe/Makefile | 3 +- drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 152 ++++++ .../net/ethernet/mucse/rnpgbe/rnpgbe_chip.c | 2 + .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c | 500 ++++++++++++++++++ .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.h | 28 + .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 145 ++++- 6 files changed, 827 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c create mode 100644 drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h diff --git a/drivers/net/ethernet/mucse/rnpgbe/Makefile b/drivers/net/ethernet/mucse/rnpgbe/Makefile index db7d3a8140b2..c5a41406fd60 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/Makefile +++ b/drivers/net/ethernet/mucse/rnpgbe/Makefile @@ -9,4 +9,5 @@ rnpgbe-objs := rnpgbe_main.o \ rnpgbe_chip.o \ rnpgbe_mbx.o \ rnpgbe_mbx_fw.o \ - rnpgbe_sfc.o + rnpgbe_sfc.o \ + rnpgbe_lib.o diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h index 30b5400241c3..212e5b8fd7b4 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h @@ -7,6 +7,7 @@ #include #include #include +#include extern const struct rnpgbe_info rnpgbe_n500_info; extern const struct rnpgbe_info rnpgbe_n210_info; @@ -234,6 +235,7 @@ struct mucse_hw { struct mucse_addr_filter_info addr_ctrl; u32 feature_flags; u32 flags; +#define M_FLAG_MSI_CAPABLE BIT(0) #define M_FLAGS_INIT_MAC_ADDRESS BIT(27) u32 driver_version; u16 usecstocount; @@ -256,6 +258,136 @@ enum mucse_state_t { __MUCSE_EEE_REMOVE, }; +enum irq_mode_enum { + irq_mode_legency, + irq_mode_msi, + irq_mode_msix, +}; + +struct mucse_queue_stats { + u64 packets; + u64 bytes; +}; + +struct mucse_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; + u64 tx_done_old; + u64 clean_desc; + u64 poll_count; + u64 irq_more_count; + u64 send_bytes; + u64 send_bytes_to_hw; + u64 todo_update; + u64 send_done_bytes; + u64 vlan_add; + u64 tx_next_to_clean; + u64 tx_irq_miss; + u64 tx_equal_count; + u64 tx_clean_times; + u64 tx_clean_count; +}; + +struct mucse_rx_queue_stats { + u64 driver_drop_packets; + u64 rsc_count; + u64 rsc_flush; + u64 non_eop_descs; + u64 alloc_rx_page_failed; + u64 alloc_rx_buff_failed; + u64 alloc_rx_page; + u64 csum_err; + u64 csum_good; + u64 poll_again_count; + u64 vlan_remove; + u64 rx_next_to_clean; + u64 rx_irq_miss; + u64 rx_equal_count; + u64 rx_clean_times; + u64 rx_clean_count; +}; + +struct mucse_ring { + struct mucse_ring *next; + struct mucse_q_vector *q_vector; + struct net_device *netdev; + struct device *dev; + void *desc; + union { + struct mucse_tx_buffer *tx_buffer_info; + struct mucse_rx_buffer *rx_buffer_info; + }; + unsigned long last_rx_timestamp; + unsigned long state; + u8 __iomem *ring_addr; + u8 __iomem *tail; + u8 __iomem *dma_int_stat; + u8 __iomem *dma_int_mask; + u8 __iomem *dma_int_clr; + dma_addr_t dma; + unsigned int size; + u32 ring_flags; +#define M_RING_FLAG_DELAY_SETUP_RX_LEN BIT(0) +#define M_RING_FLAG_CHANGE_RX_LEN BIT(1) +#define M_RING_FLAG_DO_RESET_RX_LEN BIT(2) +#define M_RING_SKIP_TX_START BIT(3) +#define M_RING_NO_TUNNEL_SUPPORT BIT(4) +#define M_RING_SIZE_CHANGE_FIX BIT(5) +#define M_RING_SCATER_SETUP BIT(6) +#define M_RING_STAGS_SUPPORT BIT(7) +#define M_RING_DOUBLE_VLAN_SUPPORT BIT(8) +#define M_RING_VEB_MULTI_FIX BIT(9) +#define M_RING_IRQ_MISS_FIX BIT(10) +#define M_RING_OUTER_VLAN_FIX BIT(11) +#define M_RING_CHKSM_FIX BIT(12) +#define M_RING_LOWER_ITR BIT(13) + u8 pfvfnum; + u16 count; + 
u16 temp_count; + u16 reset_count; + u8 queue_index; + u8 rnpgbe_queue_idx; + u16 next_to_use; + u16 next_to_clean; + u16 device_id; + struct mucse_queue_stats stats; + struct u64_stats_sync syncp; + union { + struct mucse_tx_queue_stats tx_stats; + struct mucse_rx_queue_stats rx_stats; + }; +} ____cacheline_internodealigned_in_smp; + +struct mucse_ring_container { + struct mucse_ring *ring; + u16 work_limit; + u16 count; +}; + +struct mucse_q_vector { + struct mucse *mucse; + int v_idx; + u16 itr_rx; + u16 itr_tx; + struct mucse_ring_container rx, tx; + struct napi_struct napi; + cpumask_t affinity_mask; + struct irq_affinity_notify affinity_notify; + int numa_node; + struct rcu_head rcu; /* to avoid race with update stats on free */ + u32 vector_flags; +#define M_QVECTOR_FLAG_IRQ_MISS_CHECK BIT(0) +#define M_QVECTOR_FLAG_ITR_FEATURE BIT(1) +#define M_QVECTOR_FLAG_REDUCE_TX_IRQ_MISS BIT(2) + char name[IFNAMSIZ + 17]; + /* for dynamic allocation of rings associated with this q_vector */ + struct mucse_ring ring[] ____cacheline_internodealigned_in_smp; +}; + +#define MAX_TX_QUEUES (8) +#define MAX_RX_QUEUES (8) +#define MAX_Q_VECTORS (64) + struct mucse { struct net_device *netdev; struct pci_dev *pdev; @@ -264,19 +396,37 @@ struct mucse { /* board number */ u16 bd_number; u16 tx_work_limit; + u32 flags; +#define M_FLAG_NEED_LINK_UPDATE BIT(0) +#define M_FLAG_MSIX_ENABLED BIT(1) +#define M_FLAG_MSI_ENABLED BIT(2) u32 flags2; #define M_FLAG2_NO_NET_REG BIT(0) +#define M_FLAG2_INSMOD BIT(1) u32 priv_flags; #define M_PRIV_FLAG_TX_COALESCE BIT(25) #define M_PRIV_FLAG_RX_COALESCE BIT(26) + struct mucse_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; int tx_ring_item_count; + int num_tx_queues; + struct mucse_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp; int rx_ring_item_count; + int num_rx_queues; + int num_other_vectors; + int irq_mode; + struct msix_entry *msix_entries; + struct mucse_q_vector *q_vector[MAX_Q_VECTORS]; + int num_q_vectors; + int max_q_vectors; + int q_vector_off; int napi_budge; u16 rx_usecs; u16 rx_frames; u16 tx_frames; u16 tx_usecs; unsigned long state; + struct timer_list service_timer; + struct work_struct service_task; char name[60]; }; @@ -320,4 +470,6 @@ struct rnpgbe_info { #define mucse_dbg(mucse, fmt, arg...) \ dev_dbg(&(mucse)->pdev->dev, fmt, ##arg) +void rnpgbe_service_event_schedule(struct mucse *mucse); + #endif /* _RNPGBE_H */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c index fc179eb8c516..fa8317ae7642 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c @@ -340,6 +340,8 @@ static void rnpgbe_get_invariants_n500(struct mucse_hw *hw) hw->max_vfs = 7; hw->min_len_cap = RNPGBE_MIN_LEN; hw->max_len_cap = RNPGBE_MAX_LEN; + hw->max_msix_vectors = 26; + hw->flags |= M_FLAG_MSI_CAPABLE; memcpy(&hw->ops, &hw_ops_n500, sizeof(hw->ops)); } diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c new file mode 100644 index 000000000000..2bf8a7f7f303 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c @@ -0,0 +1,500 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2020 - 2025 Mucse Corporation. */ + +#include "rnpgbe.h" +#include "rnpgbe_lib.h" + +/** + * rnpgbe_set_rss_queues - Allocate queues for RSS + * @mucse: pointer to private structure + * + * Try to determine the queue count with RSS.
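+ * For now this is a stub: the single tx/rx queue pair chosen by + * rnpgbe_set_num_queues() is kept as-is. A sketch of where this is + * expected to go (an assumption, not something this patch implements) + * would clamp the queue count by the online CPUs and the chip maximum: + * + * mucse->num_rx_queues = min_t(int, num_online_cpus(), MAX_RX_QUEUES); + * mucse->num_tx_queues = mucse->num_rx_queues;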
+ * + * @return: true on success, false on failure + **/ +static bool rnpgbe_set_rss_queues(struct mucse *mucse) +{ + return true; +} + +/** + * rnpgbe_set_num_queues - Allocate queues for device, feature dependent + * @mucse: pointer to private structure + * + * Determine tx/rx queue nums + **/ +static void rnpgbe_set_num_queues(struct mucse *mucse) +{ + /* Start with base case */ + mucse->num_tx_queues = 1; + mucse->num_rx_queues = 1; + + rnpgbe_set_rss_queues(mucse); +} + +/** + * rnpgbe_acquire_msix_vectors - Allocate msix vectors + * @mucse: pointer to private structure + * @vectors: number of msix vectors + * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_acquire_msix_vectors(struct mucse *mucse, + int vectors) +{ + int err; + + err = pci_enable_msix_range(mucse->pdev, mucse->msix_entries, + vectors, vectors); + if (err < 0) { + kfree(mucse->msix_entries); + mucse->msix_entries = NULL; + return err; + } + + vectors -= mucse->num_other_vectors; + /* set up the actual q_vector count */ + mucse->num_q_vectors = min(vectors, mucse->max_q_vectors); + + return 0; +} + +/** + * rnpgbe_set_interrupt_capability - set MSI-X or MSI if supported + * @mucse: pointer to private structure + * + * Attempt to configure the interrupts using the best available + * capabilities of the hardware. + * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_set_interrupt_capability(struct mucse *mucse) +{ + int irq_mode_back = mucse->irq_mode; + struct mucse_hw *hw = &mucse->hw; + int vector, v_budget, err = 0; + + v_budget = min_t(int, mucse->num_tx_queues, mucse->num_rx_queues); + v_budget = min_t(int, v_budget, num_online_cpus()); + v_budget += mucse->num_other_vectors; + v_budget = min_t(int, v_budget, hw->max_msix_vectors); + + if (mucse->irq_mode == irq_mode_msix) { + mucse->msix_entries = kcalloc(v_budget, + sizeof(struct msix_entry), + GFP_KERNEL); + + if (!mucse->msix_entries) + return -ENOMEM; + + for (vector = 0; vector < v_budget; vector++) + mucse->msix_entries[vector].entry = vector; + + err = rnpgbe_acquire_msix_vectors(mucse, v_budget); + if (!err) { + if (mucse->num_other_vectors) + mucse->q_vector_off = 1; + mucse->flags |= M_FLAG_MSIX_ENABLED; + goto out; + } + kfree(mucse->msix_entries); + /* fall back to msi if the hardware is capable */ + if (hw->flags & M_FLAG_MSI_CAPABLE) + mucse->irq_mode = irq_mode_msi; + } + /* msi was requested directly or we downgraded from msix */ + if (mucse->irq_mode == irq_mode_msi) { + err = pci_enable_msi(mucse->pdev); + /* msi mode uses only 1 irq */ + if (!err) + mucse->flags |= M_FLAG_MSI_ENABLED; + } + /* restore the original irq_mode for next time */ + mucse->irq_mode = irq_mode_back; + /* legacy and msi use only 1 vector */ + mucse->num_q_vectors = 1; + err = 0; +out: + return err; +} + +/** + * update_ring_count - update ring num + * @mucse: pointer to private structure + * + * update_ring_count only updates the ring counts once, when the + * driver is first loaded + **/ +static void update_ring_count(struct mucse *mucse) +{ + if (mucse->flags2 & M_FLAG2_INSMOD) + return; + + mucse->flags2 |= M_FLAG2_INSMOD; + /* limit ring count if in msi or legacy mode */ + if (!(mucse->flags & M_FLAG_MSIX_ENABLED)) { + mucse->num_tx_queues = 1; + mucse->num_rx_queues = 1; + } +} + +/** + * mucse_add_ring - add ring to ring container + * @ring: ring to be added + * @head: ring container + **/ +static void mucse_add_ring(struct mucse_ring *ring, + struct mucse_ring_container *head) +{ + ring->next = head->ring; + head->ring = ring; + head->count++; +} + +/** + * rnpgbe_poll - NAPI polling RX/TX cleanup routine + * @napi: napi struct with our
device's info in it + * @budget: amount of work driver is allowed to do this pass, in packets + * + * Clean all queues associated with a q_vector + * + * @return: amount of work done in this call + **/ +static int rnpgbe_poll(struct napi_struct *napi, int budget) +{ + return 0; +} + +/** + * rnpgbe_alloc_q_vector - Allocate memory for a single interrupt vector + * @mucse: pointer to private structure + * @eth_queue_idx: queue_index idx for this q_vector + * @v_idx: index of vector used for this q_vector + * @r_idx: first ring index handled by this q_vector + * @r_count: number of tx/rx rings handled by this q_vector + * @step: index stride between consecutive rings + * + * @return: 0 on success, -ENOMEM if allocation fails. + **/ +static int rnpgbe_alloc_q_vector(struct mucse *mucse, + int eth_queue_idx, int v_idx, int r_idx, + int r_count, int step) +{ + int rxr_idx = r_idx, txr_idx = r_idx; + struct mucse_hw *hw = &mucse->hw; + struct mucse_q_vector *q_vector; + int txr_count, rxr_count, idx; + struct mucse_dma_info *dma; + struct mucse_ring *ring; + int node = NUMA_NO_NODE; + int ring_count, size; + int cpu_offset = 0; + int cpu = -1; + + dma = &hw->dma; + txr_count = r_count; + rxr_count = r_count; + ring_count = txr_count + rxr_count; + size = sizeof(struct mucse_q_vector) + + (sizeof(struct mucse_ring) * ring_count); + + /* must subtract mucse->q_vector_off */ + if (cpu_online(cpu_offset + v_idx - mucse->q_vector_off)) { + cpu = cpu_offset + v_idx - mucse->q_vector_off; + node = cpu_to_node(cpu); + } + + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) + q_vector = kzalloc(size, GFP_KERNEL); + if (!q_vector) + return -ENOMEM; + + /* setup affinity mask and node */ + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); + q_vector->numa_node = node; + + netif_napi_add_weight(mucse->netdev, &q_vector->napi, rnpgbe_poll, + mucse->napi_budge); + /* tie q_vector and mucse together */ + mucse->q_vector[v_idx - mucse->q_vector_off] = q_vector; + q_vector->mucse = mucse; + q_vector->v_idx = v_idx; + /* initialize pointer to rings */ + ring = q_vector->ring; + + for (idx = 0; idx < txr_count; idx++) { + /* assign generic ring traits */ + ring->dev = &mucse->pdev->dev; + ring->netdev = mucse->netdev; + /* configure backlink on ring */ + ring->q_vector = q_vector; + /* update q_vector Tx values */ + mucse_add_ring(ring, &q_vector->tx); + + /* apply Tx specific ring traits */ + ring->count = mucse->tx_ring_item_count; + ring->queue_index = eth_queue_idx + idx; + /* used to locate the hw ring registers */ + ring->rnpgbe_queue_idx = txr_idx; + ring->ring_addr = dma->dma_ring_addr + RING_OFFSET(txr_idx); + ring->dma_int_stat = ring->ring_addr + DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + DMA_INT_CLR; + ring->device_id = mucse->pdev->device; + ring->pfvfnum = hw->pfvfnum; + /* not support tunnel */ + ring->ring_flags |= M_RING_NO_TUNNEL_SUPPORT; + /* assign ring to mucse */ + mucse->tx_ring[ring->queue_index] = ring; + /* update count and index */ + txr_idx += step; + /* push pointer to next ring */ + ring++; + } + + for (idx = 0; idx < rxr_count; idx++) { + /* assign generic ring traits */ + ring->dev = &mucse->pdev->dev; + ring->netdev = mucse->netdev; + /* configure backlink on ring */ + ring->q_vector = q_vector; + /* update q_vector Rx values */ + mucse_add_ring(ring, &q_vector->rx); + /* apply Rx specific ring traits */ + ring->count = mucse->rx_ring_item_count; + /* rnpgbe_queue_idx can be changed after */ + ring->queue_index =
eth_queue_idx + idx; + ring->rnpgbe_queue_idx = rxr_idx; + ring->ring_addr = dma->dma_ring_addr + RING_OFFSET(rxr_idx); + ring->dma_int_stat = ring->ring_addr + DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + DMA_INT_CLR; + ring->device_id = mucse->pdev->device; + ring->pfvfnum = hw->pfvfnum; + + ring->ring_flags |= M_RING_NO_TUNNEL_SUPPORT; + ring->ring_flags |= M_RING_STAGS_SUPPORT; + /* assign ring to mucse */ + mucse->rx_ring[ring->queue_index] = ring; + /* update count and index */ + rxr_idx += step; + /* push pointer to next ring */ + ring++; + } + + q_vector->vector_flags |= M_QVECTOR_FLAG_ITR_FEATURE; + q_vector->itr_rx = mucse->rx_usecs; + + return 0; +} + +/** + * rnpgbe_free_q_vector - Free memory allocated for specific interrupt vector + * @mucse: pointer to private structure + * @v_idx: Index of vector to be freed + * + * This function frees the memory allocated to the q_vector. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void rnpgbe_free_q_vector(struct mucse *mucse, int v_idx) +{ + struct mucse_q_vector *q_vector = mucse->q_vector[v_idx]; + struct mucse_ring *ring; + + mucse_for_each_ring(ring, q_vector->tx) + mucse->tx_ring[ring->queue_index] = NULL; + + mucse_for_each_ring(ring, q_vector->rx) + mucse->rx_ring[ring->queue_index] = NULL; + + mucse->q_vector[v_idx] = NULL; + netif_napi_del(&q_vector->napi); + kfree_rcu(q_vector, rcu); +} + +/** + * rnpgbe_alloc_q_vectors - Allocate memory for interrupt vectors + * @mucse: pointer to private structure + * + * @return: 0 on success, -ENOMEM if allocation fails. + **/ +static int rnpgbe_alloc_q_vectors(struct mucse *mucse) +{ + int err, ring_cnt, v_remaining = mucse->num_q_vectors; + int r_remaining = min_t(int, mucse->num_tx_queues, + mucse->num_rx_queues); + int v_idx = mucse->q_vector_off; + int q_vector_nums = 0; + int eth_queue_idx = 0; + int ring_step = 1; + int ring_idx = 0; + + /* multiple rings may share one q_vector */ + for (; r_remaining > 0 && v_remaining > 0; v_remaining--) { + ring_cnt = DIV_ROUND_UP(r_remaining, v_remaining); + err = rnpgbe_alloc_q_vector(mucse, eth_queue_idx, + v_idx, ring_idx, ring_cnt, + ring_step); + if (err) + goto err_out; + ring_idx += ring_step * ring_cnt; + r_remaining -= ring_cnt; + v_idx++; + q_vector_nums++; + eth_queue_idx += ring_cnt; + } + /* record the number of q_vectors actually used */ + mucse->num_q_vectors = q_vector_nums; + + return 0; + +err_out: + mucse->num_tx_queues = 0; + mucse->num_rx_queues = 0; + mucse->num_q_vectors = 0; + + while (q_vector_nums--) + rnpgbe_free_q_vector(mucse, q_vector_nums); + + return -ENOMEM; +} + +/** + * rnpgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @mucse: pointer to private structure + * + * Cache the descriptor ring offsets for RSS to the assigned rings.
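+ * + * With the default ring_step of 1, ring i is mapped to the register + * block at dma_ring_addr + RING_OFFSET(i), i.e. 0x100 bytes apart: + * + * ring 0 -> dma_ring_addr + 0x000 + * ring 1 -> dma_ring_addr + 0x100 + * ring 2 -> dma_ring_addr + 0x200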
+ * + **/ +static void rnpgbe_cache_ring_rss(struct mucse *mucse) +{ + struct mucse_hw *hw = &mucse->hw; + struct mucse_dma_info *dma; + struct mucse_ring *ring; + int ring_step = 1; + int i; + + dma = &hw->dma; + /* some ring alloc rules can be added here */ + for (i = 0; i < mucse->num_tx_queues; i++) { + ring = mucse->tx_ring[i]; + ring->rnpgbe_queue_idx = i * ring_step; + ring->ring_addr = dma->dma_ring_addr + + RING_OFFSET(ring->rnpgbe_queue_idx); + + ring->dma_int_stat = ring->ring_addr + DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + DMA_INT_CLR; + } + + for (i = 0; i < mucse->num_rx_queues; i++) { + ring = mucse->rx_ring[i]; + ring->rnpgbe_queue_idx = i * ring_step; + ring->ring_addr = dma->dma_ring_addr + + RING_OFFSET(ring->rnpgbe_queue_idx); + ring->dma_int_stat = ring->ring_addr + DMA_INT_STAT; + ring->dma_int_mask = ring->ring_addr + DMA_INT_MASK; + ring->dma_int_clr = ring->ring_addr + DMA_INT_CLR; + } +} + +/** + * rnpgbe_cache_ring_register - Descriptor ring to register mapping + * @mucse: pointer to private structure + * + * Reset ring reg here to satisfy feature. + **/ +static void rnpgbe_cache_ring_register(struct mucse *mucse) +{ + rnpgbe_cache_ring_rss(mucse); +} + +/** + * rnpgbe_free_q_vectors - Free memory allocated for interrupt vectors + * @mucse: pointer to private structure + * + * This function frees the memory allocated to the q_vectors. In addition if + * NAPI is enabled it will delete any references to the NAPI struct prior + * to freeing the q_vector. + **/ +static void rnpgbe_free_q_vectors(struct mucse *mucse) +{ + int v_idx = mucse->num_q_vectors; + + mucse->num_rx_queues = 0; + mucse->num_tx_queues = 0; + mucse->num_q_vectors = 0; + + while (v_idx--) + rnpgbe_free_q_vector(mucse, v_idx); +} + +/** + * rnpgbe_reset_interrupt_capability - Reset irq capability setup + * @mucse: pointer to private structure + **/ +static void rnpgbe_reset_interrupt_capability(struct mucse *mucse) +{ + if (mucse->flags & M_FLAG_MSIX_ENABLED) + pci_disable_msix(mucse->pdev); + else if (mucse->flags & M_FLAG_MSI_ENABLED) + pci_disable_msi(mucse->pdev); + + kfree(mucse->msix_entries); + mucse->msix_entries = NULL; + mucse->q_vector_off = 0; + mucse->flags &= (~M_FLAG_MSIX_ENABLED); + mucse->flags &= (~M_FLAG_MSI_ENABLED); +} + +/** + * rnpgbe_init_interrupt_scheme - Determine proper interrupt scheme + * @mucse: pointer to private structure + * + * We determine which interrupt scheme to use based on...
+ * - Hardware queue count + * - CPU count + * - irq mode (msi/legacy force 1 vector) + * + * @return: 0 on success, negative on failure + **/ +int rnpgbe_init_interrupt_scheme(struct mucse *mucse) +{ + int err; + + /* Number of supported queues */ + rnpgbe_set_num_queues(mucse); + /* Set interrupt mode */ + err = rnpgbe_set_interrupt_capability(mucse); + if (err) + goto err_set_interrupt; + /* update the ring counts, done only at init */ + update_ring_count(mucse); + err = rnpgbe_alloc_q_vectors(mucse); + if (err) + goto err_alloc_q_vectors; + rnpgbe_cache_ring_register(mucse); + set_bit(__MUCSE_DOWN, &mucse->state); + + return 0; + +err_alloc_q_vectors: + rnpgbe_reset_interrupt_capability(mucse); +err_set_interrupt: + return err; +} + +/** + * rnpgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @mucse: pointer to private structure + * + * Clear interrupt specific resources and reset the structure + **/ +void rnpgbe_clear_interrupt_scheme(struct mucse *mucse) +{ + mucse->num_tx_queues = 0; + mucse->num_rx_queues = 0; + rnpgbe_free_q_vectors(mucse); + rnpgbe_reset_interrupt_capability(mucse); +} diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h new file mode 100644 index 000000000000..0df519a50185 --- /dev/null +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 - 2025 Mucse Corporation. */ + +#ifndef _RNPGBE_LIB_H +#define _RNPGBE_LIB_H + +#include "rnpgbe.h" + +#define RING_OFFSET(n) (0x100 * (n)) +#define DMA_RX_START (0x10) +#define DMA_RX_READY (0x14) +#define DMA_TX_START (0x18) +#define DMA_TX_READY (0x1c) +#define DMA_INT_MASK (0x24) +#define TX_INT_MASK (0x02) +#define RX_INT_MASK (0x01) +#define DMA_INT_CLR (0x28) +#define DMA_INT_STAT (0x20) + +#define mucse_for_each_ring(pos, head)\ + for (typeof((head).ring) __pos = (head).ring;\ + __pos ?
({ pos = __pos; 1; }) : 0;\ + __pos = __pos->next) + +int rnpgbe_init_interrupt_scheme(struct mucse *mucse); +void rnpgbe_clear_interrupt_scheme(struct mucse *mucse); + +#endif /* _RNPGBE_LIB_H */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c index 1338ef01f545..8fc1af1c00bc 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c @@ -12,6 +12,7 @@ #include "rnpgbe.h" #include "rnpgbe_mbx_fw.h" #include "rnpgbe_sfc.h" +#include "rnpgbe_lib.h" char rnpgbe_driver_name[] = "rnpgbe"; static const struct rnpgbe_info *rnpgbe_info_tbl[] = { @@ -38,6 +39,50 @@ static struct pci_device_id rnpgbe_pci_tbl[] = { {0, }, }; +static struct workqueue_struct *rnpgbe_wq; + +/** + * rnpgbe_service_event_schedule - schedule task + * @mucse: pointer to private structure + **/ +void rnpgbe_service_event_schedule(struct mucse *mucse) +{ + if (!test_bit(__MUCSE_DOWN, &mucse->state) && + !test_and_set_bit(__MUCSE_SERVICE_SCHED, &mucse->state)) + queue_work(rnpgbe_wq, &mucse->service_task); +} + +/** + * rnpgbe_service_timer - Timer Call-back + * @t: pointer to timer_list + **/ +static void rnpgbe_service_timer(struct timer_list *t) +{ + struct mucse *mucse = timer_container_of(mucse, t, service_timer); + unsigned long next_event_offset; + bool ready = true; + + /* poll faster when waiting for link */ + if (mucse->flags & M_FLAG_NEED_LINK_UPDATE) + next_event_offset = HZ / 10; + else + next_event_offset = HZ; + /* Reset the timer */ + if (!test_bit(__MUCSE_REMOVE, &mucse->state)) + mod_timer(&mucse->service_timer, next_event_offset + jiffies); + + if (ready) + rnpgbe_service_event_schedule(mucse); +} + +/** + * rnpgbe_service_task - manages and runs subtasks + * @work: pointer to work_struct containing our data + **/ +static void rnpgbe_service_task(struct work_struct *work) +{ +} + /** * rnpgbe_sw_init - Init driver private status * @mucse: pointer to private structure @@ -65,11 +110,84 @@ static int rnpgbe_sw_init(struct mucse *mucse) /* set default ring sizes */ mucse->tx_ring_item_count = M_DEFAULT_TXD; mucse->rx_ring_item_count = M_DEFAULT_RXD; + mucse->irq_mode = irq_mode_msix; + mucse->max_q_vectors = hw->max_msix_vectors; + mucse->num_other_vectors = 1; set_bit(__MUCSE_DOWN, &mucse->state); return 0; } +/** + * remove_mbx_irq - Remove the mbx irq + * @mucse: pointer to private structure + **/ +static void remove_mbx_irq(struct mucse *mucse) +{ + struct mucse_hw *hw = &mucse->hw; + + if (mucse->num_other_vectors == 0) + return; + /* only msix uses an independent interrupt */ + if (mucse->flags & M_FLAG_MSIX_ENABLED) { + hw->mbx.ops.configure(hw, + mucse->msix_entries[0].entry, + false); + if (hw->mbx.irq_enabled) { + free_irq(mucse->msix_entries[0].vector, mucse); + hw->mbx.irq_enabled = false; + } + } +} + +/** + * rnpgbe_msix_other - Other irq handler + * @irq: irq num + * @data: private data + * + * @return: IRQ_HANDLED + **/ +static irqreturn_t rnpgbe_msix_other(int irq, void *data) +{ + struct mucse *mucse = (struct mucse *)data; + + set_bit(__MUCSE_IN_IRQ, &mucse->state); + clear_bit(__MUCSE_IN_IRQ, &mucse->state); + + return IRQ_HANDLED; +} + +/** + * register_mbx_irq - Register the mbx irq + * @mucse: pointer to private structure + * + * @return: 0 on success, negative on failure + **/ +static int register_mbx_irq(struct mucse *mucse) +{ + struct mucse_hw *hw = &mucse->hw; + struct net_device *netdev = mucse->netdev; + int err = 0; + + /* for mbx:vector0 */ + if (mucse->num_other_vectors ==
0) + return err; + /* only do this in msix mode */ + if (mucse->flags & M_FLAG_MSIX_ENABLED) { + err = request_irq(mucse->msix_entries[0].vector, + rnpgbe_msix_other, 0, netdev->name, + mucse); + if (err) + goto err_mbx; + hw->mbx.ops.configure(hw, + mucse->msix_entries[0].entry, + true); + hw->mbx.irq_enabled = true; + } +err_mbx: + return err; +} + /** * rnpgbe_add_adapter - add netdev for this pci_dev * @pdev: PCI device information structure @@ -165,7 +283,6 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev, hw->mbx.ops.init_params(hw); /* echo fw driver insmod */ hw->ops.driver_status(hw, true, mucse_driver_insmod); - err = mucse_mbx_get_capability(hw); if (err) { dev_err(&pdev->dev, @@ -190,9 +307,20 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev, netdev->priv_flags |= IFF_SUPP_NOFCS; eth_hw_addr_set(netdev, hw->perm_addr); memcpy(netdev->perm_addr, hw->perm_addr, netdev->addr_len); + ether_addr_copy(hw->addr, hw->perm_addr); + timer_setup(&mucse->service_timer, rnpgbe_service_timer, 0); + INIT_WORK(&mucse->service_task, rnpgbe_service_task); + clear_bit(__MUCSE_SERVICE_SCHED, &mucse->state); + err = rnpgbe_init_interrupt_scheme(mucse); + if (err) + goto err_free_net; + err = register_mbx_irq(mucse); + if (err) + goto err_free_irq; return 0; - +err_free_irq: + rnpgbe_clear_interrupt_scheme(mucse); err_free_net: free_netdev(netdev); return err; @@ -261,7 +389,11 @@ static void rnpgbe_rm_adapter(struct mucse *mucse) rnpgbe_devlink_unregister(mucse); netdev = mucse->netdev; + cancel_work_sync(&mucse->service_task); + timer_delete_sync(&mucse->service_timer); hw->ops.driver_status(hw, false, mucse_driver_insmod); + remove_mbx_irq(mucse); + rnpgbe_clear_interrupt_scheme(mucse); free_netdev(netdev); mucse->netdev = NULL; } @@ -300,6 +432,8 @@ static void rnpgbe_dev_shutdown(struct pci_dev *pdev, *enable_wake = false; netif_device_detach(netdev); + remove_mbx_irq(mucse); + rnpgbe_clear_interrupt_scheme(mucse); pci_disable_device(pdev); } @@ -341,6 +475,12 @@ static int __init rnpgbe_init_module(void) { int ret; + rnpgbe_wq = create_singlethread_workqueue(rnpgbe_driver_name); + if (!rnpgbe_wq) { + pr_err("%s: Failed to create workqueue\n", rnpgbe_driver_name); + return -ENOMEM; + } + ret = pci_register_driver(&rnpgbe_driver); - if (ret) - return ret; + if (ret) { + destroy_workqueue(rnpgbe_wq); + return ret; + } @@ -358,6 +498,7 @@ module_init(rnpgbe_init_module); static void __exit rnpgbe_exit_module(void) { pci_unregister_driver(&rnpgbe_driver); + destroy_workqueue(rnpgbe_wq); } module_exit(rnpgbe_exit_module); -- 2.25.1 Initialize tx/rx memory for tx/rx desc.
Signed-off-by: Dong Yibo --- drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 140 +++++++ .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c | 355 ++++++++++++++++++ .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.h | 2 + .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 94 ++++- 4 files changed, 589 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h index 212e5b8fd7b4..cb0d73589687 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h @@ -248,6 +248,7 @@ struct mucse_hw { }; enum mucse_state_t { + __MUCSE_TESTING, __MUCSE_DOWN, __MUCSE_SERVICE_SCHED, __MUCSE_PTP_TX_IN_PROGRESS, @@ -307,6 +308,129 @@ struct mucse_rx_queue_stats { u64 rx_clean_count; }; +union rnpgbe_rx_desc { + struct { + union { + __le64 pkt_addr; + struct { + __le32 addr_lo; + __le32 addr_hi; + }; + }; + __le64 resv_cmd; +#define M_RXD_FLAG_RS (0) + }; + struct { + __le32 rss_hash; + __le16 mark; + __le16 rev1; +#define M_RX_L3_TYPE_MASK BIT(15) +#define VEB_VF_PKG BIT(1) +#define VEB_VF_IGNORE_VLAN BIT(0) +#define REV_OUTER_VLAN BIT(5) + __le16 len; + __le16 padding_len; + __le16 vlan; + __le16 cmd; +#define M_RXD_STAT_VLAN_VALID BIT(15) +#define M_RXD_STAT_STAG BIT(14) +#define M_RXD_STAT_TUNNEL_NVGRE (0x02 << 13) +#define M_RXD_STAT_TUNNEL_VXLAN (0x01 << 13) +#define M_RXD_STAT_TUNNEL_MASK (0x03 << 13) +#define M_RXD_STAT_ERR_MASK (0x1f << 8) +#define M_RXD_STAT_SCTP_MASK (0x04 << 8) +#define M_RXD_STAT_L4_MASK (0x02 << 8) +#define M_RXD_STAT_L4_SCTP (0x02 << 6) +#define M_RXD_STAT_L4_TCP (0x01 << 6) +#define M_RXD_STAT_L4_UDP (0x03 << 6) +#define M_RXD_STAT_IPV6 BIT(5) +#define M_RXD_STAT_IPV4 (0 << 5) +#define M_RXD_STAT_PTP BIT(4) +#define M_RXD_STAT_DD BIT(1) +#define M_RXD_STAT_EOP BIT(0) + } wb; +} __packed; + +struct rnpgbe_tx_desc { + union { + __le64 pkt_addr; + struct { + __le32 adr_lo; + __le32 adr_hi; + }; + }; + union { + __le64 vlan_cmd_bsz; + struct { + __le32 blen_mac_ip_len; + __le32 vlan_cmd; + }; + }; +#define M_TXD_FLAGS_VLAN_PRIO_MASK 0xe000 +#define M_TX_FLAGS_VLAN_PRIO_SHIFT 13 +#define M_TX_FLAGS_VLAN_CFI_SHIFT 12 +#define M_TXD_VLAN_VALID (0x80000000) +#define M_TXD_SVLAN_TYPE (0x02000000) +#define M_TXD_VLAN_CTRL_NOP (0x00 << 13) +#define M_TXD_VLAN_CTRL_RM_VLAN (0x20000000) +#define M_TXD_VLAN_CTRL_INSERT_VLAN (0x40000000) +#define M_TXD_L4_CSUM (0x10000000) +#define M_TXD_IP_CSUM (0x8000000) +#define M_TXD_TUNNEL_MASK (0x3000000) +#define M_TXD_TUNNEL_VXLAN (0x1000000) +#define M_TXD_TUNNEL_NVGRE (0x2000000) +#define M_TXD_L4_TYPE_UDP (0xc00000) +#define M_TXD_L4_TYPE_TCP (0x400000) +#define M_TXD_L4_TYPE_SCTP (0x800000) +#define M_TXD_FLAG_IPv4 (0) +#define M_TXD_FLAG_IPv6 (0x200000) +#define M_TXD_FLAG_TSO (0x100000) +#define M_TXD_FLAG_PTP (0x4000000) +#define M_TXD_CMD_RS (0x040000) +#define M_TXD_CMD_INNER_VLAN (0x08000000) +#define M_TXD_STAT_DD (0x020000) +#define M_TXD_CMD_EOP (0x010000) +#define M_TXD_PAD_CTRL (0x01000000) +}; + +struct mucse_tx_buffer { + struct rnpgbe_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + unsigned short gso_segs; + bool gso_need_padding; + __be16 protocol; + __be16 priv_tags; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + union { + u32 mss_len_vf_num; + struct { + u16 mss_len; + u8 vf_num; + u8 l4_hdr_len; + }; + }; + union { + u32 inner_vlan_tunnel_len; + struct { + u8 tunnel_hdr_len; + u8 inner_vlan_l; + u8 inner_vlan_h; + u8 resv; + }; + }; + bool ctx_flag; +}; + +struct 
mucse_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct page *page; + __u32 page_offset; +}; + struct mucse_ring { struct mucse_ring *next; struct mucse_q_vector *q_vector; @@ -350,6 +474,7 @@ struct mucse_ring { u16 next_to_use; u16 next_to_clean; u16 device_id; + u16 next_to_alloc; struct mucse_queue_stats stats; struct u64_stats_sync syncp; union { @@ -436,6 +561,21 @@ struct rnpgbe_info { void (*get_invariants)(struct mucse_hw *hw); }; +static inline struct netdev_queue *txring_txq(const struct mucse_ring *ring) +{ + return netdev_get_tx_queue(ring->netdev, ring->queue_index); +} + +#define M_RXBUFFER_1536 (1536) +static inline unsigned int mucse_rx_bufsz(struct mucse_ring *ring) +{ + return (M_RXBUFFER_1536 - NET_IP_ALIGN); +} + +#define M_TX_DESC(R, i) (&(((struct rnpgbe_tx_desc *)((R)->desc))[i])) +#define M_RX_DESC(R, i) (&(((union rnpgbe_rx_desc *)((R)->desc))[i])) + +#define M_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) /* Device IDs */ #ifndef PCI_VENDOR_ID_MUCSE #define PCI_VENDOR_ID_MUCSE 0x8848 diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c index 2bf8a7f7f303..abf3eef3291a 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2020 - 2025 Mucse Corporation. */ +#include + #include "rnpgbe.h" #include "rnpgbe_lib.h" @@ -498,3 +500,356 @@ void rnpgbe_clear_interrupt_scheme(struct mucse *mucse) rnpgbe_free_q_vectors(mucse); rnpgbe_reset_interrupt_capability(mucse); } + +/** + * rnpgbe_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void rnpgbe_clean_tx_ring(struct mucse_ring *tx_ring) +{ + struct mucse_tx_buffer *tx_buffer; + u16 i = tx_ring->next_to_clean; + unsigned long size; + + /* ring already cleared, nothing to do */ + if (!tx_ring->tx_buffer_info) + return; + + tx_buffer = &tx_ring->tx_buffer_info[i]; + + while (i != tx_ring->next_to_use) { + struct rnpgbe_tx_desc *eop_desc, *tx_desc; + + dev_kfree_skb_any(tx_buffer->skb); + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + eop_desc = tx_buffer->next_to_watch; + tx_desc = M_TX_DESC(tx_ring, i); + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = M_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + i++; + if (unlikely(i == tx_ring->count)) { + i = 0; + tx_buffer = tx_ring->tx_buffer_info; + } + } + + netdev_tx_reset_queue(txring_txq(tx_ring)); + size = sizeof(struct mucse_tx_buffer) * tx_ring->count; + memset(tx_ring->tx_buffer_info, 0, size); + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * rnpgbe_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void rnpgbe_free_tx_resources(struct mucse_ring *tx_ring) +{ + rnpgbe_clean_tx_ring(tx_ring); + vfree(tx_ring->tx_buffer_info); +
tx_ring->tx_buffer_info = NULL; + /* if not set, then don't free */ + if (!tx_ring->desc) + return; + + dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * rnpgbe_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: tx descriptor ring (for a specific queue) to setup + * @mucse: pointer to private structure + * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_setup_tx_resources(struct mucse_ring *tx_ring, + struct mucse *mucse) +{ + struct device *dev = tx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = NUMA_NO_NODE; + int size; + + size = sizeof(struct mucse_tx_buffer) * tx_ring->count; + + if (tx_ring->q_vector) + numa_node = tx_ring->q_vector->numa_node; + tx_ring->tx_buffer_info = vzalloc_node(size, numa_node); + if (!tx_ring->tx_buffer_info) + tx_ring->tx_buffer_info = vzalloc(size); + if (!tx_ring->tx_buffer_info) + goto err; + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct rnpgbe_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + set_dev_node(dev, numa_node); + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, + GFP_KERNEL); + set_dev_node(dev, orig_node); + if (!tx_ring->desc) + tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, + &tx_ring->dma, + GFP_KERNEL); + if (!tx_ring->desc) + goto err; + + memset(tx_ring->desc, 0, tx_ring->size); + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + return 0; + +err: + vfree(tx_ring->tx_buffer_info); + tx_ring->tx_buffer_info = NULL; + return -ENOMEM; +} + +/** + * rnpgbe_setup_all_tx_resources - allocate all queues Tx resources + * @mucse: pointer to private structure + * + * Allocate memory for tx_ring. + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_setup_all_tx_resources(struct mucse *mucse) +{ + int i, err = 0; + + for (i = 0; i < (mucse->num_tx_queues); i++) { + err = rnpgbe_setup_tx_resources(mucse->tx_ring[i], mucse); + if (err) + goto err_setup_tx; + } + + return 0; +err_setup_tx: + while (i--) + rnpgbe_free_tx_resources(mucse->tx_ring[i]); + return err; +} + +/** + * rnpgbe_free_all_tx_resources - Free Tx Resources for All Queues + * @mucse: pointer to private structure + * + * Free all transmit software resources + **/ +static void rnpgbe_free_all_tx_resources(struct mucse *mucse) +{ + int i; + + for (i = 0; i < (mucse->num_tx_queues); i++) + rnpgbe_free_tx_resources(mucse->tx_ring[i]); +} + +/** + * rnpgbe_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: rx descriptor ring (for a specific queue) to setup + * @mucse: pointer to private structure + * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_setup_rx_resources(struct mucse_ring *rx_ring, + struct mucse *mucse) +{ + struct device *dev = rx_ring->dev; + int orig_node = dev_to_node(dev); + int numa_node = NUMA_NO_NODE; + int size; + + size = sizeof(struct mucse_rx_buffer) * rx_ring->count; + if (rx_ring->q_vector) + numa_node = rx_ring->q_vector->numa_node; + + rx_ring->rx_buffer_info = vzalloc_node(size, numa_node); + if (!rx_ring->rx_buffer_info) + rx_ring->rx_buffer_info = vzalloc(size); + + if (!rx_ring->rx_buffer_info) + goto err; + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union rnpgbe_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); + set_dev_node(dev, numa_node); + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, + GFP_KERNEL); +
set_dev_node(dev, orig_node); + if (!rx_ring->desc) + rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + &rx_ring->dma, + GFP_KERNEL); + if (!rx_ring->desc) + goto err; + memset(rx_ring->desc, 0, rx_ring->size); + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + return 0; +err: + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + return -ENOMEM; +} + +/** + * rnpgbe_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: ring to free buffers from + **/ +static void rnpgbe_clean_rx_ring(struct mucse_ring *rx_ring) +{ + struct mucse_rx_buffer *rx_buffer; + u16 i = rx_ring->next_to_clean; + + /* ring already cleared, nothing to do */ + if (!rx_ring->rx_buffer_info) + return; + + rx_buffer = &rx_ring->rx_buffer_info[i]; + /* Free all the Rx ring sk_buffs */ + while (i != rx_ring->next_to_alloc) { + if (rx_buffer->skb) { + struct sk_buff *skb = rx_buffer->skb; + + dev_kfree_skb(skb); + rx_buffer->skb = NULL; + } + dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, + rx_buffer->page_offset, + mucse_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + rx_buffer->page = NULL; + i++; + rx_buffer++; + if (i == rx_ring->count) { + i = 0; + rx_buffer = rx_ring->rx_buffer_info; + } + } + + rx_ring->next_to_alloc = 0; + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; +} + +/** + * rnpgbe_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void rnpgbe_free_rx_resources(struct mucse_ring *rx_ring) +{ + rnpgbe_clean_rx_ring(rx_ring); + vfree(rx_ring->rx_buffer_info); + rx_ring->rx_buffer_info = NULL; + /* if not set, then don't free */ + if (!rx_ring->desc) + return; + + dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * rnpgbe_setup_all_rx_resources - allocate all queues Rx resources + * @mucse: pointer to private structure + * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_setup_all_rx_resources(struct mucse *mucse) +{ + int i, err = 0; + + for (i = 0; i < mucse->num_rx_queues; i++) { + err = rnpgbe_setup_rx_resources(mucse->rx_ring[i], mucse); + if (err) + goto err_setup_rx; + } + + return 0; +err_setup_rx: + while (i--) + rnpgbe_free_rx_resources(mucse->rx_ring[i]); + return err; +} + +/** + * rnpgbe_free_all_rx_resources - Free Rx Resources for All Queues + * @mucse: pointer to private structure + * + * Free all receive software resources + **/ +static void rnpgbe_free_all_rx_resources(struct mucse *mucse) +{ + int i; + + for (i = 0; i < (mucse->num_rx_queues); i++) { + if (mucse->rx_ring[i]->desc) + rnpgbe_free_rx_resources(mucse->rx_ring[i]); + } +} + +/** + * rnpgbe_setup_txrx - Allocate Tx/Rx Resources for All Queues + * @mucse: pointer to private structure + * + * Allocate all send/receive software resources + * + * @return: 0 on success, negative on failure + **/ +int rnpgbe_setup_txrx(struct mucse *mucse) +{ + int err; + + err = rnpgbe_setup_all_tx_resources(mucse); + if (err) + return err; + + err = rnpgbe_setup_all_rx_resources(mucse); + if (err) + goto err_setup_rx; + return 0; +err_setup_rx: + rnpgbe_free_all_tx_resources(mucse); + return err; +} + +/** + * rnpgbe_free_txrx - Clean Tx/Rx Resources for All Queues + * @mucse: pointer to private structure + * + * Free all send/receive software resources + **/ +void rnpgbe_free_txrx(struct mucse *mucse) +{ + rnpgbe_free_all_tx_resources(mucse); + rnpgbe_free_all_rx_resources(mucse); +} diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h index
0df519a50185..6b2f68320c9e 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h @@ -24,5 +24,7 @@ int rnpgbe_init_interrupt_scheme(struct mucse *mucse); void rnpgbe_clear_interrupt_scheme(struct mucse *mucse); +int rnpgbe_setup_txrx(struct mucse *mucse); +void rnpgbe_free_txrx(struct mucse *mucse); #endif /* _RNPGBE_LIB_H */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c index 8fc1af1c00bc..16a111a10862 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "rnpgbe.h" #include "rnpgbe_mbx_fw.h" @@ -83,6 +84,78 @@ static void rnpgbe_service_task(struct work_struct *work) { } +/** + * rnpgbe_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). + * + * @return: 0 on success, negative value on failure + **/ +static int rnpgbe_open(struct net_device *netdev) +{ + struct mucse *mucse = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__MUCSE_TESTING, &mucse->state)) + return -EBUSY; + + netif_carrier_off(netdev); + err = rnpgbe_setup_txrx(mucse); + + return err; +} + +/** + * rnpgbe_close - Disables a network interface + * @netdev: network interface device structure + * + * The close entry point is called when an interface is de-activated + * by the OS. + * + * @return: 0, this is not allowed to fail + **/ +static int rnpgbe_close(struct net_device *netdev) +{ + struct mucse *mucse = netdev_priv(netdev); + + rnpgbe_free_txrx(mucse); + + return 0; +} + +/** + * rnpgbe_xmit_frame - Send a skb to driver + * @skb: skb structure to be sent + * @netdev: network interface device structure + * + * @return: NETDEV_TX_OK or NETDEV_TX_BUSY + **/ +static netdev_tx_t rnpgbe_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static const struct net_device_ops rnpgbe_netdev_ops = { + .ndo_open = rnpgbe_open, + .ndo_stop = rnpgbe_close, + .ndo_start_xmit = rnpgbe_xmit_frame, +}; + +/** + * rnpgbe_assign_netdev_ops - Assign netdev ops to the device + * @dev: network interface device structure + **/ +static void rnpgbe_assign_netdev_ops(struct net_device *dev) +{ + dev->netdev_ops = &rnpgbe_netdev_ops; + dev->watchdog_timeo = 5 * HZ; +} + /** * rnpgbe_sw_init - Init driver private status + * @mucse: pointer to private structure @@ -289,7 +362,7 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev, "mucse_mbx_get_capability failed!\n"); goto err_free_net; } - + rnpgbe_assign_netdev_ops(netdev); err = rnpgbe_sw_init(mucse); if (err) goto err_free_net; @@ -305,6 +378,7 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev, netdev->features |= NETIF_F_HIGHDMA; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= netdev->features; eth_hw_addr_set(netdev, hw->perm_addr); memcpy(netdev->perm_addr, hw->perm_addr, netdev->addr_len); ether_addr_copy(hw->addr, hw->perm_addr); @@ -314,11 +388,17 @@ static int rnpgbe_add_adapter(struct pci_dev *pdev, err = rnpgbe_init_interrupt_scheme(mucse); if (err) goto err_free_net; + err = register_mbx_irq(mucse); if (err) goto err_free_irq; - + strscpy(netdev->name, "eth%d", sizeof(netdev->name)); + err = register_netdev(netdev); + if (err) + goto
err_register; return 0; +err_register: + remove_mbx_irq(mucse); err_free_irq: rnpgbe_clear_interrupt_scheme(mucse); err_free_net: @@ -389,8 +469,14 @@ static void rnpgbe_rm_adapter(struct mucse *mucse) rnpgbe_devlink_unregister(mucse); netdev = mucse->netdev; + if (mucse->flags2 & M_FLAG2_NO_NET_REG) { + free_netdev(netdev); + return; + } cancel_work_sync(&mucse->service_task); timer_delete_sync(&mucse->service_timer); + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); hw->ops.driver_status(hw, false, mucse_driver_insmod); remove_mbx_irq(mucse); rnpgbe_clear_interrupt_scheme(mucse); @@ -432,6 +518,10 @@ static void rnpgbe_dev_shutdown(struct pci_dev *pdev, *enable_wake = false; netif_device_detach(netdev); + rtnl_lock(); + if (netif_running(netdev)) + rnpgbe_free_txrx(mucse); + rtnl_unlock(); remove_mbx_irq(mucse); rnpgbe_clear_interrupt_scheme(mucse); pci_disable_device(pdev); -- 2.25.1 Initialize irq for tx/rx in open func. Signed-off-by: Dong Yibo --- drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 14 + .../net/ethernet/mucse/rnpgbe/rnpgbe_chip.c | 81 +++++ drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h | 11 + .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c | 280 ++++++++++++++++++ .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.h | 30 ++ .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 36 ++- 6 files changed, 450 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h index cb0d73589687..23fb93157b98 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h @@ -67,7 +67,14 @@ struct mii_regs { unsigned int clk_csr_mask; }; +struct mucse_mac_info; + +struct mucse_mac_operations { + void (*set_mac)(struct mucse_mac_info *mac, u8 *addr, int index); +}; + struct mucse_mac_info { + struct mucse_mac_operations ops; u8 __iomem *mac_addr; void *back; struct mii_regs mii; @@ -174,6 +181,9 @@ struct mucse_hw_operations { void (*init_rx_addrs)(struct mucse_hw *hw); /* ops to fw */ void (*driver_status)(struct mucse_hw *hw, bool enable, int mode); + void (*update_hw_info)(struct mucse_hw *hw); + void (*set_mac)(struct mucse_hw *hw, u8 *mac); + void (*set_irq_mode)(struct mucse_hw *hw, bool legacy); }; enum { @@ -603,6 +613,10 @@ static inline unsigned int mucse_rx_bufsz(struct mucse_ring *ring) #define dma_rd32(dma, reg) m_rd_reg((dma)->dma_base_addr + (reg)) #define eth_wr32(eth, reg, val) m_wr_reg((eth)->eth_base_addr + (reg), (val)) #define eth_rd32(eth, reg) m_rd_reg((eth)->eth_base_addr + (reg)) +#define mac_wr32(mac, reg, val) m_wr_reg((mac)->mac_addr + (reg), (val)) +#define mac_rd32(mac, reg) m_rd_reg((mac)->mac_addr + (reg)) +#define ring_wr32(eth, reg, val) m_wr_reg((eth)->ring_addr + (reg), (val)) +#define ring_rd32(eth, reg) m_rd_reg((eth)->ring_addr + (reg)) #define mucse_err(mucse, fmt, arg...) \ dev_err(&(mucse)->pdev->dev, fmt, ##arg) diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c index fa8317ae7642..266dc95c4ff2 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c @@ -91,6 +91,30 @@ static struct mucse_eth_operations eth_ops_n500 = { .clr_mc_addr = &rnpgbe_eth_clr_mc_addr_n500 }; +/** + * rnpgbe_mac_set_mac_n500 - Setup mac address to mac module in hw + * @mac: pointer to mac structure + * @addr: pointer to addr + * @index: Receive address register to write + * + * Setup a mac address to mac module. 
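+ * + * The 6-byte address is split across two registers: bytes 0-3 form the + * low word, and bytes 4-5 together with the address-valid bit M_RAH_AV + * form the high word. A worked example for 00:11:22:33:44:55, based on + * the packing below: + * + * rar_low = 0x33221100 + * rar_high = M_RAH_AV | 0x00005544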
+ **/ +static void rnpgbe_mac_set_mac_n500(struct mucse_mac_info *mac, + u8 *addr, int index) +{ + u32 rar_low, rar_high = 0; + + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + rar_high = M_RAH_AV | ((u32)addr[4] | (u32)addr[5] << 8); + mac_wr32(mac, RNPGBE_MAC_UNICAST_HIGH(index), rar_high); + mac_wr32(mac, RNPGBE_MAC_UNICAST_LOW(index), rar_low); +} + +static struct mucse_mac_operations mac_ops_n500 = { + .set_mac = &rnpgbe_mac_set_mac_n500, +}; + /** * rnpgbe_init_hw_ops_n500 - Init hardware * @hw: hw information structure @@ -251,12 +275,68 @@ static void rnpgbe_init_rx_addrs_hw_ops_n500(struct mucse_hw *hw) eth->ops.clr_mc_addr(eth); } +/** + * rnpgbe_set_mac_hw_ops_n500 - Setup mac address to hw + * @hw: pointer to hw structure + * @mac: pointer to mac addr + * + * Setup a mac address to hw. + **/ +static void rnpgbe_set_mac_hw_ops_n500(struct mucse_hw *hw, u8 *mac) +{ + struct mucse_eth_info *eth = &hw->eth; + struct mucse_mac_info *mac_info = &hw->mac; + + /* use idx 0 */ + eth->ops.set_rar(eth, 0, mac); + mac_info->ops.set_mac(mac_info, mac, 0); +} + +/** + * rnpgbe_update_hw_info_hw_ops_n500 - Update status to hw + * @hw: pointer to hw structure + * + * Program status info into hw, such as fifo thresholds and enable regs. + **/ +static void rnpgbe_update_hw_info_hw_ops_n500(struct mucse_hw *hw) +{ + struct mucse_dma_info *dma = &hw->dma; + struct mucse_eth_info *eth = &hw->eth; + + /* 1 enable eth filter */ + eth_wr32(eth, RNPGBE_HOST_FILTER_EN, 1); + /* 2 open redir en */ + eth_wr32(eth, RNPGBE_REDIR_EN, 1); + /* 3 setup tso fifo */ + dma_wr32(dma, DMA_PKT_FIFO_DATA_PROG_FULL_THRESH, 36); +} + +/** + * rnpgbe_set_irq_mode_n500 - Setup hw irq mode + * @hw: pointer to hw structure + * @legacy: true if legacy (INTx) irq mode is used + * + * Setup irq mode to hw.
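+ * + * Legacy mode additionally programs a de-assertion interval of 0x200 + * (presumably in internal clock cycles; the unit is an assumption, it + * is not spelled out in this patch), while msi/msix mode only clears + * the legacy enable bit.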
+ **/ +static void rnpgbe_set_irq_mode_n500(struct mucse_hw *hw, bool legacy) +{ + if (legacy) { + hw_wr32(hw, RNPGBE_LEGACY_ENABLE, 1); + hw_wr32(hw, RNPGBE_LEGACY_TIME, 0x200); + } else { + hw_wr32(hw, RNPGBE_LEGACY_ENABLE, 0); + } +} + static struct mucse_hw_operations hw_ops_n500 = { .init_hw = &rnpgbe_init_hw_ops_n500, .reset_hw = &rnpgbe_reset_hw_ops_n500, .start_hw = &rnpgbe_start_hw_ops_n500, .init_rx_addrs = &rnpgbe_init_rx_addrs_hw_ops_n500, .driver_status = &rnpgbe_driver_status_hw_ops_n500, + .set_mac = &rnpgbe_set_mac_hw_ops_n500, + .update_hw_info = &rnpgbe_update_hw_info_hw_ops_n500, + .set_irq_mode = &rnpgbe_set_irq_mode_n500, }; /** @@ -291,6 +371,7 @@ static void rnpgbe_get_invariants_n500(struct mucse_hw *hw) eth->vft_size = RNPGBE_VFT_TBL_SIZE; eth->num_rar_entries = RNPGBE_RAR_ENTRIES; /* setup mac info */ + memcpy(&hw->mac.ops, &mac_ops_n500, sizeof(hw->mac.ops)); mac->mac_addr = hw->hw_addr + RNPGBE_MAC_BASE; mac->back = hw; /* set mac->mii */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h index bcb4da45feac..98031600801b 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h @@ -22,9 +22,13 @@ #define RX_AXI_RW_EN (0x03 << 0) #define TX_AXI_RW_EN (0x03 << 2) #define RNPGBE_DMA_RX_PROG_FULL_THRESH (0x00a0) +#define DMA_PKT_FIFO_DATA_PROG_FULL_THRESH (0x0098) #define RING_VECTOR(n) (0x04 * (n)) + /* eth regs */ #define RNPGBE_ETH_BYPASS (0x8000) +#define RNPGBE_HOST_FILTER_EN (0x800c) +#define RNPGBE_REDIR_EN (0x8030) #define RNPGBE_ETH_ERR_MASK_VECTOR (0x8060) #define RNPGBE_ETH_DEFAULT_RX_RING (0x806c) #define RNPGBE_PKT_LEN_ERR (2) @@ -35,6 +39,13 @@ #define RNPGBE_ETH_RAR_RL(n) (0xa000 + 0x04 * (n)) #define RNPGBE_ETH_RAR_RH(n) (0xa400 + 0x04 * (n)) #define RNPGBE_ETH_MUTICAST_HASH_TABLE(n) (0xac00 + 0x04 * (n)) + +#define RNPGBE_LEGACY_ENABLE (0xd004) +#define RNPGBE_LEGACY_TIME (0xd000) +/* mac regs */ +#define M_RAH_AV 0x80000000 +#define RNPGBE_MAC_UNICAST_LOW(i) (0x44 + (i) * 0x08) +#define RNPGBE_MAC_UNICAST_HIGH(i) (0x40 + (i) * 0x08) /* chip resourse */ #define RNPGBE_MAX_QUEUES (8) /* multicast control table */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c index abf3eef3291a..2ba1f5f5aa6c 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c @@ -2,6 +2,7 @@ /* Copyright(c) 2020 - 2025 Mucse Corporation. */ #include +#include #include "rnpgbe.h" #include "rnpgbe_lib.h" @@ -853,3 +854,282 @@ void rnpgbe_free_txrx(struct mucse *mucse) rnpgbe_free_all_tx_resources(mucse); rnpgbe_free_all_rx_resources(mucse); } + +/** + * rnpgbe_configure_tx_ring - Configure Tx ring after Reset + * @mucse: pointer to private structure + * @ring: structure containing ring specific data + * + * Configure the Tx descriptor ring after a reset.
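+ * + * The programming order below matters: the ring is stopped first + * (DMA_TX_START = 0), the descriptor base and length are written, + * next_to_use/next_to_clean are resynchronized from the hardware head + * pointer, fetch control and interrupt moderation are set, and the + * ring is only restarted after DMA_TX_READY polls back as 1.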
+ **/ +static void rnpgbe_configure_tx_ring(struct mucse *mucse, + struct mucse_ring *ring) +{ + struct mucse_hw *hw = &mucse->hw; + u32 status = 0; + + ring_wr32(ring, DMA_TX_START, 0); + ring_wr32(ring, DMA_REG_TX_DESC_BUF_BASE_ADDR_LO, (u32)ring->dma); + ring_wr32(ring, DMA_REG_TX_DESC_BUF_BASE_ADDR_HI, + (u32)(((u64)ring->dma) >> 32) | (hw->pfvfnum << 24)); + ring_wr32(ring, DMA_REG_TX_DESC_BUF_LEN, ring->count); + ring->next_to_clean = ring_rd32(ring, DMA_REG_TX_DESC_BUF_HEAD); + ring->next_to_use = ring->next_to_clean; + ring->tail = ring->ring_addr + DMA_REG_TX_DESC_BUF_TAIL; + m_wr_reg(ring->tail, ring->next_to_use); + ring_wr32(ring, DMA_REG_TX_DESC_FETCH_CTRL, + (8 << 0) | (TX_DEFAULT_BURST << 16)); + ring_wr32(ring, DMA_REG_TX_INT_DELAY_TIMER, + mucse->tx_usecs * hw->usecstocount); + ring_wr32(ring, DMA_REG_TX_INT_DELAY_PKTCNT, mucse->tx_frames); + read_poll_timeout(ring_rd32, status, status == 1, + 100, 20000, false, ring, DMA_TX_READY); + ring_wr32(ring, DMA_TX_START, 1); +} + +/** + * rnpgbe_configure_tx - Configure Transmit Unit after Reset + * @mucse: pointer to private structure + * + * Configure the Tx DMA after a reset. + **/ +void rnpgbe_configure_tx(struct mucse *mucse) +{ + u32 i; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + for (i = 0; i < (mucse->num_tx_queues); i++) + rnpgbe_configure_tx_ring(mucse, mucse->tx_ring[i]); +} + +/** + * rnpgbe_disable_rx_queue - Disable start for ring + * @ring: structure containing ring specific data + **/ +void rnpgbe_disable_rx_queue(struct mucse_ring *ring) +{ + ring_wr32(ring, DMA_RX_START, 0); +} + +/** + * rnpgbe_configure_rx_ring - Configure Rx ring after Reset + * @mucse: pointer to private structure + * @ring: structure containing ring specific data + * + * Configure the Rx descriptor ring after a reset. + **/ +static void rnpgbe_configure_rx_ring(struct mucse *mucse, + struct mucse_ring *ring) +{ + struct mucse_hw *hw = &mucse->hw; + u64 desc_phy = ring->dma; + int split_size; + /* disable queue to avoid issues while updating state */ + rnpgbe_disable_rx_queue(ring); + + /* set descriptor registers */ + ring_wr32(ring, DMA_REG_RX_DESC_BUF_BASE_ADDR_LO, (u32)desc_phy); + ring_wr32(ring, DMA_REG_RX_DESC_BUF_BASE_ADDR_HI, + ((u32)(desc_phy >> 32)) | (hw->pfvfnum << 24)); + ring_wr32(ring, DMA_REG_RX_DESC_BUF_LEN, ring->count); + ring->tail = ring->ring_addr + DMA_REG_RX_DESC_BUF_TAIL; + ring->next_to_clean = ring_rd32(ring, DMA_REG_RX_DESC_BUF_HEAD); + ring->next_to_use = ring->next_to_clean; + + /* we use fixed sg size */ + split_size = 96; + ring_wr32(ring, DMA_REG_RX_SCATTER_LENGTH, split_size); + ring_wr32(ring, DMA_REG_RX_DESC_FETCH_CTRL, + 0 | (RX_DEFAULT_LINE << 0) | + (RX_DEFAULT_BURST << 16)); + /* if ncsi card, drop packets if no rx-desc in 100000 clks */ + if (hw->ncsi_en) + ring_wr32(ring, DMA_REG_RX_DESC_TIMEOUT_TH, 100000); + else + ring_wr32(ring, DMA_REG_RX_DESC_TIMEOUT_TH, 0); + ring_wr32(ring, DMA_REG_RX_INT_DELAY_TIMER, + mucse->rx_usecs * hw->usecstocount); + ring_wr32(ring, DMA_REG_RX_INT_DELAY_PKTCNT, mucse->rx_frames); +} + +/** + * rnpgbe_configure_rx - Configure Receive Unit after Reset + * @mucse: pointer to private structure + * + * Configure the Rx unit of the MAC after a reset.
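+ * + * Note that the rings are deliberately left stopped here (DMA_RX_START + * stays 0): rnpgbe_configure_rx_ring() only programs addresses and + * moderation, and the rings are expected to be started later, once + * receive buffers have been posted.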
+ **/ +void rnpgbe_configure_rx(struct mucse *mucse) +{ + int i; + + for (i = 0; i < mucse->num_rx_queues; i++) + rnpgbe_configure_rx_ring(mucse, mucse->rx_ring[i]); +} + +/** + * rnpgbe_msix_clean_rings - msix irq handler for ring irq + * @irq: irq num + * @data: private data + * + * rnpgbe_msix_clean_rings handles ring irqs and starts napi + **/ +static irqreturn_t rnpgbe_msix_clean_rings(int irq, void *data) +{ + return IRQ_HANDLED; +} + +static void rnpgbe_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct mucse_q_vector *q_vector = + container_of(notify, struct mucse_q_vector, affinity_notify); + + cpumask_copy(&q_vector->affinity_mask, mask); +} + +static void rnpgbe_irq_affinity_release(struct kref *ref) +{ +} + +/** + * rnpgbe_request_msix_irqs - Initialize MSI-X interrupts + * @mucse: pointer to private structure + * + * rnpgbe_request_msix_irqs allocates MSI-X vectors and requests + * interrupts from the kernel. + **/ +static int rnpgbe_request_msix_irqs(struct mucse *mucse) +{ + struct net_device *netdev = mucse->netdev; + int q_off = mucse->q_vector_off; + struct msix_entry *entry; + int i = 0; + int err; + + for (i = 0; i < mucse->num_q_vectors; i++) { + struct mucse_q_vector *q_vector = mucse->q_vector[i]; + + entry = &mucse->msix_entries[i + q_off]; + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "TxRx", i); + } else { + /* skip this unused q_vector */ + continue; + } + err = request_irq(entry->vector, &rnpgbe_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) + goto free_queue_irqs; + /* register for affinity change notifications */ + q_vector->affinity_notify.notify = rnpgbe_irq_affinity_notify; + q_vector->affinity_notify.release = rnpgbe_irq_affinity_release; + irq_set_affinity_notifier(entry->vector, + &q_vector->affinity_notify); + irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask); + } + + return 0; + +free_queue_irqs: + while (i) { + i--; + entry = &mucse->msix_entries[i + q_off]; + irq_set_affinity_notifier(entry->vector, NULL); + irq_set_affinity_hint(entry->vector, NULL); + free_irq(entry->vector, mucse->q_vector[i]); + } + return err; +} + +/** + * rnpgbe_intr - msi/legacy irq handler + * @irq: irq num + * @data: private data + **/ +static irqreturn_t rnpgbe_intr(int irq, void *data) +{ + return IRQ_HANDLED; +} + +/** + * rnpgbe_request_irq - initialize interrupts + * @mucse: pointer to private structure + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +int rnpgbe_request_irq(struct mucse *mucse) +{ + struct mucse_hw *hw = &mucse->hw; + int err; + + if (mucse->flags & M_FLAG_MSIX_ENABLED) { + err = rnpgbe_request_msix_irqs(mucse); + hw->ops.set_irq_mode(hw, 0); + } else if (mucse->flags & M_FLAG_MSI_ENABLED) { + /* in this case one for all */ + err = request_irq(mucse->pdev->irq, rnpgbe_intr, 0, + mucse->netdev->name, mucse); + mucse->hw.mbx.irq_enabled = true; + hw->ops.set_irq_mode(hw, 0); + } else { + err = request_irq(mucse->pdev->irq, rnpgbe_intr, IRQF_SHARED, + mucse->netdev->name, mucse); + hw->ops.set_irq_mode(hw, 1); + mucse->hw.mbx.irq_enabled = true; + } + return err; +} + +/** + * rnpgbe_free_msix_irqs - Free MSI-X interrupts + * @mucse: pointer to private structure + * + * rnpgbe_free_msix_irqs frees the MSI-X vector interrupts previously + * requested by rnpgbe_request_msix_irqs().
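+ * + * Teardown mirrors rnpgbe_request_msix_irqs(): the affinity notifier + * and affinity hint are cleared before free_irq() so that no notifier + * callback can run against a vector that has already been released.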
+ * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_free_msix_irqs(struct mucse *mucse) +{ + int q_off = mucse->q_vector_off; + struct mucse_q_vector *q_vector; + struct msix_entry *entry; + int i; + + for (i = 0; i < mucse->num_q_vectors; i++) { + q_vector = mucse->q_vector[i]; + entry = &mucse->msix_entries[i + q_off]; + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + /* clear the affinity notifier in the IRQ descriptor */ + irq_set_affinity_notifier(entry->vector, NULL); + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + free_irq(entry->vector, q_vector); + } + return 0; +} + +/** + * rnpgbe_free_irq - free interrupts + * @mucse: pointer to private structure + * + * Attempts to free interrupts according to the initialized type. + **/ +void rnpgbe_free_irq(struct mucse *mucse) +{ + if (mucse->flags & M_FLAG_MSIX_ENABLED) { + rnpgbe_free_msix_irqs(mucse); + } else { + /* msi and legacy use one irq for all */ + free_irq(mucse->pdev->irq, mucse); + mucse->hw.mbx.irq_enabled = false; + } +} diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h index 6b2f68320c9e..24859649199f 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h @@ -16,6 +16,31 @@ #define RX_INT_MASK (0x01) #define DMA_INT_CLR (0x28) #define DMA_INT_STAT (0x20) +#define DMA_REG_RX_DESC_BUF_BASE_ADDR_HI (0x30) +#define DMA_REG_RX_DESC_BUF_BASE_ADDR_LO (0x34) +#define DMA_REG_RX_DESC_BUF_LEN (0x38) +#define DMA_REG_RX_DESC_BUF_HEAD (0x3c) +#define DMA_REG_RX_DESC_BUF_TAIL (0x40) +#define DMA_REG_RX_DESC_FETCH_CTRL (0x44) +#define DMA_REG_RX_INT_DELAY_TIMER (0x48) +#define DMA_REG_RX_INT_DELAY_PKTCNT (0x4c) +#define DMA_REG_RX_ARB_DEF_LVL (0x50) +#define DMA_REG_RX_DESC_TIMEOUT_TH (0x54) +#define DMA_REG_RX_SCATTER_LENGTH (0x58) +#define DMA_REG_TX_DESC_BUF_BASE_ADDR_HI (0x60) +#define DMA_REG_TX_DESC_BUF_BASE_ADDR_LO (0x64) +#define DMA_REG_TX_DESC_BUF_LEN (0x68) +#define DMA_REG_TX_DESC_BUF_HEAD (0x6c) +#define DMA_REG_TX_DESC_BUF_TAIL (0x70) +#define DMA_REG_TX_DESC_FETCH_CTRL (0x74) +#define DMA_REG_TX_INT_DELAY_TIMER (0x78) +#define DMA_REG_TX_INT_DELAY_PKTCNT (0x7c) +#define DMA_REG_TX_ARB_DEF_LVL (0x80) +#define DMA_REG_TX_FLOW_CTRL_TH (0x84) +#define DMA_REG_TX_FLOW_CTRL_TM (0x88) +#define TX_DEFAULT_BURST (8) +#define RX_DEFAULT_LINE (32) +#define RX_DEFAULT_BURST (16) #define mucse_for_each_ring(pos, head)\ for (typeof((head).ring) __pos = (head).ring;\ @@ -26,5 +51,10 @@ int rnpgbe_init_interrupt_scheme(struct mucse *mucse); void rnpgbe_clear_interrupt_scheme(struct mucse *mucse); int rnpgbe_setup_txrx(struct mucse *mucse); void rnpgbe_free_txrx(struct mucse *mucse); +void rnpgbe_configure_tx(struct mucse *mucse); +void rnpgbe_disable_rx_queue(struct mucse_ring *ring); +void rnpgbe_configure_rx(struct mucse *mucse); +int rnpgbe_request_irq(struct mucse *mucse); +void rnpgbe_free_irq(struct mucse *mucse); #endif /* _RNPGBE_LIB_H */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c index 16a111a10862..dc0990daf8b8 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c @@ -84,6 +84,22 @@ static void rnpgbe_service_task(struct work_struct *work)
 {
 }
 
+/**
+ * rnpgbe_configure - Configure info to hw
+ * @mucse: pointer to private structure
+ *
+ * rnpgbe_configure writes the mac address and the tx/rx ring settings
+ * to the hw registers.
+ **/
+static void rnpgbe_configure(struct mucse *mucse)
+{
+	struct mucse_hw *hw = &mucse->hw;
+
+	hw->ops.set_mac(hw, hw->addr);
+	hw->ops.update_hw_info(hw);
+	rnpgbe_configure_tx(mucse);
+	rnpgbe_configure_rx(mucse);
+}
+
 /**
  * rnpgbe_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -104,7 +120,24 @@ static int rnpgbe_open(struct net_device *netdev)
 	netif_carrier_off(netdev);
 
 	err = rnpgbe_setup_txrx(mucse);
-
+	if (err)
+		return err;
+	rnpgbe_configure(mucse);
+	err = rnpgbe_request_irq(mucse);
+	if (err)
+		goto err_req_irq;
+	err = netif_set_real_num_tx_queues(netdev, mucse->num_tx_queues);
+	if (err)
+		goto err_set_queues;
+	err = netif_set_real_num_rx_queues(netdev, mucse->num_rx_queues);
+	if (err)
+		goto err_set_queues;
+
+	return 0;
+err_set_queues:
+	rnpgbe_free_irq(mucse);
+err_req_irq:
+	rnpgbe_free_txrx(mucse);
 	return err;
 }
 
@@ -121,6 +150,7 @@ static int rnpgbe_close(struct net_device *netdev)
 {
 	struct mucse *mucse = netdev_priv(netdev);
 
+	rnpgbe_free_irq(mucse);
 	rnpgbe_free_txrx(mucse);
 
 	return 0;
@@ -519,8 +549,10 @@ static void rnpgbe_dev_shutdown(struct pci_dev *pdev,
 	*enable_wake = false;
 	netif_device_detach(netdev);
 	rtnl_lock();
-	if (netif_running(netdev))
+	if (netif_running(netdev)) {
+		rnpgbe_free_irq(mucse);
 		rnpgbe_free_txrx(mucse);
+	}
 	rtnl_unlock();
 	remove_mbx_irq(mucse);
 	rnpgbe_clear_interrupt_scheme(mucse);
-- 
2.25.1

Initialize ring-vector setup and bring up hw in open func.

Signed-off-by: Dong Yibo 
---
 drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h    |   4 +
 .../net/ethernet/mucse/rnpgbe/rnpgbe_chip.c   |  24 ++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c    | 111 ++++++++++++++++++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.h    |  72 +++++++++++-
 .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c   |  56 +++++++++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c | 108 +++++++++++++++++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h |  31 ++++-
 7 files changed, 404 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
index 23fb93157b98..624e0eec562a 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
@@ -184,6 +184,8 @@ struct mucse_hw_operations {
 	void (*update_hw_info)(struct mucse_hw *hw);
 	void (*set_mac)(struct mucse_hw *hw, u8 *mac);
 	void (*set_irq_mode)(struct mucse_hw *hw, bool legacy);
+	void (*set_mbx_link_event)(struct mucse_hw *hw, int enable);
+	void (*set_mbx_ifup)(struct mucse_hw *hw, int enable);
 };
 
 enum {
@@ -528,6 +530,7 @@ struct mucse {
 	struct pci_dev *pdev;
 	struct devlink *dl;
 	struct mucse_hw hw;
+	u16 msg_enable;
 	/* board number */
 	u16 bd_number;
 	u16 tx_work_limit;
@@ -560,6 +563,7 @@ struct mucse {
 	u16 tx_frames;
 	u16 tx_usecs;
 	unsigned long state;
+	unsigned long link_check_timeout;
 	struct timer_list service_timer;
 	struct work_struct service_task;
 	char name[60];
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
index 266dc95c4ff2..b85d4d0e3dbc 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
@@ -328,6 +328,28 @@ static void rnpgbe_set_irq_mode_n500(struct mucse_hw *hw, bool legacy)
 	}
 }
 
+/**
+ * rnpgbe_set_mbx_link_event_hw_ops_n500 - Request link event reports from hw
+ * @hw: hw information structure
+ * @enable: true for event on
+ **/
+static void
+rnpgbe_set_mbx_link_event_hw_ops_n500(struct mucse_hw *hw,
+				      int enable)
+{
+	mucse_mbx_link_event_enable(hw, enable);
+}
+
+/**
+ * rnpgbe_set_mbx_ifup_hw_ops_n500 - Report phy up/down status to hw
+ * @hw: hw information structure
+ * @enable: true for phy up
+ **/
+static void rnpgbe_set_mbx_ifup_hw_ops_n500(struct mucse_hw *hw,
+					    int enable)
+{
+	mucse_mbx_ifup_down(hw, enable);
+}
+
 static struct mucse_hw_operations hw_ops_n500 = {
 	.init_hw = &rnpgbe_init_hw_ops_n500,
 	.reset_hw = &rnpgbe_reset_hw_ops_n500,
@@ -337,6 +359,8 @@ static struct mucse_hw_operations hw_ops_n500 = {
 	.set_mac = &rnpgbe_set_mac_hw_ops_n500,
 	.update_hw_info = &rnpgbe_update_hw_info_hw_ops_n500,
 	.set_irq_mode = &rnpgbe_set_irq_mode_n500,
+	.set_mbx_link_event = &rnpgbe_set_mbx_link_event_hw_ops_n500,
+	.set_mbx_ifup = &rnpgbe_set_mbx_ifup_hw_ops_n500,
 };
 
 /**
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
index 2ba1f5f5aa6c..0686bfbf55bf 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
@@ -1133,3 +1133,114 @@ void rnpgbe_free_irq(struct mucse *mucse)
 		mucse->hw.mbx.irq_enabled = false;
 	}
 }
+
+/**
+ * rnpgbe_napi_enable_all - enable all napi
+ * @mucse: pointer to private structure
+ *
+ * Enable napi on every queue vector of this device.
+ **/
+void rnpgbe_napi_enable_all(struct mucse *mucse)
+{
+	int q_idx;
+
+	for (q_idx = 0; q_idx < mucse->num_q_vectors; q_idx++)
+		napi_enable(&mucse->q_vector[q_idx]->napi);
+}
+
+/**
+ * rnpgbe_napi_disable_all - disable all napi
+ * @mucse: pointer to private structure
+ *
+ * Disable napi on every queue vector of this device.
+ **/
+void rnpgbe_napi_disable_all(struct mucse *mucse)
+{
+	int q_idx;
+
+	for (q_idx = 0; q_idx < mucse->num_q_vectors; q_idx++)
+		napi_disable(&mucse->q_vector[q_idx]->napi);
+}
+
+/**
+ * rnpgbe_set_ring_vector - set the ring_vector registers,
+ * mapping interrupt causes to vectors
+ * @mucse: pointer to private structure
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ *
+ */
+static void rnpgbe_set_ring_vector(struct mucse *mucse,
+				   u8 queue, u8 msix_vector)
+{
+	struct mucse_hw *hw = &mucse->hw;
+	u32 data = 0;
+
+	data = hw->pfvfnum << 24;
+	data |= (msix_vector << 8);
+	data |= (msix_vector << 0);
+	m_wr_reg(hw->ring_msix_base + RING_VECTOR(queue), data);
+}
+
+/**
+ * rnpgbe_configure_msix - Configure MSI-X hardware
+ * @mucse: pointer to private structure
+ *
+ * rnpgbe_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
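+ *
+ * Each rx ring's queue index is mapped to its vector's v_idx through
+ * the RING_VECTOR() table so ring events raise the right MSI-X vector;
+ * on the n500/n210 family the other-cause vectors (lpi/BMC/PHY) are
+ * also masked here according to the hw feature flags.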
+ **/
+void rnpgbe_configure_msix(struct mucse *mucse)
+{
+	struct mucse_hw *hw = &mucse->hw;
+	struct mucse_q_vector *q_vector;
+	int i;
+
+	/* configure the ring-msix register table */
+	for (i = 0; i < mucse->num_q_vectors; i++) {
+		struct mucse_ring *ring;
+
+		q_vector = mucse->q_vector[i];
+		mucse_for_each_ring(ring, q_vector->rx) {
+			rnpgbe_set_ring_vector(mucse, ring->rnpgbe_queue_idx,
					       q_vector->v_idx);
+		}
+	}
+	/* the n500 family must mask the other-cause vectors */
+	if (hw->hw_type == rnpgbe_hw_n500 ||
+	    hw->hw_type == rnpgbe_hw_n210 ||
+	    hw->hw_type == rnpgbe_hw_n210L) {
+		/* 8  lpi | PMT
+		 * 9  BMC_RX_IRQ |
+		 * 10 PHY_IRQ | LPI_IRQ
+		 * 11 BMC_TX_IRQ |
+		 * DMAR errors may occur if the PF is assigned to a VM
+		 * with these vectors left active
+		 */
+#define OTHER_VECTOR_START (8)
+#define OTHER_VECTOR_STOP (11)
+#define MSIX_UNUSED (0x0f0f)
+		for (i = OTHER_VECTOR_START; i <= OTHER_VECTOR_STOP; i++) {
+			if (hw->feature_flags & M_HW_SOFT_MASK_OTHER_IRQ) {
+				m_wr_reg(hw->ring_msix_base +
+					 RING_VECTOR(i),
+					 MSIX_UNUSED);
+			} else {
+				m_wr_reg(hw->ring_msix_base +
+					 RING_VECTOR(i), 0);
+			}
+		}
+		if (hw->feature_flags & M_HW_FEATURE_EEE) {
+#define LPI_IRQ (8)
+			/* only unmask the lpi irq */
+			if (hw->feature_flags & M_HW_SOFT_MASK_OTHER_IRQ) {
+				m_wr_reg(hw->ring_msix_base +
+					 RING_VECTOR(LPI_IRQ),
+					 0x000f);
+			} else {
+				m_wr_reg(hw->ring_msix_base +
+					 RING_VECTOR(LPI_IRQ),
+					 0x0000);
+			}
+		}
+	}
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
index 24859649199f..bdb8a393dad8 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
@@ -7,6 +7,7 @@
 #include "rnpgbe.h"
 
 #define RING_OFFSET(n) (0x100 * (n))
+#define DMA_DUMY (0xc)
 #define DMA_RX_START (0x10)
 #define DMA_RX_READY (0x14)
 #define DMA_TX_START (0x18)
@@ -14,6 +15,8 @@
 #define DMA_INT_MASK (0x24)
 #define TX_INT_MASK (0x02)
 #define RX_INT_MASK (0x01)
+#define DMA_INT_TRIG (0x2c)
+#define INT_VALID (0x3 << 16)
 #define DMA_INT_CLR (0x28)
 #define DMA_INT_STAT (0x20)
@@ -41,11 +44,75 @@
 #define TX_DEFAULT_BURST (8)
 #define RX_DEFAULT_LINE (32)
 #define RX_DEFAULT_BURST (16)
-
+#define RING_VECTOR(n) (0x04 * (n))
 #define mucse_for_each_ring(pos, head)\
 	for (typeof((head).ring) __pos = (head).ring;\
 	     __pos ? ({ pos = __pos; 1; }) : 0;\
 	     __pos = __pos->next)
+#define e_info(msglvl, format, arg...) \
+	netif_info(mucse, msglvl, mucse->netdev, format, ##arg)
+
+enum link_event_mask {
+	EVT_LINK_UP = 1,
+	EVT_NO_MEDIA = 2,
+	EVT_LINK_FAULT = 3,
+	EVT_PHY_TEMP_ALARM = 4,
+	EVT_EXCESSIVE_ERRORS = 5,
+	EVT_SIGNAL_DETECT = 6,
+	EVT_AUTO_NEGOTIATION_DONE = 7,
+	EVT_MODULE_QUALIFICATION_FAILED = 8,
+	EVT_PORT_TX_SUSPEND = 9,
+};
+
+static inline void rnpgbe_irq_enable_queues(struct mucse *mucse,
+					    struct mucse_q_vector *q_vector)
+{
+	struct mucse_ring *ring;
+
+	mucse_for_each_ring(ring, q_vector->rx) {
+		m_wr_reg(ring->dma_int_mask, ~(RX_INT_MASK | TX_INT_MASK));
+		ring_wr32(ring, DMA_INT_TRIG, INT_VALID | TX_INT_MASK |
+			  RX_INT_MASK);
+	}
+}
+
+static inline void rnpgbe_irq_enable(struct mucse *mucse)
+{
+	int i;
+
+	for (i = 0; i < mucse->num_q_vectors; i++)
+		rnpgbe_irq_enable_queues(mucse, mucse->q_vector[i]);
+}
+
+static inline void rnpgbe_irq_disable_queues(struct mucse_q_vector *q_vector)
+{
+	struct mucse_ring *ring;
+
+	mucse_for_each_ring(ring, q_vector->tx) {
+		ring_wr32(ring, DMA_INT_TRIG,
+			  INT_VALID | (~(TX_INT_MASK | RX_INT_MASK)));
+		m_wr_reg(ring->dma_int_mask, (RX_INT_MASK | TX_INT_MASK));
+	}
+}
+
+/**
+ * rnpgbe_irq_disable - Mask off interrupt generation on the NIC
+ * @mucse: pointer to private structure
+ **/
+static inline void rnpgbe_irq_disable(struct mucse *mucse)
+{
+	int i, j;
+
+	for (i = 0; i < mucse->num_q_vectors; i++) {
+		rnpgbe_irq_disable_queues(mucse->q_vector[i]);
+		j = i + mucse->q_vector_off;
+
+		if (mucse->flags & M_FLAG_MSIX_ENABLED)
+			synchronize_irq(mucse->msix_entries[j].vector);
+		else
+			synchronize_irq(mucse->pdev->irq);
+	}
+}
 
 int rnpgbe_init_interrupt_scheme(struct mucse *mucse);
 void rnpgbe_clear_interrupt_scheme(struct mucse *mucse);
@@ -56,5 +123,8 @@ void rnpgbe_disable_rx_queue(struct mucse_ring *ring);
 void rnpgbe_configure_rx(struct mucse *mucse);
 int rnpgbe_request_irq(struct mucse *mucse);
 void rnpgbe_free_irq(struct mucse *mucse);
+void rnpgbe_napi_enable_all(struct mucse *mucse);
+void rnpgbe_napi_disable_all(struct mucse *mucse);
+void rnpgbe_configure_msix(struct mucse *mucse);
 #endif /* _RNPGBE_LIB_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
index dc0990daf8b8..27beb0e6e705 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c
@@ -100,6 +100,38 @@ static void rnpgbe_configure(struct mucse *mucse)
 	rnpgbe_configure_rx(mucse);
 }
 
+/**
+ * rnpgbe_up_complete - Final step for port up
+ * @mucse: pointer to private structure
+ **/
+static void rnpgbe_up_complete(struct mucse *mucse)
+{
+	struct mucse_hw *hw = &mucse->hw;
+	int i;
+
+	rnpgbe_configure_msix(mucse);
+	/* make prior config visible before clearing __MUCSE_DOWN */
+	smp_mb__before_atomic();
+	clear_bit(__MUCSE_DOWN, &mucse->state);
+	rnpgbe_napi_enable_all(mucse);
+	/* unmask ring interrupts */
+	rnpgbe_irq_enable(mucse);
+	/* enable transmits */
+	netif_tx_start_all_queues(mucse->netdev);
+	/* start the rx dma engines */
+	for (i = 0; i < mucse->num_rx_queues; i++)
+		ring_wr32(mucse->rx_ring[i], DMA_RX_START, 1);
+
+	/* bring the link up in the watchdog */
+	mucse->flags |= M_FLAG_NEED_LINK_UPDATE;
+	mucse->link_check_timeout = jiffies;
+	mod_timer(&mucse->service_timer, jiffies);
+
+	hw->link = 0;
+	hw->ops.set_mbx_link_event(hw, 1);
+	hw->ops.set_mbx_ifup(hw, 1);
+}
+
 /**
  * rnpgbe_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -130,6 +162,7 @@ static int rnpgbe_open(struct net_device *netdev)
 	err = netif_set_real_num_rx_queues(netdev, mucse->num_rx_queues);
 	if (err)
 		goto err_set_queues;
+	rnpgbe_up_complete(mucse);
 
 	return 0;
 err_set_queues:
@@ -137,6 +170,28 @@ static int rnpgbe_open(struct net_device *netdev)
 	return err;
 }
 
+/**
+ * rnpgbe_down - Down a network interface
+ * @mucse: pointer to private structure
+ **/
+static void rnpgbe_down(struct mucse *mucse)
+{
+	struct mucse_hw *hw = &mucse->hw;
+	struct net_device *netdev = mucse->netdev;
+
+	set_bit(__MUCSE_DOWN, &mucse->state);
+	hw->ops.set_mbx_link_event(hw, 0);
+	hw->ops.set_mbx_ifup(hw, 0);
+	if (netif_carrier_ok(netdev))
+		e_info(drv, "NIC Link is Down\n");
+	netif_tx_stop_all_queues(netdev);
+	netif_carrier_off(netdev);
+	rnpgbe_irq_disable(mucse);
+	netif_tx_disable(netdev);
+	rnpgbe_napi_disable_all(mucse);
+	mucse->flags &= ~M_FLAG_NEED_LINK_UPDATE;
+}
+
 /**
  * rnpgbe_close - Disables a network interface
  * @netdev: network interface device structure
@@ -150,6 +205,7 @@ static int rnpgbe_close(struct net_device *netdev)
 {
 	struct mucse *mucse = netdev_priv(netdev);
 
+	rnpgbe_down(mucse);
 	rnpgbe_free_irq(mucse);
 	rnpgbe_free_txrx(mucse);
 
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
index 37ef75121898..291cdfbd16f3 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
@@ -4,6 +4,7 @@
 #include
 #include "rnpgbe.h"
+#include "rnpgbe_lib.h"
 #include "rnpgbe_mbx_fw.h"
 
 /**
@@ -221,6 +222,45 @@ static int mucse_mbx_fw_post_req(struct mucse_hw *hw,
 	return err;
 }
 
+/**
+ * mucse_mbx_write_posted_locked - Post a mbx req to firmware and poll
+ * until hw has read it out.
+ * @hw: Pointer to the HW structure
+ * @req: Pointer to the cmd req structure
+ *
+ * mucse_mbx_write_posted_locked posts a mbx req to firmware and
+ * polls until hw has read it out.
+ *
+ * @return: 0 on success, negative on failure
+ **/
+static int mucse_mbx_write_posted_locked(struct mucse_hw *hw,
+					 struct mbx_fw_cmd_req *req)
+{
+	int len = le32_to_cpu(req->datalen) + MBX_REQ_HDR_LEN;
+	int retry = 3;
+	int err = 0;
+
+	err = mutex_lock_interruptible(&hw->mbx.lock);
+	if (err)
+		return err;
+try_again:
+	retry--;
+	if (retry < 0) {
+		mutex_unlock(&hw->mbx.lock);
+		return -EIO;
+	}
+
+	err = hw->mbx.ops.write_posted(hw, (u32 *)req,
+				       L_WD(len),
+				       MBX_FW);
+	if (err)
+		goto try_again;
+
+	mutex_unlock(&hw->mbx.lock);
+
+	return err;
+}
+
 /**
  * rnpgbe_mbx_lldp_get - Get lldp status from hw
  * @hw: Pointer to the HW structure
@@ -456,3 +496,71 @@ int mucse_fw_get_macaddr(struct mucse_hw *hw, int pfvfnum,
 out:
 	return err;
 }
+
+/**
+ * mucse_mbx_link_event_enable - Enable/disable link events from hw
+ * @hw: Pointer to the HW structure
+ * @enable: true for event on, false for event off
+ *
+ * mucse_mbx_link_event_enable tells fw whether the driver wants link
+ * change events; fw then reports link status changes back to the
+ * driver.
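+ * The DMA_DUMY scratch register is also written with the 0xa0000000
+ * marker on enable and cleared on disable, which lets fw see whether
+ * the driver is listening.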
+ *
+ * @return: 0 on success, negative on failure
+ **/
+int mucse_mbx_link_event_enable(struct mucse_hw *hw, int enable)
+{
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	int err;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	if (enable)
+		hw_wr32(hw, DMA_DUMY, 0xa0000000);
+
+	build_link_set_event_mask(&req, BIT(EVT_LINK_UP),
+				  (enable & 1) << EVT_LINK_UP, &req);
+
+	err = mucse_mbx_write_posted_locked(hw, &req);
+	if (!enable)
+		hw_wr32(hw, DMA_DUMY, 0);
+
+	return err;
+}
+
+/**
+ * mucse_mbx_ifup_down - Report phy up/down status to hw
+ * @hw: Pointer to the HW structure
+ * @up: true for phy up, false for phy down
+ *
+ * mucse_mbx_ifup_down reports the driver's phy up/down status to hw.
+ *
+ * @return: 0 on success, negative on failure
+ **/
+int mucse_mbx_ifup_down(struct mucse_hw *hw, int up)
+{
+	struct mbx_fw_cmd_reply reply;
+	struct mbx_fw_cmd_req req;
+	int err;
+	int len;
+
+	memset(&req, 0, sizeof(req));
+	memset(&reply, 0, sizeof(reply));
+
+	build_ifup_down(&req, hw->nr_lane, up);
+	err = mutex_lock_interruptible(&hw->mbx.lock);
+	if (err)
+		return err;
+	len = le32_to_cpu(req.datalen) + MBX_REQ_HDR_LEN;
+	err = hw->mbx.ops.write_posted(hw,
+				       (u32 *)&req,
+				       L_WD(len),
+				       MBX_FW);
+
+	mutex_unlock(&hw->mbx.lock);
+	if (up)
+		hw_wr32(hw, DMA_DUMY, 0xa0000000);
+
+	return err;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
index 65a4f74c7090..08f5e1950ae3 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
@@ -647,6 +647,34 @@ static inline void build_get_macaddress_req(struct mbx_fw_cmd_req *req,
 	req->get_mac_addr.pfvf_num = cpu_to_le32(pfvfnum);
 }
 
+static inline void build_link_set_event_mask(struct mbx_fw_cmd_req *req,
+					     unsigned short event_mask,
+					     unsigned short enable,
+					     void *cookie)
+{
+	req->flags = 0;
+	req->opcode = cpu_to_le32(SET_EVENT_MASK);
+	req->datalen = cpu_to_le32(sizeof(req->stat_event_mask));
+	req->cookie = cookie;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->stat_event_mask.event_mask = cpu_to_le16(event_mask);
+	req->stat_event_mask.enable_stat = cpu_to_le16(enable);
+}
+
+static inline void build_ifup_down(struct mbx_fw_cmd_req *req,
+				   unsigned int nr_lane, int up)
+{
+	req->flags = 0;
+	req->opcode = cpu_to_le32(IFUP_DOWN);
+	req->datalen = cpu_to_le32(sizeof(req->ifup));
+	req->cookie = NULL;
+	req->reply_lo = 0;
+	req->reply_hi = 0;
+	req->ifup.lane = cpu_to_le32(nr_lane);
+	req->ifup.up = cpu_to_le32(up);
+}
+
 int mucse_mbx_get_capability(struct mucse_hw *hw);
 int rnpgbe_mbx_lldp_get(struct mucse_hw *hw);
 int mucse_mbx_ifinsmod(struct mucse_hw *hw, int status);
@@ -655,5 +683,6 @@ int mucse_mbx_ifforce_control_mac(struct mucse_hw *hw, int status);
 int mucse_mbx_fw_reset_phy(struct mucse_hw *hw);
 int mucse_fw_get_macaddr(struct mucse_hw *hw, int pfvfnum,
 			 u8 *mac_addr, int nr_lane);
-
+int mucse_mbx_link_event_enable(struct mucse_hw *hw, int enable);
+int mucse_mbx_ifup_down(struct mucse_hw *hw, int up);
 #endif /* _RNPGBE_MBX_FW_H */
-- 
2.25.1

Initialize link status handler

Signed-off-by: Dong Yibo 
---
 drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h    |  53 +++++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_chip.c   |  26 +++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c    |   7 +
 .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c   | 139 +++++++++++++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h    |   1 +
 .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c | 187 ++++++++++++++++++
 .../net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
| 7 + 7 files changed, 420 insertions(+) diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h index 624e0eec562a..b241740d9cc5 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h @@ -26,6 +26,15 @@ enum rnpgbe_hw_type { rnpgbe_hw_unknow }; +enum speed_enum { + speed_10, + speed_100, + speed_1000, + speed_10000, + speed_25000, + speed_40000, +}; + struct mucse_dma_info { u8 __iomem *dma_base_addr; u8 __iomem *dma_ring_addr; @@ -121,6 +130,31 @@ struct mucse_mbx_operations { bool enable); }; +/* Flow Control Settings */ +enum mucse_fc_mode { + mucse_fc_none = 0, + mucse_fc_rx_pause, + mucse_fc_tx_pause, + mucse_fc_full, + mucse_fc_default +}; + +#define PAUSE_TX (0x1) +#define PAUSE_RX (0x2) +#define PAUSE_AUTO (0x10) +#define ASYM_PAUSE BIT(11) +#define SYM_PAUSE BIT(10) + +#define M_MAX_TRAFFIC_CLASS (4) +/* Flow control parameters */ +struct mucse_fc_info { + u32 high_water[M_MAX_TRAFFIC_CLASS]; + u32 low_water[M_MAX_TRAFFIC_CLASS]; + u16 pause_time; + enum mucse_fc_mode current_mode; + enum mucse_fc_mode requested_mode; +}; + struct mucse_mbx_stats { u32 msgs_tx; u32 msgs_rx; @@ -186,6 +220,8 @@ struct mucse_hw_operations { void (*set_irq_mode)(struct mucse_hw *hw, bool legacy); void (*set_mbx_link_event)(struct mucse_hw *hw, int enable); void (*set_mbx_ifup)(struct mucse_hw *hw, int enable); + void (*check_link)(struct mucse_hw *hw, u32 *speed, bool *link_up, + bool *duplex); }; enum { @@ -224,6 +260,7 @@ struct mucse_hw { struct mucse_dma_info dma; struct mucse_eth_info eth; struct mucse_mac_info mac; + struct mucse_fc_info fc; struct mucse_mbx_info mbx; #define M_NET_FEATURE_SG BIT(0) #define M_NET_FEATURE_TX_CHECKSUM BIT(1) @@ -254,6 +291,9 @@ struct mucse_hw { u16 max_msix_vectors; int nr_lane; struct lldp_status lldp_status; + int speed; + u32 duplex; + u32 tp_mdx; int link; u8 addr[ETH_ALEN]; u8 perm_addr[ETH_ALEN]; @@ -261,6 +301,7 @@ struct mucse_hw { enum mucse_state_t { __MUCSE_TESTING, + __MUCSE_RESETTING, __MUCSE_DOWN, __MUCSE_SERVICE_SCHED, __MUCSE_PTP_TX_IN_PROGRESS, @@ -544,6 +585,7 @@ struct mucse { u32 priv_flags; #define M_PRIV_FLAG_TX_COALESCE BIT(25) #define M_PRIV_FLAG_RX_COALESCE BIT(26) +#define M_PRIV_FLAG_LLDP BIT(27) struct mucse_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; int tx_ring_item_count; int num_tx_queues; @@ -562,6 +604,9 @@ struct mucse { u16 rx_frames; u16 tx_frames; u16 tx_usecs; + bool link_up; + u32 link_speed; + bool duplex; unsigned long state; unsigned long link_check_timeout; struct timer_list service_timer; @@ -610,9 +655,17 @@ static inline unsigned int mucse_rx_bufsz(struct mucse_ring *ring) #define M_PKT_TIMEOUT (30) #define M_RX_PKT_POLL_BUDGET (64) +#define M_LINK_SPEED_UNKNOWN 0 +#define M_LINK_SPEED_10_FULL BIT(2) +#define M_LINK_SPEED_100_FULL BIT(3) +#define M_LINK_SPEED_1GB_FULL BIT(4) + +#define M_TRY_LINK_TIMEOUT (4 * HZ) + #define m_rd_reg(reg) readl(reg) #define m_wr_reg(reg, val) writel((val), reg) #define hw_wr32(hw, reg, val) m_wr_reg((hw)->hw_addr + (reg), (val)) +#define hw_rd32(hw, reg) m_rd_reg((hw)->hw_addr + (reg)) #define dma_wr32(dma, reg, val) m_wr_reg((dma)->dma_base_addr + (reg), (val)) #define dma_rd32(dma, reg) m_rd_reg((dma)->dma_base_addr + (reg)) #define eth_wr32(eth, reg, val) m_wr_reg((eth)->eth_base_addr + (reg), (val)) diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c index b85d4d0e3dbc..16eebe59915e 100644 --- 
a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c @@ -350,6 +350,31 @@ static void rnpgbe_set_mbx_ifup_hw_ops_n500(struct mucse_hw *hw, mucse_mbx_ifup_down(hw, enable); } +/** + * rnpgbe_check_link_hw_ops_n500 - Check link status from hw + * @hw: hw information structure + * @speed: store speed + * @link_up: store link status + * @duplex: store duplex status + **/ +static void rnpgbe_check_link_hw_ops_n500(struct mucse_hw *hw, + u32 *speed, + bool *link_up, + bool *duplex) +{ + if (hw->speed == 10) + *speed = M_LINK_SPEED_10_FULL; + else if (hw->speed == 100) + *speed = M_LINK_SPEED_100_FULL; + else if (hw->speed == 1000) + *speed = M_LINK_SPEED_1GB_FULL; + else + *speed = M_LINK_SPEED_UNKNOWN; + + *link_up = !!hw->link; + *duplex = !!hw->duplex; +} + static struct mucse_hw_operations hw_ops_n500 = { .init_hw = &rnpgbe_init_hw_ops_n500, .reset_hw = &rnpgbe_reset_hw_ops_n500, @@ -361,6 +386,7 @@ static struct mucse_hw_operations hw_ops_n500 = { .set_irq_mode = &rnpgbe_set_irq_mode_n500, .set_mbx_link_event = &rnpgbe_set_mbx_link_event_hw_ops_n500, .set_mbx_ifup = &rnpgbe_set_mbx_ifup_hw_ops_n500, + .check_link = &rnpgbe_check_link_hw_ops_n500, }; /** diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c index 0686bfbf55bf..b646aba48348 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c @@ -6,6 +6,7 @@ #include "rnpgbe.h" #include "rnpgbe_lib.h" +#include "rnpgbe_mbx_fw.h" /** * rnpgbe_set_rss_queues - Allocate queues for RSS @@ -1050,6 +1051,12 @@ static int rnpgbe_request_msix_irqs(struct mucse *mucse) **/ static irqreturn_t rnpgbe_intr(int irq, void *data) { + struct mucse *mucse = (struct mucse *)data; + + set_bit(__MUCSE_IN_IRQ, &mucse->state); + /* handle fw req and ack */ + rnpgbe_fw_msg_handler(mucse); + clear_bit(__MUCSE_IN_IRQ, &mucse->state); return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c index 27beb0e6e705..90b4858597c1 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c @@ -76,12 +76,147 @@ static void rnpgbe_service_timer(struct timer_list *t) rnpgbe_service_event_schedule(mucse); } +/** + * rnpgbe_service_event_complete - Call when service_task end + * @mucse: pointer to the device private structure + **/ +static void rnpgbe_service_event_complete(struct mucse *mucse) +{ + /* flush memory to make sure state is correct before next watchdog */ + smp_mb__before_atomic(); + clear_bit(__MUCSE_SERVICE_SCHED, &mucse->state); +} + +/** + * rnpgbe_watchdog_update_link - update the link status + * @mucse: pointer to the device private structure + **/ +static void rnpgbe_watchdog_update_link(struct mucse *mucse) +{ + bool flow_rx = true, flow_tx = true; + u32 link_speed = mucse->link_speed; + struct mucse_hw *hw = &mucse->hw; + bool link_up; + bool duplex; + + if (!(mucse->flags & M_FLAG_NEED_LINK_UPDATE)) + return; + + if (hw->ops.check_link) { + hw->ops.check_link(hw, &link_speed, &link_up, &duplex); + } else { + /* always assume link is up, if no check link function */ + link_speed = M_LINK_SPEED_1GB_FULL; + link_up = true; + duplex = true; + } + + if (link_up || time_after(jiffies, (mucse->link_check_timeout + + M_TRY_LINK_TIMEOUT))) { + mucse->flags &= ~M_FLAG_NEED_LINK_UPDATE; + } + mucse->link_up = link_up; + mucse->link_speed = link_speed; + mucse->duplex = duplex; 
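+
+	/* map the fc mode onto rx/tx pause directions for the log below */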
+	switch (hw->fc.current_mode) {
+	case mucse_fc_none:
+		flow_rx = false;
+		flow_tx = false;
+		break;
+	case mucse_fc_tx_pause:
+		flow_rx = false;
+		flow_tx = true;
+		break;
+	case mucse_fc_rx_pause:
+		flow_rx = true;
+		flow_tx = false;
+		break;
+	case mucse_fc_full:
+		flow_rx = true;
+		flow_tx = true;
+		break;
+	default:
+		flow_rx = false;
+		flow_tx = false;
+	}
+
+	if (mucse->link_up) {
+		e_info(drv, "NIC Link is Up %s, %s Duplex, Flow Control: %s\n",
+		       (link_speed == M_LINK_SPEED_1GB_FULL ? "1000 Mbps" :
+		       (link_speed == M_LINK_SPEED_100_FULL ? "100 Mbps" :
+		       (link_speed == M_LINK_SPEED_10_FULL ? "10 Mbps" :
+		       "unknown speed"))),
+		       ((duplex) ? "Full" : "Half"),
+		       ((flow_rx && flow_tx) ? "RX/TX" :
+		       (flow_rx ? "RX" : (flow_tx ? "TX" : "None"))));
+	}
+}
+
+/**
+ * rnpgbe_watchdog_link_is_up - update netif_carrier status and
+ * wake the tx queues
+ * @mucse: pointer to the device private structure
+ **/
+static void rnpgbe_watchdog_link_is_up(struct mucse *mucse)
+{
+	struct net_device *netdev = mucse->netdev;
+
+	/* only continue if link was previously down */
+	if (netif_carrier_ok(netdev))
+		return;
+	netif_carrier_on(netdev);
+	netif_tx_wake_all_queues(netdev);
+}
+
+/**
+ * rnpgbe_watchdog_link_is_down - update netif_carrier status and
+ * print link down message
+ * @mucse: pointer to the private structure
+ **/
+static void rnpgbe_watchdog_link_is_down(struct mucse *mucse)
+{
+	struct net_device *netdev = mucse->netdev;
+
+	mucse->link_up = false;
+	mucse->link_speed = 0;
+	/* only continue if link was up previously */
+	if (!netif_carrier_ok(netdev))
+		return;
+	e_info(drv, "NIC Link is Down\n");
+	netif_carrier_off(netdev);
+	netif_tx_stop_all_queues(netdev);
+}
+
+/**
+ * rnpgbe_watchdog_subtask - check and bring link up
+ * @mucse: pointer to the device private structure
+ **/
+static void rnpgbe_watchdog_subtask(struct mucse *mucse)
+{
+	/* if interface is down do nothing */
+	if (test_bit(__MUCSE_DOWN, &mucse->state) ||
+	    test_bit(__MUCSE_RESETTING, &mucse->state))
+		return;
+
+	rnpgbe_watchdog_update_link(mucse);
+	if (mucse->link_up)
+		rnpgbe_watchdog_link_is_up(mucse);
+	else
+		rnpgbe_watchdog_link_is_down(mucse);
+}
+
 /**
  * rnpgbe_service_task - manages and runs subtasks
  * @work: pointer to work_struct containing our data
  **/
 static void rnpgbe_service_task(struct work_struct *work)
 {
+	struct mucse *mucse = container_of(work, struct mucse, service_task);
+
+	rnpgbe_watchdog_subtask(mucse);
+	rnpgbe_service_event_complete(mucse);
 }
 
 /**
@@ -311,6 +448,8 @@ static irqreturn_t rnpgbe_msix_other(int irq, void *data)
 	struct mucse *mucse = (struct mucse *)data;
 
 	set_bit(__MUCSE_IN_IRQ, &mucse->state);
+	/* handle fw req and ack */
+	rnpgbe_fw_msg_handler(mucse);
 	clear_bit(__MUCSE_IN_IRQ, &mucse->state);
 
 	return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
index 0b4183e53e61..0f554f3eff82 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h
@@ -8,6 +8,7 @@
 /* 14 words */
 #define MUCSE_VFMAILBOX_SIZE 14
+#define MUCSE_FW_MAILBOX_SIZE MUCSE_VFMAILBOX_SIZE
 
 /* ================ PF <--> VF mailbox ================ */
 #define SHARE_MEM_BYTES 64
 static inline u32 PF_VF_SHM(struct mucse_mbx_info *mbx, int vf)
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
index 291cdfbd16f3..412d9d5da191 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c
@@ -564,3 +564,190 @@ int mucse_mbx_ifup_down(struct mucse_hw *hw, int up)
 
 	return err;
 }
+
+/**
+ * rnpgbe_link_stat_mark - Mark driver link status in reg
+ * @hw: Pointer to the HW structure
+ * @up: true for link up, false for link down
+ *
+ * rnpgbe_link_stat_mark reports the driver's view of the link to hw
+ * through the DMA_DUMY register. Fw reports the true link status back
+ * if it disagrees.
+ **/
+static void rnpgbe_link_stat_mark(struct mucse_hw *hw, int up)
+{
+	struct mucse *mucse = (struct mucse *)hw->back;
+	u32 v;
+
+	v = hw_rd32(hw, DMA_DUMY);
+	v &= ~(0x0f000f11);
+	v |= 0xa0000000;
+	if (up) {
+		v |= BIT(0);
+		switch (hw->speed) {
+		case 10:
+			v |= (speed_10 << 8);
+			break;
+		case 100:
+			v |= (speed_100 << 8);
+			break;
+		case 1000:
+			v |= (speed_1000 << 8);
+			break;
+		case 10000:
+			v |= (speed_10000 << 8);
+			break;
+		case 25000:
+			v |= (speed_25000 << 8);
+			break;
+		case 40000:
+			v |= (speed_40000 << 8);
+			break;
+		}
+		v |= (hw->duplex << 4);
+		v |= (hw->fc.current_mode << 24);
+	} else {
+		v &= ~BIT(0);
+	}
+	/* we should update lldp_status */
+	if (hw->fw_version >= 0x00010500) {
+		if (mucse->priv_flags & M_PRIV_FLAG_LLDP)
+			v |= BIT(6);
+		else
+			v &= (~BIT(6));
+	}
+	hw_wr32(hw, DMA_DUMY, v);
+}
+
+/**
+ * rnpgbe_mbx_fw_reply_handler - handle fw reply
+ * @mucse: pointer to the device private structure
+ * @reply: pointer to reply data
+ *
+ * rnpgbe_mbx_fw_reply_handler handles a fw reply and copies the reply
+ * data to cookie->priv when there is no error.
+ *
+ * @return: 0 on success, negative on failure
+ **/
+static int rnpgbe_mbx_fw_reply_handler(struct mucse *mucse,
+				       struct mbx_fw_cmd_reply *reply)
+{
+	struct mbx_req_cookie *cookie;
+
+	cookie = reply->cookie;
+	if (!cookie || cookie->magic != COOKIE_MAGIC)
+		return -EIO;
+
+	if (cookie->priv_len > 0)
+		memcpy(cookie->priv, reply->data, cookie->priv_len);
+
+	if (reply->flags & FLAGS_ERR)
+		cookie->errcode = -EIO;
+	else
+		cookie->errcode = 0;
+	/* set done only after errcode is valid for the woken waiter */
+	cookie->done = 1;
+	wake_up_interruptible(&cookie->wait);
+	return 0;
+}
+
+/**
+ * rnpgbe_mbx_fw_req_handler - handle fw req
+ * @mucse: pointer to the device private structure
+ * @req: pointer to req data
+ *
+ * rnpgbe_mbx_fw_req_handler handles a fw request, such as a link
+ * event req.
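+ * A LINK_STATUS_EVENT req carries lane status plus (when the magic is
+ * valid) speed, duplex, pause and lldp state; these are cached in the
+ * hw structure and the watchdog is flagged to refresh the carrier.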
+ *
+ * @return: 0 on success, negative on failure
+ **/
+static int rnpgbe_mbx_fw_req_handler(struct mucse *mucse,
+				     struct mbx_fw_cmd_req *req)
+{
+	u32 magic = le32_to_cpu(req->link_stat.port_st_magic);
+	struct mucse_hw *hw = &mucse->hw;
+
+	switch (le16_to_cpu(req->opcode)) {
+	case LINK_STATUS_EVENT:
+		if (le16_to_cpu(req->link_stat.lane_status))
+			hw->link = 1;
+		else
+			hw->link = 0;
+		port_stat_update_host_endian(&req->link_stat.st[0]);
+		if (hw->hw_type == rnpgbe_hw_n500 ||
+		    hw->hw_type == rnpgbe_hw_n210 ||
+		    hw->hw_type == rnpgbe_hw_n210L) {
+			if (req->link_stat.st[0].v_host.lldp_status)
+				mucse->priv_flags |= M_PRIV_FLAG_LLDP;
+			else
+				mucse->priv_flags &= (~M_PRIV_FLAG_LLDP);
+		}
+		if (magic == SPEED_VALID_MAGIC) {
+			hw->speed = le16_to_cpu(req->link_stat.st[0].speed);
+			hw->duplex = req->link_stat.st[0].duplex;
+			if (hw->hw_type == rnpgbe_hw_n500 ||
+			    hw->hw_type == rnpgbe_hw_n210 ||
+			    hw->hw_type == rnpgbe_hw_n210L) {
+				hw->fc.current_mode =
+					req->link_stat.st[0].v_host.pause;
+				hw->tp_mdx = req->link_stat.st[0].v_host.tp_mdx;
+			}
+		}
+		if (req->link_stat.lane_status)
+			rnpgbe_link_stat_mark(hw, 1);
+		else
+			rnpgbe_link_stat_mark(hw, 0);
+
+		mucse->flags |= M_FLAG_NEED_LINK_UPDATE;
+		break;
+	}
+	return 0;
+}
+
+/**
+ * rnpgbe_rcv_msg_from_fw - Read msg from fw and handle it
+ * @mucse: pointer to the device private structure
+ *
+ * rnpgbe_rcv_msg_from_fw tries to read a mbx msg from hw and checks
+ * whether it is a req or a reply.
+ *
+ * @return: 0 on success, negative on failure
+ **/
+static int rnpgbe_rcv_msg_from_fw(struct mucse *mucse)
+{
+	u32 msgbuf[MUCSE_FW_MAILBOX_SIZE];
+	struct mucse_hw *hw = &mucse->hw;
+	int retval;
+
+	retval = mucse_read_mbx(hw, msgbuf, MUCSE_FW_MAILBOX_SIZE, MBX_FW);
+	if (retval)
+		return retval;
+	/* FLAGS_DD set means this is a reply to a req we posted earlier */
+	if (((unsigned short *)msgbuf)[0] & FLAGS_DD)
+		return rnpgbe_mbx_fw_reply_handler(mucse,
+				(struct mbx_fw_cmd_reply *)msgbuf);
+
+	return rnpgbe_mbx_fw_req_handler(mucse,
+			(struct mbx_fw_cmd_req *)msgbuf);
+}
+
+static void rnpgbe_rcv_ack_from_fw(struct mucse *mucse)
+{
+	/* do-nothing */
+}
+
+/**
+ * rnpgbe_fw_msg_handler - Irq handler for mbx irq
+ * @mucse: pointer to the device private structure
+ * @return: 0 always
+ **/
+int rnpgbe_fw_msg_handler(struct mucse *mucse)
+{
+	/* check fw-req */
+	if (!mucse_check_for_msg(&mucse->hw, MBX_FW))
+		rnpgbe_rcv_msg_from_fw(mucse);
+	/* process any acks */
+	if (!mucse_check_for_ack(&mucse->hw, MBX_FW))
+		rnpgbe_rcv_ack_from_fw(mucse);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
index 08f5e1950ae3..88c140832c5e 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h
@@ -232,6 +232,13 @@ struct port_stat {
 	};
 } __packed;
 
+static inline void port_stat_update_host_endian(struct port_stat *stat)
+{
+	u16 host_val = le16_to_cpu(stat->stat);
+
+	stat->v_host = *(typeof(stat->v_host) *)&host_val;
+}
+
 #define FLAGS_DD BIT(0) /* driver clear 0, FW must set 1 */
 /* driver clear 0, FW must set only if it reporting an error */
 #define FLAGS_ERR BIT(2)
@@ -685,4 +691,5 @@ int mucse_fw_get_macaddr(struct mucse_hw *hw, int pfvfnum,
 			 u8 *mac_addr, int nr_lane);
 int mucse_mbx_link_event_enable(struct mucse_hw *hw, int enable);
 int mucse_mbx_ifup_down(struct mucse_hw *hw, int up);
+int rnpgbe_fw_msg_handler(struct mucse *mucse);
 #endif /* _RNPGBE_MBX_FW_H */
-- 
2.25.1

Initialize tx-map and tx clean
functions Signed-off-by: Dong Yibo --- drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h | 19 + .../net/ethernet/mucse/rnpgbe/rnpgbe_chip.c | 65 ++- drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h | 6 + .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c | 431 +++++++++++++++++- .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.h | 14 +- .../net/ethernet/mucse/rnpgbe/rnpgbe_main.c | 79 +++- 6 files changed, 607 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h index b241740d9cc5..26774c214da0 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h @@ -51,6 +51,7 @@ struct mucse_eth_operations { int (*set_rar)(struct mucse_eth_info *eth, u32 index, u8 *addr); int (*clear_rar)(struct mucse_eth_info *eth, u32 index); void (*clr_mc_addr)(struct mucse_eth_info *eth); + void (*set_rx)(struct mucse_eth_info *eth, bool status); }; #define RNPGBE_MAX_MTA 128 @@ -80,6 +81,7 @@ struct mucse_mac_info; struct mucse_mac_operations { void (*set_mac)(struct mucse_mac_info *mac, u8 *addr, int index); + void (*set_mac_rx)(struct mucse_mac_info *mac, bool status); }; struct mucse_mac_info { @@ -222,6 +224,7 @@ struct mucse_hw_operations { void (*set_mbx_ifup)(struct mucse_hw *hw, int enable); void (*check_link)(struct mucse_hw *hw, u32 *speed, bool *link_up, bool *duplex); + void (*set_mac_rx)(struct mucse_hw *hw, bool status); }; enum { @@ -538,6 +541,8 @@ struct mucse_ring { struct mucse_ring_container { struct mucse_ring *ring; + unsigned int total_bytes; + unsigned int total_packets; u16 work_limit; u16 count; }; @@ -620,11 +625,25 @@ struct rnpgbe_info { void (*get_invariants)(struct mucse_hw *hw); }; +static inline u16 mucse_desc_unused(struct mucse_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1;
+}
+
 static inline struct netdev_queue *txring_txq(const struct mucse_ring *ring)
 {
 	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
 }
 
+static inline __le64 build_ctob(u32 vlan_cmd, u32 mac_ip_len, u32 size)
+{
+	return cpu_to_le64(((u64)vlan_cmd << 32) | ((u64)mac_ip_len << 16) |
+			   ((u64)size));
+}
+
 #define M_RXBUFFER_1536 (1536)
 static inline unsigned int mucse_rx_bufsz(struct mucse_ring *ring)
 {
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
index 16eebe59915e..d7894891e098 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c
@@ -85,10 +85,28 @@ static void rnpgbe_eth_clr_mc_addr_n500(struct mucse_eth_info *eth)
 		eth_wr32(eth, RNPGBE_ETH_MUTICAST_HASH_TABLE(i), 0);
 }
 
+/**
+ * rnpgbe_eth_set_rx_n500 - set eth rx status
+ * @eth: pointer to eth structure
+ * @status: true to enable rx, false to disable
+ **/
+static void rnpgbe_eth_set_rx_n500(struct mucse_eth_info *eth,
+				   bool status)
+{
+	if (status) {
+		eth_wr32(eth, RNPGBE_ETH_EXCEPT_DROP_PROC, 0);
+		eth_wr32(eth, RNPGBE_ETH_TX_MUX_DROP, 0);
+	} else {
+		eth_wr32(eth, RNPGBE_ETH_EXCEPT_DROP_PROC, 1);
+		eth_wr32(eth, RNPGBE_ETH_TX_MUX_DROP, 1);
+	}
+}
+
 static struct mucse_eth_operations eth_ops_n500 = {
 	.set_rar = &rnpgbe_eth_set_rar_n500,
 	.clear_rar = &rnpgbe_eth_clear_rar_n500,
-	.clr_mc_addr = &rnpgbe_eth_clr_mc_addr_n500
+	.clr_mc_addr = &rnpgbe_eth_clr_mc_addr_n500,
+	.set_rx = &rnpgbe_eth_set_rx_n500,
 };
 
 /**
@@ -111,8 +129,31 @@ static void rnpgbe_mac_set_mac_n500(struct mucse_mac_info *mac,
 	mac_wr32(mac, RNPGBE_MAC_UNICAST_LOW(index), rar_low);
 }
 
+/**
+ * rnpgbe_mac_set_rx_n500 - Setup mac rx status
+ * @mac: pointer to mac structure
+ * @status: true for rx on / false for rx off
+ *
+ * Setup mac rx status; enabling rx also sets the mac tx enable bit.
+ **/
+static void rnpgbe_mac_set_rx_n500(struct mucse_mac_info *mac,
+				   bool status)
+{
+	u32 value = mac_rd32(mac, R_MAC_CONTROL);
+
+	if (status)
+		value |= MAC_CONTROL_TE | MAC_CONTROL_RE;
+	else
+		value &= ~(MAC_CONTROL_RE);
+
+	mac_wr32(mac, R_MAC_CONTROL, value);
+	value = mac_rd32(mac, R_MAC_FRAME_FILTER);
+	mac_wr32(mac, R_MAC_FRAME_FILTER, value | 1);
+}
+
 static struct mucse_mac_operations mac_ops_n500 = {
 	.set_mac = &rnpgbe_mac_set_mac_n500,
+	.set_mac_rx = &rnpgbe_mac_set_rx_n500,
 };
 
 /**
@@ -375,6 +416,27 @@ static void rnpgbe_check_link_hw_ops_n500(struct mucse_hw *hw,
 	*duplex = !!hw->duplex;
 }
 
+/**
+ * rnpgbe_set_mac_rx_hw_ops_n500 - Setup hw rx status
+ * @hw: hw information structure
+ * @status: true for rx on / false for rx off
+ *
+ * rnpgbe_set_mac_rx_hw_ops_n500 configures the eth and mac rx status.
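+ *
+ * The ordering is deliberate: when enabling, the mac is switched on
+ * before the eth drop logic; when disabling, the eth side is shut off
+ * first.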
+ **/
+static void rnpgbe_set_mac_rx_hw_ops_n500(struct mucse_hw *hw, bool status)
+{
+	struct mucse_eth_info *eth = &hw->eth;
+	struct mucse_mac_info *mac = &hw->mac;
+
+	if (status) {
+		mac->ops.set_mac_rx(mac, status);
+		eth->ops.set_rx(eth, status);
+	} else {
+		eth->ops.set_rx(eth, status);
+		mac->ops.set_mac_rx(mac, status);
+	}
+}
+
 static struct mucse_hw_operations hw_ops_n500 = {
 	.init_hw = &rnpgbe_init_hw_ops_n500,
 	.reset_hw = &rnpgbe_reset_hw_ops_n500,
@@ -387,6 +449,7 @@ static struct mucse_hw_operations hw_ops_n500 = {
 	.set_mbx_link_event = &rnpgbe_set_mbx_link_event_hw_ops_n500,
 	.set_mbx_ifup = &rnpgbe_set_mbx_ifup_hw_ops_n500,
 	.check_link = &rnpgbe_check_link_hw_ops_n500,
+	.set_mac_rx = &rnpgbe_set_mac_rx_hw_ops_n500,
 };
 
 /**
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
index 98031600801b..71a408c941e3 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h
@@ -26,6 +26,8 @@
 #define RING_VECTOR(n) (0x04 * (n))
 
 /* eth regs */
+#define RNPGBE_ETH_TX_MUX_DROP (0x98)
+#define RNPGBE_ETH_EXCEPT_DROP_PROC (0x0470)
 #define RNPGBE_ETH_BYPASS (0x8000)
 #define RNPGBE_HOST_FILTER_EN (0x800c)
 #define RNPGBE_REDIR_EN (0x8030)
@@ -43,6 +45,10 @@
 #define RNPGBE_LEGANCY_ENABLE (0xd004)
 #define RNPGBE_LEGANCY_TIME (0xd000)
 /* mac regs */
+#define R_MAC_CONTROL (0)
+#define MAC_CONTROL_TE (0x8)
+#define MAC_CONTROL_RE (0x4)
+#define R_MAC_FRAME_FILTER (0x4)
 #define M_RAH_AV 0x80000000
 #define RNPGBE_MAC_UNICAST_LOW(i) (0x44 + (i) * 0x08)
 #define RNPGBE_MAC_UNICAST_HIGH(i) (0x40 + (i) * 0x08)
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
index b646aba48348..1e1919750a9b 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
@@ -152,6 +152,139 @@ static void mucse_add_ring(struct mucse_ring *ring,
 	head->count++;
 }
 
+/**
+ * rnpgbe_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: structure containing interrupt and ring information
+ * @tx_ring: tx ring to clean
+ * @napi_budget: how many packets driver is allowed to clean
+ * @return: true if there is no pending tx work left
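+ *
+ * Walks the ring from next_to_clean; for each packet whose eop
+ * descriptor has the DD bit set, unmaps the buffers, frees the skb
+ * and credits the bytes/packets back to the byte queue limits.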
+ **/ +static bool rnpgbe_clean_tx_irq(struct mucse_q_vector *q_vector, + struct mucse_ring *tx_ring, + int napi_budget) +{ + u64 total_bytes = 0, total_packets = 0; + struct mucse *mucse = q_vector->mucse; + int budget = q_vector->tx.work_limit; + struct mucse_tx_buffer *tx_buffer; + struct rnpgbe_tx_desc *tx_desc; + int i = tx_ring->next_to_clean; + + if (test_bit(__MUCSE_DOWN, &mucse->state)) + return true; + + tx_ring->tx_stats.poll_count++; + tx_buffer = &tx_ring->tx_buffer_info[i]; + tx_desc = M_TX_DESC(tx_ring, i); + i -= tx_ring->count; + + do { + struct rnpgbe_tx_desc *eop_desc = tx_buffer->next_to_watch; + + /* if next_to_watch is not set then there is no work pending */ + if (!eop_desc) + break; + + /* prevent any other reads prior to eop_desc */ + rmb(); + + /* if eop DD is not set pending work has not been completed */ + if (!(eop_desc->vlan_cmd & cpu_to_le32(M_TXD_STAT_DD))) + break; + /* clear next_to_watch to prevent false hangs */ + tx_buffer->next_to_watch = NULL; + + /* update the statistics for this packet */ + total_bytes += tx_buffer->bytecount; + total_packets += tx_buffer->gso_segs; + + /* free the skb */ + napi_consume_skb(tx_buffer->skb, napi_budget); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + + /* unmap remaining buffers */ + while (tx_desc != eop_desc) { + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = M_TX_DESC(tx_ring, 0); + } + + /* unmap any remaining paged data */ + if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + } + budget--; + } + + /* move us one more past the eop_desc for start of next pkt */ + tx_buffer++; + tx_desc++; + i++; + if (unlikely(!i)) { + i -= tx_ring->count; + tx_buffer = tx_ring->tx_buffer_info; + tx_desc = M_TX_DESC(tx_ring, 0); + } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget > 0)); + netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, + total_bytes); + i += tx_ring->count; + tx_ring->next_to_clean = i; + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + tx_ring->tx_stats.tx_clean_count += total_packets; + tx_ring->tx_stats.tx_clean_times++; + if (tx_ring->tx_stats.tx_clean_times > 10) { + tx_ring->tx_stats.tx_clean_times = 0; + tx_ring->tx_stats.tx_clean_count = 0; + } + + u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; + tx_ring->tx_stats.send_done_bytes += total_bytes; + +#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) + if (likely(netif_carrier_ok(tx_ring->netdev) && + (mucse_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+		 */
+		smp_mb();
+		if (__netif_subqueue_stopped(tx_ring->netdev,
+					     tx_ring->queue_index) &&
+		    !test_bit(__MUCSE_DOWN, &mucse->state)) {
+			netif_wake_subqueue(tx_ring->netdev,
+					    tx_ring->queue_index);
+			++tx_ring->tx_stats.restart_queue;
+		}
+	}
+
+	return total_bytes == 0;
+}
+
 /**
  * rnpgbe_poll - NAPI polling RX/TX cleanup routine
  * @napi: napi struct with our devices info in it
@@ -163,7 +296,31 @@ static void mucse_add_ring(struct mucse_ring *ring,
  **/
 static int rnpgbe_poll(struct napi_struct *napi, int budget)
 {
-	return 0;
+	struct mucse_q_vector *q_vector =
+		container_of(napi, struct mucse_q_vector, napi);
+	struct mucse *mucse = q_vector->mucse;
+	bool clean_complete = true;
+	struct mucse_ring *ring;
+	int work_done = 0;
+
+	mucse_for_each_ring(ring, q_vector->tx)
+		clean_complete = rnpgbe_clean_tx_irq(q_vector, ring, budget);
+
+	if (!netif_running(mucse->netdev))
+		clean_complete = true;
+	/* force done */
+	if (test_bit(__MUCSE_DOWN, &mucse->state))
+		clean_complete = true;
+
+	if (!clean_complete)
+		return budget;
+	/* all work done, exit the polling mode */
+	if (likely(napi_complete_done(napi, work_done))) {
+		if (!test_bit(__MUCSE_DOWN, &mucse->state))
+			rnpgbe_irq_enable_queues(mucse, q_vector);
+	}
+
+	return min(work_done, budget - 1);
 }
 
 /**
@@ -896,8 +1053,15 @@ static void rnpgbe_configure_tx_ring(struct mucse *mucse,
  **/
 void rnpgbe_configure_tx(struct mucse *mucse)
 {
-	u32 i;
+	struct mucse_hw *hw = &mucse->hw;
+	struct mucse_dma_info *dma;
+	u32 i, dma_axi_ctl;
 
+	dma = &hw->dma;
+	/* dma_axi_en.tx_en must be set before the Tx queues are enabled */
+	dma_axi_ctl = dma_rd32(dma, DMA_AXI_EN);
+	dma_axi_ctl |= TX_AXI_RW_EN;
+	dma_wr32(dma, DMA_AXI_EN, dma_axi_ctl);
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 	for (i = 0; i < (mucse->num_tx_queues); i++)
 		rnpgbe_configure_tx_ring(mucse, mucse->tx_ring[i]);
@@ -961,10 +1125,30 @@ static void rnpgbe_configure_rx_ring(struct mucse *mucse,
  **/
 void rnpgbe_configure_rx(struct mucse *mucse)
 {
-	int i;
+	struct mucse_hw *hw = &mucse->hw;
+	struct mucse_dma_info *dma;
+	int i, dma_axi_ctl;
 
+	dma = &hw->dma;
 	for (i = 0; i < mucse->num_rx_queues; i++)
 		rnpgbe_configure_rx_ring(mucse, mucse->rx_ring[i]);
+
+	/* dma_axi_en.rx_en is switched on after the Rx queues are set up */
+	dma_axi_ctl = dma_rd32(dma, DMA_AXI_EN);
+	dma_axi_ctl |= RX_AXI_RW_EN;
+	dma_wr32(dma, DMA_AXI_EN, dma_axi_ctl);
+}
+
+/**
+ * rnpgbe_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @mucse: pointer to private structure
+ **/
+void rnpgbe_clean_all_tx_rings(struct mucse *mucse)
+{
+	int i;
+
+	for (i = 0; i < mucse->num_tx_queues; i++)
+		rnpgbe_clean_tx_ring(mucse->tx_ring[i]);
 }
 
 /**
@@ -976,6 +1160,13 @@ void rnpgbe_configure_rx(struct mucse *mucse)
  **/
 static irqreturn_t rnpgbe_msix_clean_rings(int irq, void *data)
 {
+	struct mucse_q_vector *q_vector = (struct mucse_q_vector *)data;
+
+	rnpgbe_irq_disable_queues(q_vector);
+
+	if (q_vector->rx.ring || q_vector->tx.ring)
+		napi_schedule_irqoff(&q_vector->napi);
+
 	return IRQ_HANDLED;
 }
@@ -1052,11 +1243,17 @@ static int rnpgbe_request_msix_irqs(struct mucse *mucse)
 static irqreturn_t rnpgbe_intr(int irq, void *data)
 {
 	struct mucse *mucse = (struct mucse *)data;
+	struct mucse_q_vector *q_vector;
 
+	q_vector = mucse->q_vector[0];
+	rnpgbe_irq_disable_queues(q_vector);
 	set_bit(__MUCSE_IN_IRQ, &mucse->state);
 	/* handle fw req and ack */
 	rnpgbe_fw_msg_handler(mucse);
 	clear_bit(__MUCSE_IN_IRQ, &mucse->state);
+	if (q_vector->rx.ring || q_vector->tx.ring)
+		napi_schedule_irqoff(&q_vector->napi);
+
+	return
IRQ_HANDLED; } @@ -1251,3 +1448,231 @@ void rnpgbe_configure_msix(struct mucse *mucse) } } } + +/** + * rnpgbe_unmap_and_free_tx_resource - Free tx resource + * @ring: ring to be freed + * @tx_buffer: pointer to tx_buffer + **/ +static void rnpgbe_unmap_and_free_tx_resource(struct mucse_ring *ring, + struct mucse_tx_buffer *tx_buffer) +{ + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + if (dma_unmap_len(tx_buffer, len)) + dma_unmap_single(ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + } else if (dma_unmap_len(tx_buffer, len)) { + dma_unmap_page(ring->dev, dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + } + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); +} + +/** + * rnpgbe_tx_map - map skb to desc, and update tx tail + * @tx_ring: ring to send + * @first: pointer to first tx_buffer for this skb + * @mac_ip_len: mac_ip_len value + * @tx_flags: tx flags for this skb + * + * rnpgbe_tx_map tries to map first->skb to multi descs, and + * then update tx tail to echo hw. + * + * @return: 0 on success, negative on failure + **/ +static int rnpgbe_tx_map(struct mucse_ring *tx_ring, + struct mucse_tx_buffer *first, u32 mac_ip_len, + u32 tx_flags) +{ + u64 fun_id = ((u64)(tx_ring->pfvfnum) << (56)); + struct mucse_tx_buffer *tx_buffer; + struct sk_buff *skb = first->skb; + struct rnpgbe_tx_desc *tx_desc; + u16 i = tx_ring->next_to_use; + unsigned int data_len, size; + skb_frag_t *frag; + dma_addr_t dma; + + tx_desc = M_TX_DESC(tx_ring, i); + size = skb_headlen(skb); + data_len = skb->data_len; + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + tx_buffer = first; + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + if (dma_mapping_error(tx_ring->dev, dma)) + goto dma_error; + + /* record length, and DMA address */ + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); + + /* 1st desc */ + tx_desc->pkt_addr = cpu_to_le64(dma | fun_id); + + while (unlikely(size > M_MAX_DATA_PER_TXD)) { + tx_desc->vlan_cmd_bsz = build_ctob(tx_flags, + mac_ip_len, + M_MAX_DATA_PER_TXD); + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = M_TX_DESC(tx_ring, 0); + i = 0; + } + dma += M_MAX_DATA_PER_TXD; + size -= M_MAX_DATA_PER_TXD; + tx_desc->pkt_addr = cpu_to_le64(dma | fun_id); + } + + if (likely(!data_len)) + break; + tx_desc->vlan_cmd_bsz = build_ctob(tx_flags, mac_ip_len, size); + /* ==== frag== */ + i++; + tx_desc++; + if (i == tx_ring->count) { + tx_desc = M_TX_DESC(tx_ring, 0); + i = 0; + } + + size = skb_frag_size(frag); + data_len -= size; + dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, + DMA_TO_DEVICE); + tx_buffer = &tx_ring->tx_buffer_info[i]; + } + + /* write last descriptor with RS and EOP bits */ + tx_desc->vlan_cmd_bsz = build_ctob(tx_flags | + M_TXD_CMD_EOP | + M_TXD_CMD_RS, + mac_ip_len, size); + /* set the timestamp */ + first->time_stamp = jiffies; + tx_ring->tx_stats.send_bytes += first->bytecount; + + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered + * memory model archs, such as IA-64). + * + * We also need this memory barrier to make certain all of the + * status bits have been updated before next_to_watch is written. 
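+	 *
+	 * This wmb() pairs with the rmb() in rnpgbe_clean_tx_irq(),
+	 * which is issued after next_to_watch is read.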
+	 */
+	wmb();
+	/* set next_to_watch value indicating a packet is present */
+	first->next_to_watch = tx_desc;
+	i++;
+	if (i == tx_ring->count)
+		i = 0;
+	tx_ring->next_to_use = i;
+	/* timestamp the skb as late as possible, just prior to notifying
+	 * the MAC that it should transmit this packet
+	 */
+	skb_tx_timestamp(skb);
+	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+	/* notify HW of packet */
+	m_wr_reg(tx_ring->tail, i);
+	return 0;
+dma_error:
+	/* clear dma mappings for failed tx_buffer_info map */
+	for (;;) {
+		tx_buffer = &tx_ring->tx_buffer_info[i];
+		rnpgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+		if (tx_buffer == first)
+			break;
+		if (i == 0)
+			i += tx_ring->count;
+		i--;
+	}
+	dev_kfree_skb_any(first->skb);
+	first->skb = NULL;
+	tx_ring->next_to_use = i;
+
+	return -1;
+}
+
+/**
+ * rnpgbe_maybe_stop_tx - Stop tx queues if not enough desc count
+ * @tx_ring: tx ring to check
+ * @size: number of descriptors needed
+ *
+ * @return: 0 if enough descriptors are free, -EBUSY otherwise
+ **/
+static int rnpgbe_maybe_stop_tx(struct mucse_ring *tx_ring, u16 size)
+{
+	if (likely(mucse_desc_unused(tx_ring) >= size))
+		return 0;
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Herbert's original patch had:
+	 * smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it.
+	 */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available.
+	 */
+	if (likely(mucse_desc_unused(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+
+	return 0;
+}
+
+/**
+ * rnpgbe_xmit_frame_ring - Send a skb to tx ring
+ * @skb: skb to be sent
+ * @mucse: pointer to private structure
+ * @tx_ring: tx ring to send on
+ *
+ * @return: NETDEV_TX_OK on success, NETDEV_TX_BUSY if the ring is full
+ **/
+netdev_tx_t rnpgbe_xmit_frame_ring(struct sk_buff *skb,
+				   struct mucse *mucse,
+				   struct mucse_ring *tx_ring)
+{
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
+	struct mucse_tx_buffer *first;
+	/* keep it not zero */
+	u32 mac_ip_len = 20;
+	u32 tx_flags = 0;
+	unsigned short f;
+
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+		skb_frag_t *frag_temp = &skb_shinfo(skb)->frags[f];
+
+		count += TXD_USE_COUNT(skb_frag_size(frag_temp));
+	}
+
+	if (rnpgbe_maybe_stop_tx(tx_ring, count + 3)) {
+		tx_ring->tx_stats.tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+	first->skb = skb;
+	first->bytecount = (skb->len > 60) ? skb->len : 60;
+	first->gso_segs = 1;
+	first->priv_tags = 0;
+	first->mss_len_vf_num = 0;
+	first->inner_vlan_tunnel_len = 0;
+	first->ctx_flag = false;
+
+	if (rnpgbe_tx_map(tx_ring, first, mac_ip_len, tx_flags))
+		goto skip_check;
+	rnpgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+skip_check:
+	return NETDEV_TX_OK;
+}
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
index bdb8a393dad8..5a3334789f66 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
@@ -8,6 +8,9 @@
 #define RING_OFFSET(n) (0x100 * (n))
 #define DMA_DUMY (0xc)
+#define DMA_AXI_EN (0x10)
+#define RX_AXI_RW_EN (0x03 << 0)
+#define TX_AXI_RW_EN (0x03 << 2)
 #define DMA_RX_START (0x10)
 #define DMA_RX_READY (0x14)
 #define DMA_TX_START (0x18)
@@ -52,6 +55,12 @@ #define e_info(msglvl, format, arg...)
\ netif_info(mucse, msglvl, mucse->netdev, format, ##arg) +/* now tx max 4k for one desc */ +#define M_MAX_TXD_PWR 12 +#define M_MAX_DATA_PER_TXD (0x1 << M_MAX_TXD_PWR) +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), M_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) + enum link_event_mask { EVT_LINK_UP = 1, EVT_NO_MEDIA = 2, @@ -119,6 +128,7 @@ void rnpgbe_clear_interrupt_scheme(struct mucse *mucse); int rnpgbe_setup_txrx(struct mucse *mucse); void rnpgbe_free_txrx(struct mucse *mucse); void rnpgbe_configure_tx(struct mucse *mucse); +void rnpgbe_clean_all_tx_rings(struct mucse *mucse); void rnpgbe_disable_rx_queue(struct mucse_ring *ring); void rnpgbe_configure_rx(struct mucse *mucse); int rnpgbe_request_irq(struct mucse *mucse); @@ -126,5 +136,7 @@ void rnpgbe_free_irq(struct mucse *mucse); void rnpgbe_napi_enable_all(struct mucse *mucse); void rnpgbe_napi_disable_all(struct mucse *mucse); void rnpgbe_configure_msix(struct mucse *mucse); - +netdev_tx_t rnpgbe_xmit_frame_ring(struct sk_buff *skb, + struct mucse *mucse, + struct mucse_ring *tx_ring); #endif /* _RNPGBE_LIB_H */ diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c index 90b4858597c1..31a191b31c79 100644 --- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c +++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c @@ -162,12 +162,14 @@ static void rnpgbe_watchdog_update_link(struct mucse *mucse) static void rnpgbe_watchdog_link_is_up(struct mucse *mucse) { struct net_device *netdev = mucse->netdev; + struct mucse_hw *hw = &mucse->hw; /* only continue if link was previously down */ if (netif_carrier_ok(netdev)) return; netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); + hw->ops.set_mac_rx(hw, true); } /** @@ -178,6 +180,7 @@ static void rnpgbe_watchdog_link_is_up(struct mucse *mucse) static void rnpgbe_watchdog_link_is_down(struct mucse *mucse) { struct net_device *netdev = mucse->netdev; + struct mucse_hw *hw = &mucse->hw; mucse->link_up = false; mucse->link_speed = 0; @@ -187,6 +190,7 @@ static void rnpgbe_watchdog_link_is_down(struct mucse *mucse) e_info(drv, "NIC Link is Down\n"); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); + hw->ops.set_mac_rx(hw, false); } /** @@ -317,6 +321,7 @@ static void rnpgbe_down(struct mucse *mucse) struct net_device *netdev = mucse->netdev; set_bit(__MUCSE_DOWN, &mucse->state); + hw->ops.set_mac_rx(hw, false); hw->ops.set_mbx_link_event(hw, 0); hw->ops.set_mbx_ifup(hw, 0); if (netif_carrier_ok(netdev)) @@ -324,6 +329,7 @@ static void rnpgbe_down(struct mucse *mucse) netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); rnpgbe_irq_disable(mucse); + netif_tx_disable(netdev); rnpgbe_napi_disable_all(mucse); mucse->flags &= ~M_FLAG_NEED_LINK_UPDATE; @@ -359,14 +365,83 @@ static int rnpgbe_close(struct net_device *netdev) static netdev_tx_t rnpgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; + struct mucse *mucse = netdev_priv(netdev); + struct mucse_ring *tx_ring; + + if (!netif_carrier_ok(netdev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + if (skb->len < 33) { + if (skb_padto(skb, 33)) + return NETDEV_TX_OK; + skb->len = 33; + } + if (skb->len > 65535) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + tx_ring = mucse->tx_ring[skb->queue_mapping]; + return rnpgbe_xmit_frame_ring(skb, mucse, tx_ring); +} + +/** + * rnpgbe_get_stats64 - Get stats for this netdev + * @netdev: network interface device structure + * @stats: 
+ **/
+static void rnpgbe_get_stats64(struct net_device *netdev,
+			       struct rtnl_link_stats64 *stats)
+{
+	struct mucse *mucse = netdev_priv(netdev);
+	int i;
+
+	rcu_read_lock();
+	for (i = 0; i < mucse->num_rx_queues; i++) {
+		struct mucse_ring *ring = READ_ONCE(mucse->rx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes = ring->stats.bytes;
+			} while (u64_stats_fetch_retry(&ring->syncp, start));
+			stats->rx_packets += packets;
+			stats->rx_bytes += bytes;
+		}
+	}
+
+	for (i = 0; i < mucse->num_tx_queues; i++) {
+		struct mucse_ring *ring = READ_ONCE(mucse->tx_ring[i]);
+		u64 bytes, packets;
+		unsigned int start;
+
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes = ring->stats.bytes;
+			} while (u64_stats_fetch_retry(&ring->syncp, start));
+			stats->tx_packets += packets;
+			stats->tx_bytes += bytes;
+		}
+	}
+	rcu_read_unlock();
+	/* following stats updated by rnpgbe_watchdog_task() */
+	stats->multicast = netdev->stats.multicast;
+	stats->rx_errors = netdev->stats.rx_errors;
+	stats->rx_length_errors = netdev->stats.rx_length_errors;
+	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
+	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
 }
 
 const struct net_device_ops rnpgbe_netdev_ops = {
 	.ndo_open = rnpgbe_open,
 	.ndo_stop = rnpgbe_close,
 	.ndo_start_xmit = rnpgbe_xmit_frame,
+	.ndo_get_stats64 = rnpgbe_get_stats64,
 };
 
 /**
--
2.25.1

Initialize rx clean function.

Signed-off-by: Dong Yibo
---
 drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h    |  23 +
 .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c    | 547 +++++++++++++++++-
 .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.h    |  33 ++
 3 files changed, 602 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
index 26774c214da0..9d8d939d81a4 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
@@ -362,6 +362,7 @@ struct mucse_rx_queue_stats {
 	u64 rx_equal_count;
 	u64 rx_clean_times;
 	u64 rx_clean_count;
+	u64 rx_resync;
 };
 
 union rnpgbe_rx_desc {
@@ -492,6 +493,7 @@ struct mucse_ring {
 	struct mucse_q_vector *q_vector;
 	struct net_device *netdev;
 	struct device *dev;
+	struct page_pool *page_pool;
 	void *desc;
 	union {
 		struct mucse_tx_buffer *tx_buffer_info;
@@ -584,6 +586,7 @@ struct mucse {
 #define M_FLAG_NEED_LINK_UPDATE BIT(0)
 #define M_FLAG_MSIX_ENABLED BIT(1)
 #define M_FLAG_MSI_ENABLED BIT(2)
+#define M_FLAG_SRIOV_ENABLED BIT(23)
 	u32 flags2;
 #define M_FLAG2_NO_NET_REG BIT(0)
 #define M_FLAG2_INSMOD BIT(1)
@@ -633,6 +636,14 @@ static inline u16 mucse_desc_unused(struct mucse_ring *ring)
 	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
 }
 
+static inline u16 mucse_desc_unused_rx(struct mucse_ring *ring)
+{
+	u16 ntc = ring->next_to_clean;
+	u16 ntu = ring->next_to_use;
+
+	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 16;
+}
+
 static inline struct netdev_queue *txring_txq(const struct mucse_ring *ring)
 {
 	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
@@ -644,12 +655,21 @@ static inline __le64 build_ctob(u32 vlan_cmd, u32 mac_ip_len, u32 size)
 			   ((u64)size));
 }
 
+#define M_RXBUFFER_256 (256)
 #define M_RXBUFFER_1536 (1536)
 static inline unsigned int mucse_rx_bufsz(struct mucse_ring *ring)
 {
 	return (M_RXBUFFER_1536 - NET_IP_ALIGN);
 }
 
+#define M_RX_HDR_SIZE M_RXBUFFER_256
+
+static inline __le16 rnpgbe_test_staterr(union rnpgbe_rx_desc *rx_desc,
+					 const u16 stat_err_bits)
+{
+	return rx_desc->wb.cmd & cpu_to_le16(stat_err_bits);
+}
+
 #define M_TX_DESC(R, i) (&(((struct rnpgbe_tx_desc *)((R)->desc))[i]))
 #define M_RX_DESC(R, i) (&(((union rnpgbe_rx_desc *)((R)->desc))[i]))
@@ -681,6 +701,7 @@ static inline unsigned int mucse_rx_bufsz(struct mucse_ring *ring)
 
 #define M_TRY_LINK_TIMEOUT (4 * HZ)
 
+#define M_RX_BUFFER_WRITE (16)
 #define m_rd_reg(reg) readl(reg)
 #define m_wr_reg(reg, val) writel((val), reg)
 #define hw_wr32(hw, reg, val) m_wr_reg((hw)->hw_addr + (reg), (val))
@@ -700,6 +721,8 @@ static inline unsigned int mucse_rx_bufsz(struct mucse_ring *ring)
 #define mucse_dbg(mucse, fmt, arg...) \
 	dev_dbg(&(mucse)->pdev->dev, fmt, ##arg)
 
+#define M_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+
 void rnpgbe_service_event_schedule(struct mucse *mucse);
 
 #endif /* _RNPGBE_H */
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
index 1e1919750a9b..675ed12cffcb 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
@@ -3,11 +3,16 @@
 
 #include
 #include
+#include
+#include
+#include
 
 #include "rnpgbe.h"
 #include "rnpgbe_lib.h"
 #include "rnpgbe_mbx_fw.h"
 
+static bool rnpgbe_alloc_rx_buffers(struct mucse_ring *rx_ring,
+				    u16 cleaned_count);
 /**
  * rnpgbe_set_rss_queues - Allocate queues for RSS
  * @mucse: pointer to private structure
@@ -285,6 +290,409 @@ static bool rnpgbe_clean_tx_irq(struct mucse_q_vector *q_vector,
 	return total_bytes == 0;
 }
 
+/**
+ * rnpgbe_get_buffer - get the rx_buffer to be used
+ * @rx_ring: pointer to rx ring
+ * @rx_desc: pointer to the rx descriptor for this packet
+ * @skb: pointer to where the skb (if any) is stored
+ * @size: data size in this desc
+ * @return: the rx_buffer for the current descriptor
+ **/
+static struct mucse_rx_buffer *rnpgbe_get_buffer(struct mucse_ring *rx_ring,
+						 union rnpgbe_rx_desc *rx_desc,
+						 struct sk_buff **skb,
+						 const unsigned int size)
+{
+	struct mucse_rx_buffer *rx_buffer;
+	int time = 0;
+	u16 *data;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	data = page_address(rx_buffer->page) + rx_buffer->page_offset;
+	*skb = rx_buffer->skb;
+
+	prefetchw(page_address(rx_buffer->page) + rx_buffer->page_offset);
+
+	/* we are reusing so sync this buffer for CPU use */
+try_sync:
+	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
+				      rx_buffer->page_offset, size,
+				      DMA_FROM_DEVICE);
+
+	if ((*data == CHECK_DATA) && time < 4) {
+		time++;
+		udelay(5);
+		rx_ring->rx_stats.rx_resync++;
+		goto try_sync;
+	}
+
+	return rx_buffer;
+}
+
+/**
+ * rnpgbe_add_rx_frag - Add non-linear data to the skb
+ * @rx_ring: pointer to rx ring
+ * @rx_buffer: pointer to rx_buffer
+ * @skb: pointer to the skb for this packet
+ * @size: data size in this desc
+ **/
+static void rnpgbe_add_rx_frag(struct mucse_ring *rx_ring,
+			       struct mucse_rx_buffer *rx_buffer,
+			       struct sk_buff *skb,
+			       unsigned int size)
+{
+	unsigned int truesize = SKB_DATA_ALIGN(M_SKB_PAD + size);
+
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+			rx_buffer->page_offset, size, truesize);
+}
+
+/**
+ * rnpgbe_build_skb - Try to build an skb around the rx_buffer
+ * @rx_ring: pointer to rx ring
+ * @rx_buffer: pointer to rx_buffer
+ * @rx_desc: pointer to rx desc for this data
+ * @size: data size in this desc
+ * @return: skb for this rx_buffer
+ **/
+static struct sk_buff *rnpgbe_build_skb(struct mucse_ring *rx_ring,
+					struct mucse_rx_buffer *rx_buffer,
+					union rnpgbe_rx_desc *rx_desc,
+					unsigned int size)
+{
+	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+				SKB_DATA_ALIGN(size + M_SKB_PAD);
+	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+	struct sk_buff *skb;
+
+	net_prefetch(va);
+	/* build an skb around the page buffer */
+	skb = build_skb(va - M_SKB_PAD, truesize);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, M_SKB_PAD);
+	__skb_put(skb, size);
+
+	skb_mark_for_recycle(skb);
+
+	return skb;
+}
+
+/**
+ * rnpgbe_put_rx_buffer - clear rx_buffer for next use
+ * @rx_ring: pointer to rx ring
+ * @rx_buffer: pointer to rx_buffer
+ **/
+static void rnpgbe_put_rx_buffer(struct mucse_ring *rx_ring,
+				 struct mucse_rx_buffer *rx_buffer)
+{
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+	rx_buffer->skb = NULL;
+}
+
+/**
+ * rnpgbe_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ *
+ * @return: true for not end of packet
+ **/
+static bool rnpgbe_is_non_eop(struct mucse_ring *rx_ring,
+			      union rnpgbe_rx_desc *rx_desc,
+			      struct sk_buff *skb)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(M_RX_DESC(rx_ring, ntc));
+
+	/* if we are the last buffer then there is nothing else to do */
+	if (likely(rnpgbe_test_staterr(rx_desc, M_RXD_STAT_EOP)))
+		return false;
+	/* place skb in next buffer to be received */
+	rx_ring->rx_buffer_info[ntc].skb = skb;
+	rx_ring->rx_stats.non_eop_descs++;
+	/* we should clean it since we used all info in it */
+	rx_desc->wb.cmd = 0;
+
+	return true;
+}
+
+/**
+ * rnpgbe_pull_tail - Pull header to linear portion of buffer
+ * @skb: Current socket buffer containing buffer in progress
+ **/
+static void rnpgbe_pull_tail(struct sk_buff *skb)
+{
+	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+	unsigned int pull_len;
+	unsigned char *va;
+
+	/* it is valid to use page_address instead of kmap since we are
+	 * working with pages allocated out of the lomem pool per
+	 * alloc_page(GFP_ATOMIC)
+	 */
+	va = skb_frag_address(frag);
+
+	/* we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = eth_get_headlen(skb->dev, va, M_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	skb_frag_size_sub(frag, pull_len);
+	skb_frag_off_add(frag, pull_len);
+	skb->data_len -= pull_len;
+	skb->tail += pull_len;
+}
+
+/**
+ * rnpgbe_cleanup_headers - Correct corrupted or empty headers
+ * @skb: Current socket buffer containing buffer in progress
+ * @return: true if an error was encountered and skb was freed.
+ **/
+static bool rnpgbe_cleanup_headers(struct sk_buff *skb)
+{
+	if (IS_ERR(skb))
+		return true;
+	/* place header in linear portion of buffer */
+	if (!skb_headlen(skb))
+		rnpgbe_pull_tail(skb);
+	/* if eth_skb_pad returns an error the skb was freed */
+	if (eth_skb_pad(skb))
+		return true;
+
+	return false;
+}
+
+/**
+ * rnpgbe_rx_hash - Setup hash type for skb
+ * @ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ **/
+static void rnpgbe_rx_hash(struct mucse_ring *ring,
+			   union rnpgbe_rx_desc *rx_desc,
+			   struct sk_buff *skb)
+{
+	int rss_type;
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+#define M_RSS_TYPE_MASK 0xc0
+	rss_type = rx_desc->wb.cmd & M_RSS_TYPE_MASK;
+	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.rss_hash),
+		     rss_type ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+
+/**
+ * rnpgbe_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @ring: structure containing ring specific data
+ * @rx_desc: current Rx descriptor being processed
+ * @skb: skb currently being received and modified
+ **/
+static void rnpgbe_rx_checksum(struct mucse_ring *ring,
+			       union rnpgbe_rx_desc *rx_desc,
+			       struct sk_buff *skb)
+{
+	skb_checksum_none_assert(skb);
+	/* Rx csum disabled */
+	if (!(ring->netdev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* a frame with an outer L3/L4 error can only reach us in
+	 * promisc or rx-all mode; leave its checksum unverified
+	 */
+	if (rnpgbe_test_staterr(rx_desc, M_RXD_STAT_ERR_MASK))
+		return;
+	ring->rx_stats.csum_good++;
+	/* at least it is an ip packet which has ip checksum */
+
+	/* It must be a TCP or UDP packet with a valid checksum */
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/**
+ * rnpgbe_process_skb_fields - Setup skb header fields from desc
+ * @rx_ring: structure containing ring specific data
+ * @rx_desc: current Rx descriptor being processed
+ * @skb: skb currently being received and modified
+ *
+ * rnpgbe_process_skb_fields checks the ring, descriptor information
+ * in order to setup the hash, chksum, vlan, protocol, and other
+ * fields within the skb.
+ **/
+static void rnpgbe_process_skb_fields(struct mucse_ring *rx_ring,
+				      union rnpgbe_rx_desc *rx_desc,
+				      struct sk_buff *skb)
+{
+	struct net_device *dev = rx_ring->netdev;
+	struct mucse *mucse = netdev_priv(dev);
+
+	rnpgbe_rx_hash(rx_ring, rx_desc, skb);
+	rnpgbe_rx_checksum(rx_ring, rx_desc, skb);
+
+	if (((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
+	     (dev->features & NETIF_F_HW_VLAN_STAG_RX)) &&
+	    rnpgbe_test_staterr(rx_desc, M_RXD_STAT_VLAN_VALID) &&
+	    !ignore_veb_vlan(mucse, rx_desc)) {
+		if (rnpgbe_test_ext_cmd(rx_desc, REV_OUTER_VLAN)) {
+			u16 vid_inner = le16_to_cpu(rx_desc->wb.vlan);
+			__be16 vlan_proto;
+			u16 vid_outer;
+
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+					       vid_inner);
+			/* check outer vlan type */
+			if (rnpgbe_test_staterr(rx_desc, M_RXD_STAT_STAG))
+				vlan_proto = htons(ETH_P_8021AD);
+			else
+				vlan_proto = htons(ETH_P_8021Q);
+			vid_outer = le16_to_cpu(rx_desc->wb.mark);
+			/* push outer */
+			skb = __vlan_hwaccel_push_inside(skb);
+			__vlan_hwaccel_put_tag(skb, vlan_proto, vid_outer);
+		} else {
+			/* only inner vlan */
+			u16 vid = le16_to_cpu(rx_desc->wb.vlan);
+
+			/* check vlan type */
+			if (rnpgbe_test_staterr(rx_desc, M_RXD_STAT_STAG)) {
+				__vlan_hwaccel_put_tag(skb,
+						       htons(ETH_P_8021AD),
+						       vid);
+			} else {
+				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+						       vid);
+			}
+		}
+		rx_ring->rx_stats.vlan_remove++;
+	}
+	skb_record_rx_queue(skb, rx_ring->queue_index);
+	skb->protocol = eth_type_trans(skb, dev);
+}
+
+/**
+ * rnpgbe_clean_rx_irq - Clean completed descriptors from Rx ring
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * rnpgbe_clean_rx_irq checks the dd bit in each descriptor and
+ * handles the descriptor once the hw has written its data back
+ *
+ * @return: amount of work completed
+ **/
+static int rnpgbe_clean_rx_irq(struct mucse_q_vector *q_vector,
+			       struct mucse_ring *rx_ring,
+			       int budget)
+{
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	u16 cleaned_count = mucse_desc_unused_rx(rx_ring);
+	unsigned int driver_drop_packets = 0;
+	bool fail_alloc = false;
+
+	while (likely(total_rx_packets < budget)) {
+		struct mucse_rx_buffer *rx_buffer;
+		union rnpgbe_rx_desc *rx_desc;
+		struct sk_buff *skb;
+		unsigned int size;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= M_RX_BUFFER_WRITE) {
+			fail_alloc |= rnpgbe_alloc_rx_buffers(rx_ring,
+							      cleaned_count);
+			cleaned_count = 0;
+		}
+		rx_desc = M_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+		if (!rnpgbe_test_staterr(rx_desc, M_RXD_STAT_DD))
+			break;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		dma_rmb();
+		size = le16_to_cpu(rx_desc->wb.len);
+		if (!size)
+			break;
+
+		rx_buffer = rnpgbe_get_buffer(rx_ring, rx_desc, &skb, size);
+
+		if (skb)
+			rnpgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
+		else
+			skb = rnpgbe_build_skb(rx_ring, rx_buffer, rx_desc,
+					       size);
+		/* exit if we failed to retrieve a buffer */
+		if (!skb) {
+			page_pool_recycle_direct(rx_ring->page_pool,
+						 rx_buffer->page);
+			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			break;
+		}
+
+		rnpgbe_put_rx_buffer(rx_ring, rx_buffer);
+		cleaned_count++;
+
+		/* place incomplete frames back on ring for completion */
+		if (rnpgbe_is_non_eop(rx_ring, rx_desc, skb))
+			continue;
+
+		/* verify the packet layout is correct */
+		if (rnpgbe_cleanup_headers(skb)) {
+			/* we should clean it since we used all info in it */
+			rx_desc->wb.cmd = 0;
+			continue;
+		}
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+		/* populate checksum, timestamp, VLAN, and protocol */
+		rnpgbe_process_skb_fields(rx_ring, rx_desc, skb);
+		/* we should clean it since we used all info in it */
+		rx_desc->wb.cmd = 0;
+		napi_gro_receive(&q_vector->napi, skb);
+		/* update budget accounting */
+		total_rx_packets++;
+	}
+
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	rx_ring->rx_stats.driver_drop_packets += driver_drop_packets;
+	rx_ring->rx_stats.rx_clean_count += total_rx_packets;
+	rx_ring->rx_stats.rx_clean_times++;
+	if (rx_ring->rx_stats.rx_clean_times > 10) {
+		rx_ring->rx_stats.rx_clean_times = 0;
+		rx_ring->rx_stats.rx_clean_count = 0;
+	}
+	u64_stats_update_end(&rx_ring->syncp);
+	q_vector->rx.total_packets += total_rx_packets;
+	q_vector->rx.total_bytes += total_rx_bytes;
+
+	if (total_rx_packets >= budget)
+		rx_ring->rx_stats.poll_again_count++;
+	return fail_alloc ? budget : total_rx_packets;
+}
+
 /**
  * rnpgbe_poll - NAPI polling RX/TX cleanup routine
  * @napi: napi struct with our devices info in it
@@ -299,12 +707,27 @@ static int rnpgbe_poll(struct napi_struct *napi, int budget)
 	struct mucse_q_vector *q_vector = container_of(napi,
 						       struct mucse_q_vector,
 						       napi);
 	struct mucse *mucse = q_vector->mucse;
+	int per_ring_budget, work_done = 0;
 	bool clean_complete = true;
 	struct mucse_ring *ring;
-	int work_done = 0;
+	int cleaned_total = 0;
 
 	mucse_for_each_ring(ring, q_vector->tx)
 		clean_complete = rnpgbe_clean_tx_irq(q_vector, ring, budget);
 
+	if (q_vector->rx.count > 1)
+		per_ring_budget = max(budget / q_vector->rx.count, 1);
+	else
+		per_ring_budget = budget;
+
+	mucse_for_each_ring(ring, q_vector->rx) {
+		int cleaned;
+
+		cleaned = rnpgbe_clean_rx_irq(q_vector, ring, per_ring_budget);
+		work_done += cleaned;
+		cleaned_total += cleaned;
+		if (cleaned >= per_ring_budget)
+			clean_complete = false;
+	}
 
 	if (!netif_running(mucse->netdev))
 		clean_complete = true;
@@ -829,6 +1252,35 @@ static void rnpgbe_free_all_tx_resources(struct mucse *mucse)
 		rnpgbe_free_tx_resources(mucse->tx_ring[i]);
 }
 
+/**
+ * mucse_alloc_page_pool - Alloc page pool for this ring
+ * @rx_ring: pointer to rx ring
+ * @return: 0 if success
+ **/
+static int mucse_alloc_page_pool(struct mucse_ring *rx_ring)
+{
+	int ret = 0;
+
+	struct page_pool_params pp_params = {
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.order = 0,
+		.pool_size = rx_ring->size,
+		.nid = dev_to_node(rx_ring->dev),
+		.dev = rx_ring->dev,
+		.dma_dir = DMA_FROM_DEVICE,
+		.offset = 0,
+		.max_len = PAGE_SIZE,
+	};
+
+	rx_ring->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(rx_ring->page_pool)) {
+		ret = PTR_ERR(rx_ring->page_pool);
+		rx_ring->page_pool = NULL;
+	}
+
+	return ret;
+}
+
 /**
  * rnpgbe_setup_rx_resources - allocate Rx resources (Descriptors)
  * @rx_ring: rx descriptor ring (for a specific queue) to setup
@@ -871,6 +1323,8 @@ static int rnpgbe_setup_rx_resources(struct mucse_ring *rx_ring,
 	memset(rx_ring->desc, 0, rx_ring->size);
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
+	if (mucse_alloc_page_pool(rx_ring))
+		goto err;
 
 	return 0;
 err:
@@ -901,6 +1355,7 @@ static void rnpgbe_clean_rx_ring(struct mucse_ring *rx_ring)
 					      rx_buffer->page_offset,
 					      mucse_rx_bufsz(rx_ring),
 					      DMA_FROM_DEVICE);
+		page_pool_recycle_direct(rx_ring->page_pool, rx_buffer->page);
 		rx_buffer->page = NULL;
 		i++;
 		rx_buffer++;
@@ -933,6 +1388,10 @@ static void rnpgbe_free_rx_resources(struct mucse_ring *rx_ring)
 	dma_free_coherent(rx_ring->dev, rx_ring->size,
 			  rx_ring->desc, rx_ring->dma);
 	rx_ring->desc = NULL;
+	if (rx_ring->page_pool) {
+		page_pool_destroy(rx_ring->page_pool);
+		rx_ring->page_pool = NULL;
+	}
 }
 
 /**
@@ -1076,6 +1535,91 @@ void rnpgbe_disable_rx_queue(struct mucse_ring *ring)
 	ring_wr32(ring, DMA_RX_START, 0);
 }
 
+/**
+ * mucse_alloc_mapped_page - Alloc a page for this rx_buffer
+ * @rx_ring: pointer to rx ring
+ * @bi: pointer to this rx_buffer structure
+ *
+ * mucse_alloc_mapped_page allocates a page from the ring's page
+ * pool for this rx_buffer
+ *
+ * @return: true if the allocation succeeded
+ **/
+static bool mucse_alloc_mapped_page(struct mucse_ring *rx_ring,
+				    struct mucse_rx_buffer *bi)
+{
+	struct page *page;
+
+	page = page_pool_dev_alloc_pages(rx_ring->page_pool);
+	if (unlikely(!page))
+		return false;
+
+	bi->dma = page_pool_get_dma_addr(page);
+	bi->page = page;
+	bi->page_offset = M_SKB_PAD;
+
+	return true;
+}
+
+/**
+ * rnpgbe_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ * @return: true if buffer allocation failed
+ **/
+static bool rnpgbe_alloc_rx_buffers(struct mucse_ring *rx_ring,
+				    u16 cleaned_count)
+{
+	u64 fun_id = ((u64)(rx_ring->pfvfnum) << (32 + 24));
+	union rnpgbe_rx_desc *rx_desc;
+	u16 i = rx_ring->next_to_use;
+	struct mucse_rx_buffer *bi;
+	bool err = false;
+	u16 *data;
+	u16 bufsz;
+
+	/* nothing to do */
+	if (!cleaned_count)
+		return err;
+
+	rx_desc = M_RX_DESC(rx_ring, i);
+	bi = &rx_ring->rx_buffer_info[i];
+	i -= rx_ring->count;
+	bufsz = mucse_rx_bufsz(rx_ring);
+
+	do {
+		if (!mucse_alloc_mapped_page(rx_ring, bi)) {
+			err = true;
+			break;
+		}
+
+		/* write the sentinel so rnpgbe_get_buffer() can detect
+		 * descriptors whose data has not landed yet
+		 */
+		data = page_address(bi->page) + bi->page_offset;
+		*data = CHECK_DATA;
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset, bufsz,
+						 DMA_FROM_DEVICE);
+		rx_desc->pkt_addr =
+			cpu_to_le64(bi->dma + bi->page_offset + fun_id);
+
+		/* clean dd */
+		rx_desc->resv_cmd = 0;
+		rx_desc++;
+		bi++;
+		i++;
+		if (unlikely(!i)) {
+			rx_desc = M_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+		cleaned_count--;
+	} while (cleaned_count);
+
+	i += rx_ring->count;
+
+	if (rx_ring->next_to_use != i)
+		mucse_update_rx_tail(rx_ring, i);
+
+	return err;
+}
+
 /**
  * rnpgbe_configure_rx_ring - Configure Rx ring after Reset
  * @mucse: pointer to private structure
@@ -1115,6 +1659,7 @@ static void rnpgbe_configure_rx_ring(struct mucse *mucse,
 	ring_wr32(ring, DMA_REG_RX_INT_DELAY_TIMER,
 		  mucse->rx_usecs * hw->usecstocount);
 	ring_wr32(ring, DMA_REG_RX_INT_DELAY_PKTCNT, mucse->rx_frames);
+	rnpgbe_alloc_rx_buffers(ring, mucse_desc_unused_rx(ring));
 }
 
 /**
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
index 5a3334789f66..c138919c1b9a 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.h
@@ -6,6 +6,7 @@
 
 #include "rnpgbe.h"
 
+#define CHECK_DATA (0xabcd)
 #define RING_OFFSET(n) (0x100 * (n))
 #define DMA_DUMY (0xc)
 #define DMA_AXI_EN (0x10)
@@ -123,6 +124,38 @@ static inline void rnpgbe_irq_disable(struct mucse *mucse)
 	}
 }
 
+static inline int ignore_veb_vlan(struct mucse *mucse,
+				  union rnpgbe_rx_desc *rx_desc)
+{
+	if (unlikely((mucse->flags & M_FLAG_SRIOV_ENABLED) &&
+		     (rx_desc->wb.rev1 & cpu_to_le16(VEB_VF_IGNORE_VLAN))))
+		return 1;
+	return 0;
+}
+
+static inline __le16 rnpgbe_test_ext_cmd(union rnpgbe_rx_desc *rx_desc,
+					 const u16 stat_err_bits)
+{
+	return rx_desc->wb.rev1 & cpu_to_le16(stat_err_bits);
+}
+
+static inline void mucse_update_rx_tail(struct mucse_ring *rx_ring,
+					u32 val)
+{
+	rx_ring->next_to_use = val;
+	/* update next to alloc since we have filled the ring */
+	rx_ring->next_to_alloc = val;
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch. (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+	m_wr_reg(rx_ring->tail, val);
+}
+
 int rnpgbe_init_interrupt_scheme(struct mucse *mucse);
 void rnpgbe_clear_interrupt_scheme(struct mucse *mucse);
 int rnpgbe_setup_txrx(struct mucse *mucse);
--
2.25.1

Initialize itr function according to rx packets/bytes

Signed-off-by: Dong Yibo
---
 drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h    |  5 +
 .../net/ethernet/mucse/rnpgbe/rnpgbe_lib.c    | 99 ++++++++++++++++++-
 2 files changed, 103 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
index 9d8d939d81a4..d24438f458ac 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h
@@ -547,6 +547,8 @@ struct mucse_ring_container {
 	unsigned int total_packets;
 	u16 work_limit;
 	u16 count;
+	u16 itr;
+	int update_count;
 };
 
 struct mucse_q_vector {
@@ -701,6 +703,9 @@ static inline __le16 rnpgbe_test_staterr(union rnpgbe_rx_desc *rx_desc,
 
 #define M_TRY_LINK_TIMEOUT (4 * HZ)
 
+#define M_LOWEREST_ITR (5)
+#define M_4K_ITR (980)
+
 #define M_RX_BUFFER_WRITE (16)
 #define m_rd_reg(reg) readl(reg)
 #define m_wr_reg(reg, val) writel((val), reg)
diff --git a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
index 675ed12cffcb..1211b742223b 100644
--- a/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
+++ b/drivers/net/ethernet/mucse/rnpgbe/rnpgbe_lib.c
@@ -693,6 +693,66 @@ static int rnpgbe_clean_rx_irq(struct mucse_q_vector *q_vector,
 	return fail_alloc ? budget : total_rx_packets;
 }
 
+/**
+ * rnpgbe_update_ring_itr_rx - Update rx itr according to received packets/bytes
+ * @q_vector: structure containing interrupt and ring information
+ **/
+static void rnpgbe_update_ring_itr_rx(struct mucse_q_vector *q_vector)
+{
+	struct mucse *mucse = q_vector->mucse;
+	int new_val = q_vector->itr_rx;
+	int avg_wire_size = 0;
+	unsigned int packets;
+
+	switch (mucse->link_speed) {
+	case M_LINK_SPEED_10_FULL:
+	case M_LINK_SPEED_100_FULL:
+		new_val = M_4K_ITR;
+		goto set_itr_val;
+	default:
+		break;
+	}
+
+	packets = q_vector->rx.total_packets;
+	if (packets)
+		avg_wire_size = max_t(u32, avg_wire_size,
+				      q_vector->rx.total_bytes / packets);
+
+	/* if avg_wire_size isn't set no work was done */
+	if (!avg_wire_size)
+		goto clear_counts;
+
+	/* Add 24 bytes to size to account for CRC, preamble, and gap */
+	avg_wire_size += 24;
+
+	/* Don't starve jumbo frames */
+	avg_wire_size = min(avg_wire_size, 3000);
+
+	/* Give a little boost to mid-size frames */
+	if (avg_wire_size > 300 && avg_wire_size < 1200)
+		new_val = avg_wire_size / 3;
+	else
+		new_val = avg_wire_size / 2;
+
+	if (new_val < M_LOWEREST_ITR)
+		new_val = M_LOWEREST_ITR;
+
+set_itr_val:
+	if (q_vector->rx.itr != new_val) {
+		q_vector->rx.update_count++;
+		if (q_vector->rx.update_count >= 2) {
+			q_vector->rx.itr = new_val;
+			q_vector->rx.update_count = 0;
+		}
+	} else {
+		q_vector->rx.update_count = 0;
+	}
+
+clear_counts:
+	q_vector->rx.total_bytes = 0;
+	q_vector->rx.total_packets = 0;
+}
+
 /**
  * rnpgbe_poll - NAPI polling RX/TX cleanup routine
  * @napi: napi struct with our devices info in it
@@ -739,6 +799,7 @@ static int rnpgbe_poll(struct napi_struct *napi, int budget)
 		return budget;
 	/* all work done, exit the polling mode */
 	if (likely(napi_complete_done(napi, work_done))) {
+		rnpgbe_update_ring_itr_rx(q_vector);
 		if (!test_bit(__MUCSE_DOWN, &mucse->state))
 			rnpgbe_irq_enable_queues(mucse, q_vector);
 	}
@@ -1696,6 +1757,42 @@ void rnpgbe_clean_all_tx_rings(struct mucse *mucse)
 		rnpgbe_clean_tx_ring(mucse->tx_ring[i]);
 }
 
+/**
+ * rnpgbe_write_eitr_rx - write the new rx itr value to hw
+ * @q_vector: structure containing interrupt and ring information
+ **/
+static void rnpgbe_write_eitr_rx(struct mucse_q_vector *q_vector)
+{
+	struct mucse *mucse = q_vector->mucse;
+	u32 new_itr_rx = q_vector->rx.itr;
+	struct mucse_hw *hw = &mucse->hw;
+	struct mucse_ring *ring;
+
+	new_itr_rx = new_itr_rx * hw->usecstocount;
+	/* if we are in auto mode write to hw */
+	mucse_for_each_ring(ring, q_vector->rx) {
+		if (ring->ring_flags & M_RING_LOWER_ITR) {
+			/* if we are already in this mode skip */
+			if (q_vector->itr_rx == M_LOWEREST_ITR)
+				continue;
+			ring_wr32(ring, DMA_REG_RX_INT_DELAY_PKTCNT, 1);
+			ring_wr32(ring, DMA_REG_RX_INT_DELAY_TIMER,
+				  M_LOWEREST_ITR);
+			q_vector->itr_rx = M_LOWEREST_ITR;
+		} else {
+			if (new_itr_rx == q_vector->itr_rx)
+				continue;
+			ring_wr32(ring, DMA_REG_RX_INT_DELAY_TIMER,
+				  new_itr_rx);
+			ring_wr32(ring, DMA_REG_RX_INT_DELAY_PKTCNT,
+				  mucse->rx_frames);
+			q_vector->itr_rx = new_itr_rx;
+		}
+	}
+}
+
 /**
  * rnpgbe_msix_clean_rings - msix irq handler for ring irq
  * @irq: irq num
@@ -1708,7 +1805,7 @@ static irqreturn_t rnpgbe_msix_clean_rings(int irq, void *data)
 	struct mucse_q_vector *q_vector = (struct mucse_q_vector *)data;
 
 	rnpgbe_irq_disable_queues(q_vector);
-
+	rnpgbe_write_eitr_rx(q_vector);
 	if (q_vector->rx.ring || q_vector->tx.ring)
 		napi_schedule_irqoff(&q_vector->napi);
 
--
2.25.1
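
Note on the last patch: the adaptive moderation in rnpgbe_update_ring_itr_rx()
reduces to a small pure function of the per-poll byte and packet counters.
Below is a minimal standalone sketch of that arithmetic for reference. It is
illustrative only, not driver code: the constants mirror the patch, while the
low-speed shortcut and the two-sample confirmation step are omitted, and
itr_for() is a hypothetical helper name.

	/* Standalone sketch of the avg-wire-size to interrupt-interval
	 * mapping used by rnpgbe_update_ring_itr_rx(); plain C.
	 */
	#include <stdio.h>

	#define M_LOWEREST_ITR 5	/* lowest interval the driver programs */

	static unsigned int itr_for(unsigned int bytes, unsigned int packets,
				    unsigned int prev_itr)
	{
		unsigned int avg;

		if (!packets)
			return prev_itr;	/* no work done: leave itr alone */

		avg = bytes / packets;
		avg += 24;			/* CRC, preamble and inter-frame gap */
		if (avg > 3000)
			avg = 3000;		/* don't starve jumbo frames */

		/* mid-size frames get a slightly shorter interval */
		avg = (avg > 300 && avg < 1200) ? avg / 3 : avg / 2;

		return avg < M_LOWEREST_ITR ? M_LOWEREST_ITR : avg;
	}

	int main(void)
	{
		/* a poll that cleaned 64 full-size (1514-byte) frames */
		printf("%u\n", itr_for(64 * 1514, 64, 980)); /* prints 769 */
		/* an idle poll keeps the previous value */
		printf("%u\n", itr_for(0, 0, 980));          /* prints 980 */
		return 0;
	}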