Add tag driver for LAN9645x using a front port as CPU port. This mode is called an NPI port in the datasheet. Use long prefix on extraction (RX) and no prefix on injection (TX). A long prefix on extraction helps get through the conduit port on host side, since it will see a broadcast MAC. The LAN9645x chip is in the same design architecture family as ocelot and lan966x. The tagging protocol has the same structure as these chips, but the particular fields are different or have different sizes. Therefore, this tag driver is similar to tag_ocelot.c, but the differences in fields makes it hard to reuse. LAN9645x supports 3 different tag formats for extraction/injection of frames from a CPU port: long prefix, short prefix and no prefix. The tag is prepended to the frame. The critical data for the chip is contained in an internal frame header (IFH) which is 28 bytes. The prefix formats look like this: Long prefix (16 bytes) + IFH: - DMAC = 0xffffffffffff on extraction. - SMAC = 0xfeffffffffff on extraction. - ETYPE = 0x8880 - payload = 0x0011 - IFH Short prefix (4 bytes) + IFH: - 0x8880 - 0x0011 - IFH No prefix: - IFH The format can be configured asymmetrically on RX and TX. 
Reviewed-by: Steen Hegelund Signed-off-by: Jens Emil Schulz Østergaard --- MAINTAINERS | 8 ++ include/linux/dsa/lan9645x.h | 290 +++++++++++++++++++++++++++++++++++++++++++ include/net/dsa.h | 2 + net/dsa/Kconfig | 10 ++ net/dsa/Makefile | 1 + net/dsa/tag_lan9645x.c | 143 +++++++++++++++++++++ 6 files changed, 454 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 2265e2c9bfbe..2712aaf7cedd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17286,6 +17286,14 @@ L: netdev@vger.kernel.org S: Maintained F: drivers/net/phy/microchip_t1.c +MICROCHIP LAN9645X ETHERNET SWITCH DRIVER +M: Jens Emil Schulz Østergaard +M: UNGLinuxDriver@microchip.com +L: netdev@vger.kernel.org +S: Maintained +F: include/linux/dsa/lan9645x.h +F: net/dsa/tag_lan9645x.c + MICROCHIP LAN966X ETHERNET DRIVER M: Horatiu Vultur M: UNGLinuxDriver@microchip.com diff --git a/include/linux/dsa/lan9645x.h b/include/linux/dsa/lan9645x.h new file mode 100644 index 000000000000..37b74dde9611 --- /dev/null +++ b/include/linux/dsa/lan9645x.h @@ -0,0 +1,290 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Copyright (C) 2026 Microchip Technology Inc. + */ + +#ifndef _NET_DSA_TAG_LAN9645X_H_ +#define _NET_DSA_TAG_LAN9645X_H_ + +#include +#include +#include + +/* LAN9645x supports 3 different formats on an NPI port, long prefix, short + * prefix and no prefix. The format can be configured asymmetrically on RX and + * TX. We use long prefix on extraction (RX), and no prefix on injection. + * The long prefix on extraction helps get through the conduit port on host + * side, since it will see a broadcast MAC. + * + * The internal frame header (IFH) is 28 bytes, and the fields are documented + * below. + * + * Long prefix, 16 bytes + IFH: + * - DMAC = 0xFFFFFFFFFFFF on extraction. + * - SMAC = 0xFEFFFFFFFFFF on extraction. 
+ * - ETYPE = 0x8880 + * - payload = 0x0011 + * - IFH + * + * Short prefix, 4 bytes + IFH: + * - 0x8880 + * - 0x0011 + * - IFH + * + * No prefix: + * - IFH + * + */ +#define LAN9645X_IFH_TAG_TYPE_C 0 +#define LAN9645X_IFH_TAG_TYPE_S 1 +#define LAN9645X_IFH_LEN_U32 7 +#define LAN9645X_IFH_LEN (LAN9645X_IFH_LEN_U32 * sizeof(u32)) +#define LAN9645X_IFH_BITS (LAN9645X_IFH_LEN * BITS_PER_BYTE) +#define LAN9645X_SHORT_PREFIX_LEN 4 +#define LAN9645X_LONG_PREFIX_LEN 16 +#define LAN9645X_TOTAL_TAG_LEN (LAN9645X_LONG_PREFIX_LEN + LAN9645X_IFH_LEN) + +#define IFH_INJ_TIMESTAMP 192 +#define IFH_BYPASS 191 +#define IFH_MASQ 190 +#define IFH_TIMESTAMP 186 +#define IFH_TIMESTAMP_NS 194 +#define IFH_TIMESTAMP_SUBNS 186 +#define IFH_MASQ_PORT 186 +#define IFH_RCT_INJ 185 +#define IFH_LEN 171 +#define IFH_WRDMODE 169 +#define IFH_RTAGD 167 +#define IFH_CUTTHRU 166 +#define IFH_REW_CMD 156 +#define IFH_REW_OAM 155 +#define IFH_PDU_TYPE 151 +#define IFH_FCS_UPD 150 +#define IFH_DP 149 +#define IFH_RTE_INB_UPDATE 148 +#define IFH_POP_CNT 146 +#define IFH_ETYPE_OFS 144 +#define IFH_SRCPORT 140 +#define IFH_SEQ_NUM 120 +#define IFH_TAG_TYPE 119 +#define IFH_TCI 103 +#define IFH_DSCP 97 +#define IFH_QOS_CLASS 94 +#define IFH_CPUQ 86 +#define IFH_LEARN_FLAGS 84 +#define IFH_SFLOW_ID 80 +#define IFH_ACL_HIT 79 +#define IFH_ACL_IDX 73 +#define IFH_ISDX 65 +#define IFH_DSTS 55 +#define IFH_FLOOD 53 +#define IFH_SEQ_OP 51 +#define IFH_IPV 48 +#define IFH_AFI 47 +#define IFH_RTP_ID 37 +#define IFH_RTP_SUBID 36 +#define IFH_PN_DATA_STATUS 28 +#define IFH_PN_TRANSF_STATUS_ZERO 27 +#define IFH_PN_CC 11 +#define IFH_DUPL_DISC_ENA 10 +#define IFH_RCT_AVAIL 9 + +#define IFH_INJ_TIMESTAMP_SZ 32 +#define IFH_BYPASS_SZ 1 +#define IFH_MASQ_SZ 1 +#define IFH_TIMESTAMP_SZ 38 +#define IFH_TIMESTAMP_NS_SZ 30 +#define IFH_TIMESTAMP_SUBNS_SZ 8 +#define IFH_MASQ_PORT_SZ 4 +#define IFH_RCT_INJ_SZ 1 +#define IFH_LEN_SZ 14 +#define IFH_WRDMODE_SZ 2 +#define IFH_RTAGD_SZ 2 +#define IFH_CUTTHRU_SZ 1 +#define 
IFH_REW_CMD_SZ 10 +#define IFH_REW_OAM_SZ 1 +#define IFH_PDU_TYPE_SZ 4 +#define IFH_FCS_UPD_SZ 1 +#define IFH_DP_SZ 1 +#define IFH_RTE_INB_UPDATE_SZ 1 +#define IFH_POP_CNT_SZ 2 +#define IFH_ETYPE_OFS_SZ 2 +#define IFH_SRCPORT_SZ 4 +#define IFH_SEQ_NUM_SZ 16 +#define IFH_TAG_TYPE_SZ 1 +#define IFH_TCI_SZ 16 +#define IFH_DSCP_SZ 6 +#define IFH_QOS_CLASS_SZ 3 +#define IFH_CPUQ_SZ 8 +#define IFH_LEARN_FLAGS_SZ 2 +#define IFH_SFLOW_ID_SZ 4 +#define IFH_ACL_HIT_SZ 1 +#define IFH_ACL_IDX_SZ 6 +#define IFH_ISDX_SZ 8 +#define IFH_DSTS_SZ 10 +#define IFH_FLOOD_SZ 2 +#define IFH_SEQ_OP_SZ 2 +#define IFH_IPV_SZ 3 +#define IFH_AFI_SZ 1 +#define IFH_RTP_ID_SZ 10 +#define IFH_RTP_SUBID_SZ 1 +#define IFH_PN_DATA_STATUS_SZ 8 +#define IFH_PN_TRANSF_STATUS_ZERO_SZ 1 +#define IFH_PN_CC_SZ 16 +#define IFH_DUPL_DISC_ENA_SZ 1 +#define IFH_RCT_AVAIL_SZ 1 + +#define LAN9645X_VALIDATE_FIELD(_fld, _fld_sz) \ +do { \ + BUILD_BUG_ON_MSG((_fld_sz) > 32, "IFH field size wider than 32.");\ + BUILD_BUG_ON_MSG((_fld_sz) == 0, "IFH field size of 0."); \ + BUILD_BUG_ON_MSG((_fld) + (_fld_sz) > LAN9645X_IFH_BITS, \ + "IFH field overflows IFH"); \ +} while (0) + +#define LAN9645X_IFH_GET(_ifh, _fld) \ +({ \ + LAN9645X_VALIDATE_FIELD(_fld, _fld##_SZ);\ + lan9645x_ifh_get((_ifh), (_fld), _fld##_SZ); \ +}) + +#define LAN9645X_IFH_SET(_ifh, _fld, _val) \ +({ \ + LAN9645X_VALIDATE_FIELD(_fld, _fld##_SZ);\ + lan9645x_ifh_set((_ifh), (_val), (_fld), _fld##_SZ); \ +}) + +#define BTM_MSK(n) ((u8)GENMASK(n, 0)) +#define TOP_MSK(n) ((u8)GENMASK(7, n)) + +static inline void set_merge_mask(u8 *on_zero, u8 on_one, u8 mask) +{ + *on_zero = *on_zero ^ ((*on_zero ^ on_one) & mask); +} + +/* The internal frame header (IFH) is a big-endian 28 byte unpadded bit array. + * Frames can be prepended with an IFH on injection and extraction. There + * are two field layouts, one for extraction and one for injection. 
+ * + * IFH bits go from high to low, for instance + * ifh[0] = [223:215] + * ifh[27] = [7:0] + * + * Here is an example of setting a value starting at bit 13 of bit length 17. + * + * val = 0x1ff + * pos = 13 + * length = 17 + * + * + * IFH[] 0 23 24 25 26 27 + * + * end_u8 start_u8 + * +--------+----------------+--------+--------+--------+--------+--------+ + * | | | | | | | | + * IFH | | .... | | vvvvvvvvvvvvvvvvvvv | | + * | | | | | | | | | | + * +--------+----------------+--------+--+-----+--------+--+-----+--------+ + * Bits 223 39 32 31| 24 23 16 15| 8 7 0 + * | | + * | | + * | | + * v v + * end = 29 pos = 13 + * end_rem = 5 pos_rem = 5 + * end_u8 = 3 start_u8 = 1 + * BTM_MSK(5)= 0x3f TOP_MSK(5) = 0xe0 + * + * + * In end_u8 and start_u8 we must merge the existing IFH byte with the new + * value. In the 'middle' bytes of the value we can overwrite the corresponding + * IFH byte. + */ +static inline void lan9645x_ifh_set(u8 *ifh, u32 val, size_t pos, size_t length) +{ + size_t end = (pos + length) - 1; + size_t start_u8 = pos >> 3; + size_t end_u8 = end >> 3; + size_t end_rem = end & 0x7; + size_t pos_rem = pos & 0x7; + u8 end_mask, start_mask; + size_t vshift; + u8 *ptr; + + end_mask = BTM_MSK(end_rem); + start_mask = TOP_MSK(pos_rem); + + ptr = &ifh[LAN9645X_IFH_LEN - 1 - end_u8]; + + /* Field fits in a single IFH byte. Note: ISO C forbids returning a + * void expression from a void function, so call and return separately. + */ + if (end_u8 == start_u8) { + set_merge_mask(ptr, val << pos_rem, + end_mask & start_mask); + return; + } + + vshift = length - end_rem - 1; + set_merge_mask(ptr++, val >> vshift, end_mask); + + for (size_t j = 1; j < end_u8 - start_u8; j++) { + vshift -= 8; + *ptr++ = val >> vshift; + } + + set_merge_mask(ptr, val << pos_rem, start_mask); +} + +static inline u32 lan9645x_ifh_get(const u8 *ifh, size_t pos, size_t length) +{ + size_t end = (pos + length) - 1; + size_t start_u8 = pos >> 3; + size_t end_u8 = end >> 3; + size_t end_rem = end & 0x7; + size_t pos_rem = pos & 0x7; + u8 end_mask, start_mask; + const u8 *ptr; + u32 val; + + end_mask = BTM_MSK(end_rem); + start_mask = 
TOP_MSK(pos_rem); + + ptr = &ifh[LAN9645X_IFH_LEN - 1 - end_u8]; + + if (end_u8 == start_u8) + return (*ptr & end_mask & start_mask) >> pos_rem; + + val = *ptr++ & end_mask; + + for (size_t j = 1; j < end_u8 - start_u8; j++) + val = val << 8 | *ptr++; + + return val << (8 - pos_rem) | (*ptr & start_mask) >> pos_rem; +} + +static inline void lan9645x_xmit_get_vlan_info(struct sk_buff *skb, + struct net_device *br, + u32 *vlan_tci, u32 *tag_type) +{ + struct vlan_ethhdr *hdr; + u16 proto, tci; + + if (!br || !br_vlan_enabled(br)) { + *vlan_tci = 0; + *tag_type = LAN9645X_IFH_TAG_TYPE_C; + return; + } + + hdr = (struct vlan_ethhdr *)skb_mac_header(skb); + br_vlan_get_proto(br, &proto); + + if (ntohs(hdr->h_vlan_proto) == proto) { + vlan_remove_tag(skb, &tci); + *vlan_tci = tci; + } else { + rcu_read_lock(); + br_vlan_get_pvid_rcu(br, &tci); + rcu_read_unlock(); + *vlan_tci = tci; + } + + *tag_type = (proto != ETH_P_8021Q) ? LAN9645X_IFH_TAG_TYPE_S : + LAN9645X_IFH_TAG_TYPE_C; +} + +#endif /* _NET_DSA_TAG_LAN9645X_H_ */ diff --git a/include/net/dsa.h b/include/net/dsa.h index 6c17446f3dcc..977b35aa9f16 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -58,6 +58,7 @@ struct tc_action; #define DSA_TAG_PROTO_YT921X_VALUE 30 #define DSA_TAG_PROTO_MXL_GSW1XX_VALUE 31 #define DSA_TAG_PROTO_MXL862_VALUE 32 +#define DSA_TAG_PROTO_LAN9645X_VALUE 33 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, @@ -93,6 +94,7 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_YT921X = DSA_TAG_PROTO_YT921X_VALUE, DSA_TAG_PROTO_MXL_GSW1XX = DSA_TAG_PROTO_MXL_GSW1XX_VALUE, DSA_TAG_PROTO_MXL862 = DSA_TAG_PROTO_MXL862_VALUE, + DSA_TAG_PROTO_LAN9645X = DSA_TAG_PROTO_LAN9645X_VALUE, }; struct dsa_switch; diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index 5ed8c704636d..8592cccde7ff 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -211,4 +211,14 @@ config NET_DSA_TAG_YT921X Say Y or M if you want to enable support for tagging frames for Motorcomm YT921x switches. 
+config NET_DSA_TAG_LAN9645X + tristate "Tag driver for Lan9645x switches" + help + Say Y or M if you want to enable NPI tagging for the Lan9645x switches. + In this mode, the frames over the Ethernet CPU port are prepended with + a hardware-defined injection/extraction frame header. + On injection a 28 byte internal frame header (IFH) is used. On + extraction a 16 byte prefix is prepended before the internal frame + header. This prefix starts with a broadcast MAC, to ease passage + through the host side RX filter. endif diff --git a/net/dsa/Makefile b/net/dsa/Makefile index bf7247759a64..dddcd85c81ce 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o obj-$(CONFIG_NET_DSA_TAG_VSC73XX_8021Q) += tag_vsc73xx_8021q.o obj-$(CONFIG_NET_DSA_TAG_XRS700X) += tag_xrs700x.o obj-$(CONFIG_NET_DSA_TAG_YT921X) += tag_yt921x.o +obj-$(CONFIG_NET_DSA_TAG_LAN9645X) += tag_lan9645x.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/net/dsa/tag_lan9645x.c b/net/dsa/tag_lan9645x.c new file mode 100644 index 000000000000..39eb2fb388e5 --- /dev/null +++ b/net/dsa/tag_lan9645x.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2026 Microchip Technology Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tag.h" + +#define LAN9645X_NAME "lan9645x" + +static struct sk_buff *lan9645x_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct dsa_port *dp = dsa_user_to_port(ndev); + struct dsa_switch *ds = dp->ds; + u32 cpu_port = ds->num_ports; + u32 vlan_tci, tag_type; + u32 qos_class; + void *ifh; + + lan9645x_xmit_get_vlan_info(skb, dsa_port_bridge_dev_get(dp), &vlan_tci, + &tag_type); + + qos_class = netdev_get_num_tc(ndev) ? 
+ netdev_get_prio_tc_map(ndev, skb->priority) : + skb->priority; + + /* Make room for IFH */ + ifh = skb_push(skb, LAN9645X_IFH_LEN); + memset(ifh, 0, LAN9645X_IFH_LEN); + + LAN9645X_IFH_SET(ifh, IFH_BYPASS, 1); + LAN9645X_IFH_SET(ifh, IFH_SRCPORT, cpu_port); + LAN9645X_IFH_SET(ifh, IFH_QOS_CLASS, qos_class); + LAN9645X_IFH_SET(ifh, IFH_TCI, vlan_tci); + LAN9645X_IFH_SET(ifh, IFH_TAG_TYPE, tag_type); + LAN9645X_IFH_SET(ifh, IFH_DSTS, BIT(dp->index)); + + return skb; +} + +static struct sk_buff *lan9645x_rcv(struct sk_buff *skb, + struct net_device *ndev) +{ + u32 src_port, qos_class, vlan_tci, tag_type, popcnt, etype_ofs; + u8 *orig_skb_data = skb->data; + struct dsa_port *dp; + u32 ifh_gap_len = 0; + u16 vlan_tpid; + u8 *ifh; + + /* DSA conduit already consumed DMAC,SMAC,ETYPE from long prefix. Go + * back to beginning of frame. + */ + skb_push(skb, ETH_HLEN); + /* IFH starts after our long prefix */ + ifh = skb_pull(skb, LAN9645X_LONG_PREFIX_LEN); + + src_port = LAN9645X_IFH_GET(ifh, IFH_SRCPORT); + qos_class = LAN9645X_IFH_GET(ifh, IFH_QOS_CLASS); + tag_type = LAN9645X_IFH_GET(ifh, IFH_TAG_TYPE); + vlan_tci = LAN9645X_IFH_GET(ifh, IFH_TCI); + popcnt = LAN9645X_IFH_GET(ifh, IFH_POP_CNT); + etype_ofs = LAN9645X_IFH_GET(ifh, IFH_ETYPE_OFS); + + /* Set skb->data at start of real header + * + * Since REW_PORT_NO_REWRITE=0 is required on the NPI port, we need to + * account for any tags popped by the hardware, as that will leave a gap + * between the IFH and DMAC. + */ + if (popcnt == 0 && etype_ofs == 0) + ifh_gap_len = 2 * VLAN_HLEN; + else if (popcnt == 3) + ifh_gap_len = VLAN_HLEN; + + skb_pull(skb, LAN9645X_IFH_LEN + ifh_gap_len); + skb_reset_mac_header(skb); + skb_set_network_header(skb, ETH_HLEN); + skb_reset_mac_len(skb); + + /* Reset skb->data past the actual ethernet header. 
*/ + skb_pull(skb, ETH_HLEN); + skb_postpull_rcsum(skb, orig_skb_data, + LAN9645X_TOTAL_TAG_LEN + ifh_gap_len); + + skb->dev = dsa_conduit_find_user(ndev, 0, src_port); + if (WARN_ON_ONCE(!skb->dev)) { + /* This should never happen since we have disabled reflection + * back to CPU_PORT. + */ + return NULL; + } + + dsa_default_offload_fwd_mark(skb); + + skb->priority = qos_class; + + /* While we have REW_PORT_NO_REWRITE=0 on the NPI port, we still disable + * port VLAN tagging with REW_TAG_CFG. Any classified VID, different + * from a VID in the frame, will not be written to the frame, but is + * only communicated via the IFH. So for VLAN-aware ports we add the IFH + * vlan to the skb. + */ + dp = dsa_user_to_port(skb->dev); + vlan_tpid = tag_type ? ETH_P_8021AD : ETH_P_8021Q; + + if (dsa_port_is_vlan_filtering(dp) && + eth_hdr(skb)->h_proto == htons(vlan_tpid)) { + u16 dummy_vlan_tci; + + skb_push_rcsum(skb, ETH_HLEN); + __skb_vlan_pop(skb, &dummy_vlan_tci); + skb_pull_rcsum(skb, ETH_HLEN); + __vlan_hwaccel_put_tag(skb, htons(vlan_tpid), vlan_tci); + } + + return skb; +} + +static const struct dsa_device_ops lan9645x_netdev_ops = { + .name = LAN9645X_NAME, + .proto = DSA_TAG_PROTO_LAN9645X, + .xmit = lan9645x_xmit, + .rcv = lan9645x_rcv, + .needed_headroom = LAN9645X_TOTAL_TAG_LEN, + .promisc_on_conduit = false, +}; + +MODULE_DESCRIPTION("DSA tag driver for LAN9645x family of switches, using NPI port"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_LAN9645X, LAN9645X_NAME); + +module_dsa_tag_driver(lan9645x_netdev_ops); -- 2.52.0 Add bindings for LAN9645X switch. We use a fallback compatible for the smallest SKU microchip,lan96455s-switch. 
Reviewed-by: Steen Hegelund Signed-off-by: Jens Emil Schulz Østergaard --- .../net/dsa/microchip,lan9645x-switch.yaml | 137 +++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 138 insertions(+) diff --git a/Documentation/devicetree/bindings/net/dsa/microchip,lan9645x-switch.yaml b/Documentation/devicetree/bindings/net/dsa/microchip,lan9645x-switch.yaml new file mode 100644 index 000000000000..4a19dfa7e9d5 --- /dev/null +++ b/Documentation/devicetree/bindings/net/dsa/microchip,lan9645x-switch.yaml @@ -0,0 +1,137 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/dsa/microchip,lan9645x-switch.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Microchip LAN9645x Ethernet switch + +maintainers: + - Jens Emil Schulz Østergaard + +description: | + The LAN9645x switch is a multi-port Gigabit AVB/TSN Ethernet switch with + five integrated 10/100/1000Base-T PHYs. In addition to the integrated PHYs, + it supports up to 2 RGMII/RMII, up to 2 BASE-X/SERDES/2.5GBASE-X and one + Quad-SGMII interfaces. + +properties: + compatible: + oneOf: + - enum: + - microchip,lan96455s-switch + - items: + - enum: + - microchip,lan96455f-switch + - microchip,lan96457f-switch + - microchip,lan96459f-switch + - microchip,lan96457s-switch + - microchip,lan96459s-switch + - const: microchip,lan96455s-switch + + reg: + maxItems: 1 + +$ref: dsa.yaml# + +patternProperties: + "^(ethernet-)?ports$": + type: object + additionalProperties: true + patternProperties: + "^(ethernet-)?port@[0-8]$": + type: object + description: Ethernet switch ports + + $ref: dsa-port.yaml# + + properties: + microchip,led-drive-mode: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + Set the LED drive mode for the copper PHY associated with + this port. 
+ + 0 - LED1 and LED2 in open-drain mode + 1 - LED1 in active drive mode (can be used for single-LED + configurations requiring active drive) + 2 - Reserved + 3 - LED1 and LED2 in active drive mode + minimum: 0 + maximum: 3 + + unevaluatedProperties: false + +oneOf: + - required: + - ports + - required: + - ethernet-ports + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + soc { + #address-cells = <1>; + #size-cells = <0>; + + ethernet-switch@0 { + reg = <0>; + compatible = "microchip,lan96459f-switch", "microchip,lan96455s-switch"; + pinctrl-0 = <&lan9645x_leds>; + pinctrl-names = "default"; + + ethernet-ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + label = "lan0"; + phy-mode = "gmii"; + phy-handle = <&cuphy0>; + }; + + port@1 { + reg = <1>; + label = "lan1"; + phy-mode = "gmii"; + phy-handle = <&cuphy1>; + microchip,led-drive-mode = <3>; + }; + + port@2 { + reg = <2>; + label = "lan2"; + phy-mode = "gmii"; + phy-handle = <&cuphy2>; + }; + + port@3 { + reg = <3>; + label = "lan3"; + phy-mode = "gmii"; + phy-handle = <&cuphy3>; + }; + + port@7 { + reg = <7>; + label = "lan7"; + phy-mode = "rgmii-id"; + ethernet = <&cpu_host_port>; + fixed-link { + speed = <1000>; + full-duplex; + pause; + }; + }; + }; + }; + }; + +... diff --git a/MAINTAINERS b/MAINTAINERS index 2712aaf7cedd..ab92b342877b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17291,6 +17291,7 @@ M: Jens Emil Schulz Østergaard M: UNGLinuxDriver@microchip.com L: netdev@vger.kernel.org S: Maintained +F: Documentation/devicetree/bindings/net/dsa/microchip,lan9645x-switch.yaml F: include/linux/dsa/lan9645x.h F: net/dsa/tag_lan9645x.c -- 2.52.0 Add autogenerated register macros and update MAINTAINERS file. The register macros are generated using the same tool we use for lan966x, sparx5 and lan969x. 
Reviewed-by: Steen Hegelund Signed-off-by: Jens Emil Schulz Østergaard --- MAINTAINERS | 1 + drivers/net/dsa/microchip/lan9645x/lan9645x_regs.h | 1851 ++++++++++++++++++++ 2 files changed, 1852 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index ab92b342877b..dca10e1da477 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -17292,6 +17292,7 @@ M: UNGLinuxDriver@microchip.com L: netdev@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/net/dsa/microchip,lan9645x-switch.yaml +F: drivers/net/dsa/microchip/lan9645x/* F: include/linux/dsa/lan9645x.h F: net/dsa/tag_lan9645x.c diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_regs.h b/drivers/net/dsa/microchip/lan9645x/lan9645x_regs.h new file mode 100644 index 000000000000..8aebb9d9ed21 --- /dev/null +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_regs.h @@ -0,0 +1,1851 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (c) 2026 Microchip Technology Inc. + */ + +/* This file is autogenerated by cml-utils 2026-02-26 15:31:52 +0100. + * Commit ID: 62bbe3a023ed1624baf17f6a8709dac1101def9f + */ + +#ifndef _LAN9645X_REGS_H_ +#define _LAN9645X_REGS_H_ + +#include +#include +#include + +enum lan9645x_target { + TARGET_AFI = 0, + TARGET_ANA = 1, + TARGET_CHIP_TOP = 2, + TARGET_DEV = 5, + TARGET_GCB = 16, + TARGET_HSIO = 17, + TARGET_QS = 26, + TARGET_QSYS = 27, + TARGET_REW = 28, + TARGET_SYS = 29, + NUM_TARGETS = 38 +}; + +#define __REG(...) 
__VA_ARGS__ + +/* AFI:PORT_TBL:PORT_FRM_OUT */ +#define AFI_PORT_FRM_OUT(g) __REG(TARGET_AFI,\ + 0, 1, 6272, g, 11, 8, 0, 0, 1, 4) + +#define AFI_PORT_FRM_OUT_FRM_OUT_CNT GENMASK(26, 16) +#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_SET(x)\ + FIELD_PREP(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x) +#define AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(x)\ + FIELD_GET(AFI_PORT_FRM_OUT_FRM_OUT_CNT, x) + +/* AFI:PORT_TBL:PORT_CFG */ +#define AFI_PORT_CFG(g) __REG(TARGET_AFI,\ + 0, 1, 6272, g, 11, 8, 4, 0, 1, 4) + +#define AFI_PORT_CFG_FRM_OUT_MAX GENMASK(9, 0) +#define AFI_PORT_CFG_FRM_OUT_MAX_SET(x)\ + FIELD_PREP(AFI_PORT_CFG_FRM_OUT_MAX, x) +#define AFI_PORT_CFG_FRM_OUT_MAX_GET(x)\ + FIELD_GET(AFI_PORT_CFG_FRM_OUT_MAX, x) + +#define AFI_PORT_CFG_FC_SKIP_TTI_INJ BIT(16) +#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(x)\ + FIELD_PREP(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x) +#define AFI_PORT_CFG_FC_SKIP_TTI_INJ_GET(x)\ + FIELD_GET(AFI_PORT_CFG_FC_SKIP_TTI_INJ, x) + +/* ANA:ANA:ADVLEARN */ +#define ANA_ADVLEARN __REG(TARGET_ANA,\ + 0, 1, 27136, 0, 1, 284, 0, 0, 1, 4) + +#define ANA_ADVLEARN_VLAN_CHK BIT(0) +#define ANA_ADVLEARN_VLAN_CHK_SET(x)\ + FIELD_PREP(ANA_ADVLEARN_VLAN_CHK, x) +#define ANA_ADVLEARN_VLAN_CHK_GET(x)\ + FIELD_GET(ANA_ADVLEARN_VLAN_CHK, x) + +/* ANA:ANA:VLANMASK */ +#define ANA_VLANMASK __REG(TARGET_ANA,\ + 0, 1, 27136, 0, 1, 284, 8, 0, 1, 4) + +#define ANA_VLANMASK_VLANMASK GENMASK(9, 0) +#define ANA_VLANMASK_VLANMASK_SET(x)\ + FIELD_PREP(ANA_VLANMASK_VLANMASK, x) +#define ANA_VLANMASK_VLANMASK_GET(x)\ + FIELD_GET(ANA_VLANMASK_VLANMASK, x) + +/* ANA:ANA:ANAGEFIL */ +#define ANA_ANAGEFIL __REG(TARGET_ANA,\ + 0, 1, 27136, 0, 1, 284, 12, 0, 1, 4) + +#define ANA_ANAGEFIL_AGE_LOCKED BIT(20) +#define ANA_ANAGEFIL_AGE_LOCKED_SET(x)\ + FIELD_PREP(ANA_ANAGEFIL_AGE_LOCKED, x) +#define ANA_ANAGEFIL_AGE_LOCKED_GET(x)\ + FIELD_GET(ANA_ANAGEFIL_AGE_LOCKED, x) + +#define ANA_ANAGEFIL_PID_EN BIT(19) +#define ANA_ANAGEFIL_PID_EN_SET(x)\ + FIELD_PREP(ANA_ANAGEFIL_PID_EN, x) +#define ANA_ANAGEFIL_PID_EN_GET(x)\ + 
FIELD_GET(ANA_ANAGEFIL_PID_EN, x) + +#define ANA_ANAGEFIL_PID_VAL GENMASK(18, 14) +#define ANA_ANAGEFIL_PID_VAL_SET(x)\ + FIELD_PREP(ANA_ANAGEFIL_PID_VAL, x) +#define ANA_ANAGEFIL_PID_VAL_GET(x)\ + FIELD_GET(ANA_ANAGEFIL_PID_VAL, x) + +#define ANA_ANAGEFIL_VID_EN BIT(13) +#define ANA_ANAGEFIL_VID_EN_SET(x)\ + FIELD_PREP(ANA_ANAGEFIL_VID_EN, x) +#define ANA_ANAGEFIL_VID_EN_GET(x)\ + FIELD_GET(ANA_ANAGEFIL_VID_EN, x) + +#define ANA_ANAGEFIL_VID_VAL GENMASK(12, 0) +#define ANA_ANAGEFIL_VID_VAL_SET(x)\ + FIELD_PREP(ANA_ANAGEFIL_VID_VAL, x) +#define ANA_ANAGEFIL_VID_VAL_GET(x)\ + FIELD_GET(ANA_ANAGEFIL_VID_VAL, x) + +/* ANA:ANA:AUTOAGE */ +#define ANA_AUTOAGE __REG(TARGET_ANA,\ + 0, 1, 27136, 0, 1, 284, 44, 0, 1, 4) + +#define ANA_AUTOAGE_AGE_FAST BIT(21) +#define ANA_AUTOAGE_AGE_FAST_SET(x)\ + FIELD_PREP(ANA_AUTOAGE_AGE_FAST, x) +#define ANA_AUTOAGE_AGE_FAST_GET(x)\ + FIELD_GET(ANA_AUTOAGE_AGE_FAST, x) + +#define ANA_AUTOAGE_AGE_PERIOD GENMASK(20, 1) +#define ANA_AUTOAGE_AGE_PERIOD_SET(x)\ + FIELD_PREP(ANA_AUTOAGE_AGE_PERIOD, x) +#define ANA_AUTOAGE_AGE_PERIOD_GET(x)\ + FIELD_GET(ANA_AUTOAGE_AGE_PERIOD, x) + +#define ANA_AUTOAGE_AUTOAGE_LOCKED BIT(0) +#define ANA_AUTOAGE_AUTOAGE_LOCKED_SET(x)\ + FIELD_PREP(ANA_AUTOAGE_AUTOAGE_LOCKED, x) +#define ANA_AUTOAGE_AUTOAGE_LOCKED_GET(x)\ + FIELD_GET(ANA_AUTOAGE_AUTOAGE_LOCKED, x) + +/* ANA:ANA:FLOODING */ +#define ANA_FLOODING(r) __REG(TARGET_ANA,\ + 0, 1, 27136, 0, 1, 284, 68, r, 8, 4) + +#define ANA_FLOODING_FLD_UNICAST GENMASK(17, 12) +#define ANA_FLOODING_FLD_UNICAST_SET(x)\ + FIELD_PREP(ANA_FLOODING_FLD_UNICAST, x) +#define ANA_FLOODING_FLD_UNICAST_GET(x)\ + FIELD_GET(ANA_FLOODING_FLD_UNICAST, x) + +#define ANA_FLOODING_FLD_BROADCAST GENMASK(11, 6) +#define ANA_FLOODING_FLD_BROADCAST_SET(x)\ + FIELD_PREP(ANA_FLOODING_FLD_BROADCAST, x) +#define ANA_FLOODING_FLD_BROADCAST_GET(x)\ + FIELD_GET(ANA_FLOODING_FLD_BROADCAST, x) + +#define ANA_FLOODING_FLD_MULTICAST GENMASK(5, 0) +#define ANA_FLOODING_FLD_MULTICAST_SET(x)\ + 
FIELD_PREP(ANA_FLOODING_FLD_MULTICAST, x) +#define ANA_FLOODING_FLD_MULTICAST_GET(x)\ + FIELD_GET(ANA_FLOODING_FLD_MULTICAST, x) + +/* ANA:ANA:FLOODING_IPMC */ +#define ANA_FLOODING_IPMC __REG(TARGET_ANA,\ + 0, 1, 27136, 0, 1, 284, 100, 0, 1, 4) + +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL GENMASK(23, 18) +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x) +#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_CTRL, x) + +#define ANA_FLOODING_IPMC_FLD_MC4_DATA GENMASK(17, 12) +#define ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC4_DATA, x) +#define ANA_FLOODING_IPMC_FLD_MC4_DATA_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC4_DATA, x) + +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL GENMASK(11, 6) +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x) +#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_CTRL, x) + +#define ANA_FLOODING_IPMC_FLD_MC6_DATA GENMASK(5, 0) +#define ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(x)\ + FIELD_PREP(ANA_FLOODING_IPMC_FLD_MC6_DATA, x) +#define ANA_FLOODING_IPMC_FLD_MC6_DATA_GET(x)\ + FIELD_GET(ANA_FLOODING_IPMC_FLD_MC6_DATA, x) + +/* ANA:PGID:PGID */ +#define ANA_PGID(g) __REG(TARGET_ANA,\ + 0, 1, 27648, g, 90, 8, 0, 0, 1, 4) + +#define ANA_PGID_PGID GENMASK(9, 0) +#define ANA_PGID_PGID_SET(x)\ + FIELD_PREP(ANA_PGID_PGID, x) +#define ANA_PGID_PGID_GET(x)\ + FIELD_GET(ANA_PGID_PGID, x) + +/* ANA:PGID:PGID_CFG */ +#define ANA_PGID_CFG(g) __REG(TARGET_ANA,\ + 0, 1, 27648, g, 90, 8, 4, 0, 1, 4) + +#define ANA_PGID_CFG_SAN_ENA BIT(4) +#define ANA_PGID_CFG_SAN_ENA_SET(x)\ + FIELD_PREP(ANA_PGID_CFG_SAN_ENA, x) +#define ANA_PGID_CFG_SAN_ENA_GET(x)\ + FIELD_GET(ANA_PGID_CFG_SAN_ENA, x) + +#define ANA_PGID_CFG_CPUQ_DST_PGID GENMASK(3, 1) +#define ANA_PGID_CFG_CPUQ_DST_PGID_SET(x)\ + FIELD_PREP(ANA_PGID_CFG_CPUQ_DST_PGID, x) +#define ANA_PGID_CFG_CPUQ_DST_PGID_GET(x)\ + 
FIELD_GET(ANA_PGID_CFG_CPUQ_DST_PGID, x) + +#define ANA_PGID_CFG_OBEY_VLAN BIT(0) +#define ANA_PGID_CFG_OBEY_VLAN_SET(x)\ + FIELD_PREP(ANA_PGID_CFG_OBEY_VLAN, x) +#define ANA_PGID_CFG_OBEY_VLAN_GET(x)\ + FIELD_GET(ANA_PGID_CFG_OBEY_VLAN, x) + +/* ANA:ANA_TABLES:MACHDATA */ +#define ANA_MACHDATA __REG(TARGET_ANA,\ + 0, 1, 23680, 0, 1, 128, 44, 0, 1, 4) + +#define ANA_MACHDATA_VID GENMASK(28, 16) +#define ANA_MACHDATA_VID_SET(x)\ + FIELD_PREP(ANA_MACHDATA_VID, x) +#define ANA_MACHDATA_VID_GET(x)\ + FIELD_GET(ANA_MACHDATA_VID, x) + +#define ANA_MACHDATA_MACHDATA GENMASK(15, 0) +#define ANA_MACHDATA_MACHDATA_SET(x)\ + FIELD_PREP(ANA_MACHDATA_MACHDATA, x) +#define ANA_MACHDATA_MACHDATA_GET(x)\ + FIELD_GET(ANA_MACHDATA_MACHDATA, x) + +/* ANA:ANA_TABLES:MACLDATA */ +#define ANA_MACLDATA __REG(TARGET_ANA,\ + 0, 1, 23680, 0, 1, 128, 48, 0, 1, 4) + +/* ANA:ANA_TABLES:MACACCESS */ +#define ANA_MACACCESS __REG(TARGET_ANA,\ + 0, 1, 23680, 0, 1, 128, 52, 0, 1, 4) + +#define ANA_MACACCESS_CHANGE2SW BIT(17) +#define ANA_MACACCESS_CHANGE2SW_SET(x)\ + FIELD_PREP(ANA_MACACCESS_CHANGE2SW, x) +#define ANA_MACACCESS_CHANGE2SW_GET(x)\ + FIELD_GET(ANA_MACACCESS_CHANGE2SW, x) + +#define ANA_MACACCESS_MAC_CPU_COPY BIT(16) +#define ANA_MACACCESS_MAC_CPU_COPY_SET(x)\ + FIELD_PREP(ANA_MACACCESS_MAC_CPU_COPY, x) +#define ANA_MACACCESS_MAC_CPU_COPY_GET(x)\ + FIELD_GET(ANA_MACACCESS_MAC_CPU_COPY, x) + +#define ANA_MACACCESS_SRC_KILL BIT(15) +#define ANA_MACACCESS_SRC_KILL_SET(x)\ + FIELD_PREP(ANA_MACACCESS_SRC_KILL, x) +#define ANA_MACACCESS_SRC_KILL_GET(x)\ + FIELD_GET(ANA_MACACCESS_SRC_KILL, x) + +#define ANA_MACACCESS_IGNORE_VLAN BIT(14) +#define ANA_MACACCESS_IGNORE_VLAN_SET(x)\ + FIELD_PREP(ANA_MACACCESS_IGNORE_VLAN, x) +#define ANA_MACACCESS_IGNORE_VLAN_GET(x)\ + FIELD_GET(ANA_MACACCESS_IGNORE_VLAN, x) + +#define ANA_MACACCESS_AGED_FLAG BIT(13) +#define ANA_MACACCESS_AGED_FLAG_SET(x)\ + FIELD_PREP(ANA_MACACCESS_AGED_FLAG, x) +#define ANA_MACACCESS_AGED_FLAG_GET(x)\ + 
FIELD_GET(ANA_MACACCESS_AGED_FLAG, x) + +#define ANA_MACACCESS_VALID BIT(12) +#define ANA_MACACCESS_VALID_SET(x)\ + FIELD_PREP(ANA_MACACCESS_VALID, x) +#define ANA_MACACCESS_VALID_GET(x)\ + FIELD_GET(ANA_MACACCESS_VALID, x) + +#define ANA_MACACCESS_ENTRYTYPE GENMASK(11, 10) +#define ANA_MACACCESS_ENTRYTYPE_SET(x)\ + FIELD_PREP(ANA_MACACCESS_ENTRYTYPE, x) +#define ANA_MACACCESS_ENTRYTYPE_GET(x)\ + FIELD_GET(ANA_MACACCESS_ENTRYTYPE, x) + +#define ANA_MACACCESS_DEST_IDX GENMASK(9, 4) +#define ANA_MACACCESS_DEST_IDX_SET(x)\ + FIELD_PREP(ANA_MACACCESS_DEST_IDX, x) +#define ANA_MACACCESS_DEST_IDX_GET(x)\ + FIELD_GET(ANA_MACACCESS_DEST_IDX, x) + +#define ANA_MACACCESS_MAC_TABLE_CMD GENMASK(3, 0) +#define ANA_MACACCESS_MAC_TABLE_CMD_SET(x)\ + FIELD_PREP(ANA_MACACCESS_MAC_TABLE_CMD, x) +#define ANA_MACACCESS_MAC_TABLE_CMD_GET(x)\ + FIELD_GET(ANA_MACACCESS_MAC_TABLE_CMD, x) + +/* ANA:ANA_TABLES:MACTINDX */ +#define ANA_MACTINDX __REG(TARGET_ANA,\ + 0, 1, 23680, 0, 1, 128, 56, 0, 1, 4) + +#define ANA_MACTINDX_BUCKET GENMASK(12, 11) +#define ANA_MACTINDX_BUCKET_SET(x)\ + FIELD_PREP(ANA_MACTINDX_BUCKET, x) +#define ANA_MACTINDX_BUCKET_GET(x)\ + FIELD_GET(ANA_MACTINDX_BUCKET, x) + +#define ANA_MACTINDX_M_INDEX GENMASK(10, 0) +#define ANA_MACTINDX_M_INDEX_SET(x)\ + FIELD_PREP(ANA_MACTINDX_M_INDEX, x) +#define ANA_MACTINDX_M_INDEX_GET(x)\ + FIELD_GET(ANA_MACTINDX_M_INDEX, x) + +/* ANA:ANA_TABLES:VLAN_PORT_MASK */ +#define ANA_VLAN_PORT_MASK __REG(TARGET_ANA,\ + 0, 1, 23680, 0, 1, 128, 60, 0, 1, 4) + +#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK GENMASK(9, 0) +#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_SET(x)\ + FIELD_PREP(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x) +#define ANA_VLAN_PORT_MASK_VLAN_PORT_MASK_GET(x)\ + FIELD_GET(ANA_VLAN_PORT_MASK_VLAN_PORT_MASK, x) + +/* ANA:ANA_TABLES:VLANACCESS */ +#define ANA_VLANACCESS __REG(TARGET_ANA,\ + 0, 1, 23680, 0, 1, 128, 64, 0, 1, 4) + +#define ANA_VLANACCESS_VLAN_TBL_CMD GENMASK(1, 0) +#define ANA_VLANACCESS_VLAN_TBL_CMD_SET(x)\ + 
FIELD_PREP(ANA_VLANACCESS_VLAN_TBL_CMD, x) +#define ANA_VLANACCESS_VLAN_TBL_CMD_GET(x)\ + FIELD_GET(ANA_VLANACCESS_VLAN_TBL_CMD, x) + +/* ANA:ANA_TABLES:VLANTIDX */ +#define ANA_VLANTIDX __REG(TARGET_ANA,\ + 0, 1, 23680, 0, 1, 128, 68, 0, 1, 4) + +#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS BIT(18) +#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(x)\ + FIELD_PREP(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x) +#define ANA_VLANTIDX_VLAN_PGID_CPU_DIS_GET(x)\ + FIELD_GET(ANA_VLANTIDX_VLAN_PGID_CPU_DIS, x) + +#define ANA_VLANTIDX_VLAN_SEC_FWD_ENA BIT(17) +#define ANA_VLANTIDX_VLAN_SEC_FWD_ENA_SET(x)\ + FIELD_PREP(ANA_VLANTIDX_VLAN_SEC_FWD_ENA, x) +#define ANA_VLANTIDX_VLAN_SEC_FWD_ENA_GET(x)\ + FIELD_GET(ANA_VLANTIDX_VLAN_SEC_FWD_ENA, x) + +#define ANA_VLANTIDX_VLAN_FLOOD_DIS BIT(16) +#define ANA_VLANTIDX_VLAN_FLOOD_DIS_SET(x)\ + FIELD_PREP(ANA_VLANTIDX_VLAN_FLOOD_DIS, x) +#define ANA_VLANTIDX_VLAN_FLOOD_DIS_GET(x)\ + FIELD_GET(ANA_VLANTIDX_VLAN_FLOOD_DIS, x) + +#define ANA_VLANTIDX_VLAN_PRIV_VLAN BIT(15) +#define ANA_VLANTIDX_VLAN_PRIV_VLAN_SET(x)\ + FIELD_PREP(ANA_VLANTIDX_VLAN_PRIV_VLAN, x) +#define ANA_VLANTIDX_VLAN_PRIV_VLAN_GET(x)\ + FIELD_GET(ANA_VLANTIDX_VLAN_PRIV_VLAN, x) + +#define ANA_VLANTIDX_VLAN_LEARN_DISABLED BIT(14) +#define ANA_VLANTIDX_VLAN_LEARN_DISABLED_SET(x)\ + FIELD_PREP(ANA_VLANTIDX_VLAN_LEARN_DISABLED, x) +#define ANA_VLANTIDX_VLAN_LEARN_DISABLED_GET(x)\ + FIELD_GET(ANA_VLANTIDX_VLAN_LEARN_DISABLED, x) + +#define ANA_VLANTIDX_VLAN_MIRROR BIT(13) +#define ANA_VLANTIDX_VLAN_MIRROR_SET(x)\ + FIELD_PREP(ANA_VLANTIDX_VLAN_MIRROR, x) +#define ANA_VLANTIDX_VLAN_MIRROR_GET(x)\ + FIELD_GET(ANA_VLANTIDX_VLAN_MIRROR, x) + +#define ANA_VLANTIDX_VLAN_SRC_CHK BIT(12) +#define ANA_VLANTIDX_VLAN_SRC_CHK_SET(x)\ + FIELD_PREP(ANA_VLANTIDX_VLAN_SRC_CHK, x) +#define ANA_VLANTIDX_VLAN_SRC_CHK_GET(x)\ + FIELD_GET(ANA_VLANTIDX_VLAN_SRC_CHK, x) + +#define ANA_VLANTIDX_V_INDEX GENMASK(11, 0) +#define ANA_VLANTIDX_V_INDEX_SET(x)\ + FIELD_PREP(ANA_VLANTIDX_V_INDEX, x) +#define 
ANA_VLANTIDX_V_INDEX_GET(x)\ + FIELD_GET(ANA_VLANTIDX_V_INDEX, x) + +/* ANA:PORT:VLAN_CFG */ +#define ANA_VLAN_CFG(g) __REG(TARGET_ANA,\ + 0, 1, 24576, g, 10, 256, 0, 0, 1, 4) + +#define ANA_VLAN_CFG_VLAN_PFC_ENA BIT(21) +#define ANA_VLAN_CFG_VLAN_PFC_ENA_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_PFC_ENA, x) +#define ANA_VLAN_CFG_VLAN_PFC_ENA_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_PFC_ENA, x) + +#define ANA_VLAN_CFG_VLAN_AWARE_ENA BIT(20) +#define ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_AWARE_ENA, x) +#define ANA_VLAN_CFG_VLAN_AWARE_ENA_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_AWARE_ENA, x) + +#define ANA_VLAN_CFG_VLAN_POP_CNT GENMASK(19, 18) +#define ANA_VLAN_CFG_VLAN_POP_CNT_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_POP_CNT, x) +#define ANA_VLAN_CFG_VLAN_POP_CNT_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_POP_CNT, x) + +#define ANA_VLAN_CFG_VLAN_INNER_TAG_ENA BIT(17) +#define ANA_VLAN_CFG_VLAN_INNER_TAG_ENA_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_INNER_TAG_ENA, x) +#define ANA_VLAN_CFG_VLAN_INNER_TAG_ENA_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_INNER_TAG_ENA, x) + +#define ANA_VLAN_CFG_VLAN_TAG_TYPE BIT(16) +#define ANA_VLAN_CFG_VLAN_TAG_TYPE_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_TAG_TYPE, x) +#define ANA_VLAN_CFG_VLAN_TAG_TYPE_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_TAG_TYPE, x) + +#define ANA_VLAN_CFG_VLAN_PCP GENMASK(15, 13) +#define ANA_VLAN_CFG_VLAN_PCP_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_PCP, x) +#define ANA_VLAN_CFG_VLAN_PCP_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_PCP, x) + +#define ANA_VLAN_CFG_VLAN_DEI BIT(12) +#define ANA_VLAN_CFG_VLAN_DEI_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_DEI, x) +#define ANA_VLAN_CFG_VLAN_DEI_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_DEI, x) + +#define ANA_VLAN_CFG_VLAN_VID GENMASK(11, 0) +#define ANA_VLAN_CFG_VLAN_VID_SET(x)\ + FIELD_PREP(ANA_VLAN_CFG_VLAN_VID, x) +#define ANA_VLAN_CFG_VLAN_VID_GET(x)\ + FIELD_GET(ANA_VLAN_CFG_VLAN_VID, x) + +/* ANA:PORT:DROP_CFG */ +#define ANA_DROP_CFG(g) __REG(TARGET_ANA,\ + 0, 1, 24576, 
g, 10, 256, 4, 0, 1, 4) + +#define ANA_DROP_CFG_DROP_UNTAGGED_ENA BIT(6) +#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x) +#define ANA_DROP_CFG_DROP_UNTAGGED_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_UNTAGGED_ENA, x) + +#define ANA_DROP_CFG_DROP_S_TAGGED_ENA BIT(5) +#define ANA_DROP_CFG_DROP_S_TAGGED_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_S_TAGGED_ENA, x) +#define ANA_DROP_CFG_DROP_S_TAGGED_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_S_TAGGED_ENA, x) + +#define ANA_DROP_CFG_DROP_C_TAGGED_ENA BIT(4) +#define ANA_DROP_CFG_DROP_C_TAGGED_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_C_TAGGED_ENA, x) +#define ANA_DROP_CFG_DROP_C_TAGGED_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_C_TAGGED_ENA, x) + +#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3) +#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x) +#define ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA, x) + +#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2) +#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x) +#define ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, x) + +#define ANA_DROP_CFG_DROP_NULL_MAC_ENA BIT(1) +#define ANA_DROP_CFG_DROP_NULL_MAC_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_NULL_MAC_ENA, x) +#define ANA_DROP_CFG_DROP_NULL_MAC_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_NULL_MAC_ENA, x) + +#define ANA_DROP_CFG_DROP_MC_SMAC_ENA BIT(0) +#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(x)\ + FIELD_PREP(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x) +#define ANA_DROP_CFG_DROP_MC_SMAC_ENA_GET(x)\ + FIELD_GET(ANA_DROP_CFG_DROP_MC_SMAC_ENA, x) + +/* ANA:PORT:CPU_FWD_CFG */ +#define ANA_CPU_FWD_CFG(g) __REG(TARGET_ANA,\ + 0, 1, 24576, g, 10, 256, 96, 0, 1, 4) + +#define ANA_CPU_FWD_CFG_NO_HSR_REDIR_ENA BIT(9) +#define ANA_CPU_FWD_CFG_NO_HSR_REDIR_ENA_SET(x)\ + 
FIELD_PREP(ANA_CPU_FWD_CFG_NO_HSR_REDIR_ENA, x) +#define ANA_CPU_FWD_CFG_NO_HSR_REDIR_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_NO_HSR_REDIR_ENA, x) + +#define ANA_CPU_FWD_CFG_SPV_COPY_ENA BIT(8) +#define ANA_CPU_FWD_CFG_SPV_COPY_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_SPV_COPY_ENA, x) +#define ANA_CPU_FWD_CFG_SPV_COPY_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_SPV_COPY_ENA, x) + +#define ANA_CPU_FWD_CFG_VRAP_REDIR_ENA BIT(7) +#define ANA_CPU_FWD_CFG_VRAP_REDIR_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_VRAP_REDIR_ENA, x) +#define ANA_CPU_FWD_CFG_VRAP_REDIR_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_VRAP_REDIR_ENA, x) + +#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA BIT(6) +#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x) +#define ANA_CPU_FWD_CFG_MLD_REDIR_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_MLD_REDIR_ENA, x) + +#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA BIT(5) +#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x) +#define ANA_CPU_FWD_CFG_IGMP_REDIR_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_IGMP_REDIR_ENA, x) + +#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA BIT(4) +#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x) +#define ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_IPMC_CTRL_COPY_ENA, x) + +#define ANA_CPU_FWD_CFG_SRC_COPY_ENA BIT(3) +#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x) +#define ANA_CPU_FWD_CFG_SRC_COPY_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_SRC_COPY_ENA, x) + +#define ANA_CPU_FWD_CFG_ALLBRIDGE_DROP_ENA BIT(2) +#define ANA_CPU_FWD_CFG_ALLBRIDGE_DROP_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_ALLBRIDGE_DROP_ENA, x) +#define ANA_CPU_FWD_CFG_ALLBRIDGE_DROP_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_ALLBRIDGE_DROP_ENA, x) + +#define ANA_CPU_FWD_CFG_ALLBRIDGE_REDIR_ENA BIT(1) +#define ANA_CPU_FWD_CFG_ALLBRIDGE_REDIR_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_ALLBRIDGE_REDIR_ENA, 
x) +#define ANA_CPU_FWD_CFG_ALLBRIDGE_REDIR_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_ALLBRIDGE_REDIR_ENA, x) + +#define ANA_CPU_FWD_CFG_OAM_ENA BIT(0) +#define ANA_CPU_FWD_CFG_OAM_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_CFG_OAM_ENA, x) +#define ANA_CPU_FWD_CFG_OAM_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_CFG_OAM_ENA, x) + +/* ANA:PORT:CPU_FWD_BPDU_CFG */ +#define ANA_CPU_FWD_BPDU_CFG(g) __REG(TARGET_ANA,\ + 0, 1, 24576, g, 10, 256, 100, 0, 1, 4) + +#define ANA_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA GENMASK(31, 16) +#define ANA_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA, x) +#define ANA_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA, x) + +#define ANA_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA GENMASK(15, 0) +#define ANA_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA_SET(x)\ + FIELD_PREP(ANA_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA, x) +#define ANA_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA_GET(x)\ + FIELD_GET(ANA_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA, x) + +/* ANA:PORT:PORT_CFG */ +#define ANA_PORT_CFG(g) __REG(TARGET_ANA,\ + 0, 1, 24576, g, 10, 256, 112, 0, 1, 4) + +#define ANA_PORT_CFG_SRC_MIRROR_ENA BIT(13) +#define ANA_PORT_CFG_SRC_MIRROR_ENA_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_SRC_MIRROR_ENA, x) +#define ANA_PORT_CFG_SRC_MIRROR_ENA_GET(x)\ + FIELD_GET(ANA_PORT_CFG_SRC_MIRROR_ENA, x) + +#define ANA_PORT_CFG_LIMIT_DROP BIT(12) +#define ANA_PORT_CFG_LIMIT_DROP_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_LIMIT_DROP, x) +#define ANA_PORT_CFG_LIMIT_DROP_GET(x)\ + FIELD_GET(ANA_PORT_CFG_LIMIT_DROP, x) + +#define ANA_PORT_CFG_LIMIT_CPU BIT(11) +#define ANA_PORT_CFG_LIMIT_CPU_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_LIMIT_CPU, x) +#define ANA_PORT_CFG_LIMIT_CPU_GET(x)\ + FIELD_GET(ANA_PORT_CFG_LIMIT_CPU, x) + +#define ANA_PORT_CFG_LOCKED_PORTMOVE_DROP BIT(10) +#define ANA_PORT_CFG_LOCKED_PORTMOVE_DROP_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_LOCKED_PORTMOVE_DROP, x) +#define ANA_PORT_CFG_LOCKED_PORTMOVE_DROP_GET(x)\ + FIELD_GET(ANA_PORT_CFG_LOCKED_PORTMOVE_DROP, x) + +#define 
ANA_PORT_CFG_LOCKED_PORTMOVE_CPU BIT(9) +#define ANA_PORT_CFG_LOCKED_PORTMOVE_CPU_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_LOCKED_PORTMOVE_CPU, x) +#define ANA_PORT_CFG_LOCKED_PORTMOVE_CPU_GET(x)\ + FIELD_GET(ANA_PORT_CFG_LOCKED_PORTMOVE_CPU, x) + +#define ANA_PORT_CFG_LEARNDROP BIT(8) +#define ANA_PORT_CFG_LEARNDROP_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_LEARNDROP, x) +#define ANA_PORT_CFG_LEARNDROP_GET(x)\ + FIELD_GET(ANA_PORT_CFG_LEARNDROP, x) + +#define ANA_PORT_CFG_LEARNCPU BIT(7) +#define ANA_PORT_CFG_LEARNCPU_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_LEARNCPU, x) +#define ANA_PORT_CFG_LEARNCPU_GET(x)\ + FIELD_GET(ANA_PORT_CFG_LEARNCPU, x) + +#define ANA_PORT_CFG_LEARNAUTO BIT(6) +#define ANA_PORT_CFG_LEARNAUTO_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_LEARNAUTO, x) +#define ANA_PORT_CFG_LEARNAUTO_GET(x)\ + FIELD_GET(ANA_PORT_CFG_LEARNAUTO, x) + +#define ANA_PORT_CFG_LEARN_ENA BIT(5) +#define ANA_PORT_CFG_LEARN_ENA_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_LEARN_ENA, x) +#define ANA_PORT_CFG_LEARN_ENA_GET(x)\ + FIELD_GET(ANA_PORT_CFG_LEARN_ENA, x) + +#define ANA_PORT_CFG_RECV_ENA BIT(4) +#define ANA_PORT_CFG_RECV_ENA_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_RECV_ENA, x) +#define ANA_PORT_CFG_RECV_ENA_GET(x)\ + FIELD_GET(ANA_PORT_CFG_RECV_ENA, x) + +#define ANA_PORT_CFG_PORTID_VAL GENMASK(3, 0) +#define ANA_PORT_CFG_PORTID_VAL_SET(x)\ + FIELD_PREP(ANA_PORT_CFG_PORTID_VAL, x) +#define ANA_PORT_CFG_PORTID_VAL_GET(x)\ + FIELD_GET(ANA_PORT_CFG_PORTID_VAL, x) + +/* ANA:PFC:PFC_CFG */ +#define ANA_PFC_CFG(g) __REG(TARGET_ANA,\ + 0, 1, 28672, g, 9, 64, 0, 0, 1, 4) + +#define ANA_PFC_CFG_RX_PFC_ENA GENMASK(9, 2) +#define ANA_PFC_CFG_RX_PFC_ENA_SET(x)\ + FIELD_PREP(ANA_PFC_CFG_RX_PFC_ENA, x) +#define ANA_PFC_CFG_RX_PFC_ENA_GET(x)\ + FIELD_GET(ANA_PFC_CFG_RX_PFC_ENA, x) + +#define ANA_PFC_CFG_FC_LINK_SPEED GENMASK(1, 0) +#define ANA_PFC_CFG_FC_LINK_SPEED_SET(x)\ + FIELD_PREP(ANA_PFC_CFG_FC_LINK_SPEED, x) +#define ANA_PFC_CFG_FC_LINK_SPEED_GET(x)\ + FIELD_GET(ANA_PFC_CFG_FC_LINK_SPEED, x) + +/* 
ANA:COMMON:AGGR_CFG */ +#define ANA_AGGR_CFG __REG(TARGET_ANA,\ + 0, 1, 29248, 0, 1, 552, 0, 0, 1, 4) + +#define ANA_AGGR_CFG_AC_RND_ENA BIT(6) +#define ANA_AGGR_CFG_AC_RND_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_RND_ENA, x) +#define ANA_AGGR_CFG_AC_RND_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_RND_ENA, x) + +#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(5) +#define ANA_AGGR_CFG_AC_DMAC_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_DMAC_ENA, x) +#define ANA_AGGR_CFG_AC_DMAC_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_DMAC_ENA, x) + +#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(4) +#define ANA_AGGR_CFG_AC_SMAC_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_SMAC_ENA, x) +#define ANA_AGGR_CFG_AC_SMAC_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_SMAC_ENA, x) + +#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(3) +#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x) +#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA, x) + +#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(2) +#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x) +#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA, x) + +#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(1) +#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x) +#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA, x) + +#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(0) +#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(x)\ + FIELD_PREP(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x) +#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_GET(x)\ + FIELD_GET(ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, x) + +/* CHIP_TOP:CUPHY_CFG:CUPHY_PORT_CFG */ +#define CHIP_TOP_CUPHY_PORT_CFG(r) __REG(TARGET_CHIP_TOP,\ + 0, 1, 12, 0, 1, 64, 20, r, 5, 4) + +#define CHIP_TOP_CUPHY_PORT_CFG_AUTO_SQUELCH_ENA BIT(7) +#define CHIP_TOP_CUPHY_PORT_CFG_AUTO_SQUELCH_ENA_SET(x)\ + 
FIELD_PREP(CHIP_TOP_CUPHY_PORT_CFG_AUTO_SQUELCH_ENA, x) +#define CHIP_TOP_CUPHY_PORT_CFG_AUTO_SQUELCH_ENA_GET(x)\ + FIELD_GET(CHIP_TOP_CUPHY_PORT_CFG_AUTO_SQUELCH_ENA, x) + +#define CHIP_TOP_CUPHY_PORT_CFG_COMA_MODE BIT(6) +#define CHIP_TOP_CUPHY_PORT_CFG_COMA_MODE_SET(x)\ + FIELD_PREP(CHIP_TOP_CUPHY_PORT_CFG_COMA_MODE, x) +#define CHIP_TOP_CUPHY_PORT_CFG_COMA_MODE_GET(x)\ + FIELD_GET(CHIP_TOP_CUPHY_PORT_CFG_COMA_MODE, x) + +#define CHIP_TOP_CUPHY_PORT_CFG_MODE GENMASK(5, 1) +#define CHIP_TOP_CUPHY_PORT_CFG_MODE_SET(x)\ + FIELD_PREP(CHIP_TOP_CUPHY_PORT_CFG_MODE, x) +#define CHIP_TOP_CUPHY_PORT_CFG_MODE_GET(x)\ + FIELD_GET(CHIP_TOP_CUPHY_PORT_CFG_MODE, x) + +#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA BIT(0) +#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(x)\ + FIELD_PREP(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x) +#define CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_GET(x)\ + FIELD_GET(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, x) + +/* CHIP_TOP:CUPHY_CFG:CUPHY_LED_CFG */ +#define CHIP_TOP_CUPHY_LED_CFG(r) __REG(TARGET_CHIP_TOP,\ + 0, 1, 12, 0, 1, 64, 40, r, 5, 4) + +#define CHIP_TOP_CUPHY_LED_CFG_LED_ECO_DIS BIT(11) +#define CHIP_TOP_CUPHY_LED_CFG_LED_ECO_DIS_SET(x)\ + FIELD_PREP(CHIP_TOP_CUPHY_LED_CFG_LED_ECO_DIS, x) +#define CHIP_TOP_CUPHY_LED_CFG_LED_ECO_DIS_GET(x)\ + FIELD_GET(CHIP_TOP_CUPHY_LED_CFG_LED_ECO_DIS, x) + +#define CHIP_TOP_CUPHY_LED_CFG_LED_EEE_MODE BIT(10) +#define CHIP_TOP_CUPHY_LED_CFG_LED_EEE_MODE_SET(x)\ + FIELD_PREP(CHIP_TOP_CUPHY_LED_CFG_LED_EEE_MODE, x) +#define CHIP_TOP_CUPHY_LED_CFG_LED_EEE_MODE_GET(x)\ + FIELD_GET(CHIP_TOP_CUPHY_LED_CFG_LED_EEE_MODE, x) + +#define CHIP_TOP_CUPHY_LED_CFG_LED_TEST_MODE GENMASK(9, 8) +#define CHIP_TOP_CUPHY_LED_CFG_LED_TEST_MODE_SET(x)\ + FIELD_PREP(CHIP_TOP_CUPHY_LED_CFG_LED_TEST_MODE, x) +#define CHIP_TOP_CUPHY_LED_CFG_LED_TEST_MODE_GET(x)\ + FIELD_GET(CHIP_TOP_CUPHY_LED_CFG_LED_TEST_MODE, x) + +#define CHIP_TOP_CUPHY_LED_CFG_LED_TEST_VAL GENMASK(7, 6) +#define CHIP_TOP_CUPHY_LED_CFG_LED_TEST_VAL_SET(x)\ + 
FIELD_PREP(CHIP_TOP_CUPHY_LED_CFG_LED_TEST_VAL, x) +#define CHIP_TOP_CUPHY_LED_CFG_LED_TEST_VAL_GET(x)\ + FIELD_GET(CHIP_TOP_CUPHY_LED_CFG_LED_TEST_VAL, x) + +#define CHIP_TOP_CUPHY_LED_CFG_LED_POLARITY GENMASK(5, 4) +#define CHIP_TOP_CUPHY_LED_CFG_LED_POLARITY_SET(x)\ + FIELD_PREP(CHIP_TOP_CUPHY_LED_CFG_LED_POLARITY, x) +#define CHIP_TOP_CUPHY_LED_CFG_LED_POLARITY_GET(x)\ + FIELD_GET(CHIP_TOP_CUPHY_LED_CFG_LED_POLARITY, x) + +#define CHIP_TOP_CUPHY_LED_CFG_LED_DRIVE_MODE GENMASK(3, 2) +#define CHIP_TOP_CUPHY_LED_CFG_LED_DRIVE_MODE_SET(x)\ + FIELD_PREP(CHIP_TOP_CUPHY_LED_CFG_LED_DRIVE_MODE, x) +#define CHIP_TOP_CUPHY_LED_CFG_LED_DRIVE_MODE_GET(x)\ + FIELD_GET(CHIP_TOP_CUPHY_LED_CFG_LED_DRIVE_MODE, x) + +#define CHIP_TOP_CUPHY_LED_CFG_LED_BLINK_MODE GENMASK(1, 0) +#define CHIP_TOP_CUPHY_LED_CFG_LED_BLINK_MODE_SET(x)\ + FIELD_PREP(CHIP_TOP_CUPHY_LED_CFG_LED_BLINK_MODE, x) +#define CHIP_TOP_CUPHY_LED_CFG_LED_BLINK_MODE_GET(x)\ + FIELD_GET(CHIP_TOP_CUPHY_LED_CFG_LED_BLINK_MODE, x) + +/* DEV:PORT_MODE:CLOCK_CFG */ +#define DEV_CLOCK_CFG(t) __REG(TARGET_DEV,\ + t, 9, 0, 0, 1, 20, 0, 0, 1, 4) + +#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7) +#define DEV_CLOCK_CFG_MAC_TX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_MAC_TX_RST, x) +#define DEV_CLOCK_CFG_MAC_TX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_MAC_TX_RST, x) + +#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6) +#define DEV_CLOCK_CFG_MAC_RX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_MAC_RX_RST, x) +#define DEV_CLOCK_CFG_MAC_RX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_MAC_RX_RST, x) + +#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5) +#define DEV_CLOCK_CFG_PCS_TX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_PCS_TX_RST, x) +#define DEV_CLOCK_CFG_PCS_TX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_PCS_TX_RST, x) + +#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4) +#define DEV_CLOCK_CFG_PCS_RX_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_PCS_RX_RST, x) +#define DEV_CLOCK_CFG_PCS_RX_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_PCS_RX_RST, x) + +#define DEV_CLOCK_CFG_PORT_RST BIT(3) +#define 
DEV_CLOCK_CFG_PORT_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_PORT_RST, x) +#define DEV_CLOCK_CFG_PORT_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_PORT_RST, x) + +#define DEV_CLOCK_CFG_PHY_RST BIT(2) +#define DEV_CLOCK_CFG_PHY_RST_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_PHY_RST, x) +#define DEV_CLOCK_CFG_PHY_RST_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_PHY_RST, x) + +#define DEV_CLOCK_CFG_LINK_SPEED GENMASK(1, 0) +#define DEV_CLOCK_CFG_LINK_SPEED_SET(x)\ + FIELD_PREP(DEV_CLOCK_CFG_LINK_SPEED, x) +#define DEV_CLOCK_CFG_LINK_SPEED_GET(x)\ + FIELD_GET(DEV_CLOCK_CFG_LINK_SPEED, x) + +/* DEV:MAC_CFG_STATUS:MAC_ENA_CFG */ +#define DEV_MAC_ENA_CFG(t) __REG(TARGET_DEV,\ + t, 9, 20, 0, 1, 44, 0, 0, 1, 4) + +#define DEV_MAC_ENA_CFG_RX_ENA BIT(4) +#define DEV_MAC_ENA_CFG_RX_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_ENA_CFG_RX_ENA, x) +#define DEV_MAC_ENA_CFG_RX_ENA_GET(x)\ + FIELD_GET(DEV_MAC_ENA_CFG_RX_ENA, x) + +#define DEV_MAC_ENA_CFG_TX_ENA BIT(0) +#define DEV_MAC_ENA_CFG_TX_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_ENA_CFG_TX_ENA, x) +#define DEV_MAC_ENA_CFG_TX_ENA_GET(x)\ + FIELD_GET(DEV_MAC_ENA_CFG_TX_ENA, x) + +/* DEV:MAC_CFG_STATUS:MAC_MODE_CFG */ +#define DEV_MAC_MODE_CFG(t) __REG(TARGET_DEV,\ + t, 9, 20, 0, 1, 44, 4, 0, 1, 4) + +#define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8) +#define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA, x) +#define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA_GET(x)\ + FIELD_GET(DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA, x) + +#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4) +#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x) +#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA_GET(x)\ + FIELD_GET(DEV_MAC_MODE_CFG_GIGA_MODE_ENA, x) + +#define DEV_MAC_MODE_CFG_FDX_ENA BIT(0) +#define DEV_MAC_MODE_CFG_FDX_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_MODE_CFG_FDX_ENA, x) +#define DEV_MAC_MODE_CFG_FDX_ENA_GET(x)\ + FIELD_GET(DEV_MAC_MODE_CFG_FDX_ENA, x) + +/* DEV:MAC_CFG_STATUS:MAC_MAXLEN_CFG */ +#define DEV_MAC_MAXLEN_CFG(t) 
__REG(TARGET_DEV,\ + t, 9, 20, 0, 1, 44, 8, 0, 1, 4) + +#define DEV_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0) +#define DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\ + FIELD_PREP(DEV_MAC_MAXLEN_CFG_MAX_LEN, x) +#define DEV_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ + FIELD_GET(DEV_MAC_MAXLEN_CFG_MAX_LEN, x) + +/* DEV:MAC_CFG_STATUS:MAC_TAGS_CFG */ +#define DEV_MAC_TAGS_CFG(t) __REG(TARGET_DEV,\ + t, 9, 20, 0, 1, 44, 12, 0, 1, 4) + +#define DEV_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16) +#define DEV_MAC_TAGS_CFG_TAG_ID_SET(x)\ + FIELD_PREP(DEV_MAC_TAGS_CFG_TAG_ID, x) +#define DEV_MAC_TAGS_CFG_TAG_ID_GET(x)\ + FIELD_GET(DEV_MAC_TAGS_CFG_TAG_ID, x) + +#define DEV_MAC_TAGS_CFG_PB_ENA BIT(1) +#define DEV_MAC_TAGS_CFG_PB_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_TAGS_CFG_PB_ENA, x) +#define DEV_MAC_TAGS_CFG_PB_ENA_GET(x)\ + FIELD_GET(DEV_MAC_TAGS_CFG_PB_ENA, x) + +#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) +#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x) +#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\ + FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x) + +#define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2) +#define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x) +#define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA_GET(x)\ + FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA, x) + +/* DEV:MAC_CFG_STATUS:MAC_IFG_CFG */ +#define DEV_MAC_IFG_CFG(t) __REG(TARGET_DEV,\ + t, 9, 20, 0, 1, 44, 20, 0, 1, 4) + +#define DEV_MAC_IFG_CFG_OLD_IPG_CHECK BIT(17) +#define DEV_MAC_IFG_CFG_OLD_IPG_CHECK_SET(x)\ + FIELD_PREP(DEV_MAC_IFG_CFG_OLD_IPG_CHECK, x) +#define DEV_MAC_IFG_CFG_OLD_IPG_CHECK_GET(x)\ + FIELD_GET(DEV_MAC_IFG_CFG_OLD_IPG_CHECK, x) + +#define DEV_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16) +#define DEV_MAC_IFG_CFG_REDUCED_TX_IFG_SET(x)\ + FIELD_PREP(DEV_MAC_IFG_CFG_REDUCED_TX_IFG, x) +#define DEV_MAC_IFG_CFG_REDUCED_TX_IFG_GET(x)\ + FIELD_GET(DEV_MAC_IFG_CFG_REDUCED_TX_IFG, x) + +#define DEV_MAC_IFG_CFG_TX_IFG GENMASK(12, 8) +#define 
DEV_MAC_IFG_CFG_TX_IFG_SET(x)\ + FIELD_PREP(DEV_MAC_IFG_CFG_TX_IFG, x) +#define DEV_MAC_IFG_CFG_TX_IFG_GET(x)\ + FIELD_GET(DEV_MAC_IFG_CFG_TX_IFG, x) + +#define DEV_MAC_IFG_CFG_RX_IFG2 GENMASK(7, 4) +#define DEV_MAC_IFG_CFG_RX_IFG2_SET(x)\ + FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG2, x) +#define DEV_MAC_IFG_CFG_RX_IFG2_GET(x)\ + FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG2, x) + +#define DEV_MAC_IFG_CFG_RX_IFG1 GENMASK(3, 0) +#define DEV_MAC_IFG_CFG_RX_IFG1_SET(x)\ + FIELD_PREP(DEV_MAC_IFG_CFG_RX_IFG1, x) +#define DEV_MAC_IFG_CFG_RX_IFG1_GET(x)\ + FIELD_GET(DEV_MAC_IFG_CFG_RX_IFG1, x) + +/* DEV:MAC_CFG_STATUS:MAC_HDX_CFG */ +#define DEV_MAC_HDX_CFG(t) __REG(TARGET_DEV,\ + t, 9, 20, 0, 1, 44, 24, 0, 1, 4) + +#define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26) +#define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC_SET(x)\ + FIELD_PREP(DEV_MAC_HDX_CFG_BYPASS_COL_SYNC, x) +#define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC_GET(x)\ + FIELD_GET(DEV_MAC_HDX_CFG_BYPASS_COL_SYNC, x) + +#define DEV_MAC_HDX_CFG_OB_ENA BIT(25) +#define DEV_MAC_HDX_CFG_OB_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_HDX_CFG_OB_ENA, x) +#define DEV_MAC_HDX_CFG_OB_ENA_GET(x)\ + FIELD_GET(DEV_MAC_HDX_CFG_OB_ENA, x) + +#define DEV_MAC_HDX_CFG_WEXC_DIS BIT(24) +#define DEV_MAC_HDX_CFG_WEXC_DIS_SET(x)\ + FIELD_PREP(DEV_MAC_HDX_CFG_WEXC_DIS, x) +#define DEV_MAC_HDX_CFG_WEXC_DIS_GET(x)\ + FIELD_GET(DEV_MAC_HDX_CFG_WEXC_DIS, x) + +#define DEV_MAC_HDX_CFG_SEED GENMASK(23, 16) +#define DEV_MAC_HDX_CFG_SEED_SET(x)\ + FIELD_PREP(DEV_MAC_HDX_CFG_SEED, x) +#define DEV_MAC_HDX_CFG_SEED_GET(x)\ + FIELD_GET(DEV_MAC_HDX_CFG_SEED, x) + +#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12) +#define DEV_MAC_HDX_CFG_SEED_LOAD_SET(x)\ + FIELD_PREP(DEV_MAC_HDX_CFG_SEED_LOAD, x) +#define DEV_MAC_HDX_CFG_SEED_LOAD_GET(x)\ + FIELD_GET(DEV_MAC_HDX_CFG_SEED_LOAD, x) + +#define DEV_MAC_HDX_CFG_RETRY_EXC_COL_ENA BIT(8) +#define DEV_MAC_HDX_CFG_RETRY_EXC_COL_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_HDX_CFG_RETRY_EXC_COL_ENA, x) +#define DEV_MAC_HDX_CFG_RETRY_EXC_COL_ENA_GET(x)\ + 
FIELD_GET(DEV_MAC_HDX_CFG_RETRY_EXC_COL_ENA, x) + +#define DEV_MAC_HDX_CFG_LATE_COL_POS GENMASK(6, 0) +#define DEV_MAC_HDX_CFG_LATE_COL_POS_SET(x)\ + FIELD_PREP(DEV_MAC_HDX_CFG_LATE_COL_POS, x) +#define DEV_MAC_HDX_CFG_LATE_COL_POS_GET(x)\ + FIELD_GET(DEV_MAC_HDX_CFG_LATE_COL_POS, x) + +/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_LOW_CFG */ +#define DEV_FC_MAC_LOW_CFG(t) __REG(TARGET_DEV,\ + t, 9, 20, 0, 1, 44, 32, 0, 1, 4) + +#define DEV_FC_MAC_LOW_CFG_MAC_LOW GENMASK(23, 0) +#define DEV_FC_MAC_LOW_CFG_MAC_LOW_SET(x)\ + FIELD_PREP(DEV_FC_MAC_LOW_CFG_MAC_LOW, x) +#define DEV_FC_MAC_LOW_CFG_MAC_LOW_GET(x)\ + FIELD_GET(DEV_FC_MAC_LOW_CFG_MAC_LOW, x) + +/* DEV:MAC_CFG_STATUS:MAC_FC_MAC_HIGH_CFG */ +#define DEV_FC_MAC_HIGH_CFG(t) __REG(TARGET_DEV,\ + t, 9, 20, 0, 1, 44, 36, 0, 1, 4) + +#define DEV_FC_MAC_HIGH_CFG_MAC_HIGH GENMASK(23, 0) +#define DEV_FC_MAC_HIGH_CFG_MAC_HIGH_SET(x)\ + FIELD_PREP(DEV_FC_MAC_HIGH_CFG_MAC_HIGH, x) +#define DEV_FC_MAC_HIGH_CFG_MAC_HIGH_GET(x)\ + FIELD_GET(DEV_FC_MAC_HIGH_CFG_MAC_HIGH, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_CFG */ +#define DEV_PCS1G_CFG(t) __REG(TARGET_DEV,\ + t, 9, 64, 0, 1, 68, 0, 0, 1, 4) + +#define DEV_PCS1G_CFG_LINK_STATUS_TYPE BIT(4) +#define DEV_PCS1G_CFG_LINK_STATUS_TYPE_SET(x)\ + FIELD_PREP(DEV_PCS1G_CFG_LINK_STATUS_TYPE, x) +#define DEV_PCS1G_CFG_LINK_STATUS_TYPE_GET(x)\ + FIELD_GET(DEV_PCS1G_CFG_LINK_STATUS_TYPE, x) + +#define DEV_PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1) +#define DEV_PCS1G_CFG_AN_LINK_CTRL_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_CFG_AN_LINK_CTRL_ENA, x) +#define DEV_PCS1G_CFG_AN_LINK_CTRL_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_CFG_AN_LINK_CTRL_ENA, x) + +#define DEV_PCS1G_CFG_PCS_ENA BIT(0) +#define DEV_PCS1G_CFG_PCS_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_CFG_PCS_ENA, x) +#define DEV_PCS1G_CFG_PCS_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_CFG_PCS_ENA, x) + +/* DEV:PCS1G_CFG_STATUS:PCS1G_SD_CFG */ +#define DEV_PCS1G_SD_CFG(t) __REG(TARGET_DEV,\ + t, 9, 64, 0, 1, 68, 8, 0, 1, 4) + +#define DEV_PCS1G_SD_CFG_SD_SEL BIT(8) +#define 
DEV_PCS1G_SD_CFG_SD_SEL_SET(x)\ + FIELD_PREP(DEV_PCS1G_SD_CFG_SD_SEL, x) +#define DEV_PCS1G_SD_CFG_SD_SEL_GET(x)\ + FIELD_GET(DEV_PCS1G_SD_CFG_SD_SEL, x) + +#define DEV_PCS1G_SD_CFG_SD_POL BIT(4) +#define DEV_PCS1G_SD_CFG_SD_POL_SET(x)\ + FIELD_PREP(DEV_PCS1G_SD_CFG_SD_POL, x) +#define DEV_PCS1G_SD_CFG_SD_POL_GET(x)\ + FIELD_GET(DEV_PCS1G_SD_CFG_SD_POL, x) + +#define DEV_PCS1G_SD_CFG_SD_ENA BIT(0) +#define DEV_PCS1G_SD_CFG_SD_ENA_SET(x)\ + FIELD_PREP(DEV_PCS1G_SD_CFG_SD_ENA, x) +#define DEV_PCS1G_SD_CFG_SD_ENA_GET(x)\ + FIELD_GET(DEV_PCS1G_SD_CFG_SD_ENA, x) + +/* DEVCPU_GCB:CHIP_REGS:FEAT_DISABLE */ +#define GCB_FEAT_DISABLE __REG(TARGET_GCB,\ + 0, 1, 0, 0, 1, 28, 20, 0, 1, 4) + +#define GCB_FEAT_DISABLE_FEAT_EEPROM_CFG_DIS BIT(0) +#define GCB_FEAT_DISABLE_FEAT_EEPROM_CFG_DIS_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_EEPROM_CFG_DIS, x) +#define GCB_FEAT_DISABLE_FEAT_EEPROM_CFG_DIS_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_EEPROM_CFG_DIS, x) + +#define GCB_FEAT_DISABLE_FEAT_EEPROM_BOOT_DIS BIT(1) +#define GCB_FEAT_DISABLE_FEAT_EEPROM_BOOT_DIS_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_EEPROM_BOOT_DIS, x) +#define GCB_FEAT_DISABLE_FEAT_EEPROM_BOOT_DIS_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_EEPROM_BOOT_DIS, x) + +#define GCB_FEAT_DISABLE_FEAT_EEPROM_FW_PATCH_DIS BIT(2) +#define GCB_FEAT_DISABLE_FEAT_EEPROM_FW_PATCH_DIS_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_EEPROM_FW_PATCH_DIS, x) +#define GCB_FEAT_DISABLE_FEAT_EEPROM_FW_PATCH_DIS_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_EEPROM_FW_PATCH_DIS, x) + +#define GCB_FEAT_DISABLE_FEAT_MMU_BACKDOOR_WR_DIS BIT(3) +#define GCB_FEAT_DISABLE_FEAT_MMU_BACKDOOR_WR_DIS_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_MMU_BACKDOOR_WR_DIS, x) +#define GCB_FEAT_DISABLE_FEAT_MMU_BACKDOOR_WR_DIS_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_MMU_BACKDOOR_WR_DIS, x) + +#define GCB_FEAT_DISABLE_FEAT_MGMT_IF_WR_DIS BIT(4) +#define GCB_FEAT_DISABLE_FEAT_MGMT_IF_WR_DIS_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_MGMT_IF_WR_DIS, x) +#define 
GCB_FEAT_DISABLE_FEAT_MGMT_IF_WR_DIS_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_MGMT_IF_WR_DIS, x) + +#define GCB_FEAT_DISABLE_FEAT_DD_DIS BIT(5) +#define GCB_FEAT_DISABLE_FEAT_DD_DIS_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_DD_DIS, x) +#define GCB_FEAT_DISABLE_FEAT_DD_DIS_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_DD_DIS, x) + +#define GCB_FEAT_DISABLE_FEAT_TSN_DIS BIT(6) +#define GCB_FEAT_DISABLE_FEAT_TSN_DIS_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_TSN_DIS, x) +#define GCB_FEAT_DISABLE_FEAT_TSN_DIS_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_TSN_DIS, x) + +#define GCB_FEAT_DISABLE_FEAT_PTP_DIS BIT(7) +#define GCB_FEAT_DISABLE_FEAT_PTP_DIS_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_PTP_DIS, x) +#define GCB_FEAT_DISABLE_FEAT_PTP_DIS_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_PTP_DIS, x) + +#define GCB_FEAT_DISABLE_FEAT_FRER_DIS BIT(8) +#define GCB_FEAT_DISABLE_FEAT_FRER_DIS_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_FRER_DIS, x) +#define GCB_FEAT_DISABLE_FEAT_FRER_DIS_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_FRER_DIS, x) + +#define GCB_FEAT_DISABLE_FEAT_NUM_PORTS_DIS GENMASK(14, 12) +#define GCB_FEAT_DISABLE_FEAT_NUM_PORTS_DIS_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_NUM_PORTS_DIS, x) +#define GCB_FEAT_DISABLE_FEAT_NUM_PORTS_DIS_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_NUM_PORTS_DIS, x) + +#define GCB_FEAT_DISABLE_FEAT_NUM_CU_DIS GENMASK(16, 15) +#define GCB_FEAT_DISABLE_FEAT_NUM_CU_DIS_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_NUM_CU_DIS, x) +#define GCB_FEAT_DISABLE_FEAT_NUM_CU_DIS_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_NUM_CU_DIS, x) + +#define GCB_FEAT_DISABLE_FEAT_RGMII_DIS GENMASK(18, 17) +#define GCB_FEAT_DISABLE_FEAT_RGMII_DIS_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_RGMII_DIS, x) +#define GCB_FEAT_DISABLE_FEAT_RGMII_DIS_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_RGMII_DIS, x) + +#define GCB_FEAT_DISABLE_FEAT_SGMII_DIS GENMASK(20, 19) +#define GCB_FEAT_DISABLE_FEAT_SGMII_DIS_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_SGMII_DIS, x) +#define 
GCB_FEAT_DISABLE_FEAT_SGMII_DIS_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_SGMII_DIS, x) + +#define GCB_FEAT_DISABLE_FEAT_QSGMII_DIS BIT(21) +#define GCB_FEAT_DISABLE_FEAT_QSGMII_DIS_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_QSGMII_DIS, x) +#define GCB_FEAT_DISABLE_FEAT_QSGMII_DIS_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_QSGMII_DIS, x) + +#define GCB_FEAT_DISABLE_FEAT_NUM_CU_FIXED BIT(22) +#define GCB_FEAT_DISABLE_FEAT_NUM_CU_FIXED_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_NUM_CU_FIXED, x) +#define GCB_FEAT_DISABLE_FEAT_NUM_CU_FIXED_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_NUM_CU_FIXED, x) + +#define GCB_FEAT_DISABLE_FEAT_WDT_ENABLED BIT(25) +#define GCB_FEAT_DISABLE_FEAT_WDT_ENABLED_SET(x)\ + FIELD_PREP(GCB_FEAT_DISABLE_FEAT_WDT_ENABLED, x) +#define GCB_FEAT_DISABLE_FEAT_WDT_ENABLED_GET(x)\ + FIELD_GET(GCB_FEAT_DISABLE_FEAT_WDT_ENABLED, x) + +/* HSIO:HW_CFGSTAT:HW_CFG */ +#define HSIO_HW_CFG __REG(TARGET_HSIO,\ + 0, 1, 72, 0, 1, 44, 0, 0, 1, 4) + +#define HSIO_HW_CFG_RGMII_0_CFG BIT(10) +#define HSIO_HW_CFG_RGMII_0_CFG_SET(x)\ + FIELD_PREP(HSIO_HW_CFG_RGMII_0_CFG, x) +#define HSIO_HW_CFG_RGMII_0_CFG_GET(x)\ + FIELD_GET(HSIO_HW_CFG_RGMII_0_CFG, x) + +#define HSIO_HW_CFG_GMII_ENA GENMASK(9, 1) +#define HSIO_HW_CFG_GMII_ENA_SET(x)\ + FIELD_PREP(HSIO_HW_CFG_GMII_ENA, x) +#define HSIO_HW_CFG_GMII_ENA_GET(x)\ + FIELD_GET(HSIO_HW_CFG_GMII_ENA, x) + +#define HSIO_HW_CFG_QSGMII_ENA BIT(0) +#define HSIO_HW_CFG_QSGMII_ENA_SET(x)\ + FIELD_PREP(HSIO_HW_CFG_QSGMII_ENA, x) +#define HSIO_HW_CFG_QSGMII_ENA_GET(x)\ + FIELD_GET(HSIO_HW_CFG_QSGMII_ENA, x) + +/* HSIO:HW_CFGSTAT:RGMII_CFG */ +#define HSIO_RGMII_CFG(r) __REG(TARGET_HSIO,\ + 0, 1, 72, 0, 1, 44, 12, r, 2, 4) + +#define HSIO_RGMII_CFG_IB_RX_LINK_STATUS BIT(15) +#define HSIO_RGMII_CFG_IB_RX_LINK_STATUS_SET(x)\ + FIELD_PREP(HSIO_RGMII_CFG_IB_RX_LINK_STATUS, x) +#define HSIO_RGMII_CFG_IB_RX_LINK_STATUS_GET(x)\ + FIELD_GET(HSIO_RGMII_CFG_IB_RX_LINK_STATUS, x) + +#define HSIO_RGMII_CFG_IB_RX_DUPLEX BIT(14) +#define 
HSIO_RGMII_CFG_IB_RX_DUPLEX_SET(x)\ + FIELD_PREP(HSIO_RGMII_CFG_IB_RX_DUPLEX, x) +#define HSIO_RGMII_CFG_IB_RX_DUPLEX_GET(x)\ + FIELD_GET(HSIO_RGMII_CFG_IB_RX_DUPLEX, x) + +#define HSIO_RGMII_CFG_IB_RX_SPEED GENMASK(13, 12) +#define HSIO_RGMII_CFG_IB_RX_SPEED_SET(x)\ + FIELD_PREP(HSIO_RGMII_CFG_IB_RX_SPEED, x) +#define HSIO_RGMII_CFG_IB_RX_SPEED_GET(x)\ + FIELD_GET(HSIO_RGMII_CFG_IB_RX_SPEED, x) + +#define HSIO_RGMII_CFG_IB_TX_LINK_STATUS BIT(11) +#define HSIO_RGMII_CFG_IB_TX_LINK_STATUS_SET(x)\ + FIELD_PREP(HSIO_RGMII_CFG_IB_TX_LINK_STATUS, x) +#define HSIO_RGMII_CFG_IB_TX_LINK_STATUS_GET(x)\ + FIELD_GET(HSIO_RGMII_CFG_IB_TX_LINK_STATUS, x) + +#define HSIO_RGMII_CFG_IB_TX_FDX BIT(10) +#define HSIO_RGMII_CFG_IB_TX_FDX_SET(x)\ + FIELD_PREP(HSIO_RGMII_CFG_IB_TX_FDX, x) +#define HSIO_RGMII_CFG_IB_TX_FDX_GET(x)\ + FIELD_GET(HSIO_RGMII_CFG_IB_TX_FDX, x) + +#define HSIO_RGMII_CFG_IB_TX_MII_SPD BIT(9) +#define HSIO_RGMII_CFG_IB_TX_MII_SPD_SET(x)\ + FIELD_PREP(HSIO_RGMII_CFG_IB_TX_MII_SPD, x) +#define HSIO_RGMII_CFG_IB_TX_MII_SPD_GET(x)\ + FIELD_GET(HSIO_RGMII_CFG_IB_TX_MII_SPD, x) + +#define HSIO_RGMII_CFG_IB_TX_SPD_1G BIT(8) +#define HSIO_RGMII_CFG_IB_TX_SPD_1G_SET(x)\ + FIELD_PREP(HSIO_RGMII_CFG_IB_TX_SPD_1G, x) +#define HSIO_RGMII_CFG_IB_TX_SPD_1G_GET(x)\ + FIELD_GET(HSIO_RGMII_CFG_IB_TX_SPD_1G, x) + +#define HSIO_RGMII_CFG_IB_TX_ENA BIT(7) +#define HSIO_RGMII_CFG_IB_TX_ENA_SET(x)\ + FIELD_PREP(HSIO_RGMII_CFG_IB_TX_ENA, x) +#define HSIO_RGMII_CFG_IB_TX_ENA_GET(x)\ + FIELD_GET(HSIO_RGMII_CFG_IB_TX_ENA, x) + +#define HSIO_RGMII_CFG_IB_RX_ENA BIT(6) +#define HSIO_RGMII_CFG_IB_RX_ENA_SET(x)\ + FIELD_PREP(HSIO_RGMII_CFG_IB_RX_ENA, x) +#define HSIO_RGMII_CFG_IB_RX_ENA_GET(x)\ + FIELD_GET(HSIO_RGMII_CFG_IB_RX_ENA, x) + +#define HSIO_RGMII_CFG_IB_ENA BIT(5) +#define HSIO_RGMII_CFG_IB_ENA_SET(x)\ + FIELD_PREP(HSIO_RGMII_CFG_IB_ENA, x) +#define HSIO_RGMII_CFG_IB_ENA_GET(x)\ + FIELD_GET(HSIO_RGMII_CFG_IB_ENA, x) + +#define HSIO_RGMII_CFG_TX_CLK_CFG GENMASK(4, 2) +#define 
HSIO_RGMII_CFG_TX_CLK_CFG_SET(x)\ + FIELD_PREP(HSIO_RGMII_CFG_TX_CLK_CFG, x) +#define HSIO_RGMII_CFG_TX_CLK_CFG_GET(x)\ + FIELD_GET(HSIO_RGMII_CFG_TX_CLK_CFG, x) + +#define HSIO_RGMII_CFG_RGMII_TX_RST BIT(1) +#define HSIO_RGMII_CFG_RGMII_TX_RST_SET(x)\ + FIELD_PREP(HSIO_RGMII_CFG_RGMII_TX_RST, x) +#define HSIO_RGMII_CFG_RGMII_TX_RST_GET(x)\ + FIELD_GET(HSIO_RGMII_CFG_RGMII_TX_RST, x) + +#define HSIO_RGMII_CFG_RGMII_RX_RST BIT(0) +#define HSIO_RGMII_CFG_RGMII_RX_RST_SET(x)\ + FIELD_PREP(HSIO_RGMII_CFG_RGMII_RX_RST, x) +#define HSIO_RGMII_CFG_RGMII_RX_RST_GET(x)\ + FIELD_GET(HSIO_RGMII_CFG_RGMII_RX_RST, x) + +/* HSIO:HW_CFGSTAT:DLL_CFG */ +#define HSIO_DLL_CFG(r) __REG(TARGET_HSIO,\ + 0, 1, 72, 0, 1, 44, 28, r, 4, 4) + +#define HSIO_DLL_CFG_DLL_CLK_ENA BIT(20) +#define HSIO_DLL_CFG_DLL_CLK_ENA_SET(x)\ + FIELD_PREP(HSIO_DLL_CFG_DLL_CLK_ENA, x) +#define HSIO_DLL_CFG_DLL_CLK_ENA_GET(x)\ + FIELD_GET(HSIO_DLL_CFG_DLL_CLK_ENA, x) + +#define HSIO_DLL_CFG_BIST_PASS BIT(19) +#define HSIO_DLL_CFG_BIST_PASS_SET(x)\ + FIELD_PREP(HSIO_DLL_CFG_BIST_PASS, x) +#define HSIO_DLL_CFG_BIST_PASS_GET(x)\ + FIELD_GET(HSIO_DLL_CFG_BIST_PASS, x) + +#define HSIO_DLL_CFG_BIST_END BIT(18) +#define HSIO_DLL_CFG_BIST_END_SET(x)\ + FIELD_PREP(HSIO_DLL_CFG_BIST_END, x) +#define HSIO_DLL_CFG_BIST_END_GET(x)\ + FIELD_GET(HSIO_DLL_CFG_BIST_END, x) + +#define HSIO_DLL_CFG_BIST_START BIT(17) +#define HSIO_DLL_CFG_BIST_START_SET(x)\ + FIELD_PREP(HSIO_DLL_CFG_BIST_START, x) +#define HSIO_DLL_CFG_BIST_START_GET(x)\ + FIELD_GET(HSIO_DLL_CFG_BIST_START, x) + +#define HSIO_DLL_CFG_TAP_SEL GENMASK(16, 10) +#define HSIO_DLL_CFG_TAP_SEL_SET(x)\ + FIELD_PREP(HSIO_DLL_CFG_TAP_SEL, x) +#define HSIO_DLL_CFG_TAP_SEL_GET(x)\ + FIELD_GET(HSIO_DLL_CFG_TAP_SEL, x) + +#define HSIO_DLL_CFG_TAP_ADJ GENMASK(9, 3) +#define HSIO_DLL_CFG_TAP_ADJ_SET(x)\ + FIELD_PREP(HSIO_DLL_CFG_TAP_ADJ, x) +#define HSIO_DLL_CFG_TAP_ADJ_GET(x)\ + FIELD_GET(HSIO_DLL_CFG_TAP_ADJ, x) + +#define HSIO_DLL_CFG_DELAY_ENA BIT(2) +#define 
HSIO_DLL_CFG_DELAY_ENA_SET(x)\ + FIELD_PREP(HSIO_DLL_CFG_DELAY_ENA, x) +#define HSIO_DLL_CFG_DELAY_ENA_GET(x)\ + FIELD_GET(HSIO_DLL_CFG_DELAY_ENA, x) + +#define HSIO_DLL_CFG_DLL_ENA BIT(1) +#define HSIO_DLL_CFG_DLL_ENA_SET(x)\ + FIELD_PREP(HSIO_DLL_CFG_DLL_ENA, x) +#define HSIO_DLL_CFG_DLL_ENA_GET(x)\ + FIELD_GET(HSIO_DLL_CFG_DLL_ENA, x) + +#define HSIO_DLL_CFG_DLL_RST BIT(0) +#define HSIO_DLL_CFG_DLL_RST_SET(x)\ + FIELD_PREP(HSIO_DLL_CFG_DLL_RST, x) +#define HSIO_DLL_CFG_DLL_RST_GET(x)\ + FIELD_GET(HSIO_DLL_CFG_DLL_RST, x) + +/* DEVCPU_QS:XTR:XTR_FLUSH */ +#define QS_XTR_FLUSH __REG(TARGET_QS,\ + 0, 1, 0, 0, 1, 36, 24, 0, 1, 4) + +#define QS_XTR_FLUSH_FLUSH GENMASK(1, 0) +#define QS_XTR_FLUSH_FLUSH_SET(x)\ + FIELD_PREP(QS_XTR_FLUSH_FLUSH, x) +#define QS_XTR_FLUSH_FLUSH_GET(x)\ + FIELD_GET(QS_XTR_FLUSH_FLUSH, x) + +/* DEVCPU_QS:INJ:INJ_GRP_CFG */ +#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS,\ + 0, 1, 36, 0, 1, 40, 0, r, 2, 4) + +#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2) +#define QS_INJ_GRP_CFG_MODE_SET(x)\ + FIELD_PREP(QS_INJ_GRP_CFG_MODE, x) +#define QS_INJ_GRP_CFG_MODE_GET(x)\ + FIELD_GET(QS_INJ_GRP_CFG_MODE, x) + +#define QS_INJ_GRP_CFG_BYTE_SWAP BIT(0) +#define QS_INJ_GRP_CFG_BYTE_SWAP_SET(x)\ + FIELD_PREP(QS_INJ_GRP_CFG_BYTE_SWAP, x) +#define QS_INJ_GRP_CFG_BYTE_SWAP_GET(x)\ + FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x) + +/* DEVCPU_QS:INJ:INJ_CTRL */ +#define QS_INJ_CTRL(r) __REG(TARGET_QS,\ + 0, 1, 36, 0, 1, 40, 16, r, 2, 4) + +#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21) +#define QS_INJ_CTRL_GAP_SIZE_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_GAP_SIZE, x) +#define QS_INJ_CTRL_GAP_SIZE_GET(x)\ + FIELD_GET(QS_INJ_CTRL_GAP_SIZE, x) + +#define QS_INJ_CTRL_ABORT BIT(20) +#define QS_INJ_CTRL_ABORT_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_ABORT, x) +#define QS_INJ_CTRL_ABORT_GET(x)\ + FIELD_GET(QS_INJ_CTRL_ABORT, x) + +#define QS_INJ_CTRL_EOF BIT(19) +#define QS_INJ_CTRL_EOF_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_EOF, x) +#define QS_INJ_CTRL_EOF_GET(x)\ + FIELD_GET(QS_INJ_CTRL_EOF, x) + 
+#define QS_INJ_CTRL_SOF BIT(18) +#define QS_INJ_CTRL_SOF_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_SOF, x) +#define QS_INJ_CTRL_SOF_GET(x)\ + FIELD_GET(QS_INJ_CTRL_SOF, x) + +#define QS_INJ_CTRL_VLD_BYTES GENMASK(17, 16) +#define QS_INJ_CTRL_VLD_BYTES_SET(x)\ + FIELD_PREP(QS_INJ_CTRL_VLD_BYTES, x) +#define QS_INJ_CTRL_VLD_BYTES_GET(x)\ + FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x) + +/* QSYS:SYSTEM:PORT_MODE */ +#define QSYS_PORT_MODE(r) __REG(TARGET_QSYS,\ + 0, 1, 14336, 0, 1, 240, 0, r, 11, 4) + +#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1) +#define QSYS_PORT_MODE_DEQUEUE_DIS_SET(x)\ + FIELD_PREP(QSYS_PORT_MODE_DEQUEUE_DIS, x) +#define QSYS_PORT_MODE_DEQUEUE_DIS_GET(x)\ + FIELD_GET(QSYS_PORT_MODE_DEQUEUE_DIS, x) + +#define QSYS_PORT_MODE_DEQUEUE_LATE BIT(0) +#define QSYS_PORT_MODE_DEQUEUE_LATE_SET(x)\ + FIELD_PREP(QSYS_PORT_MODE_DEQUEUE_LATE, x) +#define QSYS_PORT_MODE_DEQUEUE_LATE_GET(x)\ + FIELD_GET(QSYS_PORT_MODE_DEQUEUE_LATE, x) + +/* QSYS:SYSTEM:SWITCH_PORT_MODE */ +#define QSYS_SW_PORT_MODE(r) __REG(TARGET_QSYS,\ + 0, 1, 14336, 0, 1, 240, 88, r, 10, 4) + +#define QSYS_SW_PORT_MODE_PORT_ENA BIT(19) +#define QSYS_SW_PORT_MODE_PORT_ENA_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_PORT_ENA, x) +#define QSYS_SW_PORT_MODE_PORT_ENA_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_PORT_ENA, x) + +#define QSYS_SW_PORT_MODE_IDEQ_DIS BIT(18) +#define QSYS_SW_PORT_MODE_IDEQ_DIS_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_IDEQ_DIS, x) +#define QSYS_SW_PORT_MODE_IDEQ_DIS_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_IDEQ_DIS, x) + +#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG GENMASK(17, 15) +#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x) +#define QSYS_SW_PORT_MODE_SCH_NEXT_CFG_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_SCH_NEXT_CFG, x) + +#define QSYS_SW_PORT_MODE_YEL_RSRVD BIT(14) +#define QSYS_SW_PORT_MODE_YEL_RSRVD_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_YEL_RSRVD, x) +#define QSYS_SW_PORT_MODE_YEL_RSRVD_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_YEL_RSRVD, x) + +#define 
QSYS_SW_PORT_MODE_INGRESS_DROP_MODE BIT(13) +#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x) +#define QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, x) + +#define QSYS_SW_PORT_MODE_TX_PFC_ENA GENMASK(12, 5) +#define QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_TX_PFC_ENA, x) +#define QSYS_SW_PORT_MODE_TX_PFC_ENA_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_TX_PFC_ENA, x) + +#define QSYS_SW_PORT_MODE_TX_PFC_MODE BIT(4) +#define QSYS_SW_PORT_MODE_TX_PFC_MODE_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_TX_PFC_MODE, x) +#define QSYS_SW_PORT_MODE_TX_PFC_MODE_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_TX_PFC_MODE, x) + +#define QSYS_SW_PORT_MODE_FWD_TWOCYCLE_MODE BIT(3) +#define QSYS_SW_PORT_MODE_FWD_TWOCYCLE_MODE_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_FWD_TWOCYCLE_MODE, x) +#define QSYS_SW_PORT_MODE_FWD_TWOCYCLE_MODE_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_FWD_TWOCYCLE_MODE, x) + +#define QSYS_SW_PORT_MODE_AGING_MODE GENMASK(2, 1) +#define QSYS_SW_PORT_MODE_AGING_MODE_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_AGING_MODE, x) +#define QSYS_SW_PORT_MODE_AGING_MODE_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_AGING_MODE, x) + +#define QSYS_SW_PORT_MODE_HOL_PROTECTED BIT(0) +#define QSYS_SW_PORT_MODE_HOL_PROTECTED_SET(x)\ + FIELD_PREP(QSYS_SW_PORT_MODE_HOL_PROTECTED, x) +#define QSYS_SW_PORT_MODE_HOL_PROTECTED_GET(x)\ + FIELD_GET(QSYS_SW_PORT_MODE_HOL_PROTECTED, x) + +/* QSYS:SYSTEM:EGR_NO_SHARING */ +#define QSYS_EGR_NO_SHARING __REG(TARGET_QSYS,\ + 0, 1, 14336, 0, 1, 240, 176, 0, 1, 4) + +#define QSYS_EGR_NO_SHARING_EGR_NO_SHARING GENMASK(9, 0) +#define QSYS_EGR_NO_SHARING_EGR_NO_SHARING_SET(x)\ + FIELD_PREP(QSYS_EGR_NO_SHARING_EGR_NO_SHARING, x) +#define QSYS_EGR_NO_SHARING_EGR_NO_SHARING_GET(x)\ + FIELD_GET(QSYS_EGR_NO_SHARING_EGR_NO_SHARING, x) + +/* QSYS:SYSTEM:SW_STATUS */ +#define QSYS_SW_STATUS(r) __REG(TARGET_QSYS,\ + 0, 1, 14336, 0, 1, 240, 180, r, 10, 4) + +#define 
QSYS_SW_STATUS_EQ_AVAIL GENMASK(7, 0) +#define QSYS_SW_STATUS_EQ_AVAIL_SET(x)\ + FIELD_PREP(QSYS_SW_STATUS_EQ_AVAIL, x) +#define QSYS_SW_STATUS_EQ_AVAIL_GET(x)\ + FIELD_GET(QSYS_SW_STATUS_EQ_AVAIL, x) + +/* QSYS:SYSTEM:EXT_CPU_CFG */ +#define QSYS_EXT_CPU_CFG __REG(TARGET_QSYS,\ + 0, 1, 14336, 0, 1, 240, 220, 0, 1, 4) + +#define QSYS_EXT_CPU_CFG_EXT_CPU_KILL_ENA BIT(14) +#define QSYS_EXT_CPU_CFG_EXT_CPU_KILL_ENA_SET(x)\ + FIELD_PREP(QSYS_EXT_CPU_CFG_EXT_CPU_KILL_ENA, x) +#define QSYS_EXT_CPU_CFG_EXT_CPU_KILL_ENA_GET(x)\ + FIELD_GET(QSYS_EXT_CPU_CFG_EXT_CPU_KILL_ENA, x) + +#define QSYS_EXT_CPU_CFG_INT_CPU_KILL_ENA BIT(13) +#define QSYS_EXT_CPU_CFG_INT_CPU_KILL_ENA_SET(x)\ + FIELD_PREP(QSYS_EXT_CPU_CFG_INT_CPU_KILL_ENA, x) +#define QSYS_EXT_CPU_CFG_INT_CPU_KILL_ENA_GET(x)\ + FIELD_GET(QSYS_EXT_CPU_CFG_INT_CPU_KILL_ENA, x) + +#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT GENMASK(12, 8) +#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_SET(x)\ + FIELD_PREP(QSYS_EXT_CPU_CFG_EXT_CPU_PORT, x) +#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_GET(x)\ + FIELD_GET(QSYS_EXT_CPU_CFG_EXT_CPU_PORT, x) + +#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK GENMASK(7, 0) +#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_SET(x)\ + FIELD_PREP(QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK, x) +#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_GET(x)\ + FIELD_GET(QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK, x) + +/* QSYS:SYSTEM:CPU_GROUP_MAP */ +#define QSYS_CPU_GROUP_MAP __REG(TARGET_QSYS,\ + 0, 1, 14336, 0, 1, 240, 224, 0, 1, 4) + +#define QSYS_CPU_GROUP_MAP_CPU_GROUP_MAP GENMASK(7, 0) +#define QSYS_CPU_GROUP_MAP_CPU_GROUP_MAP_SET(x)\ + FIELD_PREP(QSYS_CPU_GROUP_MAP_CPU_GROUP_MAP, x) +#define QSYS_CPU_GROUP_MAP_CPU_GROUP_MAP_GET(x)\ + FIELD_GET(QSYS_CPU_GROUP_MAP_CPU_GROUP_MAP, x) + +/* QSYS:RES_CTRL:RES_CFG */ +#define QSYS_RES_CFG(g) __REG(TARGET_QSYS,\ + 0, 1, 16384, g, 1024, 8, 0, 0, 1, 4) + +#define QSYS_RES_CFG_WM_HIGH GENMASK(8, 0) +#define QSYS_RES_CFG_WM_HIGH_SET(x)\ + FIELD_PREP(QSYS_RES_CFG_WM_HIGH, x) +#define QSYS_RES_CFG_WM_HIGH_GET(x)\ + 
FIELD_GET(QSYS_RES_CFG_WM_HIGH, x) + +/* QSYS:DROP_CFG:EGR_DROP_MODE */ +#define QSYS_EGR_DROP_MODE __REG(TARGET_QSYS,\ + 0, 1, 12736, 0, 1, 8, 0, 0, 1, 4) + +#define QSYS_EGR_DROP_MODE_EGRESS_DROP_MODE GENMASK(9, 0) +#define QSYS_EGR_DROP_MODE_EGRESS_DROP_MODE_SET(x)\ + FIELD_PREP(QSYS_EGR_DROP_MODE_EGRESS_DROP_MODE, x) +#define QSYS_EGR_DROP_MODE_EGRESS_DROP_MODE_GET(x)\ + FIELD_GET(QSYS_EGR_DROP_MODE_EGRESS_DROP_MODE, x) + +/* REW:PORT:PORT_VLAN_CFG */ +#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW,\ + 0, 1, 4096, g, 11, 128, 0, 0, 1, 4) + +#define REW_PORT_VLAN_CFG_PORT_TPID GENMASK(31, 16) +#define REW_PORT_VLAN_CFG_PORT_TPID_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_TPID, x) +#define REW_PORT_VLAN_CFG_PORT_TPID_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_TPID, x) + +#define REW_PORT_VLAN_CFG_PORT_DEI BIT(15) +#define REW_PORT_VLAN_CFG_PORT_DEI_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_DEI, x) +#define REW_PORT_VLAN_CFG_PORT_DEI_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_DEI, x) + +#define REW_PORT_VLAN_CFG_PORT_PCP GENMASK(14, 12) +#define REW_PORT_VLAN_CFG_PORT_PCP_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_PCP, x) +#define REW_PORT_VLAN_CFG_PORT_PCP_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_PCP, x) + +#define REW_PORT_VLAN_CFG_PORT_VID GENMASK(11, 0) +#define REW_PORT_VLAN_CFG_PORT_VID_SET(x)\ + FIELD_PREP(REW_PORT_VLAN_CFG_PORT_VID, x) +#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\ + FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x) + +/* REW:PORT:TAG_CFG */ +#define REW_TAG_CFG(g) __REG(TARGET_REW,\ + 0, 1, 4096, g, 11, 128, 4, 0, 1, 4) + +#define REW_TAG_CFG_TAG_CFG GENMASK(8, 7) +#define REW_TAG_CFG_TAG_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CFG_TAG_CFG, x) +#define REW_TAG_CFG_TAG_CFG_GET(x)\ + FIELD_GET(REW_TAG_CFG_TAG_CFG, x) + +#define REW_TAG_CFG_TAG_TPID_CFG GENMASK(6, 5) +#define REW_TAG_CFG_TAG_TPID_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CFG_TAG_TPID_CFG, x) +#define REW_TAG_CFG_TAG_TPID_CFG_GET(x)\ + FIELD_GET(REW_TAG_CFG_TAG_TPID_CFG, x) + +#define 
REW_TAG_CFG_TAG_VID_CFG BIT(4) +#define REW_TAG_CFG_TAG_VID_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CFG_TAG_VID_CFG, x) +#define REW_TAG_CFG_TAG_VID_CFG_GET(x)\ + FIELD_GET(REW_TAG_CFG_TAG_VID_CFG, x) + +#define REW_TAG_CFG_TAG_PCP_CFG GENMASK(3, 2) +#define REW_TAG_CFG_TAG_PCP_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CFG_TAG_PCP_CFG, x) +#define REW_TAG_CFG_TAG_PCP_CFG_GET(x)\ + FIELD_GET(REW_TAG_CFG_TAG_PCP_CFG, x) + +#define REW_TAG_CFG_TAG_DEI_CFG GENMASK(1, 0) +#define REW_TAG_CFG_TAG_DEI_CFG_SET(x)\ + FIELD_PREP(REW_TAG_CFG_TAG_DEI_CFG, x) +#define REW_TAG_CFG_TAG_DEI_CFG_GET(x)\ + FIELD_GET(REW_TAG_CFG_TAG_DEI_CFG, x) + +/* REW:PORT:PORT_CFG */ +#define REW_PORT_CFG(g) __REG(TARGET_REW,\ + 0, 1, 4096, g, 11, 128, 8, 0, 1, 4) + +#define REW_PORT_CFG_ES0_EN BIT(4) +#define REW_PORT_CFG_ES0_EN_SET(x)\ + FIELD_PREP(REW_PORT_CFG_ES0_EN, x) +#define REW_PORT_CFG_ES0_EN_GET(x)\ + FIELD_GET(REW_PORT_CFG_ES0_EN, x) + +#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG GENMASK(3, 2) +#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG_SET(x)\ + FIELD_PREP(REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG, x) +#define REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG_GET(x)\ + FIELD_GET(REW_PORT_CFG_FCS_UPDATE_NONCPU_CFG, x) + +#define REW_PORT_CFG_FCS_UPDATE_CPU_ENA BIT(1) +#define REW_PORT_CFG_FCS_UPDATE_CPU_ENA_SET(x)\ + FIELD_PREP(REW_PORT_CFG_FCS_UPDATE_CPU_ENA, x) +#define REW_PORT_CFG_FCS_UPDATE_CPU_ENA_GET(x)\ + FIELD_GET(REW_PORT_CFG_FCS_UPDATE_CPU_ENA, x) + +#define REW_PORT_CFG_NO_REWRITE BIT(0) +#define REW_PORT_CFG_NO_REWRITE_SET(x)\ + FIELD_PREP(REW_PORT_CFG_NO_REWRITE, x) +#define REW_PORT_CFG_NO_REWRITE_GET(x)\ + FIELD_GET(REW_PORT_CFG_NO_REWRITE, x) + +/* SYS:SYSTEM:RESET_CFG */ +#define SYS_RESET_CFG __REG(TARGET_SYS,\ + 0, 1, 4160, 0, 1, 184, 0, 0, 1, 4) + +#define SYS_RESET_CFG_CORE_ENA BIT(0) +#define SYS_RESET_CFG_CORE_ENA_SET(x)\ + FIELD_PREP(SYS_RESET_CFG_CORE_ENA, x) +#define SYS_RESET_CFG_CORE_ENA_GET(x)\ + FIELD_GET(SYS_RESET_CFG_CORE_ENA, x) + +/* SYS:SYSTEM:PORT_MODE */ +#define SYS_PORT_MODE(r) 
__REG(TARGET_SYS,\ + 0, 1, 4160, 0, 1, 184, 48, r, 11, 4) + +#define SYS_PORT_MODE_PRP_LANID BIT(8) +#define SYS_PORT_MODE_PRP_LANID_SET(x)\ + FIELD_PREP(SYS_PORT_MODE_PRP_LANID, x) +#define SYS_PORT_MODE_PRP_LANID_GET(x)\ + FIELD_GET(SYS_PORT_MODE_PRP_LANID, x) + +#define SYS_PORT_MODE_PRP_ENA BIT(7) +#define SYS_PORT_MODE_PRP_ENA_SET(x)\ + FIELD_PREP(SYS_PORT_MODE_PRP_ENA, x) +#define SYS_PORT_MODE_PRP_ENA_GET(x)\ + FIELD_GET(SYS_PORT_MODE_PRP_ENA, x) + +#define SYS_PORT_MODE_INCL_INJ_HDR GENMASK(6, 5) +#define SYS_PORT_MODE_INCL_INJ_HDR_SET(x)\ + FIELD_PREP(SYS_PORT_MODE_INCL_INJ_HDR, x) +#define SYS_PORT_MODE_INCL_INJ_HDR_GET(x)\ + FIELD_GET(SYS_PORT_MODE_INCL_INJ_HDR, x) + +#define SYS_PORT_MODE_INCL_XTR_HDR GENMASK(4, 3) +#define SYS_PORT_MODE_INCL_XTR_HDR_SET(x)\ + FIELD_PREP(SYS_PORT_MODE_INCL_XTR_HDR, x) +#define SYS_PORT_MODE_INCL_XTR_HDR_GET(x)\ + FIELD_GET(SYS_PORT_MODE_INCL_XTR_HDR, x) + +#define SYS_PORT_MODE_INJ_HDR_ERR BIT(2) +#define SYS_PORT_MODE_INJ_HDR_ERR_SET(x)\ + FIELD_PREP(SYS_PORT_MODE_INJ_HDR_ERR, x) +#define SYS_PORT_MODE_INJ_HDR_ERR_GET(x)\ + FIELD_GET(SYS_PORT_MODE_INJ_HDR_ERR, x) + +#define SYS_PORT_MODE_PAD_DIS BIT(1) +#define SYS_PORT_MODE_PAD_DIS_SET(x)\ + FIELD_PREP(SYS_PORT_MODE_PAD_DIS, x) +#define SYS_PORT_MODE_PAD_DIS_GET(x)\ + FIELD_GET(SYS_PORT_MODE_PAD_DIS, x) + +#define SYS_PORT_MODE_RTAG_CLEAR BIT(0) +#define SYS_PORT_MODE_RTAG_CLEAR_SET(x)\ + FIELD_PREP(SYS_PORT_MODE_RTAG_CLEAR, x) +#define SYS_PORT_MODE_RTAG_CLEAR_GET(x)\ + FIELD_GET(SYS_PORT_MODE_RTAG_CLEAR, x) + +/* SYS:SYSTEM:FRONT_PORT_MODE */ +#define SYS_FRONT_PORT_MODE(r) __REG(TARGET_SYS,\ + 0, 1, 4160, 0, 1, 184, 92, r, 9, 4) + +#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(1) +#define SYS_FRONT_PORT_MODE_HDX_MODE_SET(x)\ + FIELD_PREP(SYS_FRONT_PORT_MODE_HDX_MODE, x) +#define SYS_FRONT_PORT_MODE_HDX_MODE_GET(x)\ + FIELD_GET(SYS_FRONT_PORT_MODE_HDX_MODE, x) + +#define SYS_FRONT_PORT_MODE_ADD_FRAG_SIZE GENMASK(9, 8) +#define SYS_FRONT_PORT_MODE_ADD_FRAG_SIZE_SET(x)\ + 
FIELD_PREP(SYS_FRONT_PORT_MODE_ADD_FRAG_SIZE, x) +#define SYS_FRONT_PORT_MODE_ADD_FRAG_SIZE_GET(x)\ + FIELD_GET(SYS_FRONT_PORT_MODE_ADD_FRAG_SIZE, x) + +#define SYS_FRONT_PORT_MODE_DONT_WAIT_FOR_TS BIT(0) +#define SYS_FRONT_PORT_MODE_DONT_WAIT_FOR_TS_SET(x)\ + FIELD_PREP(SYS_FRONT_PORT_MODE_DONT_WAIT_FOR_TS, x) +#define SYS_FRONT_PORT_MODE_DONT_WAIT_FOR_TS_GET(x)\ + FIELD_GET(SYS_FRONT_PORT_MODE_DONT_WAIT_FOR_TS, x) + +/* SYS:SYSTEM:FRM_AGING */ +#define SYS_FRM_AGING __REG(TARGET_SYS,\ + 0, 1, 4160, 0, 1, 184, 128, 0, 1, 4) + +#define SYS_FRM_AGING_AGE_TX_ENA BIT(20) +#define SYS_FRM_AGING_AGE_TX_ENA_SET(x)\ + FIELD_PREP(SYS_FRM_AGING_AGE_TX_ENA, x) +#define SYS_FRM_AGING_AGE_TX_ENA_GET(x)\ + FIELD_GET(SYS_FRM_AGING_AGE_TX_ENA, x) + +#define SYS_FRM_AGING_MAX_AGE GENMASK(19, 0) +#define SYS_FRM_AGING_MAX_AGE_SET(x)\ + FIELD_PREP(SYS_FRM_AGING_MAX_AGE, x) +#define SYS_FRM_AGING_MAX_AGE_GET(x)\ + FIELD_GET(SYS_FRM_AGING_MAX_AGE, x) + +/* SYS:SYSTEM:STAT_CFG */ +#define SYS_STAT_CFG __REG(TARGET_SYS,\ + 0, 1, 4160, 0, 1, 184, 132, 0, 1, 4) + +#define SYS_STAT_CFG_STAT_CLEAR_SHOT GENMASK(16, 10) +#define SYS_STAT_CFG_STAT_CLEAR_SHOT_SET(x)\ + FIELD_PREP(SYS_STAT_CFG_STAT_CLEAR_SHOT, x) +#define SYS_STAT_CFG_STAT_CLEAR_SHOT_GET(x)\ + FIELD_GET(SYS_STAT_CFG_STAT_CLEAR_SHOT, x) + +#define SYS_STAT_CFG_STAT_VIEW GENMASK(9, 0) +#define SYS_STAT_CFG_STAT_VIEW_SET(x)\ + FIELD_PREP(SYS_STAT_CFG_STAT_VIEW, x) +#define SYS_STAT_CFG_STAT_VIEW_GET(x)\ + FIELD_GET(SYS_STAT_CFG_STAT_VIEW, x) + +/* SYS:SYSTEM:SW_STATUS */ +#define SYS_SW_STATUS(r) __REG(TARGET_SYS,\ + 0, 1, 4160, 0, 1, 184, 136, r, 10, 4) + +#define SYS_SW_STATUS_PORT_RX_PAUSED BIT(0) +#define SYS_SW_STATUS_PORT_RX_PAUSED_SET(x)\ + FIELD_PREP(SYS_SW_STATUS_PORT_RX_PAUSED, x) +#define SYS_SW_STATUS_PORT_RX_PAUSED_GET(x)\ + FIELD_GET(SYS_SW_STATUS_PORT_RX_PAUSED, x) + +/* SYS:PAUSE_CFG:PAUSE_CFG */ +#define SYS_PAUSE_CFG(r) __REG(TARGET_SYS,\ + 0, 1, 4344, 0, 1, 124, 0, r, 10, 4) + +#define SYS_PAUSE_CFG_PAUSE_START 
GENMASK(18, 10) +#define SYS_PAUSE_CFG_PAUSE_START_SET(x)\ + FIELD_PREP(SYS_PAUSE_CFG_PAUSE_START, x) +#define SYS_PAUSE_CFG_PAUSE_START_GET(x)\ + FIELD_GET(SYS_PAUSE_CFG_PAUSE_START, x) + +#define SYS_PAUSE_CFG_PAUSE_STOP GENMASK(9, 1) +#define SYS_PAUSE_CFG_PAUSE_STOP_SET(x)\ + FIELD_PREP(SYS_PAUSE_CFG_PAUSE_STOP, x) +#define SYS_PAUSE_CFG_PAUSE_STOP_GET(x)\ + FIELD_GET(SYS_PAUSE_CFG_PAUSE_STOP, x) + +#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0) +#define SYS_PAUSE_CFG_PAUSE_ENA_SET(x)\ + FIELD_PREP(SYS_PAUSE_CFG_PAUSE_ENA, x) +#define SYS_PAUSE_CFG_PAUSE_ENA_GET(x)\ + FIELD_GET(SYS_PAUSE_CFG_PAUSE_ENA, x) + +/* SYS:PAUSE_CFG:ATOP */ +#define SYS_ATOP(r) __REG(TARGET_SYS,\ + 0, 1, 4344, 0, 1, 124, 44, r, 10, 4) + +#define SYS_ATOP_ATOP GENMASK(8, 0) +#define SYS_ATOP_ATOP_SET(x)\ + FIELD_PREP(SYS_ATOP_ATOP, x) +#define SYS_ATOP_ATOP_GET(x)\ + FIELD_GET(SYS_ATOP_ATOP, x) + +/* SYS:PAUSE_CFG:ATOP_TOT_CFG */ +#define SYS_ATOP_TOT_CFG __REG(TARGET_SYS,\ + 0, 1, 4344, 0, 1, 124, 84, 0, 1, 4) + +#define SYS_ATOP_TOT_CFG_ATOP_TOT GENMASK(8, 0) +#define SYS_ATOP_TOT_CFG_ATOP_TOT_SET(x)\ + FIELD_PREP(SYS_ATOP_TOT_CFG_ATOP_TOT, x) +#define SYS_ATOP_TOT_CFG_ATOP_TOT_GET(x)\ + FIELD_GET(SYS_ATOP_TOT_CFG_ATOP_TOT, x) + +/* SYS:PAUSE_CFG:MAC_FC_CFG */ +#define SYS_MAC_FC_CFG(r) __REG(TARGET_SYS,\ + 0, 1, 4344, 0, 1, 124, 88, r, 9, 4) + +#define SYS_MAC_FC_CFG_FC_LINK_SPEED GENMASK(27, 26) +#define SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_FC_LINK_SPEED, x) +#define SYS_MAC_FC_CFG_FC_LINK_SPEED_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_FC_LINK_SPEED, x) + +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG GENMASK(25, 20) +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x) +#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_FC_LATENCY_CFG, x) + +#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18) +#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x) +#define 
SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_ZERO_PAUSE_ENA, x) + +#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17) +#define SYS_MAC_FC_CFG_TX_FC_ENA_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_TX_FC_ENA, x) +#define SYS_MAC_FC_CFG_TX_FC_ENA_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_TX_FC_ENA, x) + +#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16) +#define SYS_MAC_FC_CFG_RX_FC_ENA_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_RX_FC_ENA, x) +#define SYS_MAC_FC_CFG_RX_FC_ENA_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_RX_FC_ENA, x) + +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG GENMASK(15, 0) +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(x)\ + FIELD_PREP(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x) +#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_GET(x)\ + FIELD_GET(SYS_MAC_FC_CFG_PAUSE_VAL_CFG, x) + +/* SYS:STAT:CNT */ +#define SYS_CNT(g) __REG(TARGET_SYS,\ + 0, 1, 0, g, 896, 4, 0, 0, 1, 4) + +/* SYS:RAM_CTRL:RAM_INIT */ +#define SYS_RAM_INIT __REG(TARGET_SYS,\ + 0, 1, 4492, 0, 1, 4, 0, 0, 1, 4) + +#define SYS_RAM_INIT_RAM_TEST_OPT GENMASK(4, 2) +#define SYS_RAM_INIT_RAM_TEST_OPT_SET(x)\ + FIELD_PREP(SYS_RAM_INIT_RAM_TEST_OPT, x) +#define SYS_RAM_INIT_RAM_TEST_OPT_GET(x)\ + FIELD_GET(SYS_RAM_INIT_RAM_TEST_OPT, x) + +#define SYS_RAM_INIT_RAM_INIT BIT(1) +#define SYS_RAM_INIT_RAM_INIT_SET(x)\ + FIELD_PREP(SYS_RAM_INIT_RAM_INIT, x) +#define SYS_RAM_INIT_RAM_INIT_GET(x)\ + FIELD_GET(SYS_RAM_INIT_RAM_INIT, x) + +#define SYS_RAM_INIT_RAM_CFG_HOOK BIT(0) +#define SYS_RAM_INIT_RAM_CFG_HOOK_SET(x)\ + FIELD_PREP(SYS_RAM_INIT_RAM_CFG_HOOK, x) +#define SYS_RAM_INIT_RAM_CFG_HOOK_GET(x)\ + FIELD_GET(SYS_RAM_INIT_RAM_CFG_HOOK, x) + +/* SYS:PTPPORT:PTP_RXDLY_CFG */ +#define SYS_PTP_RXDLY_CFG(g) __REG(TARGET_SYS,\ + 0, 1, 4512, g, 11, 28, 8, 0, 1, 4) + +#define SYS_PTP_RXDLY_CFG_PTP_RX_IO_DLY GENMASK(23, 0) +#define SYS_PTP_RXDLY_CFG_PTP_RX_IO_DLY_SET(x)\ + FIELD_PREP(SYS_PTP_RXDLY_CFG_PTP_RX_IO_DLY, x) +#define SYS_PTP_RXDLY_CFG_PTP_RX_IO_DLY_GET(x)\ + FIELD_GET(SYS_PTP_RXDLY_CFG_PTP_RX_IO_DLY, x) + +/* SYS:PTPPORT:PTP_TXDLY_CFG */ +#define 
SYS_PTP_TXDLY_CFG(g) __REG(TARGET_SYS,\ + 0, 1, 4512, g, 11, 28, 12, 0, 1, 4) + +#define SYS_PTP_TXDLY_CFG_PTP_TX_IO_DLY GENMASK(23, 0) +#define SYS_PTP_TXDLY_CFG_PTP_TX_IO_DLY_SET(x)\ + FIELD_PREP(SYS_PTP_TXDLY_CFG_PTP_TX_IO_DLY, x) +#define SYS_PTP_TXDLY_CFG_PTP_TX_IO_DLY_GET(x)\ + FIELD_GET(SYS_PTP_TXDLY_CFG_PTP_TX_IO_DLY, x) + +#endif /* _LAN9645X_REGS_H_ */ -- 2.52.0 Add the LAN9645X basic DSA driver with initialization, parent regmap requests, port module initialization for NPI, CPU ports and front ports, and phylink integration for MAC side configuration. IPv6 is disabled on the conduit. When enabled, the RFC 4861 frames are dispatched directly on the conduit bypassing the tag driver. The switch parses these frames as if they have an IFH prepended, leading to a garbage in garbage out situation. Therefore, IPv6 on the conduit is not a sensible configuration. Reviewed-by: Steen Hegelund Signed-off-by: Jens Emil Schulz Østergaard --- drivers/net/dsa/microchip/Makefile | 1 + drivers/net/dsa/microchip/lan9645x/Kconfig | 12 + drivers/net/dsa/microchip/lan9645x/Makefile | 7 + drivers/net/dsa/microchip/lan9645x/lan9645x_main.c | 435 +++++++++++++++++ drivers/net/dsa/microchip/lan9645x/lan9645x_main.h | 396 +++++++++++++++ drivers/net/dsa/microchip/lan9645x/lan9645x_npi.c | 99 ++++ .../net/dsa/microchip/lan9645x/lan9645x_phylink.c | 537 +++++++++++++++++++++ drivers/net/dsa/microchip/lan9645x/lan9645x_port.c | 289 +++++++++++ drivers/net/ethernet/microchip/Kconfig | 1 + 9 files changed, 1777 insertions(+) diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile index 9347cfb3d0b5..e75f17888f75 100644 --- a/drivers/net/dsa/microchip/Makefile +++ b/drivers/net/dsa/microchip/Makefile @@ -12,3 +12,4 @@ endif obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C) += ksz9477_i2c.o obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_SPI) += ksz_spi.o obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI) += ksz8863_smi.o +obj-$(CONFIG_NET_DSA_MICROCHIP_LAN9645X) += lan9645x/ diff 
--git a/drivers/net/dsa/microchip/lan9645x/Kconfig b/drivers/net/dsa/microchip/lan9645x/Kconfig new file mode 100644 index 000000000000..8cbac1f9875d --- /dev/null +++ b/drivers/net/dsa/microchip/lan9645x/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +config NET_DSA_MICROCHIP_LAN9645X + tristate "Microchip Lan9645x switch support" + depends on NET_DSA + depends on NET_VENDOR_MICROCHIP + select NET_DSA_TAG_LAN9645X + help + This driver adds DSA support for Microchip Lan9645x switch chips. + The lan9645x switch is a multi-port Gigabit AVB/TSN Ethernet Switch + with five integrated 10/100/1000Base-T PHYs. In addition to the + integrated PHYs, it supports up to 2 RGMII/RMII, up to 2 + BASE-X/SERDES/2.5GBASE-X and one Quad-SGMII/Quad-USGMII interfaces. diff --git a/drivers/net/dsa/microchip/lan9645x/Makefile b/drivers/net/dsa/microchip/lan9645x/Makefile new file mode 100644 index 000000000000..eea1edc5c0e3 --- /dev/null +++ b/drivers/net/dsa/microchip/lan9645x/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_NET_DSA_MICROCHIP_LAN9645X) += mchp-lan9645x.o + +mchp-lan9645x-objs := lan9645x_main.o \ + lan9645x_npi.o \ + lan9645x_port.o \ + lan9645x_phylink.o \ diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c new file mode 100644 index 000000000000..739013f049d0 --- /dev/null +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2026 Microchip Technology Inc. 
+ */ + +#include +#include +#include + +#include "lan9645x_main.h" + +static const char *lan9645x_resource_names[NUM_TARGETS + 1] = { + [TARGET_GCB] = "gcb", + [TARGET_QS] = "qs", + [TARGET_CHIP_TOP] = "chip_top", + [TARGET_REW] = "rew", + [TARGET_SYS] = "sys", + [TARGET_HSIO] = "hsio", + [TARGET_DEV] = "dev", + [TARGET_DEV + 1] = "dev1", + [TARGET_DEV + 2] = "dev2", + [TARGET_DEV + 3] = "dev3", + [TARGET_DEV + 4] = "dev4", + [TARGET_DEV + 5] = "dev5", + [TARGET_DEV + 6] = "dev6", + [TARGET_DEV + 7] = "dev7", + [TARGET_DEV + 8] = "dev8", + [TARGET_QSYS] = "qsys", + [TARGET_AFI] = "afi", + [TARGET_ANA] = "ana", + [NUM_TARGETS] = NULL, +}; + +static int lan9645x_tag_npi_setup(struct dsa_switch *ds) +{ + struct dsa_port *dp, *first_cpu_dp = NULL; + struct lan9645x *lan9645x = ds->priv; + + dsa_switch_for_each_user_port(dp, ds) { + if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) { + dev_err(ds->dev, "Multiple NPI ports not supported\n"); + return -EINVAL; + } + + first_cpu_dp = dp->cpu_dp; + } + + if (!first_cpu_dp) + return -EINVAL; + + lan9645x_npi_port_init(lan9645x, first_cpu_dp); + + return 0; +} + +static enum dsa_tag_protocol lan9645x_get_tag_protocol(struct dsa_switch *ds, + int port, + enum dsa_tag_protocol tp) +{ + struct lan9645x *lan9645x = ds->priv; + + return lan9645x->tag_proto; +} + +static int lan9645x_connect_tag_protocol(struct dsa_switch *ds, + enum dsa_tag_protocol proto) +{ + switch (proto) { + case DSA_TAG_PROTO_LAN9645X: + return 0; + default: + return -EPROTONOSUPPORT; + } +} + +static void lan9645x_teardown(struct dsa_switch *ds) +{ + struct lan9645x *lan9645x = ds->priv; + + debugfs_remove_recursive(lan9645x->debugfs_root); + lan9645x_npi_port_deinit(lan9645x, lan9645x->npi); +} + +static int lan9645x_change_mtu(struct dsa_switch *ds, int port, int new_mtu) +{ + return lan9645x_port_set_maxlen(ds->priv, port, new_mtu); +} + +static int lan9645x_get_max_mtu(struct dsa_switch *ds, int port) +{ + struct lan9645x *lan9645x = ds->priv; + int 
max_mtu; + + /* Actual MAC max MTU is around 16KB. We set 10000 - overhead which + * should be sufficient for all jumbo frames. Larger frames can cause + * problems especially with flow control, since we only have 160K queue + * buffer. + */ + max_mtu = 10000 - ETH_HLEN - ETH_FCS_LEN; + + if (port == lan9645x->npi) { + max_mtu -= LAN9645X_IFH_LEN; + max_mtu -= LAN9645X_LONG_PREFIX_LEN; + } + + return max_mtu; +} + +static int lan9645x_reset_switch(struct lan9645x *lan9645x) +{ + int val = 0; + int err; + + lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan9645x, SYS_RESET_CFG); + lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan9645x, SYS_RAM_INIT); + err = lan9645x_rd_poll_timeout(lan9645x, SYS_RAM_INIT, val, + SYS_RAM_INIT_RAM_INIT_GET(val) == 0); + if (err) { + dev_err(lan9645x->dev, "Lan9645x setup: failed to init chip RAM."); + return err; + } + lan_wr(SYS_RESET_CFG_CORE_ENA_SET(1), lan9645x, SYS_RESET_CFG); + + return 0; +} + +static int lan9645x_setup(struct dsa_switch *ds) +{ + struct lan9645x *lan9645x = ds->priv; + struct device *dev = lan9645x->dev; + struct dsa_port *dp; + u32 all_phys_ports; + int err = 0; + + lan9645x->num_phys_ports = ds->num_ports; + all_phys_ports = GENMASK(lan9645x->num_phys_ports - 1, 0); + + err = lan9645x_reset_switch(lan9645x); + if (err) + return err; + + lan9645x->debugfs_root = debugfs_create_dir("lan9645x_sw", NULL); + + lan9645x->ports = devm_kcalloc(lan9645x->dev, lan9645x->num_phys_ports, + sizeof(struct lan9645x_port *), + GFP_KERNEL); + if (!lan9645x->ports) + return -ENOMEM; + + for (int port = 0; port < lan9645x->num_phys_ports; port++) { + struct lan9645x_port *p; + + p = devm_kzalloc(lan9645x->dev, + sizeof(struct lan9645x_port), GFP_KERNEL); + if (!p) + return -ENOMEM; + + p->lan9645x = lan9645x; + p->chip_port = port; + lan9645x->ports[port] = p; + } + + err = lan9645x_port_parse_ports_node(lan9645x); + if (err) { + dev_err(dev, "Lan9645x setup: failed to parse ports node."); + return err; + } + + err = 
lan9645x_tag_npi_setup(ds); + if (err) { + dev_err(dev, "Lan9645x setup: failed to setup NPI port.\n"); + return err; + } + + /* Link Aggregation Mode: NETDEV_LAG_HASH_L2 */ + lan_wr(ANA_AGGR_CFG_AC_SMAC_ENA | + ANA_AGGR_CFG_AC_DMAC_ENA, + lan9645x, ANA_AGGR_CFG); + + /* Flush queues */ + lan_wr(GENMASK(1, 0), lan9645x, QS_XTR_FLUSH); + + /* Allow to drain */ + mdelay(1); + + /* All Queues normal */ + lan_wr(0x0, lan9645x, QS_XTR_FLUSH); + + /* Set MAC age time to default value, the entry is aged after + * 2 * AGE_PERIOD + */ + lan_wr(ANA_AUTOAGE_AGE_PERIOD_SET(BR_DEFAULT_AGEING_TIME / 2 / HZ), + lan9645x, ANA_AUTOAGE); + + /* Disable learning for frames discarded by VLAN ingress filtering */ + lan_rmw(ANA_ADVLEARN_VLAN_CHK_SET(1), + ANA_ADVLEARN_VLAN_CHK, + lan9645x, ANA_ADVLEARN); + + /* Queue system frame ageing. We target 2s ageing. + * + * Register unit is 1024 cycles. + * + * ASIC: 165.625 Mhz ~ 6.0377 ns period + * + * 1024 * 6.0377 ns =~ 6182 ns + * val = 2000000000ns / 6182ns + */ + lan_wr(SYS_FRM_AGING_AGE_TX_ENA_SET(1) | + SYS_FRM_AGING_MAX_AGE_SET((2000000000 / 6182)), + lan9645x, SYS_FRM_AGING); + + /* Setup flooding PGIDs for IPv4/IPv6 multicast. Control and dataplane + * use the same masks. Control frames are redirected to CPU, and + * the network stack is responsible for forwarding these. + * The dataplane is forwarding according to the offloaded MDB entries. + */ + lan_wr(ANA_FLOODING_IPMC_FLD_MC4_DATA_SET(PGID_MCIPV4) | + ANA_FLOODING_IPMC_FLD_MC4_CTRL_SET(PGID_MC) | + ANA_FLOODING_IPMC_FLD_MC6_DATA_SET(PGID_MCIPV6) | + ANA_FLOODING_IPMC_FLD_MC6_CTRL_SET(PGID_MC), + lan9645x, ANA_FLOODING_IPMC); + + /* There are 8 priorities */ + for (int prio = 0; prio < 8; ++prio) + lan_wr(ANA_FLOODING_FLD_MULTICAST_SET(PGID_MC) | + ANA_FLOODING_FLD_UNICAST_SET(PGID_UC) | + ANA_FLOODING_FLD_BROADCAST_SET(PGID_BC), + lan9645x, ANA_FLOODING(prio)); + + /* Set all the entries to obey VLAN. 
*/ + for (int i = 0; i < PGID_ENTRIES; ++i) + lan_wr(ANA_PGID_CFG_OBEY_VLAN_SET(1), + lan9645x, ANA_PGID_CFG(i)); + + /* Disable bridging by default */ + for (int p = 0; p < lan9645x->num_phys_ports; p++) { + lan_wr(0, lan9645x, ANA_PGID(PGID_SRC + p)); + + /* Do not forward BPDU frames to the front ports and copy them + * to CPU + */ + lan_wr(ANA_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA, + lan9645x, ANA_CPU_FWD_BPDU_CFG(p)); + } + + /* Set source buffer size for each priority and port to ~1700 bytes */ + for (int i = 0; i <= QSYS_Q_RSRV; ++i) { + lan_wr(QS_SRC_BUF_RSV / 64, lan9645x, QSYS_RES_CFG(i)); + lan_wr(QS_SRC_BUF_RSV / 64, lan9645x, QSYS_RES_CFG(512 + i)); + } + + /* Configure and enable the CPU port */ + lan9645x_port_cpu_init(lan9645x); + + /* Multicast to all front ports */ + lan_wr(all_phys_ports, lan9645x, ANA_PGID(PGID_MC)); + + /* IP multicast to all front ports */ + lan_wr(all_phys_ports, lan9645x, ANA_PGID(PGID_MCIPV4)); + lan_wr(all_phys_ports, lan9645x, ANA_PGID(PGID_MCIPV6)); + + /* Unicast to all front ports */ + lan_wr(all_phys_ports, lan9645x, ANA_PGID(PGID_UC)); + + /* Broadcast to all ports */ + lan_wr(BIT(CPU_PORT) | all_phys_ports, lan9645x, ANA_PGID(PGID_BC)); + + dsa_switch_for_each_available_port(dp, ds) + lan9645x_port_init(lan9645x, dp->index); + + lan9645x_port_set_tail_drop_wm(lan9645x); + + ds->mtu_enforcement_ingress = true; + ds->assisted_learning_on_cpu_port = true; + ds->fdb_isolation = true; + + dev_info(lan9645x->dev, + "Setup complete. 
SKU features: tsn_dis=%d hsr_dis=%d max_ports=%d", + lan9645x->tsn_dis, lan9645x->dd_dis, + lan9645x->num_phys_ports - lan9645x->num_port_dis); + + return 0; +} + +static void lan9645x_port_phylink_get_caps(struct dsa_switch *ds, int port, + struct phylink_config *config) +{ + lan9645x_phylink_get_caps(ds->priv, port, config); +} + +static const struct dsa_switch_ops lan9645x_switch_ops = { + .get_tag_protocol = lan9645x_get_tag_protocol, + .connect_tag_protocol = lan9645x_connect_tag_protocol, + + .setup = lan9645x_setup, + .teardown = lan9645x_teardown, + + /* Phylink integration */ + .phylink_get_caps = lan9645x_port_phylink_get_caps, + + /* MTU */ + .port_change_mtu = lan9645x_change_mtu, + .port_max_mtu = lan9645x_get_max_mtu, +}; + +static int lan9645x_request_target_regmaps(struct lan9645x *lan9645x) +{ + const char *resource_name; + struct regmap *tgt_map; + + for (int i = 0; i < NUM_TARGETS; i++) { + resource_name = lan9645x_resource_names[i]; + if (!resource_name) + continue; + + tgt_map = dev_get_regmap(lan9645x->dev->parent, resource_name); + if (IS_ERR_OR_NULL(tgt_map)) { + dev_err(lan9645x->dev, "Failed to get regmap=%d", i); + return -ENODEV; + } + + lan9645x->rmap[i] = tgt_map; + } + + return 0; +} + +static void lan9645x_set_feat_dis(struct lan9645x *lan9645x) +{ + u32 feat_dis; + + /* The features which can be physically disabled on some SKUs are: + * 1) Number of ports can be 5, 7 or 9. Any ports can be used, the chip + * tracks how many are active. + * 2) HSR/PRP. The duplicate discard table can be disabled. + * 3) TAS, frame preemption and PSFP can be disabled. 
+ */ + feat_dis = lan_rd(lan9645x, GCB_FEAT_DISABLE); + + lan9645x->num_port_dis = + GCB_FEAT_DISABLE_FEAT_NUM_PORTS_DIS_GET(feat_dis); + lan9645x->dd_dis = GCB_FEAT_DISABLE_FEAT_DD_DIS_GET(feat_dis); + lan9645x->tsn_dis = GCB_FEAT_DISABLE_FEAT_TSN_DIS_GET(feat_dis); +} + +static int lan9645x_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct lan9645x *lan9645x; + struct dsa_switch *ds; + int err = 0; + + lan9645x = devm_kzalloc(dev, sizeof(*lan9645x), GFP_KERNEL); + if (!lan9645x) + return dev_err_probe(dev, -ENOMEM, + "Failed to allocate LAN9645X"); + + dev_set_drvdata(dev, lan9645x); + lan9645x->dev = dev; + + err = lan9645x_request_target_regmaps(lan9645x); + if (err) + return dev_err_probe(dev, err, "Failed to request regmaps"); + + ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); + if (!ds) + return dev_err_probe(dev, -ENOMEM, + "Failed to allocate DSA switch"); + + ds->dev = dev; + ds->num_ports = NUM_PHYS_PORTS; + ds->num_tx_queues = NUM_PRIO_QUEUES; + ds->dscp_prio_mapping_is_global = true; + + ds->ops = &lan9645x_switch_ops; + ds->phylink_mac_ops = &lan9645x_phylink_mac_ops; + ds->priv = lan9645x; + + lan9645x->ds = ds; + lan9645x->tag_proto = DSA_TAG_PROTO_LAN9645X; + lan9645x->shared_queue_sz = LAN9645X_BUFFER_MEMORY; + + lan9645x_set_feat_dis(lan9645x); + + err = dsa_register_switch(ds); + if (err) + return dev_err_probe(dev, err, "Failed to register DSA switch"); + + return 0; +} + +static void lan9645x_remove(struct platform_device *pdev) +{ + struct lan9645x *lan9645x = dev_get_drvdata(&pdev->dev); + + if (!lan9645x) + return; + + /* Calls lan9645x DSA .teardown */ + dsa_unregister_switch(lan9645x->ds); + dev_set_drvdata(&pdev->dev, NULL); +} + +static void lan9645x_shutdown(struct platform_device *pdev) +{ + struct lan9645x *lan9645x = dev_get_drvdata(&pdev->dev); + + if (!lan9645x) + return; + + dsa_switch_shutdown(lan9645x->ds); + + dev_set_drvdata(&pdev->dev, NULL); +} + +static const struct of_device_id 
lan9645x_switch_of_match[] = { + { .compatible = "microchip,lan96455s-switch" }, + {}, +}; +MODULE_DEVICE_TABLE(of, lan9645x_switch_of_match); + +static struct platform_driver lan9645x_switch_driver = { + .driver = { + .name = "lan96455s-switch", + .of_match_table = lan9645x_switch_of_match, + }, + .probe = lan9645x_probe, + .remove = lan9645x_remove, + .shutdown = lan9645x_shutdown, +}; +module_platform_driver(lan9645x_switch_driver); + +MODULE_DESCRIPTION("Lan9645x Switch Driver"); +MODULE_AUTHOR("Jens Emil Schulz Østergaard "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h new file mode 100644 index 000000000000..a51b637f28bf --- /dev/null +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h @@ -0,0 +1,396 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2026 Microchip Technology Inc. + */ + +#ifndef __LAN9645X_MAIN_H__ +#define __LAN9645X_MAIN_H__ + +#include +#include +#include + +#include "lan9645x_regs.h" + +#define lan9645x_for_each_chipport(_lan9645x, _i) \ + for ((_i) = 0; (_i) < (_lan9645x)->num_phys_ports; (_i)++) + +/* Ports 0-8 are front ports + * Ports 9-10 are CPU ports + * + * CPU ports are logical ports in the chip intended for management. The frame + * delivery mechanism can vary: direct register injection/extraction or one can + * use a front port as CPU port, called a Node Processor Interface (NPI). + * + * This is the approach used by LAN9645X. + */ +#define NUM_PHYS_PORTS 9 +#define CPU_PORT 9 +#define NUM_PRIO_QUEUES 8 +#define LAN9645X_NUM_TC 8 + +#define QS_SRC_BUF_RSV 1700 + +/* Reserved amount for (SRC, PRIO) at index 8*SRC + PRIO + * See QSYS:RES_CTRL[*]:RES_CFG description + */ +#define QSYS_Q_RSRV 95 + +#define LAN9645X_ISDX_MAX 128 +#define LAN9645X_ESDX_MAX 128 +#define LAN9645X_SFID_MAX 128 + +/* Reserved VLAN IDs. 
*/ +#define UNAWARE_PVID 0 +#define HOST_PVID 4095 +#define VLAN_HSR_PRP 4094 +#define VLAN_MAX (VLAN_HSR_PRP - 1) + +/* VLAN flags for VLAN table defined in ANA_VLANTIDX */ +#define VLAN_SRC_CHK 0x01 +#define VLAN_MIR 0x02 +#define VLAN_LRN_DIS 0x04 +#define VLAN_PRV_VLAN 0x08 +#define VLAN_FLD_DIS 0x10 +#define VLAN_S_FWD_ENA 0x20 + +/* 160KiB / 1.25Mbit */ +#define LAN9645X_BUFFER_MEMORY (160 * 1024) + +/* Port Group Identifiers (PGID) are port-masks applied to all frames. + * The replicated registers are organized like so in HW: + * + * 0-63: Destination analysis + * 64-79: Aggregation analysis + * 80-(80+10-1): Source port analysis + * + * Destination: By default the first 9 port masks == BIT(port_num). Never change + * these except for aggregation. Remaining dst masks are for L2 MC and + * flooding. (See FLOODING and FLOODING_IPMC). + * + * Aggregation: Used to pick a port within an aggregation group. If no + * aggregation is configured, these are all-ones. + * + * Source: Control which ports a given source port can forward to. A frame that + * is received on port n, uses mask 80+n as a mask to filter out destination + * ports. The default values are that all bits are set except for the index + * number (no loopback). + * + * We reserve destination PGIDs at the end of the range. + */ + +#define PGID_AGGR 64 +#define PGID_SRC 80 +#define PGID_ENTRIES 89 + +#define PGID_AGGR_NUM (PGID_SRC - PGID_AGGR) + +/* General purpose PGIDs. */ +#define PGID_GP_START CPU_PORT +#define PGID_GP_END PGID_MRP + +/* Reserved PGIDs. 
+ * PGID_MRP is a blackhole PGID + */ +#define PGID_MRP (PGID_AGGR - 7) +#define PGID_CPU (PGID_AGGR - 6) +#define PGID_UC (PGID_AGGR - 5) +#define PGID_BC (PGID_AGGR - 4) +#define PGID_MC (PGID_AGGR - 3) +#define PGID_MCIPV4 (PGID_AGGR - 2) +#define PGID_MCIPV6 (PGID_AGGR - 1) + +/* Flooding PGIDS: + * PGID_UC + * PGID_MC* + * PGID_BC + * + */ + +#define RD_SLEEP_US 3 +#define RD_SLEEPTIMEOUT_US 100000 +#define SLOW_RD_SLEEP_US 1000 +#define SLOW_RD_SLEEPTIMEOUT_US 2000000 + +#define lan9645x_rd_poll_timeout(_lan9645x, _reg_macro, _val, _cond) \ + regmap_read_poll_timeout(lan_rmap((_lan9645x), _reg_macro), \ + lan_rel_addr(_reg_macro), (_val), \ + (_cond), RD_SLEEP_US, RD_SLEEPTIMEOUT_US) + +#define lan9645x_rd_poll_slow(_lan9645x, _reg_macro, _val, _cond) \ + regmap_read_poll_timeout(lan_rmap((_lan9645x), _reg_macro), \ + lan_rel_addr(_reg_macro), (_val), \ + (_cond), SLOW_RD_SLEEP_US, \ + SLOW_RD_SLEEPTIMEOUT_US) + +/* NPI port prefix config encoding + * + * 0: No CPU extraction header (normal frames) + * 1: CPU extraction header without prefix + * 2: CPU extraction header with short prefix + * 3: CPU extraction header with long prefix + */ +enum lan9645x_tag_prefix { + LAN9645X_TAG_PREFIX_DISABLED = 0, + LAN9645X_TAG_PREFIX_NONE = 1, + LAN9645X_TAG_PREFIX_SHORT = 2, + LAN9645X_TAG_PREFIX_LONG = 3, +}; + +enum { + LAN9645X_SPEED_DISABLED = 0, + LAN9645X_SPEED_10 = 1, + LAN9645X_SPEED_100 = 2, + LAN9645X_SPEED_1000 = 3, + LAN9645X_SPEED_2500 = 4, +}; + +/* Rewriter VLAN port tagging encoding for REW:PORT[0-10]:TAG_CFG.TAG_CFG + * + * 0: Port tagging disabled. + * 1: Tag all frames, except when VID=PORT_VLAN_CFG.PORT_VID or VID=0. + * 2: Tag all frames, except when VID=0. + * 3: Tag all frames. 
+ */ +enum lan9645x_vlan_port_tag { + LAN9645X_TAG_DISABLED = 0, + LAN9645X_TAG_NO_PVID_NO_UNAWARE = 1, + LAN9645X_TAG_NO_UNAWARE = 2, + LAN9645X_TAG_ALL = 3, +}; + +struct lan9645x { + struct device *dev; + struct dsa_switch *ds; + enum dsa_tag_protocol tag_proto; + struct regmap *rmap[NUM_TARGETS]; + + int shared_queue_sz; + + /* NPI chip_port */ + int npi; + + u8 num_phys_ports; + struct lan9645x_port **ports; + + /* debugfs */ + struct dentry *debugfs_root; + + int num_port_dis; + bool dd_dis; + bool tsn_dis; +}; + +struct lan9645x_port { + struct lan9645x *lan9645x; + const char *name; + + u16 pvid; + u16 untagged_vid; + u8 chip_port; + u8 stp_state; + bool vlan_aware; + bool learn_ena; + + phy_interface_t phy_mode; + + int speed; /* internal speed value LAN9645X_SPEED_* */ + u8 duplex; + struct list_head path_delays; + u32 rx_delay; +}; + +struct lan9645x_path_delay { + struct list_head list; + u32 rx_delay; + u32 tx_delay; + u32 speed; +}; + +extern const struct phylink_mac_ops lan9645x_phylink_mac_ops; + +/* PFC_CFG.FC_LINK_SPEED encoding */ +static inline int lan9645x_speed_fc_enc(int speed) +{ + switch (speed) { + case LAN9645X_SPEED_10: + return 3; + case LAN9645X_SPEED_100: + return 2; + case LAN9645X_SPEED_1000: + return 1; + case LAN9645X_SPEED_2500: + return 0; + default: + WARN_ON_ONCE(1); + return 1; + } +} + +/* Watermark encode. See QSYS:RES_CTRL[*]:RES_CFG.WM_HIGH for details. + * Returns lowest encoded number which will fit request/ is larger than request. + * Or the maximum representable value, if request is too large. 
+ */ +static inline u32 lan9645x_wm_enc(u32 value) +{ +#define GWM_MULTIPLIER_BIT BIT(8) +#define LAN9645X_BUFFER_CELL_SZ 64 + value = DIV_ROUND_UP(value, LAN9645X_BUFFER_CELL_SZ); + + if (value >= GWM_MULTIPLIER_BIT) { + value = DIV_ROUND_UP(value, 16); + if (value >= GWM_MULTIPLIER_BIT) + value = (GWM_MULTIPLIER_BIT - 1); + value |= GWM_MULTIPLIER_BIT; + } + + return value; +} + +static inline struct lan9645x_port *lan9645x_to_port(struct lan9645x *lan9645x, + int port) +{ + if (WARN_ON(!(port >= 0 && port < lan9645x->num_phys_ports))) + return NULL; + + return lan9645x->ports[port]; +} + +static inline struct net_device *lan9645x_port_to_ndev(struct lan9645x_port *p) +{ + struct lan9645x *lan9645x = p->lan9645x; + struct dsa_port *dp; + + dp = dsa_to_port(lan9645x->ds, p->chip_port); + if (dp && dp->type == DSA_PORT_TYPE_USER) + return dp->user; + + return NULL; +} + +static inline struct net_device * +lan9645x_chipport_to_ndev(struct lan9645x *lan9645x, int port) +{ + return lan9645x_port_to_ndev(lan9645x_to_port(lan9645x, port)); +} + +static inline bool lan9645x_port_is_used(struct lan9645x *lan9645x, int port) +{ + struct dsa_port *dp; + + dp = dsa_to_port(lan9645x->ds, port); + if (!dp) + return false; + + return dp->type != DSA_PORT_TYPE_UNUSED; +} + +static inline struct regmap *lan_tgt2rmap(struct lan9645x *lan9645x, + enum lan9645x_target t, int tinst) +{ + WARN_ON_ONCE(!lan9645x->rmap[t + tinst]); + return lan9645x->rmap[t + tinst]; +} + +static inline u32 __lan_rel_addr(int gbase, int ginst, int gcnt, + int gwidth, int raddr, int rinst, + int rcnt, int rwidth) +{ + WARN_ON(ginst >= gcnt); + WARN_ON(rinst >= rcnt); + return gbase + ginst * gwidth + raddr + rinst * rwidth; +} + +/* Get register address relative to target instance */ +static inline u32 lan_rel_addr(enum lan9645x_target t, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + WARN_ON(tinst >= tcnt); + return 
__lan_rel_addr(gbase, ginst, gcnt, gwidth, raddr, rinst, + rcnt, rwidth); +} + +static inline u32 lan_rd(struct lan9645x *lan9645x, enum lan9645x_target t, + int tinst, int tcnt, int gbase, int ginst, + int gcnt, int gwidth, int raddr, int rinst, + int rcnt, int rwidth) +{ + u32 addr, val = 0; + + addr = lan_rel_addr(t, tinst, tcnt, gbase, ginst, gcnt, gwidth, + raddr, rinst, rcnt, rwidth); + + WARN_ON_ONCE(regmap_read(lan_tgt2rmap(lan9645x, t, tinst), addr, &val)); + + return val; +} + +static inline int lan_bulk_rd(void *val, size_t val_count, + struct lan9645x *lan9645x, + enum lan9645x_target t, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + u32 addr; + + addr = lan_rel_addr(t, tinst, tcnt, gbase, ginst, gcnt, gwidth, + raddr, rinst, rcnt, rwidth); + + return regmap_bulk_read(lan_tgt2rmap(lan9645x, t, tinst), addr, val, + val_count); +} + +static inline struct regmap *lan_rmap(struct lan9645x *lan9645x, + enum lan9645x_target t, int tinst, + int tcnt, int gbase, int ginst, + int gcnt, int gwidth, int raddr, + int rinst, int rcnt, int rwidth) +{ + return lan_tgt2rmap(lan9645x, t, tinst); +} + +static inline void lan_wr(u32 val, struct lan9645x *lan9645x, + enum lan9645x_target t, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + u32 addr; + + addr = lan_rel_addr(t, tinst, tcnt, gbase, ginst, gcnt, gwidth, + raddr, rinst, rcnt, rwidth); + + WARN_ON_ONCE(regmap_write(lan_tgt2rmap(lan9645x, t, tinst), addr, val)); +} + +static inline void lan_rmw(u32 val, u32 mask, struct lan9645x *lan9645x, + enum lan9645x_target t, int tinst, int tcnt, + int gbase, int ginst, int gcnt, int gwidth, + int raddr, int rinst, int rcnt, int rwidth) +{ + u32 addr; + + addr = lan_rel_addr(t, tinst, tcnt, gbase, ginst, gcnt, gwidth, + raddr, rinst, rcnt, rwidth); + + WARN_ON_ONCE(regmap_update_bits(lan_tgt2rmap(lan9645x, t, tinst), + addr, mask, val)); 
+} + +/* lan9645x_npi.c */ +void lan9645x_npi_port_init(struct lan9645x *lan9645x, + struct dsa_port *cpu_port); +void lan9645x_npi_port_deinit(struct lan9645x *lan9645x, int port); + +/* lan9645x_port.c */ +int lan9645x_port_init(struct lan9645x *lan9645x, int port); +void lan9645x_port_cpu_init(struct lan9645x *lan9645x); +void lan9645x_port_set_tail_drop_wm(struct lan9645x *lan9645x); +int lan9645x_port_set_maxlen(struct lan9645x *lan9645x, int port, size_t sdu); +int lan9645x_port_parse_ports_node(struct lan9645x *lan9645x); + +/* lan9645x_phylink.c */ +void lan9645x_phylink_get_caps(struct lan9645x *lan9645x, int port, + struct phylink_config *c); +void lan9645x_phylink_port_down(struct lan9645x *lan9645x, int port); + +#endif /* __LAN9645X_MAIN_H__ */ diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_npi.c b/drivers/net/dsa/microchip/lan9645x/lan9645x_npi.c new file mode 100644 index 000000000000..0ae8b9acb916 --- /dev/null +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_npi.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2026 Microchip Technology Inc. + */ +#include + +#include "lan9645x_main.h" + +static void disable_conduit_ipv6(struct lan9645x *lan9645x, + struct net_device *conduit) +{ + struct inet6_dev *dev_v6; + + if (!conduit) + return; + + /* IPv6 on the conduit will generate frames bypassing our tag driver, so + * they lack an IFH. This will be garbage in garbage out and we want to + * avoid this. 
+ */ + rtnl_lock(); + dev_v6 = __in6_dev_get(conduit); + if (dev_v6) { + WRITE_ONCE(dev_v6->cnf.disable_ipv6, 1); + dev_warn(lan9645x->dev, "Disabled IPv6 on conduit device: %s\n", + netdev_name(conduit)); + } + rtnl_unlock(); +} + +void lan9645x_npi_port_init(struct lan9645x *lan9645x, + struct dsa_port *cpu_port) +{ + int port = cpu_port->index; + struct lan9645x_port *p; + + p = lan9645x_to_port(lan9645x, port); + lan9645x->npi = port; + + dev_dbg(lan9645x->dev, "NPI port=%d\n", port); + + /* Enabling IPv6 on the conduit will send frames directly on the + * interface, without being intercepted by our tag driver. This causes a + * GIGO situation. + */ + disable_conduit_ipv6(lan9645x, cpu_port->conduit); + + /* Any CPU extraction queue frames are sent to external CPU on given + * port. Never send injected frames back to cpu. + */ + lan_wr(QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK | + QSYS_EXT_CPU_CFG_EXT_CPU_PORT_SET(p->chip_port) | + QSYS_EXT_CPU_CFG_EXT_CPU_KILL_ENA_SET(1) | + QSYS_EXT_CPU_CFG_INT_CPU_KILL_ENA_SET(1), + lan9645x, QSYS_EXT_CPU_CFG); + + /* Configure IFH prefix mode for NPI port. */ + lan_rmw(SYS_PORT_MODE_INCL_XTR_HDR_SET(LAN9645X_TAG_PREFIX_LONG) | + SYS_PORT_MODE_INCL_INJ_HDR_SET(LAN9645X_TAG_PREFIX_NONE), + SYS_PORT_MODE_INCL_XTR_HDR | + SYS_PORT_MODE_INCL_INJ_HDR, + lan9645x, + SYS_PORT_MODE(p->chip_port)); + + /* Rewriting and extraction with IFH do not play nicely together. A VLAN + * tag pushed into the frame by REW will cause 4 bytes at the end of the + * extraction header to be overwritten with the top 4 bytes of the DMAC. + * + * We cannot use REW_PORT_CFG_NO_REWRITE=1 as that disables the RTAGD + * setting in the IFH. + */ + lan_rmw(REW_TAG_CFG_TAG_CFG_SET(LAN9645X_TAG_DISABLED), + REW_TAG_CFG_TAG_CFG, lan9645x, REW_TAG_CFG(port)); + + /* Make sure frames with src_port=CPU_PORT are not reflected back via + * the NPI port. This could happen if a frame is flooded for instance. 
+ * The *_CPU_KILL_ENA flags above only have an effect when a frame is + * output due to a CPU forwarding decision such as trapping or cpu copy. + */ + lan_rmw(0, BIT(port), lan9645x, ANA_PGID(PGID_SRC + CPU_PORT)); +} + +void lan9645x_npi_port_deinit(struct lan9645x *lan9645x, int port) +{ + struct lan9645x_port *p = lan9645x_to_port(lan9645x, port); + + lan9645x->npi = -1; + + lan_wr(QSYS_EXT_CPU_CFG_EXT_CPU_PORT_SET(0x1f) | + QSYS_EXT_CPU_CFG_EXT_CPU_KILL_ENA_SET(1) | + QSYS_EXT_CPU_CFG_INT_CPU_KILL_ENA_SET(1), + lan9645x, QSYS_EXT_CPU_CFG); + + lan_rmw(SYS_PORT_MODE_INCL_XTR_HDR_SET(LAN9645X_TAG_PREFIX_DISABLED) | + SYS_PORT_MODE_INCL_INJ_HDR_SET(LAN9645X_TAG_PREFIX_DISABLED), + SYS_PORT_MODE_INCL_XTR_HDR | + SYS_PORT_MODE_INCL_INJ_HDR, + lan9645x, + SYS_PORT_MODE(p->chip_port)); +} diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_phylink.c b/drivers/net/dsa/microchip/lan9645x/lan9645x_phylink.c new file mode 100644 index 000000000000..3acc48f12fae --- /dev/null +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_phylink.c @@ -0,0 +1,537 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2026 Microchip Technology Inc. + */ + +#include +#include + +#include "lan9645x_main.h" + +void lan9645x_phylink_get_caps(struct lan9645x *lan9645x, int port, + struct phylink_config *c) +{ + c->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | + MAC_100 | MAC_1000FD | MAC_2500FD; + + switch (port) { + case 0 ... 3: + __set_bit(PHY_INTERFACE_MODE_GMII, c->supported_interfaces); + break; + case 4: + __set_bit(PHY_INTERFACE_MODE_GMII, c->supported_interfaces); + phy_interface_set_rgmii(c->supported_interfaces); + break; + case 5 ... 6: + /* SerDes ports: QSGMII/SGMII/1000BASEX/2500BASEX modes + * require PCS support which is not yet implemented. + */ + break; + case 7 ... 8: + /* QSGMII mode on ports 7-8 requires SerDes PCS support, + * which is not yet implemented. 
+ */ + phy_interface_set_rgmii(c->supported_interfaces); + break; + default: + break; + } +} + +static int lan9645x_rgmii_setup(struct lan9645x *lan9645x, int port, int speed, + phy_interface_t mode) +{ + bool tx_delay = false, rx_delay = false; + u32 rx_idx, tx_idx; + u8 tx_clk; + int idx; + + /* Port 4 or 7 is RGMII_0 and port 8 is RGMII_1 */ + idx = port == 8 ? 1 : 0; + + if (!phy_interface_mode_is_rgmii(mode)) + return 0; + + tx_clk = speed == SPEED_1000 ? 1 : + speed == SPEED_100 ? 2 : + speed == SPEED_10 ? 3 : 0; + + lan_rmw(HSIO_RGMII_CFG_RGMII_RX_RST_SET(0) | + HSIO_RGMII_CFG_RGMII_TX_RST_SET(0) | + HSIO_RGMII_CFG_TX_CLK_CFG_SET(tx_clk), + HSIO_RGMII_CFG_RGMII_RX_RST | + HSIO_RGMII_CFG_RGMII_TX_RST | + HSIO_RGMII_CFG_TX_CLK_CFG, + lan9645x, HSIO_RGMII_CFG(idx)); + + /* We configure delays on the MAC side. When the PHY is not responsible + * for delays, the MAC is, which is why RGMII_TXID results in + * rx_delay=true. + */ + if (mode == PHY_INTERFACE_MODE_RGMII || + mode == PHY_INTERFACE_MODE_RGMII_TXID) + rx_delay = true; + + if (mode == PHY_INTERFACE_MODE_RGMII || + mode == PHY_INTERFACE_MODE_RGMII_RXID) + tx_delay = true; + + /* Setup DLL configuration. Register layout: + * 0: RGMII_0_RX + * 1: RGMII_0_TX + * 2: RGMII_1_RX + * 3: RGMII_1_TX + * ... + * (N<<1) RGMII_N_RX, + * (N<<1)+1: RGMII_N_TX, + */ + rx_idx = idx << 1; + tx_idx = rx_idx + 1; + + /* Enable DLL in RGMII clock paths, deassert DLL reset, and start the + * delay tune FSM. 
+ */ + lan_rmw(HSIO_DLL_CFG_DLL_CLK_ENA_SET(1) | + HSIO_DLL_CFG_DLL_RST_SET(0) | + HSIO_DLL_CFG_DLL_ENA_SET(rx_delay) | + HSIO_DLL_CFG_DELAY_ENA_SET(rx_delay), + HSIO_DLL_CFG_DLL_CLK_ENA | + HSIO_DLL_CFG_DLL_RST | + HSIO_DLL_CFG_DLL_ENA | + HSIO_DLL_CFG_DELAY_ENA, + lan9645x, HSIO_DLL_CFG(rx_idx)); + + lan_rmw(HSIO_DLL_CFG_DLL_CLK_ENA_SET(1) | + HSIO_DLL_CFG_DLL_RST_SET(0) | + HSIO_DLL_CFG_DLL_ENA_SET(tx_delay) | + HSIO_DLL_CFG_DELAY_ENA_SET(tx_delay), + HSIO_DLL_CFG_DLL_CLK_ENA | + HSIO_DLL_CFG_DLL_RST | + HSIO_DLL_CFG_DLL_ENA | + HSIO_DLL_CFG_DELAY_ENA, + lan9645x, HSIO_DLL_CFG(tx_idx)); + + return 0; +} + +static void lan9645x_phylink_mac_config(struct lan9645x *lan9645x, int port, + unsigned int mode, + const struct phylink_link_state *state) +{ + if (phy_interface_mode_is_rgmii(state->interface)) + lan9645x_rgmii_setup(lan9645x, port, state->speed, + state->interface); +} + +static int lan9645x_phylink_mac_prepare(struct lan9645x *lan9645x, int port, + unsigned int mode, + phy_interface_t iface) +{ + switch (port) { + case 0 ... 3: + lan_rmw(HSIO_HW_CFG_GMII_ENA_SET(BIT(port)), + HSIO_HW_CFG_GMII_ENA_SET(BIT(port)), lan9645x, + HSIO_HW_CFG); + break; + case 4: + lan_rmw(HSIO_HW_CFG_GMII_ENA_SET(BIT(port)), + HSIO_HW_CFG_GMII_ENA_SET(BIT(port)), lan9645x, + HSIO_HW_CFG); + + if (phy_interface_mode_is_rgmii(iface)) + lan_rmw(HSIO_HW_CFG_RGMII_0_CFG_SET(1), + HSIO_HW_CFG_RGMII_0_CFG, + lan9645x, HSIO_HW_CFG); + + break; + case 7 ... 8: + lan_rmw(HSIO_HW_CFG_GMII_ENA_SET(BIT(port)), + HSIO_HW_CFG_GMII_ENA_SET(BIT(port)), lan9645x, + HSIO_HW_CFG); + break; + default: + /* Ports 5-6 are SerDes-only and need PCS support (not yet + * implemented). They are excluded from phylink_get_caps. 
+ */ + return -EINVAL; + } + + return 0; +} + +static int lan9645x_port_is_cuphy(struct lan9645x *lan9645x, int port, + phy_interface_t interface) +{ + return port >= 0 && port <= 4 && interface == PHY_INTERFACE_MODE_GMII; +} + +static void lan9645x_phylink_mac_link_up(struct lan9645x *lan9645x, int port, + unsigned int link_an_mode, + phy_interface_t interface, + struct phy_device *phydev, int speed, + int duplex, bool tx_pause, + bool rx_pause) +{ + struct lan9645x_port *p = lan9645x_to_port(lan9645x, port); + int rx_ifg1, rx_ifg2, tx_ifg, gtx_clk = 0; + struct lan9645x_path_delay *path_delay; + int gspeed = LAN9645X_SPEED_DISABLED; + int mode = 0; + int fc_spd; + + /* Configure speed for RGMII modules. */ + if (phy_interface_mode_is_rgmii(interface)) + lan9645x_rgmii_setup(lan9645x, port, speed, interface); + + if (duplex == DUPLEX_FULL) { + mode |= DEV_MAC_MODE_CFG_FDX_ENA_SET(1); + rx_ifg2 = DEV_MAC_IFG_CFG_RX_IFG2_SET(0x2); + tx_ifg = DEV_MAC_IFG_CFG_TX_IFG_SET(0x5); + + } else { + rx_ifg2 = DEV_MAC_IFG_CFG_RX_IFG2_SET(0x2); + tx_ifg = DEV_MAC_IFG_CFG_TX_IFG_SET(0x6); + } + + switch (speed) { + case SPEED_10: + rx_ifg1 = DEV_MAC_IFG_CFG_RX_IFG1_SET(0x2); + gspeed = LAN9645X_SPEED_10; + break; + case SPEED_100: + rx_ifg1 = DEV_MAC_IFG_CFG_RX_IFG1_SET(0x1); + gspeed = LAN9645X_SPEED_100; + break; + case SPEED_1000: + gspeed = LAN9645X_SPEED_1000; + mode |= DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); + mode |= DEV_MAC_MODE_CFG_FDX_ENA_SET(1); + rx_ifg1 = DEV_MAC_IFG_CFG_RX_IFG1_SET(0x1); + rx_ifg2 = DEV_MAC_IFG_CFG_RX_IFG2_SET(0x2); + tx_ifg = DEV_MAC_IFG_CFG_TX_IFG_SET(0x6); + gtx_clk = 1; + break; + case SPEED_2500: + gspeed = LAN9645X_SPEED_2500; + mode |= DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); + mode |= DEV_MAC_MODE_CFG_FDX_ENA_SET(1); + rx_ifg1 = DEV_MAC_IFG_CFG_RX_IFG1_SET(0x1); + rx_ifg2 = DEV_MAC_IFG_CFG_RX_IFG2_SET(0x2); + tx_ifg = DEV_MAC_IFG_CFG_TX_IFG_SET(0x6); + break; + default: + dev_err(lan9645x->dev, "Unsupported speed on port %d: %d\n", + 
p->chip_port, speed); + return; + } + + p->speed = gspeed; + p->duplex = duplex; + fc_spd = lan9645x_speed_fc_enc(p->speed); + + if (phy_interface_num_ports(interface) == 4 || + interface == PHY_INTERFACE_MODE_SGMII) + mode |= DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); + + lan_rmw(mode, + DEV_MAC_MODE_CFG_FDX_ENA | + DEV_MAC_MODE_CFG_GIGA_MODE_ENA, + lan9645x, DEV_MAC_MODE_CFG(p->chip_port)); + + lan_rmw(tx_ifg | rx_ifg1 | rx_ifg2, + DEV_MAC_IFG_CFG_TX_IFG | + DEV_MAC_IFG_CFG_RX_IFG1 | + DEV_MAC_IFG_CFG_RX_IFG2, + lan9645x, DEV_MAC_IFG_CFG(p->chip_port)); + + lan_rmw(DEV_MAC_HDX_CFG_SEED_SET(p->chip_port) | + DEV_MAC_HDX_CFG_SEED_LOAD_SET(1), + DEV_MAC_HDX_CFG_SEED | + DEV_MAC_HDX_CFG_SEED_LOAD, lan9645x, + DEV_MAC_HDX_CFG(p->chip_port)); + + if (lan9645x_port_is_cuphy(lan9645x, port, interface)) { + lan_rmw(CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA_SET(gtx_clk), + CHIP_TOP_CUPHY_PORT_CFG_GTX_CLK_ENA, lan9645x, + CHIP_TOP_CUPHY_PORT_CFG(p->chip_port)); + } + + lan_rmw(DEV_MAC_HDX_CFG_SEED_LOAD_SET(0), + DEV_MAC_HDX_CFG_SEED_LOAD, lan9645x, + DEV_MAC_HDX_CFG(p->chip_port)); + + /* Set PFC link speed and enable map */ + lan_rmw(ANA_PFC_CFG_FC_LINK_SPEED_SET(fc_spd) | + ANA_PFC_CFG_RX_PFC_ENA_SET(0), + ANA_PFC_CFG_FC_LINK_SPEED | + ANA_PFC_CFG_RX_PFC_ENA, + lan9645x, ANA_PFC_CFG(p->chip_port)); + + lan_rmw(DEV_PCS1G_CFG_PCS_ENA_SET(1), + DEV_PCS1G_CFG_PCS_ENA, lan9645x, + DEV_PCS1G_CFG(p->chip_port)); + + lan_rmw(DEV_PCS1G_SD_CFG_SD_ENA_SET(0), + DEV_PCS1G_SD_CFG_SD_ENA, + lan9645x, DEV_PCS1G_SD_CFG(p->chip_port)); + + lan_rmw(SYS_PAUSE_CFG_PAUSE_ENA_SET(1), + SYS_PAUSE_CFG_PAUSE_ENA, + lan9645x, SYS_PAUSE_CFG(p->chip_port)); + + /* Set SMAC of Pause frame (00:00:00:00:00:00) */ + lan_wr(0, lan9645x, DEV_FC_MAC_LOW_CFG(p->chip_port)); + lan_wr(0, lan9645x, DEV_FC_MAC_HIGH_CFG(p->chip_port)); + + /* Flow control */ + lan_rmw(SYS_MAC_FC_CFG_FC_LINK_SPEED_SET(fc_spd) | + SYS_MAC_FC_CFG_FC_LATENCY_CFG_SET(0x7) | + SYS_MAC_FC_CFG_ZERO_PAUSE_ENA_SET(1) | + 
SYS_MAC_FC_CFG_PAUSE_VAL_CFG_SET(0xffff) | + SYS_MAC_FC_CFG_RX_FC_ENA_SET(rx_pause ? 1 : 0) | + SYS_MAC_FC_CFG_TX_FC_ENA_SET(tx_pause ? 1 : 0), + SYS_MAC_FC_CFG_FC_LINK_SPEED | + SYS_MAC_FC_CFG_FC_LATENCY_CFG | + SYS_MAC_FC_CFG_ZERO_PAUSE_ENA | + SYS_MAC_FC_CFG_PAUSE_VAL_CFG | + SYS_MAC_FC_CFG_RX_FC_ENA | + SYS_MAC_FC_CFG_TX_FC_ENA, + lan9645x, SYS_MAC_FC_CFG(p->chip_port)); + + list_for_each_entry(path_delay, &p->path_delays, list) { + if (path_delay->speed == speed) { + lan_wr(path_delay->rx_delay + p->rx_delay, + lan9645x, SYS_PTP_RXDLY_CFG(p->chip_port)); + lan_wr(path_delay->tx_delay, + lan9645x, SYS_PTP_TXDLY_CFG(p->chip_port)); + } + } + + /* Enable MAC module */ + lan_wr(DEV_MAC_ENA_CFG_RX_ENA_SET(1) | + DEV_MAC_ENA_CFG_TX_ENA_SET(1), + lan9645x, DEV_MAC_ENA_CFG(p->chip_port)); + + /* port _must_ be taken out of reset before MAC. */ + lan_rmw(DEV_CLOCK_CFG_PORT_RST_SET(0), + DEV_CLOCK_CFG_PORT_RST, + lan9645x, DEV_CLOCK_CFG(p->chip_port)); + + /* Take out the clock from reset. Note this write will set all these + * fields to zero: + * + * DEV_CLOCK_CFG[*].MAC_TX_RST + * DEV_CLOCK_CFG[*].MAC_RX_RST + * DEV_CLOCK_CFG[*].PCS_TX_RST + * DEV_CLOCK_CFG[*].PCS_RX_RST + * DEV_CLOCK_CFG[*].PORT_RST + * DEV_CLOCK_CFG[*].PHY_RST + * + * Note link_down will assert PORT_RST, MAC_RX_RST and MAC_TX_RST, so + * we are effectively taking the mac tx/rx clocks out of reset. + * + * This linkspeed field has a slightly different encoding from others: + * + * - 0 is no-link + * - 1 is both 2500/1000 + * - 2 is 100mbit + * - 3 is 10mbit + * + */ + lan_wr(DEV_CLOCK_CFG_LINK_SPEED_SET(fc_spd == 0 ? 
1 : fc_spd), + lan9645x, + DEV_CLOCK_CFG(p->chip_port)); + + /* Core: Enable port for frame transfer */ + lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) | + QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) | + QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1) | + QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(0), + QSYS_SW_PORT_MODE_PORT_ENA | + QSYS_SW_PORT_MODE_SCH_NEXT_CFG | + QSYS_SW_PORT_MODE_INGRESS_DROP_MODE | + QSYS_SW_PORT_MODE_TX_PFC_ENA, + lan9645x, QSYS_SW_PORT_MODE(p->chip_port)); + + lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(0) | + AFI_PORT_CFG_FRM_OUT_MAX_SET(16), + AFI_PORT_CFG_FC_SKIP_TTI_INJ | + AFI_PORT_CFG_FRM_OUT_MAX, + lan9645x, AFI_PORT_CFG(p->chip_port)); +} + +void lan9645x_phylink_port_down(struct lan9645x *lan9645x, int port) +{ + struct lan9645x_port *p = lan9645x_to_port(lan9645x, port); + u32 val; + + /* 0.5: Disable any AFI */ + lan_rmw(AFI_PORT_CFG_FC_SKIP_TTI_INJ_SET(1) | + AFI_PORT_CFG_FRM_OUT_MAX_SET(0), + AFI_PORT_CFG_FC_SKIP_TTI_INJ | + AFI_PORT_CFG_FRM_OUT_MAX, + lan9645x, AFI_PORT_CFG(p->chip_port)); + + /* wait for reg afi_port_frm_out to become 0 for the port */ + if (lan9645x_rd_poll_slow(lan9645x, AFI_PORT_FRM_OUT(p->chip_port), + val, + !AFI_PORT_FRM_OUT_FRM_OUT_CNT_GET(val))) + dev_err(lan9645x->dev, "AFI timeout chip port %u", + p->chip_port); + + /* 2: Disable MAC frame reception */ + lan_rmw(DEV_MAC_ENA_CFG_RX_ENA_SET(0), + DEV_MAC_ENA_CFG_RX_ENA, + lan9645x, DEV_MAC_ENA_CFG(p->chip_port)); + + /* 1: Reset the PCS Rx clock domain */ + lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(1), + DEV_CLOCK_CFG_PCS_RX_RST, + lan9645x, DEV_CLOCK_CFG(p->chip_port)); + + p->speed = LAN9645X_SPEED_DISABLED; + p->duplex = DUPLEX_UNKNOWN; + + /* 3: Disable traffic being sent to or from switch port */ + lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0), + QSYS_SW_PORT_MODE_PORT_ENA, + lan9645x, QSYS_SW_PORT_MODE(p->chip_port)); + + /* 4: Disable dequeuing from the egress queues */ + lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(1), + QSYS_PORT_MODE_DEQUEUE_DIS, + lan9645x, 
QSYS_PORT_MODE(p->chip_port)); + + /* 5: Disable Flowcontrol */ + lan_rmw(SYS_PAUSE_CFG_PAUSE_ENA_SET(0), + SYS_PAUSE_CFG_PAUSE_ENA, + lan9645x, SYS_PAUSE_CFG(p->chip_port)); + + /* 5.1: Disable PFC */ + lan_rmw(QSYS_SW_PORT_MODE_TX_PFC_ENA_SET(0), + QSYS_SW_PORT_MODE_TX_PFC_ENA, + lan9645x, QSYS_SW_PORT_MODE(p->chip_port)); + + /* 6: Wait a worst case time 8ms (10K jumbo/10Mbit) */ + usleep_range(8 * USEC_PER_MSEC, 9 * USEC_PER_MSEC); + + /* 7: Disable HDX backpressure. */ + lan_rmw(SYS_FRONT_PORT_MODE_HDX_MODE_SET(0), + SYS_FRONT_PORT_MODE_HDX_MODE, + lan9645x, SYS_FRONT_PORT_MODE(p->chip_port)); + + /* 8: Flush the queues associated with the port */ + lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(3), + QSYS_SW_PORT_MODE_AGING_MODE, + lan9645x, QSYS_SW_PORT_MODE(p->chip_port)); + + /* 9: Enable dequeuing from the egress queues */ + lan_rmw(QSYS_PORT_MODE_DEQUEUE_DIS_SET(0), + QSYS_PORT_MODE_DEQUEUE_DIS, + lan9645x, QSYS_PORT_MODE(p->chip_port)); + + /* 10: Wait until flushing is complete */ + if (lan9645x_rd_poll_slow(lan9645x, QSYS_SW_STATUS(p->chip_port), + val, !QSYS_SW_STATUS_EQ_AVAIL_GET(val))) + dev_err(lan9645x->dev, "Flush timeout chip port %u", port); + + /* 11: Disable MAC tx */ + lan_rmw(DEV_MAC_ENA_CFG_TX_ENA_SET(0), + DEV_MAC_ENA_CFG_TX_ENA, + lan9645x, DEV_MAC_ENA_CFG(p->chip_port)); + + /* 12: Reset the Port and MAC clock domains */ + lan_rmw(DEV_CLOCK_CFG_PORT_RST_SET(1), + DEV_CLOCK_CFG_PORT_RST, + lan9645x, DEV_CLOCK_CFG(p->chip_port)); + + /* Wait before resetting MAC clock domains. 
*/ + usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC); + + lan_rmw(DEV_CLOCK_CFG_MAC_TX_RST_SET(1) | + DEV_CLOCK_CFG_MAC_RX_RST_SET(1) | + DEV_CLOCK_CFG_PORT_RST_SET(1), + DEV_CLOCK_CFG_MAC_TX_RST | + DEV_CLOCK_CFG_MAC_RX_RST | + DEV_CLOCK_CFG_PORT_RST, + lan9645x, DEV_CLOCK_CFG(p->chip_port)); + + /* 13: Clear flushing */ + lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(1), + QSYS_SW_PORT_MODE_AGING_MODE, + lan9645x, QSYS_SW_PORT_MODE(p->chip_port)); +} + +static void lan9645x_phylink_mac_link_down(struct lan9645x *lan9645x, int port, + unsigned int link_an_mode, + phy_interface_t interface) +{ + struct lan9645x_port *p = lan9645x_to_port(lan9645x, port); + + lan9645x_phylink_port_down(lan9645x, port); + + /* 14: Take PCS out of reset */ + lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) | + DEV_CLOCK_CFG_PCS_TX_RST_SET(0), + DEV_CLOCK_CFG_PCS_RX_RST | + DEV_CLOCK_CFG_PCS_TX_RST, + lan9645x, DEV_CLOCK_CFG(p->chip_port)); +} + +static struct lan9645x_port * +lan9645x_phylink_config_to_port(struct phylink_config *config) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + + return lan9645x_to_port(dp->ds->priv, dp->index); +} + +static void +lan9645x_port_phylink_mac_config(struct phylink_config *config, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct lan9645x_port *p = lan9645x_phylink_config_to_port(config); + + lan9645x_phylink_mac_config(p->lan9645x, p->chip_port, mode, state); +} + +static void lan9645x_port_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, + unsigned int link_an_mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct lan9645x_port *p = lan9645x_phylink_config_to_port(config); + + lan9645x_phylink_mac_link_up(p->lan9645x, p->chip_port, link_an_mode, + interface, phydev, speed, duplex, tx_pause, + rx_pause); +} + +static void lan9645x_port_phylink_mac_link_down(struct phylink_config *config, + unsigned int link_an_mode, + phy_interface_t interface) +{ + 
struct lan9645x_port *p = lan9645x_phylink_config_to_port(config); + + lan9645x_phylink_mac_link_down(p->lan9645x, p->chip_port, link_an_mode, + interface); +} + +static int lan9645x_port_phylink_mac_prepare(struct phylink_config *config, + unsigned int mode, + phy_interface_t iface) +{ + struct lan9645x_port *p = lan9645x_phylink_config_to_port(config); + + return lan9645x_phylink_mac_prepare(p->lan9645x, p->chip_port, mode, + iface); +} + +const struct phylink_mac_ops lan9645x_phylink_mac_ops = { + .mac_config = lan9645x_port_phylink_mac_config, + .mac_link_up = lan9645x_port_phylink_mac_link_up, + .mac_link_down = lan9645x_port_phylink_mac_link_down, + .mac_prepare = lan9645x_port_phylink_mac_prepare, +}; diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_port.c b/drivers/net/dsa/microchip/lan9645x/lan9645x_port.c new file mode 100644 index 000000000000..038868ae0a32 --- /dev/null +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_port.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2026 Microchip Technology Inc. + */ + +#include "lan9645x_main.h" + +int lan9645x_port_init(struct lan9645x *lan9645x, int port) +{ + struct lan9645x_port *p; + + p = lan9645x_to_port(lan9645x, port); + + /* Disable learning on port */ + lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(0), + ANA_PORT_CFG_LEARN_ENA, + lan9645x, ANA_PORT_CFG(p->chip_port)); + + lan9645x_port_set_maxlen(lan9645x, port, ETH_DATA_LEN); + + lan9645x_phylink_port_down(lan9645x, port); + + if (phy_interface_num_ports(p->phy_mode) == 4) + lan_rmw(DEV_CLOCK_CFG_PCS_RX_RST_SET(0) | + DEV_CLOCK_CFG_PCS_TX_RST_SET(0), + DEV_CLOCK_CFG_PCS_RX_RST | + DEV_CLOCK_CFG_PCS_TX_RST, + lan9645x, DEV_CLOCK_CFG(p->chip_port)); + + /* Drop frames with multicast source address */ + lan_rmw(ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1), + ANA_DROP_CFG_DROP_MC_SMAC_ENA, lan9645x, + ANA_DROP_CFG(p->chip_port)); + + /* Enable receiving frames on the port, and activate auto-learning of + * MAC addresses. 
+ */ + lan_rmw(ANA_PORT_CFG_LEARNAUTO_SET(1) | + ANA_PORT_CFG_RECV_ENA_SET(1) | + ANA_PORT_CFG_PORTID_VAL_SET(p->chip_port), + ANA_PORT_CFG_LEARNAUTO | + ANA_PORT_CFG_RECV_ENA | + ANA_PORT_CFG_PORTID_VAL, + lan9645x, ANA_PORT_CFG(p->chip_port)); + + return 0; +} + +void lan9645x_port_cpu_init(struct lan9645x *lan9645x) +{ + /* Map the 8 CPU extraction queues to CPU port 9 (datasheet is wrong) */ + lan_wr(0, lan9645x, QSYS_CPU_GROUP_MAP); + + /* Configure second cpu port (chip_port 10) for manual frame injection. + * The AFI can not inject frames via the NPI port, unless frame aging is + * disabled on frontports, so we use manual injection for AFI frames. + */ + + /* Set min-spacing of EOF to SOF on injected frames to 0, on cpu device + * 1. This is required when injecting with IFH. + * Default values emulates delay of std preamble/IFG setting on a front + * port. + */ + lan_rmw(QS_INJ_CTRL_GAP_SIZE_SET(0), + QS_INJ_CTRL_GAP_SIZE, + lan9645x, QS_INJ_CTRL(1)); + + /* Injection: Mode: manual injection | Byte_swap */ + lan_wr(QS_INJ_GRP_CFG_MODE_SET(1) | + QS_INJ_GRP_CFG_BYTE_SWAP_SET(1), + lan9645x, QS_INJ_GRP_CFG(1)); + + lan_rmw(QS_INJ_CTRL_GAP_SIZE_SET(0), + QS_INJ_CTRL_GAP_SIZE, + lan9645x, QS_INJ_CTRL(1)); + + lan_wr(SYS_PORT_MODE_INCL_INJ_HDR_SET(1), + lan9645x, SYS_PORT_MODE(CPU_PORT + 1)); + + /* The CPU will only use its reserved buffer in the shared queue system + * and none of the shared buffer space, therefore we disable resource + * sharing in egress direction. We must not disable resource sharing in + * the ingress direction, because some traffic test scenarios require + * loads of buffer memory for frames initiated by the CPU. + */ + lan_rmw(QSYS_EGR_NO_SHARING_EGR_NO_SHARING_SET(BIT(CPU_PORT)), + QSYS_EGR_NO_SHARING_EGR_NO_SHARING_SET(BIT(CPU_PORT)), + lan9645x, QSYS_EGR_NO_SHARING); + + /* The CPU should also discard frames forwarded to it if it has run + * out of the reserved buffer space. 
Otherwise they will be held back + * in the ingress queues with potential head-of-line blocking effects. + */ + lan_rmw(QSYS_EGR_DROP_MODE_EGRESS_DROP_MODE_SET(BIT(CPU_PORT)), + QSYS_EGR_DROP_MODE_EGRESS_DROP_MODE_SET(BIT(CPU_PORT)), + lan9645x, QSYS_EGR_DROP_MODE); + + lan_wr(BIT(CPU_PORT), lan9645x, ANA_PGID(PGID_CPU)); + + lan_rmw(ANA_PORT_CFG_PORTID_VAL_SET(CPU_PORT) | + ANA_PORT_CFG_RECV_ENA_SET(1), + ANA_PORT_CFG_PORTID_VAL | + ANA_PORT_CFG_RECV_ENA, lan9645x, + ANA_PORT_CFG(CPU_PORT)); + + /* Enable switching to/from cpu port. Keep default aging-mode. */ + lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1) | + QSYS_SW_PORT_MODE_SCH_NEXT_CFG_SET(1) | + QSYS_SW_PORT_MODE_INGRESS_DROP_MODE_SET(1), + QSYS_SW_PORT_MODE_PORT_ENA | + QSYS_SW_PORT_MODE_SCH_NEXT_CFG | + QSYS_SW_PORT_MODE_INGRESS_DROP_MODE, + lan9645x, QSYS_SW_PORT_MODE(CPU_PORT)); + + /* Transmit cpu frames as received without any tagging, timing or other + * updates. This does not affect CPU-over-NPI, only manual extraction. + * On the NPI port we need NO_REWRITE=0 for HSR/PRP. + */ + lan_wr(REW_PORT_CFG_NO_REWRITE_SET(1), + lan9645x, REW_PORT_CFG(CPU_PORT)); +} + +void lan9645x_port_set_tail_drop_wm(struct lan9645x *lan9645x) +{ + int shared_per_port; + int port; + + /* Configure tail dropping watermark */ + shared_per_port = + lan9645x->shared_queue_sz / (lan9645x->num_phys_ports + 1); + + /* The total memory size is diveded by number of front ports plus CPU + * port. 
+ */ + lan9645x_for_each_chipport(lan9645x, port) + lan_wr(lan9645x_wm_enc(shared_per_port), lan9645x, + SYS_ATOP(port)); + + /* Tail dropping active based only on per port ATOP wm */ + lan_wr(lan9645x_wm_enc(lan9645x->shared_queue_sz), lan9645x, + SYS_ATOP_TOT_CFG); +} + +int lan9645x_port_set_maxlen(struct lan9645x *lan9645x, int port, size_t sdu) +{ + struct lan9645x_port *p = lan9645x_to_port(lan9645x, port); + + int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN; + + if (port == lan9645x->npi) { + maxlen += LAN9645X_IFH_LEN; + maxlen += LAN9645X_LONG_PREFIX_LEN; + } + + lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(maxlen), lan9645x, + DEV_MAC_MAXLEN_CFG(p->chip_port)); + + /* Set Pause WM hysteresis */ + lan_rmw(SYS_PAUSE_CFG_PAUSE_STOP_SET(lan9645x_wm_enc(4 * maxlen)) | + SYS_PAUSE_CFG_PAUSE_START_SET(lan9645x_wm_enc(6 * maxlen)), + SYS_PAUSE_CFG_PAUSE_START | + SYS_PAUSE_CFG_PAUSE_STOP, + lan9645x, + SYS_PAUSE_CFG(p->chip_port)); + + return 0; +} + +static int lan9645x_port_setup_leds(struct lan9645x *lan9645x, + struct fwnode_handle *portnp, int port) +{ + u32 drive_mode; + int err; + + err = fwnode_property_read_u32(portnp, "microchip,led-drive-mode", + &drive_mode); + if (err) + return err; + + lan_rmw(CHIP_TOP_CUPHY_LED_CFG_LED_DRIVE_MODE_SET(drive_mode), + CHIP_TOP_CUPHY_LED_CFG_LED_DRIVE_MODE, lan9645x, + CHIP_TOP_CUPHY_LED_CFG(port)); + + return 0; +} + +static int lan9645x_port_parse_delays(struct lan9645x_port *port, + struct fwnode_handle *portnp) +{ + struct fwnode_handle *delay; + int err; + + INIT_LIST_HEAD(&port->path_delays); + + fwnode_for_each_available_child_node(portnp, delay) { + struct lan9645x_path_delay *path_delay; + s32 tx_delay; + s32 rx_delay; + u32 speed; + + err = fwnode_property_read_u32(delay, "speed", &speed); + if (err) + return err; + + err = fwnode_property_read_u32(delay, "rx_delay", &rx_delay); + if (err) + return err; + + err = fwnode_property_read_u32(delay, "tx_delay", &tx_delay); + if (err) + return err; + + path_delay = 
devm_kzalloc(port->lan9645x->dev, + sizeof(*path_delay), GFP_KERNEL); + if (!path_delay) + return -ENOMEM; + + path_delay->rx_delay = rx_delay; + path_delay->tx_delay = tx_delay; + path_delay->speed = speed; + list_add_tail(&path_delay->list, &port->path_delays); + } + + return 0; +} + +int lan9645x_port_parse_ports_node(struct lan9645x *lan9645x) +{ + struct fwnode_handle *ports, *portnp; + struct device *dev = lan9645x->dev; + int max_ports, num_ports = 0; + int err = 0; + + max_ports = NUM_PHYS_PORTS - lan9645x->num_port_dis; + + ports = device_get_named_child_node(dev, "ethernet-ports"); + if (!ports) + ports = device_get_named_child_node(dev, "ports"); + if (!ports) { + dev_err(dev, "no ethernet-ports or ports child found\n"); + return -ENODEV; + } + + fwnode_for_each_available_child_node(ports, portnp) { + int phy_mode; + u32 p; + + num_ports++; + + if (num_ports > max_ports) { + dev_err(dev, + "Too many ports in device tree. Max ports supported by SKU: %d\n", + max_ports); + err = -ENODEV; + goto err_free_ports; + } + + if (fwnode_property_read_u32(portnp, "reg", &p)) { + dev_err(dev, "Port number not defined in device tree (property \"reg\")\n"); + err = -ENODEV; + fwnode_handle_put(portnp); + goto err_free_ports; + } + + if (p >= lan9645x->num_phys_ports) { + dev_err(dev, + "Port number in device tree is invalid %u (property \"reg\")\n", + p); + err = -ENODEV; + fwnode_handle_put(portnp); + goto err_free_ports; + } + + phy_mode = fwnode_get_phy_mode(portnp); + if (phy_mode < 0) { + dev_err(dev, "Failed to read phy-mode for port %u", p); + err = -ENODEV; + fwnode_handle_put(portnp); + goto err_free_ports; + } + + lan9645x->ports[p]->phy_mode = phy_mode; + lan9645x_port_parse_delays(lan9645x->ports[p], portnp); + lan9645x_port_setup_leds(lan9645x, portnp, p); + } + +err_free_ports: + fwnode_handle_put(ports); + return err; +} diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index ee046468652c..740f3c2e8199 100644 
--- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -62,5 +62,6 @@ source "drivers/net/ethernet/microchip/lan966x/Kconfig" source "drivers/net/ethernet/microchip/sparx5/Kconfig" source "drivers/net/ethernet/microchip/vcap/Kconfig" source "drivers/net/ethernet/microchip/fdma/Kconfig" +source "drivers/net/dsa/microchip/lan9645x/Kconfig" endif # NET_VENDOR_MICROCHIP -- 2.52.0 Add support for hardware offloading of the bridge. We support a single bridge device. Reviewed-by: Steen Hegelund Signed-off-by: Jens Emil Schulz Østergaard --- drivers/net/dsa/microchip/lan9645x/lan9645x_main.c | 196 +++++++++++++++++++++ drivers/net/dsa/microchip/lan9645x/lan9645x_main.h | 11 ++ drivers/net/dsa/microchip/lan9645x/lan9645x_port.c | 2 + 3 files changed, 209 insertions(+) diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c index 739013f049d0..b6efaf669a3f 100644 --- a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c @@ -171,6 +171,8 @@ static int lan9645x_setup(struct dsa_switch *ds) return err; } + mutex_init(&lan9645x->fwd_domain_lock); + /* Link Aggregation Mode: NETDEV_LAG_HASH_L2 */ lan_wr(ANA_AGGR_CFG_AC_SMAC_ENA | ANA_AGGR_CFG_AC_DMAC_ENA, @@ -288,6 +290,192 @@ static void lan9645x_port_phylink_get_caps(struct dsa_switch *ds, int port, lan9645x_phylink_get_caps(ds->priv, port, config); } +static int lan9645x_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) +{ + u32 age_secs = max(1, msecs / 1000 / 2); + struct lan9645x *lan9645x = ds->priv; + + /* Entry is must suffer two aging scans before it is removed, so an + * entry is aged after 2*AGE_PERIOD, and the unit is in seconds. + * An age period of 0 disables automatic aging. 
+ */ + lan_rmw(ANA_AUTOAGE_AGE_PERIOD_SET(age_secs), + ANA_AUTOAGE_AGE_PERIOD, + lan9645x, ANA_AUTOAGE); + return 0; +} + +static int lan9645x_port_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & + ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD)) + return -EINVAL; + + return 0; +} + +static void lan9645x_port_pgid_set(struct lan9645x *lan9645x, u16 pgid, + int chip_port, bool enabled) +{ + u32 reg_msk, port_msk; + + WARN_ON(chip_port > CPU_PORT); + + port_msk = ANA_PGID_PGID_SET(enabled ? BIT(chip_port) : 0); + reg_msk = ANA_PGID_PGID_SET(BIT(chip_port)); + + lan_rmw(port_msk, reg_msk, lan9645x, ANA_PGID(pgid)); +} + +static void lan9645x_port_set_learning(struct lan9645x *lan9645x, int port, + bool enabled) +{ + struct lan9645x_port *p; + + lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(enabled), ANA_PORT_CFG_LEARN_ENA, + lan9645x, ANA_PORT_CFG(port)); + + p = lan9645x_to_port(lan9645x, port); + p->learn_ena = enabled; +} + +static int lan9645x_port_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags f, + struct netlink_ext_ack *extack) +{ + struct lan9645x *l = ds->priv; + + if (WARN_ON(port == l->npi)) + return -EINVAL; + + if (f.mask & BR_LEARNING) + lan9645x_port_set_learning(l, port, !!(f.val & BR_LEARNING)); + + if (f.mask & BR_FLOOD) + lan9645x_port_pgid_set(l, PGID_UC, port, !!(f.val & BR_FLOOD)); + + if (f.mask & BR_MCAST_FLOOD) { + bool ena = !!(f.val & BR_MCAST_FLOOD); + + lan9645x_port_pgid_set(l, PGID_MC, port, ena); + lan9645x_port_pgid_set(l, PGID_MCIPV4, port, ena); + lan9645x_port_pgid_set(l, PGID_MCIPV6, port, ena); + } + + if (f.mask & BR_BCAST_FLOOD) + lan9645x_port_pgid_set(l, PGID_BC, port, + !!(f.val & BR_BCAST_FLOOD)); + + return 0; +} + +static void lan9645x_update_fwd_mask(struct lan9645x *lan9645x) +{ + struct lan9645x_port *p; + int port; + + lockdep_assert_held(&lan9645x->fwd_domain_lock); + + /* Updates the source 
port PGIDs, making sure frames from p + * are only forwarded to ports q != p, where q is relevant to forward + */ + lan9645x_for_each_chipport(lan9645x, port) { + u32 mask = 0; + + p = lan9645x_to_port(lan9645x, port); + + if (lan9645x_port_is_bridged(p)) { + mask = lan9645x->bridge_mask & + lan9645x->bridge_fwd_mask & ~BIT(p->chip_port); + } + + lan_wr(mask, lan9645x, ANA_PGID(PGID_SRC + port)); + } +} + +static int lan9645x_port_bridge_join(struct dsa_switch *ds, int port, + struct dsa_bridge bridge, + bool *tx_fwd_offload, + struct netlink_ext_ack *extack) +{ + struct lan9645x *lan9645x = ds->priv; + struct lan9645x_port *p; + + p = lan9645x_to_port(lan9645x, port); + + if (lan9645x->bridge && lan9645x->bridge != bridge.dev) { + NL_SET_ERR_MSG_MOD(extack, "Only one bridge supported"); + return -EBUSY; + } + + mutex_lock(&lan9645x->fwd_domain_lock); + /* First bridged port sets bridge dev */ + if (!lan9645x->bridge_mask) + lan9645x->bridge = bridge.dev; + + lan9645x->bridge_mask |= BIT(p->chip_port); + mutex_unlock(&lan9645x->fwd_domain_lock); + + /* Later: stp_state_set updates forwarding */ + + return 0; +} + +static void lan9645x_port_stp_state_set(struct lan9645x *lan9645x, int port, + u8 state) +{ + struct lan9645x_port *p = lan9645x_to_port(lan9645x, port); + bool learn_ena; + + mutex_lock(&lan9645x->fwd_domain_lock); + + p->stp_state = state; + + if (state == BR_STATE_FORWARDING) + lan9645x->bridge_fwd_mask |= BIT(p->chip_port); + else + lan9645x->bridge_fwd_mask &= ~BIT(p->chip_port); + + learn_ena = (state == BR_STATE_LEARNING || + state == BR_STATE_FORWARDING) && p->learn_ena; + + lan_rmw(ANA_PORT_CFG_LEARN_ENA_SET(learn_ena), + ANA_PORT_CFG_LEARN_ENA, lan9645x, + ANA_PORT_CFG(p->chip_port)); + + lan9645x_update_fwd_mask(lan9645x); + mutex_unlock(&lan9645x->fwd_domain_lock); +} + +static void lan9645x_port_bridge_stp_state_set(struct dsa_switch *ds, int port, + u8 state) +{ + lan9645x_port_stp_state_set(ds->priv, port, state); +} + +static void 
lan9645x_port_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_bridge bridge) +{ + struct lan9645x *lan9645x = ds->priv; + struct lan9645x_port *p; + + p = lan9645x_to_port(lan9645x, port); + + mutex_lock(&lan9645x->fwd_domain_lock); + + lan9645x->bridge_mask &= ~BIT(p->chip_port); + + /* Last port leaving clears bridge dev */ + if (!lan9645x->bridge_mask) + lan9645x->bridge = NULL; + + lan9645x_update_fwd_mask(lan9645x); + + mutex_unlock(&lan9645x->fwd_domain_lock); +} + static const struct dsa_switch_ops lan9645x_switch_ops = { .get_tag_protocol = lan9645x_get_tag_protocol, .connect_tag_protocol = lan9645x_connect_tag_protocol, @@ -301,6 +489,14 @@ static const struct dsa_switch_ops lan9645x_switch_ops = { /* MTU */ .port_change_mtu = lan9645x_change_mtu, .port_max_mtu = lan9645x_get_max_mtu, + + /* Bridge integration */ + .set_ageing_time = lan9645x_set_ageing_time, + .port_pre_bridge_flags = lan9645x_port_pre_bridge_flags, + .port_bridge_flags = lan9645x_port_bridge_flags, + .port_bridge_join = lan9645x_port_bridge_join, + .port_bridge_leave = lan9645x_port_bridge_leave, + .port_stp_state_set = lan9645x_port_bridge_stp_state_set, }; static int lan9645x_request_target_regmaps(struct lan9645x *lan9645x) diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h index a51b637f28bf..bf110bdbc90c 100644 --- a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h @@ -175,6 +175,12 @@ struct lan9645x { /* debugfs */ struct dentry *debugfs_root; + /* Forwarding Database */ + struct net_device *bridge; /* Only support single bridge */ + u16 bridge_mask; /* Mask for bridged ports */ + u16 bridge_fwd_mask; /* Mask for forwarding bridged ports */ + struct mutex fwd_domain_lock; /* lock forwarding configuration */ + int num_port_dis; bool dd_dis; bool tsn_dis; @@ -273,6 +279,11 @@ lan9645x_chipport_to_ndev(struct lan9645x *lan9645x, int port) return 
lan9645x_port_to_ndev(lan9645x_to_port(lan9645x, port)); } +static inline bool lan9645x_port_is_bridged(struct lan9645x_port *p) +{ + return p && (p->lan9645x->bridge_mask & BIT(p->chip_port)); +} + static inline bool lan9645x_port_is_used(struct lan9645x *lan9645x, int port) { struct dsa_port *dp; diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_port.c b/drivers/net/dsa/microchip/lan9645x/lan9645x_port.c index 038868ae0a32..b60c64458957 100644 --- a/drivers/net/dsa/microchip/lan9645x/lan9645x_port.c +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_port.c @@ -15,6 +15,8 @@ int lan9645x_port_init(struct lan9645x *lan9645x, int port) ANA_PORT_CFG_LEARN_ENA, lan9645x, ANA_PORT_CFG(p->chip_port)); + p->learn_ena = false; + lan9645x_port_set_maxlen(lan9645x, port, ETH_DATA_LEN); lan9645x_phylink_port_down(lan9645x, port); -- 2.52.0 Add support for vlanaware bridge. We reserve vid 4095 for standalone mode, to implement fdb-isolation. An vlan-unaware bridge uses vid 0. Reviewed-by: Steen Hegelund Signed-off-by: Jens Emil Schulz Østergaard --- drivers/net/dsa/microchip/lan9645x/Makefile | 1 + drivers/net/dsa/microchip/lan9645x/lan9645x_main.c | 63 ++++ drivers/net/dsa/microchip/lan9645x/lan9645x_main.h | 20 ++ drivers/net/dsa/microchip/lan9645x/lan9645x_port.c | 3 + drivers/net/dsa/microchip/lan9645x/lan9645x_vlan.c | 339 +++++++++++++++++++++ 5 files changed, 426 insertions(+) diff --git a/drivers/net/dsa/microchip/lan9645x/Makefile b/drivers/net/dsa/microchip/lan9645x/Makefile index eea1edc5c0e3..bb5eec14d225 100644 --- a/drivers/net/dsa/microchip/lan9645x/Makefile +++ b/drivers/net/dsa/microchip/lan9645x/Makefile @@ -5,3 +5,4 @@ mchp-lan9645x-objs := lan9645x_main.o \ lan9645x_npi.o \ lan9645x_port.o \ lan9645x_phylink.o \ + lan9645x_vlan.o \ diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c index b6efaf669a3f..1c8f20452487 100644 --- a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c +++ 
b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c @@ -172,6 +172,7 @@ static int lan9645x_setup(struct dsa_switch *ds) } mutex_init(&lan9645x->fwd_domain_lock); + lan9645x_vlan_init(lan9645x); /* Link Aggregation Mode: NETDEV_LAG_HASH_L2 */ lan_wr(ANA_AGGR_CFG_AC_SMAC_ENA | @@ -471,11 +472,68 @@ static void lan9645x_port_bridge_leave(struct dsa_switch *ds, int port, if (!lan9645x->bridge_mask) lan9645x->bridge = NULL; + lan9645x_vlan_set_hostmode(p); lan9645x_update_fwd_mask(lan9645x); mutex_unlock(&lan9645x->fwd_domain_lock); } +static int lan9645x_port_vlan_filtering(struct dsa_switch *ds, int port, + bool enabled, + struct netlink_ext_ack *extack) +{ + struct lan9645x *lan9645x = ds->priv; + struct lan9645x_port *p; + + p = lan9645x_to_port(lan9645x, port); + lan9645x_vlan_port_set_vlan_aware(p, enabled); + lan9645x_vlan_port_apply(p); + + return 0; +} + +static int lan9645x_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) +{ + struct lan9645x *lan9645x = ds->priv; + struct lan9645x_port *p; + bool pvid, untagged; + int err; + + p = lan9645x_to_port(lan9645x, port); + + pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + + err = lan9645x_port_vlan_prepare(p, vlan->vid, pvid, untagged, extack); + if (err) + return err; + + if (port == lan9645x->npi) + lan9645x_vlan_cpu_set_vlan(lan9645x, vlan->vid); + + lan9645x_vlan_port_add_vlan(p, vlan->vid, pvid, untagged); + + return 0; +} + +static int lan9645x_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct lan9645x *lan9645x = ds->priv; + struct lan9645x_port *p; + + p = lan9645x_to_port(lan9645x, port); + + if (port == lan9645x->npi) + lan9645x_vlan_cpu_clear_vlan(lan9645x, vlan->vid); + + lan9645x_vlan_port_del_vlan(p, vlan->vid); + + return 0; +} + static const struct dsa_switch_ops lan9645x_switch_ops = { .get_tag_protocol = 
lan9645x_get_tag_protocol, .connect_tag_protocol = lan9645x_connect_tag_protocol, @@ -497,6 +555,11 @@ static const struct dsa_switch_ops lan9645x_switch_ops = { .port_bridge_join = lan9645x_port_bridge_join, .port_bridge_leave = lan9645x_port_bridge_leave, .port_stp_state_set = lan9645x_port_bridge_stp_state_set, + + /* VLAN integration */ + .port_vlan_filtering = lan9645x_port_vlan_filtering, + .port_vlan_add = lan9645x_port_vlan_add, + .port_vlan_del = lan9645x_port_vlan_del, }; static int lan9645x_request_target_regmaps(struct lan9645x *lan9645x) diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h index bf110bdbc90c..f1471344a9e9 100644 --- a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h @@ -181,6 +181,11 @@ struct lan9645x { u16 bridge_fwd_mask; /* Mask for forwarding bridged ports */ struct mutex fwd_domain_lock; /* lock forwarding configuration */ + /* VLAN */ + u16 vlan_mask[VLAN_N_VID]; /* Port mask per vlan */ + u8 vlan_flags[VLAN_N_VID]; + DECLARE_BITMAP(cpu_vlan_mask, VLAN_N_VID); /* CPU VLAN membership */ + int num_port_dis; bool dd_dis; bool tsn_dis; @@ -404,4 +409,19 @@ void lan9645x_phylink_get_caps(struct lan9645x *lan9645x, int port, struct phylink_config *c); void lan9645x_phylink_port_down(struct lan9645x *lan9645x, int port); +/* VLAN lan9645x_vlan.c */ +void lan9645x_vlan_init(struct lan9645x *lan9645x); +void lan9645x_vlan_port_set_vlan_aware(struct lan9645x_port *p, + bool vlan_aware); +u16 lan9645x_vlan_unaware_pvid(bool is_bridged); +void lan9645x_vlan_port_apply(struct lan9645x_port *p); +void lan9645x_vlan_port_add_vlan(struct lan9645x_port *p, u16 vid, bool pvid, + bool untagged); +void lan9645x_vlan_port_del_vlan(struct lan9645x_port *p, u16 vid); +void lan9645x_vlan_cpu_set_vlan(struct lan9645x *lan9645x, u16 vid); +void lan9645x_vlan_cpu_clear_vlan(struct lan9645x *lan9645x, u16 vid); +void 
lan9645x_vlan_set_hostmode(struct lan9645x_port *p); +int lan9645x_port_vlan_prepare(struct lan9645x_port *p, u16 vid, bool pvid, + bool untagged, struct netlink_ext_ack *extack); + #endif /* __LAN9645X_MAIN_H__ */ diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_port.c b/drivers/net/dsa/microchip/lan9645x/lan9645x_port.c index b60c64458957..d69018b015b5 100644 --- a/drivers/net/dsa/microchip/lan9645x/lan9645x_port.c +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_port.c @@ -44,6 +44,9 @@ int lan9645x_port_init(struct lan9645x *lan9645x, int port) ANA_PORT_CFG_PORTID_VAL, lan9645x, ANA_PORT_CFG(p->chip_port)); + if (p->chip_port != lan9645x->npi) + lan9645x_vlan_set_hostmode(p); + return 0; } diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_vlan.c b/drivers/net/dsa/microchip/lan9645x/lan9645x_vlan.c new file mode 100644 index 000000000000..1fc2a0ffe3b1 --- /dev/null +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_vlan.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2026 Microchip Technology Inc. 
+ */ + +#include "lan9645x_main.h" + +#define VLANACCESS_CMD_IDLE 0 +#define VLANACCESS_CMD_READ 1 +#define VLANACCESS_CMD_WRITE 2 +#define VLANACCESS_CMD_INIT 3 + +int lan9645x_port_vlan_prepare(struct lan9645x_port *p, u16 vid, bool pvid, + bool untagged, struct netlink_ext_ack *extack) +{ + struct lan9645x *lan9645x = p->lan9645x; + + if (p->chip_port == lan9645x->npi) + return 0; + + if (untagged && p->untagged_vid && p->untagged_vid != vid) { + NL_SET_ERR_MSG_MOD(extack, + "Only one egress-untagged VLAN allowed."); + return -EBUSY; + } + + if (vid > VLAN_MAX) { + NL_SET_ERR_MSG_MOD(extack, "VLAN range 4094-4095 reserved."); + return -EBUSY; + } + + return 0; +} + +static int lan9645x_vlan_wait_for_completion(struct lan9645x *lan9645x) +{ + u32 val; + + return lan9645x_rd_poll_timeout(lan9645x, ANA_VLANACCESS, val, + ANA_VLANACCESS_VLAN_TBL_CMD_GET(val) == + VLANACCESS_CMD_IDLE); +} + +static void lan9645x_vlan_set_mask(struct lan9645x *lan9645x, u16 vid) +{ + u16 mask = lan9645x->vlan_mask[vid]; + u8 f = lan9645x->vlan_flags[vid]; + bool cpu_dis; + u32 val; + + cpu_dis = !(mask & BIT(CPU_PORT)); + + /* Set flags and the VID to configure */ + val = ANA_VLANTIDX_VLAN_PGID_CPU_DIS_SET(cpu_dis) | + ANA_VLANTIDX_V_INDEX_SET(vid) | + ANA_VLANTIDX_VLAN_SEC_FWD_ENA_SET(!!(f & VLAN_S_FWD_ENA)) | + ANA_VLANTIDX_VLAN_FLOOD_DIS_SET(!!(f & VLAN_FLD_DIS)) | + ANA_VLANTIDX_VLAN_PRIV_VLAN_SET(!!(f & VLAN_PRV_VLAN)) | + ANA_VLANTIDX_VLAN_LEARN_DISABLED_SET(!!(f & VLAN_LRN_DIS)) | + ANA_VLANTIDX_VLAN_MIRROR_SET(!!(f & VLAN_MIR)) | + ANA_VLANTIDX_VLAN_SRC_CHK_SET(!!(f & VLAN_SRC_CHK)); + + lan_wr(val, lan9645x, ANA_VLANTIDX); + + /* Set the vlan port members mask, which enables ingress filtering */ + lan_wr(mask, lan9645x, ANA_VLAN_PORT_MASK); + + /* Issue a write command */ + lan_wr(VLANACCESS_CMD_WRITE, lan9645x, ANA_VLANACCESS); + + if (lan9645x_vlan_wait_for_completion(lan9645x)) + dev_err(lan9645x->dev, "Vlan set mask failed\n"); +} + +static void 
lan9645x_vlan_port_add_vlan_mask(struct lan9645x_port *p, u16 vid) +{ + struct lan9645x *lan9645x = p->lan9645x; + + lan9645x->vlan_mask[vid] |= BIT(p->chip_port); + lan9645x_vlan_set_mask(lan9645x, vid); +} + +static void lan9645x_vlan_port_del_vlan_mask(struct lan9645x_port *p, u16 vid) +{ + struct lan9645x *lan9645x = p->lan9645x; + + lan9645x->vlan_mask[vid] &= ~BIT(p->chip_port); + lan9645x_vlan_set_mask(lan9645x, vid); +} + +static void lan9645x_vlan_cpu_add_vlan_mask(struct lan9645x *lan9645x, u16 vid) +{ + lan9645x->vlan_mask[vid] |= BIT(CPU_PORT); + lan9645x_vlan_set_mask(lan9645x, vid); +} + +static void lan9645x_vlan_cpu_del_vlan_mask(struct lan9645x *lan9645x, u16 vid) +{ + lan9645x->vlan_mask[vid] &= ~BIT(CPU_PORT); + lan9645x_vlan_set_mask(lan9645x, vid); +} + +static bool lan9645x_vlan_port_any_vlan_mask(struct lan9645x *lan9645x, u16 vid) +{ + return !!(lan9645x->vlan_mask[vid] & ~BIT(CPU_PORT)); +} + +void lan9645x_vlan_cpu_set_vlan(struct lan9645x *lan9645x, u16 vid) +{ + __set_bit(vid, lan9645x->cpu_vlan_mask); +} + +void lan9645x_vlan_cpu_clear_vlan(struct lan9645x *lan9645x, u16 vid) +{ + __clear_bit(vid, lan9645x->cpu_vlan_mask); +} + +static bool lan9645x_vlan_cpu_member_cpu_vlan_mask(struct lan9645x *lan9645x, + u16 vid) +{ + return test_bit(vid, lan9645x->cpu_vlan_mask); +} + +u16 lan9645x_vlan_unaware_pvid(bool is_bridged) +{ + return is_bridged ? 
UNAWARE_PVID : HOST_PVID; +} + +static u16 lan9645x_vlan_port_get_pvid(struct lan9645x_port *port) +{ + bool is_bridged = lan9645x_port_is_bridged(port); + + if (is_bridged && port->vlan_aware) + return port->pvid; + else + return lan9645x_vlan_unaware_pvid(is_bridged); +} + +static void lan9645x_vlan_port_set_vid(struct lan9645x_port *p, u16 vid, + bool pvid, bool untagged) +{ + /* Egress vlan classification */ + if (untagged) + p->untagged_vid = vid; + + /* Default ingress vlan classification */ + if (pvid) + p->pvid = vid; +} + +static void lan9645x_vlan_port_remove_vid(struct lan9645x_port *p, u16 vid) +{ + if (p->pvid == vid) + p->pvid = 0; + + if (p->untagged_vid == vid) + p->untagged_vid = 0; +} + +void lan9645x_vlan_port_set_vlan_aware(struct lan9645x_port *p, + bool vlan_aware) +{ + p->vlan_aware = vlan_aware; +} + +void lan9645x_vlan_port_apply(struct lan9645x_port *p) +{ + struct lan9645x *lan9645x = p->lan9645x; + u32 val, tag_cfg; + u16 pvid; + + pvid = lan9645x_vlan_port_get_pvid(p); + + /* Ingress classification (ANA_PORT_VLAN_CFG) */ + /* Default vlan to classify for untagged frames (may be zero) */ + val = ANA_VLAN_CFG_VLAN_VID_SET(pvid); + if (p->vlan_aware) + val |= ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) | + ANA_VLAN_CFG_VLAN_POP_CNT_SET(1); + + lan_rmw(val, + ANA_VLAN_CFG_VLAN_VID | + ANA_VLAN_CFG_VLAN_AWARE_ENA | + ANA_VLAN_CFG_VLAN_POP_CNT, + lan9645x, ANA_VLAN_CFG(p->chip_port)); + + lan_rmw(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(p->vlan_aware) | + DEV_MAC_TAGS_CFG_PB_ENA_SET(p->vlan_aware), + DEV_MAC_TAGS_CFG_VLAN_AWR_ENA | + DEV_MAC_TAGS_CFG_PB_ENA, + lan9645x, DEV_MAC_TAGS_CFG(p->chip_port)); + + /* Drop frames with multicast source address */ + val = ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1); + if (p->vlan_aware && !pvid) + /* If port is vlan-aware and tagged, drop untagged and priority + * tagged frames. 
+ */ + val |= ANA_DROP_CFG_DROP_UNTAGGED_ENA_SET(1) | + ANA_DROP_CFG_DROP_PRIO_S_TAGGED_ENA_SET(1) | + ANA_DROP_CFG_DROP_PRIO_C_TAGGED_ENA_SET(1); + + lan_wr(val, lan9645x, ANA_DROP_CFG(p->chip_port)); + + /* TAG_TPID_CFG encoding: + * + * 0: Use 0x8100. + * 1: Use 0x88A8. + * 2: Use custom value from PORT_VLAN_CFG.PORT_TPID. + * 3: Use PORT_VLAN_CFG.PORT_TPID, unless ingress tag was a C-tag + * (EtherType = 0x8100) + * + * Use 3 and PORT_VLAN_CFG.PORT_TPID=0x88a8 to ensure stags are not + * rewritten to ctags on egress. + */ + val = REW_TAG_CFG_TAG_TPID_CFG_SET(3); + if (p->vlan_aware) { + if (p->untagged_vid) + tag_cfg = LAN9645X_TAG_NO_PVID_NO_UNAWARE; + else + tag_cfg = LAN9645X_TAG_ALL; + + val |= REW_TAG_CFG_TAG_CFG_SET(tag_cfg); + } + + lan_rmw(val, + REW_TAG_CFG_TAG_TPID_CFG | + REW_TAG_CFG_TAG_CFG, + lan9645x, REW_TAG_CFG(p->chip_port)); + + /* Set default VLAN and tag type to 8021Q */ + + lan_rmw(REW_PORT_VLAN_CFG_PORT_TPID_SET(ETH_P_8021AD) | + REW_PORT_VLAN_CFG_PORT_VID_SET(p->untagged_vid), + REW_PORT_VLAN_CFG_PORT_TPID | + REW_PORT_VLAN_CFG_PORT_VID, + lan9645x, REW_PORT_VLAN_CFG(p->chip_port)); +} + +void lan9645x_vlan_port_add_vlan(struct lan9645x_port *p, u16 vid, bool pvid, + bool untagged) +{ + struct lan9645x *lan9645x = p->lan9645x; + + if (lan9645x_vlan_cpu_member_cpu_vlan_mask(lan9645x, vid)) + lan9645x_vlan_cpu_add_vlan_mask(lan9645x, vid); + + lan9645x_vlan_port_set_vid(p, vid, pvid, untagged); + lan9645x_vlan_port_add_vlan_mask(p, vid); + lan9645x_vlan_port_apply(p); +} + +void lan9645x_vlan_port_del_vlan(struct lan9645x_port *port, u16 vid) +{ + struct lan9645x *lan9645x = port->lan9645x; + + lan9645x_vlan_port_remove_vid(port, vid); + lan9645x_vlan_port_del_vlan_mask(port, vid); + lan9645x_vlan_port_apply(port); + + if (!lan9645x_vlan_port_any_vlan_mask(lan9645x, vid)) + lan9645x_vlan_cpu_del_vlan_mask(lan9645x, vid); +} + +static void lan9645x_vlan_port_rew_host(struct lan9645x_port *port) +{ + struct lan9645x *lan9645x = 
port->lan9645x; + + lan_rmw(REW_TAG_CFG_TAG_CFG_SET(LAN9645X_TAG_NO_PVID_NO_UNAWARE) | + REW_TAG_CFG_TAG_TPID_CFG_SET(3), + REW_TAG_CFG_TAG_CFG | + REW_TAG_CFG_TAG_TPID_CFG, + lan9645x, REW_TAG_CFG(port->chip_port)); + + /* Standalone ports must have the reserved VID set in the rewriter, + * because the TAG_CFG used above acts on this PORT_VID value, otherwise + * untagged frames are tagged with HOST_PVID. + * + * Usually frames leaving the switch from standalone ports go to the + * CPU, where tagging is disabled. But if we mirror a standalone port, + * the problem becomes apparent. + */ + lan_rmw(REW_PORT_VLAN_CFG_PORT_TPID_SET(ETH_P_8021AD) | + REW_PORT_VLAN_CFG_PORT_VID_SET(HOST_PVID), + REW_PORT_VLAN_CFG_PORT_TPID | + REW_PORT_VLAN_CFG_PORT_VID, + lan9645x, REW_PORT_VLAN_CFG(port->chip_port)); +} + +void lan9645x_vlan_set_hostmode(struct lan9645x_port *p) +{ + lan9645x_vlan_port_set_vlan_aware(p, false); + lan9645x_vlan_port_set_vid(p, HOST_PVID, false, false); + lan9645x_vlan_port_apply(p); + lan9645x_vlan_port_rew_host(p); +} + +void lan9645x_vlan_init(struct lan9645x *lan9645x) +{ + u32 all_phys_ports, all_ports; + u16 port, vid; + + all_phys_ports = GENMASK(lan9645x->num_phys_ports - 1, 0); + all_ports = all_phys_ports | BIT(CPU_PORT); + + /* Clear VLAN table, by default all ports are members of all VLANS */ + lan_wr(ANA_VLANACCESS_VLAN_TBL_CMD_SET(VLANACCESS_CMD_INIT), + lan9645x, ANA_VLANACCESS); + + if (lan9645x_vlan_wait_for_completion(lan9645x)) + dev_err(lan9645x->dev, "Vlan clear table failed\n"); + + for (vid = 1; vid < VLAN_N_VID; vid++) { + lan9645x->vlan_mask[vid] = 0; + lan9645x_vlan_set_mask(lan9645x, vid); + } + + /* Set all the ports + cpu to be part of HOST_PVID and UNAWARE_PVID */ + lan9645x->vlan_mask[HOST_PVID] = all_ports; + lan9645x_vlan_set_mask(lan9645x, HOST_PVID); + + lan9645x->vlan_mask[UNAWARE_PVID] = all_ports; + lan9645x_vlan_set_mask(lan9645x, UNAWARE_PVID); + + lan9645x_vlan_cpu_set_vlan(lan9645x, UNAWARE_PVID); + + /* 
Configure the CPU port to be vlan aware */ + lan_wr(ANA_VLAN_CFG_VLAN_VID_SET(UNAWARE_PVID) | + ANA_VLAN_CFG_VLAN_AWARE_ENA_SET(1) | + ANA_VLAN_CFG_VLAN_POP_CNT_SET(1), + lan9645x, ANA_VLAN_CFG(CPU_PORT)); + + /* Set vlan ingress filter mask to all ports */ + lan_wr(all_ports, lan9645x, ANA_VLANMASK); + + for (port = 0; port < lan9645x->num_phys_ports; port++) { + lan_wr(0, lan9645x, REW_PORT_VLAN_CFG(port)); + lan_wr(0, lan9645x, REW_TAG_CFG(port)); + } +} -- 2.52.0 Add MAC table support, and dsa fdb callback integration. The mactable is keyed on (vid,mac) and each bucket has 4 slots. A mac table entry typically points to a PGID index, the first 9 of which represent a front port. Mac table entries for L2 multicast will use a PGID containing a group port mask. For IP multicast entries in the mac table a trick is used, where the group port mask is packed into the MAC data, exploiting the fact that the top bits are fixed, and that the number of switch ports is small enough to fit in the redundant bits. Therefore, we can avoid using sparse PGID resources for IP multicast entries in the mac table. 
Reviewed-by: Steen Hegelund Signed-off-by: Jens Emil Schulz Østergaard --- drivers/net/dsa/microchip/lan9645x/Makefile | 1 + drivers/net/dsa/microchip/lan9645x/lan9645x_mac.c | 413 +++++++++++++++++++++ drivers/net/dsa/microchip/lan9645x/lan9645x_main.c | 110 +++++- drivers/net/dsa/microchip/lan9645x/lan9645x_main.h | 48 +++ 4 files changed, 571 insertions(+), 1 deletion(-) diff --git a/drivers/net/dsa/microchip/lan9645x/Makefile b/drivers/net/dsa/microchip/lan9645x/Makefile index bb5eec14d225..a90a46f81c72 100644 --- a/drivers/net/dsa/microchip/lan9645x/Makefile +++ b/drivers/net/dsa/microchip/lan9645x/Makefile @@ -6,3 +6,4 @@ mchp-lan9645x-objs := lan9645x_main.o \ lan9645x_port.o \ lan9645x_phylink.o \ lan9645x_vlan.o \ + lan9645x_mac.o \ diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_mac.c b/drivers/net/dsa/microchip/lan9645x/lan9645x_mac.c new file mode 100644 index 000000000000..3226cff16e8c --- /dev/null +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_mac.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2026 Microchip Technology Inc. 
+ */ + +#include "lan9645x_main.h" + +#define LAN9645X_MAC_COLUMNS 4 + +#define CMD_IDLE 0 +#define CMD_LEARN 1 +#define CMD_FORGET 2 +#define CMD_AGE 3 +#define CMD_GET_NEXT 4 +#define CMD_INIT 5 +#define CMD_READ 6 +#define CMD_WRITE 7 +#define CMD_SYNC_GET_NEXT 8 + +#define LAN9645X_INVALID_ROW (-1) + +static bool lan9645x_mact_entry_equal(struct lan9645x_mact_entry *entry, + const unsigned char *mac, u16 vid) +{ + /* The hardware table is keyed on (vid,mac) */ + return entry->common.key.vid == vid && + ether_addr_equal(mac, entry->common.key.mac); +} + +static struct lan9645x_mact_entry * +lan9645x_mact_entry_find(struct lan9645x *lan9645x, const unsigned char *mac, + u16 vid) +{ + struct lan9645x_mact_entry *entry; + + lockdep_assert_held(&lan9645x->mac_entry_lock); + + list_for_each_entry(entry, &lan9645x->mac_entries, list) + if (lan9645x_mact_entry_equal(entry, mac, vid)) + return entry; + + return NULL; +} + +static struct lan9645x_mact_entry * +lan9645x_mact_entry_alloc(struct lan9645x *lan9645x, const unsigned char *mac, + u16 vid, u8 pgid, enum macaccess_entry_type type) +{ + struct lan9645x_mact_entry *entry; + + entry = kzalloc_obj(*entry); + if (!entry) + return NULL; + + INIT_LIST_HEAD(&entry->list); + ether_addr_copy(entry->common.key.mac, mac); + entry->common.key.vid = vid; + entry->common.pgid = pgid; + entry->common.row = LAN9645X_INVALID_ROW; + entry->common.type = type; + + dev_dbg(lan9645x->dev, + "mact_entry_alloc mac=%pM vid=%u pgid=%u type=%d", + entry->common.key.mac, entry->common.key.vid, + entry->common.pgid, entry->common.type); + return entry; +} + +static void lan9645x_mact_entry_dealloc(struct lan9645x *lan9645x, + struct lan9645x_mact_entry *entry) +{ + if (!entry) + return; + + dev_dbg(lan9645x->dev, + "mact_entry_dealloc mac=%pM vid=%u pgid=%u type=%d", + entry->common.key.mac, entry->common.key.vid, + entry->common.pgid, entry->common.type); + + list_del(&entry->list); + kfree(entry); +} + +static int 
lan9645x_mac_wait_for_completion(struct lan9645x *lan9645x, + u32 *maca) +{ + u32 val = 0; + int err; + + lockdep_assert_held(&lan9645x->mact_lock); + + err = lan9645x_rd_poll_timeout(lan9645x, ANA_MACACCESS, val, + ANA_MACACCESS_MAC_TABLE_CMD_GET(val) == + CMD_IDLE); + if (err) + return err; + + if (maca) + *maca = val; + + return 0; +} + +static void lan9645x_mact_parse(u32 machi, u32 maclo, u32 maca, + struct lan9645x_mact_common *rentry) +{ + u64 addr = ANA_MACHDATA_MACHDATA_GET(machi); + + addr = addr << 32 | maclo; + u64_to_ether_addr(addr, rentry->key.mac); + rentry->key.vid = ANA_MACHDATA_VID_GET(machi); + rentry->pgid = ANA_MACACCESS_DEST_IDX_GET(maca); + rentry->type = ANA_MACACCESS_ENTRYTYPE_GET(maca); +} + +static void lan9645x_mac_select(struct lan9645x *lan9645x, + const unsigned char *addr, u16 vid) +{ + u64 maddr = ether_addr_to_u64(addr); + + lockdep_assert_held(&lan9645x->mact_lock); + + lan_wr(ANA_MACHDATA_VID_SET(vid) | + ANA_MACHDATA_MACHDATA_SET(maddr >> 32), + lan9645x, + ANA_MACHDATA); + + lan_wr(maddr & GENMASK(31, 0), + lan9645x, + ANA_MACLDATA); +} + +static int __lan9645x_mact_forget(struct lan9645x *lan9645x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + lockdep_assert_held(&lan9645x->mact_lock); + + lan9645x_mac_select(lan9645x, mac, vid); + + lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) | + ANA_MACACCESS_MAC_TABLE_CMD_SET(CMD_FORGET), + lan9645x, + ANA_MACACCESS); + + return lan9645x_mac_wait_for_completion(lan9645x, NULL); +} + +int lan9645x_mact_forget(struct lan9645x *lan9645x, + const unsigned char mac[ETH_ALEN], unsigned int vid, + enum macaccess_entry_type type) +{ + int ret; + + mutex_lock(&lan9645x->mact_lock); + ret = __lan9645x_mact_forget(lan9645x, mac, vid, type); + mutex_unlock(&lan9645x->mact_lock); + + return ret; +} + +static bool lan9645x_mac_ports_use_cpu(const unsigned char *mac, + enum macaccess_entry_type type) +{ + u32 mc_ports; + + switch (type) { + case 
ENTRYTYPE_MACV4: + mc_ports = (mac[1] << 8) | mac[2]; + break; + case ENTRYTYPE_MACV6: + mc_ports = (mac[0] << 8) | mac[1]; + break; + default: + return false; + } + + return !!(mc_ports & BIT(CPU_PORT)); +} + +static int __lan9645x_mact_learn_cpu_copy(struct lan9645x *lan9645x, int port, + const unsigned char *addr, u16 vid, + enum macaccess_entry_type type, + bool cpu_copy) +{ + lockdep_assert_held(&lan9645x->mact_lock); + + lan9645x_mac_select(lan9645x, addr, vid); + + lan_wr(ANA_MACACCESS_VALID_SET(1) | + ANA_MACACCESS_DEST_IDX_SET(port) | + ANA_MACACCESS_MAC_CPU_COPY_SET(cpu_copy) | + ANA_MACACCESS_ENTRYTYPE_SET(type) | + ANA_MACACCESS_MAC_TABLE_CMD_SET(CMD_LEARN), + lan9645x, ANA_MACACCESS); + + return lan9645x_mac_wait_for_completion(lan9645x, NULL); +} + +static int __lan9645x_mact_learn(struct lan9645x *lan9645x, int port, + const unsigned char *addr, u16 vid, + enum macaccess_entry_type type) +{ + bool cpu_copy = lan9645x_mac_ports_use_cpu(addr, type); + + return __lan9645x_mact_learn_cpu_copy(lan9645x, port, addr, vid, type, + cpu_copy); +} + +int lan9645x_mact_learn(struct lan9645x *lan9645x, int port, + const unsigned char *addr, u16 vid, + enum macaccess_entry_type type) +{ + int ret; + + mutex_lock(&lan9645x->mact_lock); + ret = __lan9645x_mact_learn(lan9645x, port, addr, vid, type); + mutex_unlock(&lan9645x->mact_lock); + + return ret; +} + +int lan9645x_mact_flush(struct lan9645x *lan9645x, int port) +{ + int err = 0; + + mutex_lock(&lan9645x->mact_lock); + /* MAC table entries with dst index matching port are aged on scan. */ + lan_wr(ANA_ANAGEFIL_PID_EN_SET(1) | + ANA_ANAGEFIL_PID_VAL_SET(port), + lan9645x, ANA_ANAGEFIL); + + /* Flushing requires two scans. First sets AGE_FLAG=1, second removes + * entries with AGE_FLAG=1. 
+ */ + lan_wr(ANA_MACACCESS_MAC_TABLE_CMD_SET(CMD_AGE), + lan9645x, + ANA_MACACCESS); + + err = lan9645x_mac_wait_for_completion(lan9645x, NULL); + if (err) + goto mact_unlock; + + lan_wr(ANA_MACACCESS_MAC_TABLE_CMD_SET(CMD_AGE), + lan9645x, + ANA_MACACCESS); + + err = lan9645x_mac_wait_for_completion(lan9645x, NULL); + +mact_unlock: + lan_wr(0, lan9645x, ANA_ANAGEFIL); + mutex_unlock(&lan9645x->mact_lock); + return err; +} + +int lan9645x_mact_entry_add(struct lan9645x *lan9645x, int pgid, + const unsigned char *mac, u16 vid) +{ + struct lan9645x_mact_entry *entry; + int ret = 0; + + /* Users can not move (vid,mac) to a different port, without removing + * the original entry first. But we overwrite entry in HW, and update + * software pgid for good measure. + */ + mutex_lock(&lan9645x->mac_entry_lock); + entry = lan9645x_mact_entry_find(lan9645x, mac, vid); + if (entry) { + entry->common.pgid = pgid; + mutex_unlock(&lan9645x->mac_entry_lock); + goto mac_learn; + } + + entry = lan9645x_mact_entry_alloc(lan9645x, mac, vid, pgid, + ENTRYTYPE_LOCKED); + if (!entry) { + mutex_unlock(&lan9645x->mac_entry_lock); + return -ENOMEM; + } + + list_add_tail(&entry->list, &lan9645x->mac_entries); + mutex_unlock(&lan9645x->mac_entry_lock); + +mac_learn: + WARN_ON(entry->common.pgid != pgid); + ret = lan9645x_mact_learn(lan9645x, pgid, mac, vid, ENTRYTYPE_LOCKED); + if (ret) { + mutex_lock(&lan9645x->mac_entry_lock); + lan9645x_mact_entry_dealloc(lan9645x, entry); + mutex_unlock(&lan9645x->mac_entry_lock); + } + return ret; +} + +int lan9645x_mact_entry_del(struct lan9645x *lan9645x, int pgid, + const unsigned char *mac, u16 vid) +{ + struct lan9645x_mact_entry *entry; + + mutex_lock(&lan9645x->mac_entry_lock); + entry = lan9645x_mact_entry_find(lan9645x, mac, vid); + if (entry) { + WARN_ON(entry->common.pgid != pgid); + lan9645x_mact_entry_dealloc(lan9645x, entry); + mutex_unlock(&lan9645x->mac_entry_lock); + goto forget; + } + mutex_unlock(&lan9645x->mac_entry_lock); + return 
-ENOENT; + +forget: + return lan9645x_mact_forget(lan9645x, mac, vid, ENTRYTYPE_LOCKED); +} + +void lan9645x_mac_init(struct lan9645x *lan9645x) +{ + mutex_init(&lan9645x->mac_entry_lock); + mutex_init(&lan9645x->mact_lock); + mutex_init(&lan9645x->fwd_domain_lock); + INIT_LIST_HEAD(&lan9645x->mac_entries); + + /* Clear the MAC table */ + mutex_lock(&lan9645x->mact_lock); + lan_wr(CMD_INIT, lan9645x, ANA_MACACCESS); + lan9645x_mac_wait_for_completion(lan9645x, NULL); + mutex_unlock(&lan9645x->mact_lock); +} + +void lan9645x_mac_deinit(struct lan9645x *lan9645x) +{ + mutex_destroy(&lan9645x->mac_entry_lock); + mutex_destroy(&lan9645x->mact_lock); + mutex_destroy(&lan9645x->fwd_domain_lock); +} + +int lan9645x_mact_dsa_dump(struct lan9645x *lan9645x, int port, + dsa_fdb_dump_cb_t *cb, void *data) +{ + struct lan9645x_mact_entry entry = { 0 }; + u32 mach, macl, maca; + int err = 0; + u32 autoage; + + mach = 0; + macl = 0; + entry.common.type = ENTRYTYPE_NORMAL; + + mutex_lock(&lan9645x->mact_lock); + + /* The aging filter works both for aging scans and GET_NEXT table scans. + * With it, the HW table iteration only stops at entries matching our + * filter. Since DSA calls us for each port on a table dump, this helps + * avoid unnecessary work. + * + * Disable automatic aging temporarily. First save current state. + */ + autoage = lan_rd(lan9645x, ANA_AUTOAGE); + + /* Disable aging */ + lan_rmw(ANA_AUTOAGE_AGE_PERIOD_SET(0), + ANA_AUTOAGE_AGE_PERIOD, + lan9645x, ANA_AUTOAGE); + + /* Setup filter on our port */ + lan_wr(ANA_ANAGEFIL_PID_EN_SET(1) | + ANA_ANAGEFIL_PID_VAL_SET(port), + lan9645x, ANA_ANAGEFIL); + + lan_wr(0, lan9645x, ANA_MACHDATA); + lan_wr(0, lan9645x, ANA_MACLDATA); + + while (1) { + /* NOTE: we rely on mach, macl and type being set correctly in + * the registers from previous round, vis a vis the GET_NEXT + * semantics, so locking entire loop is important. 
+ */ + lan_wr(ANA_MACACCESS_MAC_TABLE_CMD_SET(CMD_GET_NEXT) | + ANA_MACACCESS_ENTRYTYPE_SET(entry.common.type), + lan9645x, ANA_MACACCESS); + + if (lan9645x_mac_wait_for_completion(lan9645x, &maca)) + break; + + if (ANA_MACACCESS_VALID_GET(maca) == 0) + break; + + mach = lan_rd(lan9645x, ANA_MACHDATA); + macl = lan_rd(lan9645x, ANA_MACLDATA); + + lan9645x_mact_parse(mach, macl, maca, &entry.common); + + if (ANA_MACACCESS_DEST_IDX_GET(maca) == port && + entry.common.type == ENTRYTYPE_NORMAL) { + if (entry.common.key.vid > VLAN_MAX) + entry.common.key.vid = 0; + + err = cb(entry.common.key.mac, entry.common.key.vid, + false, data); + if (err) + break; + } + } + + /* Remove aging filters and restore aging */ + lan_wr(0, lan9645x, ANA_ANAGEFIL); + lan_rmw(ANA_AUTOAGE_AGE_PERIOD_SET(ANA_AUTOAGE_AGE_PERIOD_GET(autoage)), + ANA_AUTOAGE_AGE_PERIOD, + lan9645x, ANA_AUTOAGE); + + mutex_unlock(&lan9645x->mact_lock); + + return err; +} diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c index 1c8f20452487..ba76279b4414 100644 --- a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c @@ -78,6 +78,7 @@ static void lan9645x_teardown(struct dsa_switch *ds) debugfs_remove_recursive(lan9645x->debugfs_root); lan9645x_npi_port_deinit(lan9645x, lan9645x->npi); + lan9645x_mac_deinit(lan9645x); } static int lan9645x_change_mtu(struct dsa_switch *ds, int port, int new_mtu) @@ -171,8 +172,8 @@ static int lan9645x_setup(struct dsa_switch *ds) return err; } - mutex_init(&lan9645x->fwd_domain_lock); lan9645x_vlan_init(lan9645x); + lan9645x_mac_init(lan9645x); /* Link Aggregation Mode: NETDEV_LAG_HASH_L2 */ lan_wr(ANA_AGGR_CFG_AC_SMAC_ENA | @@ -534,6 +535,107 @@ static int lan9645x_port_vlan_del(struct dsa_switch *ds, int port, return 0; } +static void lan9645x_port_fast_age(struct dsa_switch *ds, int port) +{ + lan9645x_mact_flush(ds->priv, port); +} + +static int 
lan9645x_fdb_dump(struct dsa_switch *ds, int port, + dsa_fdb_dump_cb_t *cb, void *data) +{ + return lan9645x_mact_dsa_dump(ds->priv, port, cb, data); +} + +static int __lan9645x_fdb_add(struct lan9645x *lan9645x, int pgid, + const unsigned char *mac, u16 vid, + struct net_device *bridge) +{ + if (!vid) + vid = lan9645x_vlan_unaware_pvid(!!bridge); + + return lan9645x_mact_entry_add(lan9645x, pgid, mac, vid); +} + +static struct net_device *lan9645x_db2bridge(struct dsa_db db) +{ + switch (db.type) { + case DSA_DB_PORT: + case DSA_DB_LAG: + return NULL; + case DSA_DB_BRIDGE: + return db.bridge.dev; + default: + return ERR_PTR(-EOPNOTSUPP); + } +} + +static int lan9645x_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *br = lan9645x_db2bridge(db); + struct dsa_port *dp = dsa_to_port(ds, port); + struct lan9645x *lan9645x = ds->priv; + + if (IS_ERR(br)) + return PTR_ERR(br); + + if (dsa_port_is_cpu(dp) && !br && + dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) + return 0; + + if (dsa_port_is_cpu(dp)) { + /* Trap DSA cpu port */ + return lan9645x_mact_learn(lan9645x, PGID_CPU, addr, + lan9645x_vlan_unaware_pvid(!!br), + ENTRYTYPE_LOCKED); + } + + return __lan9645x_fdb_add(lan9645x, port, addr, vid, br); +} + +static int __lan9645x_fdb_del(struct lan9645x *lan9645x, int port, + const unsigned char *addr, u16 vid, + struct net_device *bridge) +{ + int err; + + if (!vid) + vid = lan9645x_vlan_unaware_pvid(!!bridge); + + err = lan9645x_mact_entry_del(lan9645x, port, addr, vid); + if (err == -ENOENT) { + dev_dbg(lan9645x->dev, "fdb not found mac %pM vid %u pgid %u", + addr, vid, port); + return 0; + } + return err; +} + +static int lan9645x_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db) +{ + struct net_device *br = lan9645x_db2bridge(db); + struct dsa_port *dp = dsa_to_port(ds, port); + struct lan9645x *lan9645x = ds->priv; + + if (IS_ERR(br)) 
+ return PTR_ERR(br); + + if (dsa_port_is_cpu(dp) && !br && + dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) + return 0; + + if (dsa_port_is_cpu(dp)) { + return lan9645x_mact_forget(lan9645x, addr, + lan9645x_vlan_unaware_pvid(!!br), + ENTRYTYPE_LOCKED); + } + + return __lan9645x_fdb_del(lan9645x, port, addr, vid, br); +} + static const struct dsa_switch_ops lan9645x_switch_ops = { .get_tag_protocol = lan9645x_get_tag_protocol, .connect_tag_protocol = lan9645x_connect_tag_protocol, @@ -560,6 +662,12 @@ static const struct dsa_switch_ops lan9645x_switch_ops = { .port_vlan_filtering = lan9645x_port_vlan_filtering, .port_vlan_add = lan9645x_port_vlan_add, .port_vlan_del = lan9645x_port_vlan_del, + + /* MAC table integration */ + .port_fast_age = lan9645x_port_fast_age, + .port_fdb_dump = lan9645x_fdb_dump, + .port_fdb_add = lan9645x_fdb_add, + .port_fdb_del = lan9645x_fdb_del, }; static int lan9645x_request_target_regmaps(struct lan9645x *lan9645x) diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h index f1471344a9e9..4c7111375918 100644 --- a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h @@ -158,6 +158,34 @@ enum lan9645x_vlan_port_tag { LAN9645X_TAG_ALL = 3, }; +/* MAC table entry types. + * ENTRYTYPE_NORMAL is subject to aging. + * ENTRYTYPE_LOCKED is not subject to aging. + * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast. + * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast. + */ +enum macaccess_entry_type { + ENTRYTYPE_NORMAL = 0, + ENTRYTYPE_LOCKED, + ENTRYTYPE_MACV4, + ENTRYTYPE_MACV6, +}; + +struct lan9645x_mact_common { + struct lan9645x_mact_key { + u16 vid; + u8 mac[ETH_ALEN] __aligned(2); + } key; + u32 row: 11, /* 2048 rows, 4 buckets each */ + pgid: 6, /* 0-63 general purpose pgids. 
*/ + type: 2; +}; + +struct lan9645x_mact_entry { + struct lan9645x_mact_common common; + struct list_head list; +}; + struct lan9645x { struct device *dev; struct dsa_switch *ds; @@ -180,6 +208,9 @@ struct lan9645x { u16 bridge_mask; /* Mask for bridged ports */ u16 bridge_fwd_mask; /* Mask for forwarding bridged ports */ struct mutex fwd_domain_lock; /* lock forwarding configuration */ + struct list_head mac_entries; + struct mutex mact_lock; /* lock access to mact_table */ + struct mutex mac_entry_lock; /* lock for mac_entries list */ /* VLAN */ u16 vlan_mask[VLAN_N_VID]; /* Port mask per vlan */ @@ -424,4 +455,21 @@ void lan9645x_vlan_set_hostmode(struct lan9645x_port *p); int lan9645x_port_vlan_prepare(struct lan9645x_port *p, u16 vid, bool pvid, bool untagged, struct netlink_ext_ack *extack); +/* MAC table: lan9645x_mac.c */ +int lan9645x_mact_flush(struct lan9645x *lan9645x, int port); +int lan9645x_mact_learn(struct lan9645x *lan9645x, int port, + const unsigned char *addr, u16 vid, + enum macaccess_entry_type type); +int lan9645x_mact_forget(struct lan9645x *lan9645x, + const unsigned char mac[ETH_ALEN], unsigned int vid, + enum macaccess_entry_type type); +void lan9645x_mac_init(struct lan9645x *lan9645x); +void lan9645x_mac_deinit(struct lan9645x *lan9645x); +int lan9645x_mact_dsa_dump(struct lan9645x *lan9645x, int port, + dsa_fdb_dump_cb_t *cb, void *data); +int lan9645x_mact_entry_del(struct lan9645x *lan9645x, int pgid, + const unsigned char *mac, u16 vid); +int lan9645x_mact_entry_add(struct lan9645x *lan9645x, int pgid, + const unsigned char *mac, u16 vid); + #endif /* __LAN9645X_MAIN_H__ */ -- 2.52.0 Add statistics support for the port counters. Chip registers are 32 bit, so this unit is responsible maintaining a 64bit software cache, and updating it frequently to handle overflows in hardware. 
Reviewed-by: Steen Hegelund Signed-off-by: Jens Emil Schulz Østergaard --- drivers/net/dsa/microchip/lan9645x/Makefile | 1 + drivers/net/dsa/microchip/lan9645x/lan9645x_main.c | 82 ++ drivers/net/dsa/microchip/lan9645x/lan9645x_main.h | 3 + .../net/dsa/microchip/lan9645x/lan9645x_stats.c | 825 +++++++++++++++++++++ .../net/dsa/microchip/lan9645x/lan9645x_stats.h | 288 +++++++ 5 files changed, 1199 insertions(+) diff --git a/drivers/net/dsa/microchip/lan9645x/Makefile b/drivers/net/dsa/microchip/lan9645x/Makefile index a90a46f81c72..486b005cf740 100644 --- a/drivers/net/dsa/microchip/lan9645x/Makefile +++ b/drivers/net/dsa/microchip/lan9645x/Makefile @@ -7,3 +7,4 @@ mchp-lan9645x-objs := lan9645x_main.o \ lan9645x_phylink.o \ lan9645x_vlan.o \ lan9645x_mac.o \ + lan9645x_stats.o \ diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c index ba76279b4414..8a1de2588ab8 100644 --- a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.c @@ -7,6 +7,7 @@ #include #include "lan9645x_main.h" +#include "lan9645x_stats.h" static const char *lan9645x_resource_names[NUM_TARGETS + 1] = { [TARGET_GCB] = "gcb", @@ -79,6 +80,7 @@ static void lan9645x_teardown(struct dsa_switch *ds) debugfs_remove_recursive(lan9645x->debugfs_root); lan9645x_npi_port_deinit(lan9645x, lan9645x->npi); lan9645x_mac_deinit(lan9645x); + lan9645x_stats_deinit(lan9645x); } static int lan9645x_change_mtu(struct dsa_switch *ds, int port, int new_mtu) @@ -274,6 +276,12 @@ static int lan9645x_setup(struct dsa_switch *ds) lan9645x_port_set_tail_drop_wm(lan9645x); + err = lan9645x_stats_init(lan9645x); + if (err) { + dev_err(dev, "Lan9645x setup: failed to init stats."); + return err; + } + ds->mtu_enforcement_ingress = true; ds->assisted_learning_on_cpu_port = true; ds->fdb_isolation = true; @@ -636,6 +644,68 @@ static int lan9645x_fdb_del(struct dsa_switch *ds, int port, return 
__lan9645x_fdb_del(lan9645x, port, addr, vid, br); } +static void lan9645x_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) +{ + lan9645x_stats_get_strings(ds->priv, port, stringset, data); +} + +static void lan9645x_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + lan9645x_stats_get_ethtool_stats(ds->priv, port, data); +} + +static int lan9645x_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + return lan9645x_stats_get_sset_count(ds->priv, port, sset); +} + +static void lan9645x_get_eth_mac_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_mac_stats *mac_stats) +{ + lan9645x_stats_get_eth_mac_stats(ds->priv, port, mac_stats); +} + +static void +lan9645x_get_rmon_stats(struct dsa_switch *ds, int port, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + lan9645x_stats_get_rmon_stats(ds->priv, port, rmon_stats, ranges); +} + +static void lan9645x_get_stats64(struct dsa_switch *ds, int port, + struct rtnl_link_stats64 *s) +{ + lan9645x_stats_get_stats64(ds->priv, port, s); +} + +static void lan9645x_get_pause_stats(struct dsa_switch *ds, int port, + struct ethtool_pause_stats *pause_stats) +{ + lan9645x_stats_get_pause_stats(ds->priv, port, pause_stats); +} + +static void lan9645x_get_mm_stats(struct dsa_switch *ds, int port, + struct ethtool_mm_stats *stats) +{ + lan9645x_stats_get_mm_stats(ds->priv, port, stats); +} + +static void lan9645x_get_eth_phy_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_phy_stats *phy_stats) +{ + lan9645x_stats_get_eth_phy_stats(ds->priv, port, phy_stats); +} + +static void +lan9645x_get_eth_ctrl_stats(struct dsa_switch *ds, int port, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + lan9645x_stats_get_eth_ctrl_stats(ds->priv, port, ctrl_stats); +} + static const struct dsa_switch_ops lan9645x_switch_ops = { .get_tag_protocol = lan9645x_get_tag_protocol, .connect_tag_protocol = lan9645x_connect_tag_protocol, 
@@ -668,6 +738,18 @@ static const struct dsa_switch_ops lan9645x_switch_ops = { .port_fdb_dump = lan9645x_fdb_dump, .port_fdb_add = lan9645x_fdb_add, .port_fdb_del = lan9645x_fdb_del, + + /* Port statistics counters. */ + .get_strings = lan9645x_get_strings, + .get_ethtool_stats = lan9645x_get_ethtool_stats, + .get_sset_count = lan9645x_get_sset_count, + .get_eth_mac_stats = lan9645x_get_eth_mac_stats, + .get_rmon_stats = lan9645x_get_rmon_stats, + .get_stats64 = lan9645x_get_stats64, + .get_pause_stats = lan9645x_get_pause_stats, + .get_mm_stats = lan9645x_get_mm_stats, + .get_eth_phy_stats = lan9645x_get_eth_phy_stats, + .get_eth_ctrl_stats = lan9645x_get_eth_ctrl_stats, }; static int lan9645x_request_target_regmaps(struct lan9645x *lan9645x) diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h index 4c7111375918..fe801d0ed39a 100644 --- a/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_main.h @@ -217,6 +217,9 @@ struct lan9645x { u8 vlan_flags[VLAN_N_VID]; DECLARE_BITMAP(cpu_vlan_mask, VLAN_N_VID); /* CPU VLAN membership */ + /* Statistics */ + struct lan9645x_stats *stats; + int num_port_dis; bool dd_dis; bool tsn_dis; diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_stats.c b/drivers/net/dsa/microchip/lan9645x/lan9645x_stats.c new file mode 100644 index 000000000000..43078e441e55 --- /dev/null +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_stats.c @@ -0,0 +1,825 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2026 Microchip Technology Inc. 
+ */ + +#include +#include + +#include "lan9645x_main.h" +#include "lan9645x_stats.h" + +#define LAN9645X_STATS_CHECK_DELAY (3 * HZ) + +static const struct lan9645x_stat_region lan9645x_port_stat_regions[] = { + /* RX region */ + { .base_offset = 0x0, .cnt = 67, .cnts_base_idx = 0 }, + /* TX region */ + { .base_offset = 0x80, .cnt = 48, .cnts_base_idx = 67 }, + /* DR region */ + { .base_offset = 0x100, .cnt = 18, .cnts_base_idx = 115 }, +}; + +static const struct lan9645x_stat_layout lan9645x_port_stats_layout[] = { + { .name = "rx_oct", .offset = 0x0 }, + { .name = "rx_uc", .offset = 0x1 }, + { .name = "rx_mc", .offset = 0x2 }, + { .name = "rx_bc", .offset = 0x3 }, + { .name = "rx_short", .offset = 0x4 }, + { .name = "rx_frag", .offset = 0x5 }, + { .name = "rx_jabber", .offset = 0x6 }, + { .name = "rx_crc", .offset = 0x7 }, + { .name = "rx_symbol_err", .offset = 0x8 }, + { .name = "rx_sz_64", .offset = 0x9 }, + { .name = "rx_sz_65_127", .offset = 0xa }, + { .name = "rx_sz_128_255", .offset = 0xb }, + { .name = "rx_sz_256_511", .offset = 0xc }, + { .name = "rx_sz_512_1023", .offset = 0xd }, + { .name = "rx_sz_1024_1526", .offset = 0xe }, + { .name = "rx_sz_jumbo", .offset = 0xf }, + { .name = "rx_pause", .offset = 0x10 }, + { .name = "rx_control", .offset = 0x11 }, + { .name = "rx_long", .offset = 0x12 }, + { .name = "rx_cat_drop", .offset = 0x13 }, + { .name = "rx_red_prio_0", .offset = 0x14 }, + { .name = "rx_red_prio_1", .offset = 0x15 }, + { .name = "rx_red_prio_2", .offset = 0x16 }, + { .name = "rx_red_prio_3", .offset = 0x17 }, + { .name = "rx_red_prio_4", .offset = 0x18 }, + { .name = "rx_red_prio_5", .offset = 0x19 }, + { .name = "rx_red_prio_6", .offset = 0x1a }, + { .name = "rx_red_prio_7", .offset = 0x1b }, + { .name = "rx_yellow_prio_0", .offset = 0x1c }, + { .name = "rx_yellow_prio_1", .offset = 0x1d }, + { .name = "rx_yellow_prio_2", .offset = 0x1e }, + { .name = "rx_yellow_prio_3", .offset = 0x1f }, + { .name = "rx_yellow_prio_4", .offset = 0x20 }, + 
{ .name = "rx_yellow_prio_5", .offset = 0x21 }, + { .name = "rx_yellow_prio_6", .offset = 0x22 }, + { .name = "rx_yellow_prio_7", .offset = 0x23 }, + { .name = "rx_green_prio_0", .offset = 0x24 }, + { .name = "rx_green_prio_1", .offset = 0x25 }, + { .name = "rx_green_prio_2", .offset = 0x26 }, + { .name = "rx_green_prio_3", .offset = 0x27 }, + { .name = "rx_green_prio_4", .offset = 0x28 }, + { .name = "rx_green_prio_5", .offset = 0x29 }, + { .name = "rx_green_prio_6", .offset = 0x2a }, + { .name = "rx_green_prio_7", .offset = 0x2b }, + { .name = "rx_assembly_err", .offset = 0x2c }, + { .name = "rx_smd_err", .offset = 0x2d }, + { .name = "rx_assembly_ok", .offset = 0x2e }, + { .name = "rx_merge_frag", .offset = 0x2f }, + { .name = "rx_pmac_oct", .offset = 0x30 }, + { .name = "rx_pmac_uc", .offset = 0x31 }, + { .name = "rx_pmac_mc", .offset = 0x32 }, + { .name = "rx_pmac_bc", .offset = 0x33 }, + { .name = "rx_pmac_short", .offset = 0x34 }, + { .name = "rx_pmac_frag", .offset = 0x35 }, + { .name = "rx_pmac_jabber", .offset = 0x36 }, + { .name = "rx_pmac_crc", .offset = 0x37 }, + { .name = "rx_pmac_symbol_err", .offset = 0x38 }, + { .name = "rx_pmac_sz_64", .offset = 0x39 }, + { .name = "rx_pmac_sz_65_127", .offset = 0x3a }, + { .name = "rx_pmac_sz_128_255", .offset = 0x3b }, + { .name = "rx_pmac_sz_256_511", .offset = 0x3c }, + { .name = "rx_pmac_sz_512_1023", .offset = 0x3d }, + { .name = "rx_pmac_sz_1024_1526", .offset = 0x3e }, + { .name = "rx_pmac_sz_jumbo", .offset = 0x3f }, + { .name = "rx_pmac_pause", .offset = 0x40 }, + { .name = "rx_pmac_control", .offset = 0x41 }, + { .name = "rx_pmac_long", .offset = 0x42 }, + { .name = "tx_oct", .offset = 0x80 }, + { .name = "tx_uc", .offset = 0x81 }, + { .name = "tx_mc", .offset = 0x82 }, + { .name = "tx_bc", .offset = 0x83 }, + { .name = "tx_col", .offset = 0x84 }, + { .name = "tx_drop", .offset = 0x85 }, + { .name = "tx_pause", .offset = 0x86 }, + { .name = "tx_sz_64", .offset = 0x87 }, + { .name = "tx_sz_65_127", 
.offset = 0x88 }, + { .name = "tx_sz_128_255", .offset = 0x89 }, + { .name = "tx_sz_256_511", .offset = 0x8a }, + { .name = "tx_sz_512_1023", .offset = 0x8b }, + { .name = "tx_sz_1024_1526", .offset = 0x8c }, + { .name = "tx_sz_jumbo", .offset = 0x8d }, + { .name = "tx_yellow_prio_0", .offset = 0x8e }, + { .name = "tx_yellow_prio_1", .offset = 0x8f }, + { .name = "tx_yellow_prio_2", .offset = 0x90 }, + { .name = "tx_yellow_prio_3", .offset = 0x91 }, + { .name = "tx_yellow_prio_4", .offset = 0x92 }, + { .name = "tx_yellow_prio_5", .offset = 0x93 }, + { .name = "tx_yellow_prio_6", .offset = 0x94 }, + { .name = "tx_yellow_prio_7", .offset = 0x95 }, + { .name = "tx_green_prio_0", .offset = 0x96 }, + { .name = "tx_green_prio_1", .offset = 0x97 }, + { .name = "tx_green_prio_2", .offset = 0x98 }, + { .name = "tx_green_prio_3", .offset = 0x99 }, + { .name = "tx_green_prio_4", .offset = 0x9a }, + { .name = "tx_green_prio_5", .offset = 0x9b }, + { .name = "tx_green_prio_6", .offset = 0x9c }, + { .name = "tx_green_prio_7", .offset = 0x9d }, + { .name = "tx_aged", .offset = 0x9e }, + { .name = "tx_llct", .offset = 0x9f }, + { .name = "tx_ct", .offset = 0xa0 }, + { .name = "tx_bufdrop", .offset = 0xa1 }, + { .name = "tx_mm_hold", .offset = 0xa2 }, + { .name = "tx_merge_frag", .offset = 0xa3 }, + { .name = "tx_pmac_oct", .offset = 0xa4 }, + { .name = "tx_pmac_uc", .offset = 0xa5 }, + { .name = "tx_pmac_mc", .offset = 0xa6 }, + { .name = "tx_pmac_bc", .offset = 0xa7 }, + { .name = "tx_pmac_pause", .offset = 0xa8 }, + { .name = "tx_pmac_sz_64", .offset = 0xa9 }, + { .name = "tx_pmac_sz_65_127", .offset = 0xaa }, + { .name = "tx_pmac_sz_128_255", .offset = 0xab }, + { .name = "tx_pmac_sz_256_511", .offset = 0xac }, + { .name = "tx_pmac_sz_512_1023", .offset = 0xad }, + { .name = "tx_pmac_sz_1024_1526", .offset = 0xae }, + { .name = "tx_pmac_sz_jumbo", .offset = 0xaf }, + { .name = "dr_local", .offset = 0x100 }, + { .name = "dr_tail", .offset = 0x101 }, + { .name = 
"dr_yellow_prio_0", .offset = 0x102 }, + { .name = "dr_yellow_prio_1", .offset = 0x103 }, + { .name = "dr_yellow_prio_2", .offset = 0x104 }, + { .name = "dr_yellow_prio_3", .offset = 0x105 }, + { .name = "dr_yellow_prio_4", .offset = 0x106 }, + { .name = "dr_yellow_prio_5", .offset = 0x107 }, + { .name = "dr_yellow_prio_6", .offset = 0x108 }, + { .name = "dr_yellow_prio_7", .offset = 0x109 }, + { .name = "dr_green_prio_0", .offset = 0x10a }, + { .name = "dr_green_prio_1", .offset = 0x10b }, + { .name = "dr_green_prio_2", .offset = 0x10c }, + { .name = "dr_green_prio_3", .offset = 0x10d }, + { .name = "dr_green_prio_4", .offset = 0x10e }, + { .name = "dr_green_prio_5", .offset = 0x10f }, + { .name = "dr_green_prio_6", .offset = 0x110 }, + { .name = "dr_green_prio_7", .offset = 0x111 }, +}; + +static const struct lan9645x_view_stats lan9645x_view_stat_cfgs[] = { + [LAN9645X_STAT_PORTS] = { + .name = "ports", + .type = LAN9645X_STAT_PORTS, + .layout = lan9645x_port_stats_layout, + .num_cnts = ARRAY_SIZE(lan9645x_port_stats_layout), + .num_indexes = NUM_PHYS_PORTS, + .regions = lan9645x_port_stat_regions, + .num_regions = ARRAY_SIZE(lan9645x_port_stat_regions), + }, +}; + +static void __lan9645x_stats_view_idx_update(struct lan9645x *lan9645x, + enum lan9645x_view_stat_type vtype, + int idx) +{ + struct lan9645x_stat_region region; + struct lan9645x_view_stats *vstats; + u64 *idx_counters; + u32 *region_buf; + int cntr; + int err; + + lockdep_assert_held(&lan9645x->stats->hw_lock); + + vstats = lan9645x_get_vstats(lan9645x, vtype); + if (!vstats || idx < 0 || idx >= vstats->num_indexes) + return; + + lan_wr(SYS_STAT_CFG_STAT_VIEW_SET(idx), lan9645x, SYS_STAT_CFG); + + idx_counters = STATS_INDEX(vstats, idx); + region_buf = &vstats->buf[vstats->num_cnts * idx]; + + /* Each region for this index contains counters which are at sequential + * addresses, so we can use bulk reads to ease lock pressure a bit. 
+ */ + for (int r = 0; r < vstats->num_regions; r++) { + region = vstats->regions[r]; + err = lan_bulk_rd(&region_buf[region.cnts_base_idx], region.cnt, + lan9645x, SYS_CNT(region.base_offset)); + if (err) { + dev_err(lan9645x->dev, + "stats bulk read err vtype=%d idx=%d err=%d", + vtype, idx, err); + return; + } + } + + for (cntr = 0; cntr < vstats->num_cnts; cntr++) + lan9645x_stats_add_cnt(&idx_counters[cntr], region_buf[cntr]); +} + +void lan9645x_stats_view_idx_update(struct lan9645x *lan9645x, + enum lan9645x_view_stat_type vtype, int idx) +{ + struct lan9645x_stats *s = lan9645x->stats; + + mutex_lock(&s->hw_lock); + __lan9645x_stats_view_idx_update(lan9645x, vtype, idx); + mutex_unlock(&s->hw_lock); +} + +void lan9645x_stats_view_update(struct lan9645x *lan9645x, + enum lan9645x_view_stat_type vtype) +{ + struct lan9645x_stats *s = lan9645x->stats; + struct lan9645x_view_stats *vstats; + int idx = 0; + + vstats = lan9645x_get_vstats(lan9645x, vtype); + if (!vstats) + return; + + switch (vtype) { + case LAN9645X_STAT_PORTS: + mutex_lock(&s->hw_lock); + for (idx = 0; idx < vstats->num_indexes; idx++) { + if (lan9645x_port_is_used(lan9645x, idx)) + __lan9645x_stats_view_idx_update(lan9645x, + vtype, idx); + } + mutex_unlock(&s->hw_lock); + return; + default: + return; + } +} + +static void lan9645x_stats_update(struct lan9645x *lan9645x) +{ + for (int vtype = 0; vtype < LAN9645X_STAT_NUM; vtype++) + lan9645x_stats_view_update(lan9645x, vtype); +} + +void lan9645x_stats_get_strings(struct lan9645x *lan9645x, int port, + u32 stringset, u8 *data) +{ + struct lan9645x_view_stats *port_stats; + int i; + + if (stringset != ETH_SS_STATS) + return; + + port_stats = lan9645x_get_vstats(lan9645x, LAN9645X_STAT_PORTS); + + for (i = 0; i < port_stats->num_cnts; i++) + memcpy(data + i * ETH_GSTRING_LEN, port_stats->layout[i].name, + ETH_GSTRING_LEN); +} + +int lan9645x_stats_get_sset_count(struct lan9645x *lan9645x, int port, int sset) +{ + struct lan9645x_view_stats 
*port_stats; + + if (sset != ETH_SS_STATS) + return -EOPNOTSUPP; + + port_stats = lan9645x_get_vstats(lan9645x, LAN9645X_STAT_PORTS); + + return port_stats->num_cnts; +} + +void lan9645x_stats_get_ethtool_stats(struct lan9645x *lan9645x, int port, + u64 *data) +{ + struct lan9645x_view_stats *port_stats; + int cntr; + u64 *s; + + mutex_lock(&lan9645x->stats->hw_lock); + + __lan9645x_stats_view_idx_update(lan9645x, LAN9645X_STAT_PORTS, port); + + port_stats = lan9645x_get_vstats(lan9645x, LAN9645X_STAT_PORTS); + + s = STATS_INDEX(port_stats, port); + + for (cntr = 0; cntr < port_stats->num_cnts; cntr++) + *data++ = s[cntr]; + + mutex_unlock(&lan9645x->stats->hw_lock); +} + +void lan9645x_stats_get_eth_mac_stats(struct lan9645x *lan9645x, int port, + struct ethtool_eth_mac_stats *mac_stats) +{ + u64 *port_counters; + + mutex_lock(&lan9645x->stats->hw_lock); + + __lan9645x_stats_view_idx_update(lan9645x, LAN9645X_STAT_PORTS, port); + + port_counters = STAT_COUNTERS(lan9645x, LAN9645X_STAT_PORTS, port); + + mac_stats->FramesTransmittedOK = + port_counters[SCNT_TX_UC] + + port_counters[SCNT_TX_MC] + + port_counters[SCNT_TX_BC] + + port_counters[SCNT_TX_PMAC_UC] + + port_counters[SCNT_TX_PMAC_MC] + + port_counters[SCNT_TX_PMAC_BC]; + mac_stats->SingleCollisionFrames = port_counters[SCNT_TX_COL]; + mac_stats->FramesReceivedOK = port_counters[SCNT_RX_UC] + + port_counters[SCNT_RX_MC] + + port_counters[SCNT_RX_BC]; + mac_stats->FrameCheckSequenceErrors = + port_counters[SCNT_RX_CRC] + + port_counters[SCNT_RX_PMAC_CRC]; + mac_stats->OctetsTransmittedOK = + port_counters[SCNT_TX_OCT] + + port_counters[SCNT_TX_PMAC_OCT]; + mac_stats->FramesWithDeferredXmissions = port_counters[SCNT_TX_MM_HOLD]; + mac_stats->OctetsReceivedOK = + port_counters[SCNT_RX_OCT] + + port_counters[SCNT_RX_PMAC_OCT]; + mac_stats->MulticastFramesXmittedOK = + port_counters[SCNT_TX_MC] + + port_counters[SCNT_TX_PMAC_MC]; + mac_stats->BroadcastFramesXmittedOK = + port_counters[SCNT_TX_BC] + + 
port_counters[SCNT_TX_PMAC_BC]; + mac_stats->MulticastFramesReceivedOK = + port_counters[SCNT_RX_MC] + + port_counters[SCNT_RX_PMAC_MC]; + mac_stats->BroadcastFramesReceivedOK = + port_counters[SCNT_RX_BC] + + port_counters[SCNT_RX_PMAC_BC]; + mac_stats->InRangeLengthErrors = + port_counters[SCNT_RX_FRAG] + + port_counters[SCNT_RX_JABBER] + + port_counters[SCNT_RX_CRC] + + port_counters[SCNT_RX_PMAC_FRAG] + + port_counters[SCNT_RX_PMAC_JABBER] + + port_counters[SCNT_RX_PMAC_CRC]; + mac_stats->OutOfRangeLengthField = port_counters[SCNT_RX_SHORT] + + port_counters[SCNT_RX_PMAC_SHORT] + + port_counters[SCNT_RX_LONG] + + port_counters[SCNT_RX_PMAC_LONG]; + mac_stats->FrameTooLongErrors = + port_counters[SCNT_RX_LONG] + + port_counters[SCNT_RX_PMAC_LONG]; + + mutex_unlock(&lan9645x->stats->hw_lock); +} + +static const struct ethtool_rmon_hist_range lan9645x_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1526 }, + { 1527, 0xffff }, + {} +}; + +void +lan9645x_stats_get_rmon_stats(struct lan9645x *lan9645x, int port, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + u64 *port_cnt; + + mutex_lock(&lan9645x->stats->hw_lock); + + __lan9645x_stats_view_idx_update(lan9645x, LAN9645X_STAT_PORTS, port); + + port_cnt = STAT_COUNTERS(lan9645x, LAN9645X_STAT_PORTS, port); + + rmon_stats->undersize_pkts = + port_cnt[SCNT_RX_SHORT] + + port_cnt[SCNT_RX_PMAC_SHORT]; + rmon_stats->oversize_pkts = + port_cnt[SCNT_RX_LONG] + + port_cnt[SCNT_RX_PMAC_LONG]; + rmon_stats->fragments = + port_cnt[SCNT_RX_FRAG] + + port_cnt[SCNT_RX_PMAC_FRAG]; + rmon_stats->jabbers = + port_cnt[SCNT_RX_JABBER] + + port_cnt[SCNT_RX_PMAC_JABBER]; + + rmon_stats->hist[0] = + port_cnt[SCNT_RX_SZ_64] + + port_cnt[SCNT_RX_PMAC_SZ_64]; + rmon_stats->hist[1] = + port_cnt[SCNT_RX_SZ_65_127] + + port_cnt[SCNT_RX_PMAC_SZ_65_127]; + rmon_stats->hist[2] = + port_cnt[SCNT_RX_SZ_128_255] + + 
port_cnt[SCNT_RX_PMAC_SZ_128_255]; + rmon_stats->hist[3] = + port_cnt[SCNT_RX_SZ_256_511] + + port_cnt[SCNT_RX_PMAC_SZ_256_511]; + rmon_stats->hist[4] = + port_cnt[SCNT_RX_SZ_512_1023] + + port_cnt[SCNT_RX_PMAC_SZ_512_1023]; + rmon_stats->hist[5] = + port_cnt[SCNT_RX_SZ_1024_1526] + + port_cnt[SCNT_RX_PMAC_SZ_1024_1526]; + rmon_stats->hist[6] = + port_cnt[SCNT_RX_SZ_JUMBO] + + port_cnt[SCNT_RX_PMAC_SZ_JUMBO]; + + rmon_stats->hist_tx[0] = + port_cnt[SCNT_TX_SZ_64] + + port_cnt[SCNT_TX_PMAC_SZ_64]; + rmon_stats->hist_tx[1] = + port_cnt[SCNT_TX_SZ_65_127] + + port_cnt[SCNT_TX_PMAC_SZ_65_127]; + rmon_stats->hist_tx[2] = + port_cnt[SCNT_TX_SZ_128_255] + + port_cnt[SCNT_TX_PMAC_SZ_128_255]; + rmon_stats->hist_tx[3] = + port_cnt[SCNT_TX_SZ_256_511] + + port_cnt[SCNT_TX_PMAC_SZ_256_511]; + rmon_stats->hist_tx[4] = + port_cnt[SCNT_TX_SZ_512_1023] + + port_cnt[SCNT_TX_PMAC_SZ_512_1023]; + rmon_stats->hist_tx[5] = + port_cnt[SCNT_TX_SZ_1024_1526] + + port_cnt[SCNT_TX_PMAC_SZ_1024_1526]; + rmon_stats->hist_tx[6] = + port_cnt[SCNT_TX_SZ_JUMBO] + + port_cnt[SCNT_TX_PMAC_SZ_JUMBO]; + + mutex_unlock(&lan9645x->stats->hw_lock); + + *ranges = lan9645x_rmon_ranges; +} + +void lan9645x_stats_get_stats64(struct lan9645x *lan9645x, int port, + struct rtnl_link_stats64 *stats) +{ + u64 *port_cnt; + + /* Avoid stats update, as this is called very often by DSA. 
*/ + mutex_lock(&lan9645x->stats->hw_lock); + + port_cnt = STAT_COUNTERS(lan9645x, LAN9645X_STAT_PORTS, port); + + stats->rx_bytes = port_cnt[SCNT_RX_OCT] + + port_cnt[SCNT_RX_PMAC_OCT]; + + stats->rx_packets = port_cnt[SCNT_RX_SHORT] + + port_cnt[SCNT_RX_FRAG] + + port_cnt[SCNT_RX_JABBER] + + port_cnt[SCNT_RX_CRC] + + port_cnt[SCNT_RX_SYMBOL_ERR] + + port_cnt[SCNT_RX_SZ_64] + + port_cnt[SCNT_RX_SZ_65_127] + + port_cnt[SCNT_RX_SZ_128_255] + + port_cnt[SCNT_RX_SZ_256_511] + + port_cnt[SCNT_RX_SZ_512_1023] + + port_cnt[SCNT_RX_SZ_1024_1526] + + port_cnt[SCNT_RX_SZ_JUMBO] + + port_cnt[SCNT_RX_LONG] + + port_cnt[SCNT_RX_PMAC_SHORT] + + port_cnt[SCNT_RX_PMAC_FRAG] + + port_cnt[SCNT_RX_PMAC_JABBER] + + port_cnt[SCNT_RX_PMAC_SZ_64] + + port_cnt[SCNT_RX_PMAC_SZ_65_127] + + port_cnt[SCNT_RX_PMAC_SZ_128_255] + + port_cnt[SCNT_RX_PMAC_SZ_256_511] + + port_cnt[SCNT_RX_PMAC_SZ_512_1023] + + port_cnt[SCNT_RX_PMAC_SZ_1024_1526] + + port_cnt[SCNT_RX_PMAC_SZ_JUMBO]; + + stats->multicast = port_cnt[SCNT_RX_MC] + + port_cnt[SCNT_RX_PMAC_MC]; + + stats->rx_errors = port_cnt[SCNT_RX_SHORT] + + port_cnt[SCNT_RX_FRAG] + + port_cnt[SCNT_RX_JABBER] + + port_cnt[SCNT_RX_CRC] + + port_cnt[SCNT_RX_SYMBOL_ERR] + + port_cnt[SCNT_RX_LONG] + + port_cnt[SCNT_RX_PMAC_SHORT] + + port_cnt[SCNT_RX_PMAC_FRAG] + + port_cnt[SCNT_RX_PMAC_JABBER] + + port_cnt[SCNT_RX_PMAC_CRC] + + port_cnt[SCNT_RX_PMAC_SYMBOL_ERR] + + port_cnt[SCNT_RX_PMAC_LONG]; + + stats->rx_dropped = port_cnt[SCNT_RX_LONG] + + port_cnt[SCNT_DR_LOCAL] + + port_cnt[SCNT_DR_TAIL] + + port_cnt[SCNT_RX_CAT_DROP] + + port_cnt[SCNT_RX_RED_PRIO_0] + + port_cnt[SCNT_RX_RED_PRIO_1] + + port_cnt[SCNT_RX_RED_PRIO_2] + + port_cnt[SCNT_RX_RED_PRIO_3] + + port_cnt[SCNT_RX_RED_PRIO_4] + + port_cnt[SCNT_RX_RED_PRIO_5] + + port_cnt[SCNT_RX_RED_PRIO_6] + + port_cnt[SCNT_RX_RED_PRIO_7]; + + for (int i = 0; i < LAN9645X_NUM_TC; i++) { + stats->rx_dropped += port_cnt[SCNT_DR_YELLOW_PRIO_0 + i] + + port_cnt[SCNT_DR_GREEN_PRIO_0 + i]; + } + + stats->tx_bytes = 
port_cnt[SCNT_TX_OCT] + + port_cnt[SCNT_TX_PMAC_OCT]; + + stats->tx_packets = port_cnt[SCNT_TX_SZ_64] + + port_cnt[SCNT_TX_SZ_65_127] + + port_cnt[SCNT_TX_SZ_128_255] + + port_cnt[SCNT_TX_SZ_256_511] + + port_cnt[SCNT_TX_SZ_512_1023] + + port_cnt[SCNT_TX_SZ_1024_1526] + + port_cnt[SCNT_TX_SZ_JUMBO] + + port_cnt[SCNT_TX_PMAC_SZ_64] + + port_cnt[SCNT_TX_PMAC_SZ_65_127] + + port_cnt[SCNT_TX_PMAC_SZ_128_255] + + port_cnt[SCNT_TX_PMAC_SZ_256_511] + + port_cnt[SCNT_TX_PMAC_SZ_512_1023] + + port_cnt[SCNT_TX_PMAC_SZ_1024_1526] + + port_cnt[SCNT_TX_PMAC_SZ_JUMBO]; + + stats->tx_dropped = port_cnt[SCNT_TX_DROP] + + port_cnt[SCNT_TX_AGED]; + + stats->collisions = port_cnt[SCNT_TX_COL]; + + mutex_unlock(&lan9645x->stats->hw_lock); +} + +void lan9645x_stats_get_eth_phy_stats(struct lan9645x *lan9645x, int port, + struct ethtool_eth_phy_stats *phy_stats) +{ + u64 *port_cnt; + + mutex_lock(&lan9645x->stats->hw_lock); + + __lan9645x_stats_view_idx_update(lan9645x, LAN9645X_STAT_PORTS, port); + + port_cnt = STAT_COUNTERS(lan9645x, LAN9645X_STAT_PORTS, port); + + switch (phy_stats->src) { + case ETHTOOL_MAC_STATS_SRC_EMAC: + phy_stats->SymbolErrorDuringCarrier = + port_cnt[SCNT_RX_SYMBOL_ERR]; + break; + case ETHTOOL_MAC_STATS_SRC_PMAC: + phy_stats->SymbolErrorDuringCarrier = + port_cnt[SCNT_RX_PMAC_SYMBOL_ERR]; + break; + default: + break; + } + + mutex_unlock(&lan9645x->stats->hw_lock); +} + +void +lan9645x_stats_get_eth_ctrl_stats(struct lan9645x *lan9645x, int port, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + u64 *port_cnt; + + mutex_lock(&lan9645x->stats->hw_lock); + + __lan9645x_stats_view_idx_update(lan9645x, LAN9645X_STAT_PORTS, port); + + port_cnt = STAT_COUNTERS(lan9645x, LAN9645X_STAT_PORTS, port); + + switch (ctrl_stats->src) { + case ETHTOOL_MAC_STATS_SRC_EMAC: + ctrl_stats->MACControlFramesReceived = + port_cnt[SCNT_RX_CONTROL]; + break; + case ETHTOOL_MAC_STATS_SRC_PMAC: + ctrl_stats->MACControlFramesReceived = + port_cnt[SCNT_RX_PMAC_CONTROL]; + break; + 
default: + break; + } + + mutex_unlock(&lan9645x->stats->hw_lock); +} + +void lan9645x_stats_get_pause_stats(struct lan9645x *lan9645x, int port, + struct ethtool_pause_stats *ps) +{ + u64 *port_cnt; + + mutex_lock(&lan9645x->stats->hw_lock); + + __lan9645x_stats_view_idx_update(lan9645x, LAN9645X_STAT_PORTS, port); + + port_cnt = STAT_COUNTERS(lan9645x, LAN9645X_STAT_PORTS, port); + + switch (ps->src) { + case ETHTOOL_MAC_STATS_SRC_EMAC: + ps->tx_pause_frames = port_cnt[SCNT_TX_PAUSE]; + ps->rx_pause_frames = port_cnt[SCNT_RX_PAUSE]; + break; + case ETHTOOL_MAC_STATS_SRC_PMAC: + ps->tx_pause_frames = port_cnt[SCNT_TX_PMAC_PAUSE]; + ps->rx_pause_frames = port_cnt[SCNT_RX_PMAC_PAUSE]; + break; + default: + break; + } + + mutex_unlock(&lan9645x->stats->hw_lock); +} + +void lan9645x_stats_get_mm_stats(struct lan9645x *lan9645x, int port, + struct ethtool_mm_stats *stats) +{ + u64 *port_cnt; + + mutex_lock(&lan9645x->stats->hw_lock); + + __lan9645x_stats_view_idx_update(lan9645x, LAN9645X_STAT_PORTS, port); + + port_cnt = STAT_COUNTERS(lan9645x, LAN9645X_STAT_PORTS, port); + + stats->MACMergeFrameAssErrorCount = port_cnt[SCNT_RX_ASSEMBLY_ERR]; + stats->MACMergeFrameSmdErrorCount = port_cnt[SCNT_RX_SMD_ERR]; + stats->MACMergeFrameAssOkCount = port_cnt[SCNT_RX_ASSEMBLY_OK]; + stats->MACMergeFragCountRx = port_cnt[SCNT_RX_MERGE_FRAG]; + stats->MACMergeFragCountTx = port_cnt[SCNT_TX_MERGE_FRAG]; + stats->MACMergeHoldCount = port_cnt[SCNT_TX_MM_HOLD]; + + mutex_unlock(&lan9645x->stats->hw_lock); +} + +void lan9645x_stats_clear_counters(struct lan9645x *lan9645x, + enum lan9645x_view_stat_type type, int idx) +{ + struct lan9645x_view_stats *vstats = + lan9645x_get_vstats(lan9645x, type); + u64 *idx_grp; + int cntr; + u32 sel; + + switch (type) { + case LAN9645X_STAT_PORTS: + /* Drop, TX and RX counters */ + sel = BIT(2) | BIT(1) | BIT(0); + break; + case LAN9645X_STAT_ISDX: + /* ISDX and FRER seq gen */ + sel = BIT(5) | BIT(3); + break; + case LAN9645X_STAT_ESDX: + /* ESDX 
*/ + sel = BIT(6); + break; + case LAN9645X_STAT_SFID: + /* Stream filter */ + sel = BIT(4); + break; + default: + return; + } + + mutex_lock(&lan9645x->stats->hw_lock); + + lan_wr(SYS_STAT_CFG_STAT_CLEAR_SHOT_SET(sel) | + SYS_STAT_CFG_STAT_VIEW_SET(idx), + lan9645x, SYS_STAT_CFG); + + idx_grp = STATS_INDEX(vstats, idx); + for (cntr = 0; cntr < vstats->num_cnts; cntr++) + idx_grp[cntr] = 0; + + mutex_unlock(&lan9645x->stats->hw_lock); +} + +static void lan9645x_check_stats_work(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct lan9645x_stats *stats; + + stats = container_of(del_work, struct lan9645x_stats, work); + + lan9645x_stats_update(stats->lan9645x); + + queue_delayed_work(stats->queue, &stats->work, + LAN9645X_STATS_CHECK_DELAY); +} + +static int lan9645x_stats_debugfs_show(struct seq_file *m, void *unused) +{ + struct lan9645x_view_stats *vstats = m->private; + int idx, cntr; + size_t total; + u64 *snap; + + total = vstats->num_cnts * vstats->num_indexes; + + /* Snapshot counters under lock to avoid holding hw_lock during + * slow seq_printf output. 
+ */ + snap = kmalloc_array(total, sizeof(u64), GFP_KERNEL); + if (!snap) + return -ENOMEM; + + mutex_lock(&vstats->stats->hw_lock); + memcpy(snap, vstats->cnts, total * sizeof(u64)); + mutex_unlock(&vstats->stats->hw_lock); + + for (idx = 0; idx < vstats->num_indexes; idx++) { + for (cntr = 0; cntr < vstats->num_cnts; cntr++) { + seq_printf(m, "%s_%d_%-*s %llu\n", vstats->name, idx, + 30, vstats->layout[cntr].name, + snap[vstats->num_cnts * idx + cntr]); + } + } + + kfree(snap); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(lan9645x_stats_debugfs); + +static void lan9645x_stats_debugfs(struct lan9645x *lan9645x, + struct dentry *parent) +{ + struct lan9645x_stats *stats = lan9645x->stats; + struct dentry *dir; + int i; + + dir = debugfs_create_dir("stats", parent); + if (PTR_ERR_OR_ZERO(dir)) + return; + + for (i = 0; i < ARRAY_SIZE(stats->view); i++) + debugfs_create_file(stats->view[i].name, 0444, dir, + &stats->view[i], + &lan9645x_stats_debugfs_fops); +} + +static int lan9645x_view_stat_init(struct lan9645x *lan9645x, + struct lan9645x_view_stats *vstat, + const struct lan9645x_view_stats *cfg) +{ + size_t total = cfg->num_cnts * cfg->num_indexes; + + memcpy(vstat, cfg, sizeof(*cfg)); + + vstat->cnts = devm_kcalloc(lan9645x->dev, total, sizeof(u64), + GFP_KERNEL); + if (!vstat->cnts) + return -ENOMEM; + + vstat->buf = devm_kcalloc(lan9645x->dev, total, sizeof(u32), + GFP_KERNEL); + if (!vstat->buf) + return -ENOMEM; + + vstat->stats = lan9645x->stats; + + return 0; +} + +int lan9645x_stats_init(struct lan9645x *lan9645x) +{ + const struct lan9645x_view_stats *vs; + struct lan9645x_stats *stats; + int err, i; + + lan9645x->stats = devm_kzalloc(lan9645x->dev, sizeof(*stats), + GFP_KERNEL); + if (!lan9645x->stats) + return -ENOMEM; + + stats = lan9645x->stats; + stats->lan9645x = lan9645x; + + mutex_init(&stats->hw_lock); + stats->queue = alloc_ordered_workqueue("lan9645x-stats", 0); + if (!stats->queue) + return -ENOMEM; + + for (i = 0; i < 
ARRAY_SIZE(lan9645x_view_stat_cfgs); i++) { + vs = &lan9645x_view_stat_cfgs[i]; + + if (!vs->num_cnts) + continue; + + err = lan9645x_view_stat_init(lan9645x, &stats->view[vs->type], + vs); + if (err) + return err; + } + + INIT_DELAYED_WORK(&stats->work, lan9645x_check_stats_work); + queue_delayed_work(stats->queue, &stats->work, + LAN9645X_STATS_CHECK_DELAY); + + lan9645x_stats_debugfs(lan9645x, lan9645x->debugfs_root); + + return 0; +} + +void lan9645x_stats_deinit(struct lan9645x *lan9645x) +{ + cancel_delayed_work_sync(&lan9645x->stats->work); + destroy_workqueue(lan9645x->stats->queue); + mutex_destroy(&lan9645x->stats->hw_lock); + lan9645x->stats->queue = NULL; +} diff --git a/drivers/net/dsa/microchip/lan9645x/lan9645x_stats.h b/drivers/net/dsa/microchip/lan9645x/lan9645x_stats.h new file mode 100644 index 000000000000..0bb1e1140cc7 --- /dev/null +++ b/drivers/net/dsa/microchip/lan9645x/lan9645x_stats.h @@ -0,0 +1,288 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (C) 2026 Microchip Technology Inc. 
+ */ + +#ifndef _LAN9645X_STATS_H_ +#define _LAN9645X_STATS_H_ + +#include "lan9645x_main.h" + +#define STATS_INDEX(vstats, idx) (&(vstats)->cnts[(vstats)->num_cnts * (idx)]) + +#define STAT_COUNTERS(lan9645x, type, idx) \ + STATS_INDEX(lan9645x_get_vstats(lan9645x, type), idx) + +/* Counter indices into stat layout structs */ +#define SCNT_FRER_SID_IN_PKT 0 +#define SCNT_ISDX_GREEN_OCT 1 +#define SCNT_ISDX_GREEN_PKT 2 +#define SCNT_ISDX_YELLOW_OCT 3 +#define SCNT_ISDX_YELLOW_PKT 4 +#define SCNT_ISDX_RED_OCT 5 +#define SCNT_ISDX_RED_PKT 6 +#define SCNT_ISDX_DROP_GREEN_OCT 7 +#define SCNT_ISDX_DROP_GREEN_PKT 8 +#define SCNT_ISDX_DROP_YELLOW_OCT 9 +#define SCNT_ISDX_DROP_YELLOW_PKT 10 + +#define SCNT_SF_MATCHING_FRAMES_COUNT 0 +#define SCNT_SF_NOT_PASSING_FRAMES_COUNT 1 +#define SCNT_SF_NOT_PASSING_SDU_COUNT 2 +#define SCNT_SF_RED_FRAMES_COUNT 3 +#define SCNT_SF_STREAM_BLOCK_COUNT 4 + +#define SCNT_ESDX_GREEN_OCT 0 +#define SCNT_ESDX_GREEN_PKT 1 +#define SCNT_ESDX_YELLOW_OCT 2 +#define SCNT_ESDX_YELLOW_PKT 3 + +#define SCNT_RX_OCT 0 +#define SCNT_RX_UC 1 +#define SCNT_RX_MC 2 +#define SCNT_RX_BC 3 +#define SCNT_RX_SHORT 4 +#define SCNT_RX_FRAG 5 +#define SCNT_RX_JABBER 6 +#define SCNT_RX_CRC 7 +#define SCNT_RX_SYMBOL_ERR 8 +#define SCNT_RX_SZ_64 9 +#define SCNT_RX_SZ_65_127 10 +#define SCNT_RX_SZ_128_255 11 +#define SCNT_RX_SZ_256_511 12 +#define SCNT_RX_SZ_512_1023 13 +#define SCNT_RX_SZ_1024_1526 14 +#define SCNT_RX_SZ_JUMBO 15 +#define SCNT_RX_PAUSE 16 +#define SCNT_RX_CONTROL 17 +#define SCNT_RX_LONG 18 +#define SCNT_RX_CAT_DROP 19 +#define SCNT_RX_RED_PRIO_0 20 +#define SCNT_RX_RED_PRIO_1 21 +#define SCNT_RX_RED_PRIO_2 22 +#define SCNT_RX_RED_PRIO_3 23 +#define SCNT_RX_RED_PRIO_4 24 +#define SCNT_RX_RED_PRIO_5 25 +#define SCNT_RX_RED_PRIO_6 26 +#define SCNT_RX_RED_PRIO_7 27 +#define SCNT_RX_YELLOW_PRIO_0 28 +#define SCNT_RX_YELLOW_PRIO_1 29 +#define SCNT_RX_YELLOW_PRIO_2 30 +#define SCNT_RX_YELLOW_PRIO_3 31 +#define SCNT_RX_YELLOW_PRIO_4 32 +#define 
SCNT_RX_YELLOW_PRIO_5 33 +#define SCNT_RX_YELLOW_PRIO_6 34 +#define SCNT_RX_YELLOW_PRIO_7 35 +#define SCNT_RX_GREEN_PRIO_0 36 +#define SCNT_RX_GREEN_PRIO_1 37 +#define SCNT_RX_GREEN_PRIO_2 38 +#define SCNT_RX_GREEN_PRIO_3 39 +#define SCNT_RX_GREEN_PRIO_4 40 +#define SCNT_RX_GREEN_PRIO_5 41 +#define SCNT_RX_GREEN_PRIO_6 42 +#define SCNT_RX_GREEN_PRIO_7 43 +#define SCNT_RX_ASSEMBLY_ERR 44 +#define SCNT_RX_SMD_ERR 45 +#define SCNT_RX_ASSEMBLY_OK 46 +#define SCNT_RX_MERGE_FRAG 47 +#define SCNT_RX_PMAC_OCT 48 +#define SCNT_RX_PMAC_UC 49 +#define SCNT_RX_PMAC_MC 50 +#define SCNT_RX_PMAC_BC 51 +#define SCNT_RX_PMAC_SHORT 52 +#define SCNT_RX_PMAC_FRAG 53 +#define SCNT_RX_PMAC_JABBER 54 +#define SCNT_RX_PMAC_CRC 55 +#define SCNT_RX_PMAC_SYMBOL_ERR 56 +#define SCNT_RX_PMAC_SZ_64 57 +#define SCNT_RX_PMAC_SZ_65_127 58 +#define SCNT_RX_PMAC_SZ_128_255 59 +#define SCNT_RX_PMAC_SZ_256_511 60 +#define SCNT_RX_PMAC_SZ_512_1023 61 +#define SCNT_RX_PMAC_SZ_1024_1526 62 +#define SCNT_RX_PMAC_SZ_JUMBO 63 +#define SCNT_RX_PMAC_PAUSE 64 +#define SCNT_RX_PMAC_CONTROL 65 +#define SCNT_RX_PMAC_LONG 66 +#define SCNT_TX_OCT 67 +#define SCNT_TX_UC 68 +#define SCNT_TX_MC 69 +#define SCNT_TX_BC 70 +#define SCNT_TX_COL 71 +#define SCNT_TX_DROP 72 +#define SCNT_TX_PAUSE 73 +#define SCNT_TX_SZ_64 74 +#define SCNT_TX_SZ_65_127 75 +#define SCNT_TX_SZ_128_255 76 +#define SCNT_TX_SZ_256_511 77 +#define SCNT_TX_SZ_512_1023 78 +#define SCNT_TX_SZ_1024_1526 79 +#define SCNT_TX_SZ_JUMBO 80 +#define SCNT_TX_YELLOW_PRIO_0 81 +#define SCNT_TX_YELLOW_PRIO_1 82 +#define SCNT_TX_YELLOW_PRIO_2 83 +#define SCNT_TX_YELLOW_PRIO_3 84 +#define SCNT_TX_YELLOW_PRIO_4 85 +#define SCNT_TX_YELLOW_PRIO_5 86 +#define SCNT_TX_YELLOW_PRIO_6 87 +#define SCNT_TX_YELLOW_PRIO_7 88 +#define SCNT_TX_GREEN_PRIO_0 89 +#define SCNT_TX_GREEN_PRIO_1 90 +#define SCNT_TX_GREEN_PRIO_2 91 +#define SCNT_TX_GREEN_PRIO_3 92 +#define SCNT_TX_GREEN_PRIO_4 93 +#define SCNT_TX_GREEN_PRIO_5 94 +#define SCNT_TX_GREEN_PRIO_6 95 +#define 
SCNT_TX_GREEN_PRIO_7 96 +#define SCNT_TX_AGED 97 +#define SCNT_TX_LLCT 98 +#define SCNT_TX_CT 99 +#define SCNT_TX_BUFDROP 100 +#define SCNT_TX_MM_HOLD 101 +#define SCNT_TX_MERGE_FRAG 102 +#define SCNT_TX_PMAC_OCT 103 +#define SCNT_TX_PMAC_UC 104 +#define SCNT_TX_PMAC_MC 105 +#define SCNT_TX_PMAC_BC 106 +#define SCNT_TX_PMAC_PAUSE 107 +#define SCNT_TX_PMAC_SZ_64 108 +#define SCNT_TX_PMAC_SZ_65_127 109 +#define SCNT_TX_PMAC_SZ_128_255 110 +#define SCNT_TX_PMAC_SZ_256_511 111 +#define SCNT_TX_PMAC_SZ_512_1023 112 +#define SCNT_TX_PMAC_SZ_1024_1526 113 +#define SCNT_TX_PMAC_SZ_JUMBO 114 +#define SCNT_DR_LOCAL 115 +#define SCNT_DR_TAIL 116 +#define SCNT_DR_YELLOW_PRIO_0 117 +#define SCNT_DR_YELLOW_PRIO_1 118 +#define SCNT_DR_YELLOW_PRIO_2 119 +#define SCNT_DR_YELLOW_PRIO_3 120 +#define SCNT_DR_YELLOW_PRIO_4 121 +#define SCNT_DR_YELLOW_PRIO_5 122 +#define SCNT_DR_YELLOW_PRIO_6 123 +#define SCNT_DR_YELLOW_PRIO_7 124 +#define SCNT_DR_GREEN_PRIO_0 125 +#define SCNT_DR_GREEN_PRIO_1 126 +#define SCNT_DR_GREEN_PRIO_2 127 +#define SCNT_DR_GREEN_PRIO_3 128 +#define SCNT_DR_GREEN_PRIO_4 129 +#define SCNT_DR_GREEN_PRIO_5 130 +#define SCNT_DR_GREEN_PRIO_6 131 +#define SCNT_DR_GREEN_PRIO_7 132 + +struct lan9645x_stat_layout { + u32 offset; + char name[ETH_GSTRING_LEN]; +}; + +enum lan9645x_view_stat_type { + LAN9645X_STAT_PORTS = 0, + LAN9645X_STAT_ISDX, + LAN9645X_STAT_ESDX, + LAN9645X_STAT_SFID, + + LAN9645X_STAT_NUM, +}; + +struct lan9645x_stat_region { + u32 base_offset; + u32 cnt; + u32 cnts_base_idx; +}; + +/* Counters are organized by indices/views such as + * + * - physical ports + * - isdx + * - esdx + * - frer + * - sfid + * + * Each view contains regions, each of which is a linear address range of + * related stats. E.g. the ports view has RX, TX and Drop regions. + * + * Each counter in a view is replicated per index.
+ */ +struct lan9645x_view_stats { + /* Individual counter descriptions in this view */ + const struct lan9645x_stat_layout *layout; + /* Region description for this view, used for bulk reading */ + const struct lan9645x_stat_region *regions; + struct lan9645x_stats *stats; + char name[16]; + /* 64bit software counters with the same addr layout hw */ + u64 *cnts; + /* Buffer for bulk reading counter regions from hw */ + u32 *buf; + /* Number of counters per index in view */ + u32 num_cnts; + /* Number of indexes in view */ + u32 num_indexes; + /* Number of counter regions with counters at sequential addresses */ + size_t num_regions; + enum lan9645x_view_stat_type type; +}; + +struct lan9645x_stats { + struct lan9645x *lan9645x; + struct mutex hw_lock; /* lock r/w to stat registers */ + struct delayed_work work; + struct workqueue_struct *queue; + + struct lan9645x_view_stats view[LAN9645X_STAT_NUM]; +}; + +static inline struct lan9645x_view_stats * +lan9645x_get_vstats(struct lan9645x *lan9645x, + enum lan9645x_view_stat_type type) +{ + if (WARN_ON(!(type < LAN9645X_STAT_NUM))) + return NULL; + + return &lan9645x->stats->view[type]; +} + +/* Add a possibly wrapping 32 bit value to a 64 bit counter */ +static inline void lan9645x_stats_add_cnt(u64 *cnt, u32 val) +{ + if (val < (*cnt & U32_MAX)) + *cnt += (u64)1 << 32; /* value has wrapped */ + + *cnt = (*cnt & ~(u64)U32_MAX) + val; +} + +void lan9645x_stats_clear_counters(struct lan9645x *lan9645x, + enum lan9645x_view_stat_type type, int idx); +int lan9645x_stats_init(struct lan9645x *lan9645x); +void lan9645x_stats_deinit(struct lan9645x *lan9645x); +void lan9645x_stats_get_strings(struct lan9645x *lan9645x, int port, + u32 stringset, u8 *data); +int lan9645x_stats_get_sset_count(struct lan9645x *lan9645x, int port, + int sset); +void lan9645x_stats_get_ethtool_stats(struct lan9645x *lan9645x, int port, + uint64_t *data); +void lan9645x_stats_get_eth_mac_stats(struct lan9645x *lan9645x, int port, + struct 
ethtool_eth_mac_stats *mac_stats); +void +lan9645x_stats_get_rmon_stats(struct lan9645x *lan9645x, int port, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges); +void lan9645x_stats_get_stats64(struct lan9645x *lan9645x, int port, + struct rtnl_link_stats64 *s); +void lan9645x_stats_get_mm_stats(struct lan9645x *lan9645x, int port, + struct ethtool_mm_stats *stats); +void lan9645x_stats_get_pause_stats(struct lan9645x *lan9645x, int port, + struct ethtool_pause_stats *ps); +void +lan9645x_stats_get_eth_ctrl_stats(struct lan9645x *lan9645x, int port, + struct ethtool_eth_ctrl_stats *ctrl_stats); +void lan9645x_stats_get_eth_phy_stats(struct lan9645x *lan9645x, int port, + struct ethtool_eth_phy_stats *phy_stats); +void lan9645x_stats_view_update(struct lan9645x *lan9645x, + enum lan9645x_view_stat_type view_type); +void lan9645x_stats_view_idx_update(struct lan9645x *lan9645x, + enum lan9645x_view_stat_type vtype, + int idx); + +#endif -- 2.52.0