WLAN sensing operation uses signal processing on the Wi-Fi signals received by a station device to detect physical obstructions and interpret movements with the help of Channel State Information (CSI), defined in IEEE 802.11bf. Introduce a new wdev (non-netdev) interface type WLAN SENSE for 802.11bf, similar to a NAN or P2P Discovery interface. This new wdev interface helps in separating WLAN Sensing operation and the normal WLAN traffic. Also this new interface is used for any 802.11 frame exchanges to be done as part of the WLAN Sensing Measurement operation. Also add new WLAN SENSE Start and Stop nl80211 commands to allow the userspace to control this interface. Signed-off-by: Gokul Sivakumar --- include/net/cfg80211.h | 7 ++++ include/uapi/linux/nl80211.h | 11 +++++++ net/mac80211/cfg.c | 1 + net/mac80211/chan.c | 2 ++ net/mac80211/iface.c | 2 ++ net/mac80211/util.c | 1 + net/wireless/chan.c | 2 ++ net/wireless/core.c | 26 +++++++++++++++ net/wireless/nl80211.c | 63 ++++++++++++++++++++++++++++++++++++ net/wireless/rdev-ops.h | 19 +++++++++++ net/wireless/trace.h | 10 ++++++ net/wireless/util.c | 1 + 12 files changed, 145 insertions(+) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 4072a67c9cc9..d6145556fcf4 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4762,6 +4762,8 @@ struct mgmt_frame_regs { * links by calling cfg80211_mlo_reconf_add_done(). When calling * cfg80211_mlo_reconf_add_done() the bss pointer must be given for each * link for which MLO reconfiguration 'add' operation was requested. + * @start_wlan_sense: Start the wlan sense device. + * @stop_wlan_sense: Stop the given wlan sense device. 
*/ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -5133,6 +5135,11 @@ struct cfg80211_ops { struct cfg80211_ml_reconf_req *req); int (*set_epcs)(struct wiphy *wiphy, struct net_device *dev, bool val); + + int (*start_wlan_sense)(struct wiphy *wiphy, + struct wireless_dev *wdev); + void (*stop_wlan_sense)(struct wiphy *wiphy, + struct wireless_dev *wdev); }; /* diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index aed0b4c5d5e8..5599fb9d2f08 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1344,6 +1344,12 @@ * control EPCS configuration. Used to notify userland on the current state * of EPCS. * + * @NL80211_CMD_START_WLAN_SENSE: Start WLAN SENSE operation, identified + * by its %NL80211_ATTR_WDEV interface. This interface must have been + * previously created with %NL80211_CMD_NEW_INTERFACE. + * @NL80211_CMD_STOP_WLAN_SENSE: Stop WLAN SENSE operation, identified by + * its %NL80211_ATTR_WDEV interface. 
+ * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1604,6 +1610,9 @@ enum nl80211_commands { NL80211_CMD_ASSOC_MLO_RECONF, NL80211_CMD_EPCS_CFG, + NL80211_CMD_START_WLAN_SENSE, + NL80211_CMD_STOP_WLAN_SENSE, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -3592,6 +3601,7 @@ enum nl80211_attrs { * @NL80211_IFTYPE_OCB: Outside Context of a BSS * This mode corresponds to the MIB variable dot11OCBActivated=true * @NL80211_IFTYPE_NAN: NAN device interface type (not a netdev) + * @NL80211_IFTYPE_WLAN_SENSE: WLAN Sensing device interface type (not a netdev) * @NL80211_IFTYPE_MAX: highest interface type number currently defined * @NUM_NL80211_IFTYPES: number of defined interface types * @@ -3613,6 +3623,7 @@ enum nl80211_iftype { NL80211_IFTYPE_P2P_DEVICE, NL80211_IFTYPE_OCB, NL80211_IFTYPE_NAN, + NL80211_IFTYPE_WLAN_SENSE, /* keep last */ NUM_NL80211_IFTYPES, diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index b26f61f13605..9f62f30a842c 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -592,6 +592,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: + case NL80211_IFTYPE_WLAN_SENSE: case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: case NL80211_IFTYPE_P2P_CLIENT: diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 57065714cf8c..5b304988e712 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -353,6 +353,7 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, break; case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: + case NL80211_IFTYPE_WLAN_SENSE: continue; case NL80211_IFTYPE_MONITOR: WARN_ON_ONCE(!ieee80211_hw_check(&local->hw, @@ -1301,6 +1302,7 @@ ieee80211_link_chanctx_reservation_complete(struct ieee80211_link_data *link) case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: 
+ case NL80211_IFTYPE_WLAN_SENSE: case NUM_NL80211_IFTYPES: WARN_ON(1); break; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 07ba68f7cd81..2537c4255efc 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1320,6 +1320,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_NAN: + case NL80211_IFTYPE_WLAN_SENSE: /* no special treatment */ break; case NL80211_IFTYPE_UNSPECIFIED: @@ -1888,6 +1889,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, case NL80211_IFTYPE_P2P_DEVICE: sdata->vif.bss_conf.bssid = sdata->vif.addr; break; + case NL80211_IFTYPE_WLAN_SENSE: case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_WDS: case NUM_NL80211_IFTYPES: diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 9eb35e3b9e52..3a6c2536f338 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2064,6 +2064,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_P2P_DEVICE: + case NL80211_IFTYPE_WLAN_SENSE: /* nothing to do */ break; case NL80211_IFTYPE_UNSPECIFIED: diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 193734b7f9dc..3593a48f9a15 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -769,6 +769,7 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy, case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: + case NL80211_IFTYPE_WLAN_SENSE: break; case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_UNSPECIFIED: @@ -892,6 +893,7 @@ bool cfg80211_beaconing_iface_active(struct wireless_dev *wdev) case NL80211_IFTYPE_P2P_DEVICE: /* Can NAN type be considered as beaconing interface? 
*/ case NL80211_IFTYPE_NAN: + case NL80211_IFTYPE_WLAN_SENSE: break; case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_WDS: diff --git a/net/wireless/core.c b/net/wireless/core.c index 797f9f2004a6..a8e982ae8628 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -265,6 +265,23 @@ void cfg80211_stop_nan(struct cfg80211_registered_device *rdev, rdev->opencount--; } +void cfg80211_stop_wlan_sense(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev) +{ + lockdep_assert_held(&rdev->wiphy.mtx); + + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WLAN_SENSE)) + return; + + if (!wdev_running(wdev)) + return; + + rdev_stop_wlan_sense(rdev, wdev); + wdev->is_running = false; + + rdev->opencount--; +} + void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); @@ -667,6 +684,11 @@ int wiphy_verify_iface_combinations(struct wiphy *wiphy, c->limits[j].max > 1)) return -EINVAL; + /* Only a single WLAN Sense interface can be allowed */ + if (WARN_ON(types & BIT(NL80211_IFTYPE_WLAN_SENSE) && + c->limits[j].max > 1)) + return -EINVAL; + /* * This isn't well-defined right now. 
If you have an * IBSS interface, then its beacon interval may change @@ -1297,6 +1319,9 @@ static void _cfg80211_unregister_wdev(struct wireless_dev *wdev, case NL80211_IFTYPE_NAN: cfg80211_stop_nan(rdev, wdev); break; + case NL80211_IFTYPE_WLAN_SENSE: + cfg80211_stop_wlan_sense(rdev, wdev); + break; default: break; } @@ -1400,6 +1425,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev, break; case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_NAN: + case NL80211_IFTYPE_WLAN_SENSE: /* cannot happen, has no netdev */ break; case NL80211_IFTYPE_AP_VLAN: diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index b7bc7e5e81dd..70717af1fbcd 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1642,6 +1642,7 @@ static int nl80211_key_allowed(struct wireless_dev *wdev) case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_WLAN_SENSE: case NUM_NL80211_IFTYPES: return -EINVAL; } @@ -4642,6 +4643,7 @@ static int _nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) wdev->u.mesh.id_up_len); break; case NL80211_IFTYPE_NAN: + case NL80211_IFTYPE_WLAN_SENSE: case NL80211_IFTYPE_P2P_DEVICE: /* * P2P Device and NAN do not have a netdev, so don't go @@ -13687,6 +13689,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info) case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_DEVICE: + case NL80211_IFTYPE_WLAN_SENSE: break; case NL80211_IFTYPE_NAN: if (!wiphy_ext_feature_isset(wdev->wiphy, @@ -13748,6 +13751,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_WLAN_SENSE: break; case NL80211_IFTYPE_NAN: if (!wiphy_ext_feature_isset(wdev->wiphy, @@ -13872,6 +13876,7 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in case NL80211_IFTYPE_AP_VLAN: case 
NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_DEVICE: + case NL80211_IFTYPE_WLAN_SENSE: break; case NL80211_IFTYPE_NAN: if (!wiphy_ext_feature_isset(wdev->wiphy, @@ -15455,6 +15460,50 @@ static int nl80211_stop_nan(struct sk_buff *skb, struct genl_info *info) return 0; } +static int nl80211_start_wlan_sense(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct wireless_dev *wdev = info->user_ptr[1]; + int err; + + if (wdev->iftype != NL80211_IFTYPE_WLAN_SENSE) + return -EOPNOTSUPP; + + if (!rdev->ops->start_wlan_sense) + return -EOPNOTSUPP; + + if (wdev_running(wdev)) + return -EEXIST; + + if (rfkill_blocked(rdev->wiphy.rfkill)) + return -ERFKILL; + + err = rdev_start_wlan_sense(rdev, wdev); + if (err) + return err; + + wdev->is_running = true; + rdev->opencount++; + + return 0; +} + +static int nl80211_stop_wlan_sense(struct sk_buff *skb, struct genl_info *info) +{ + struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct wireless_dev *wdev = info->user_ptr[1]; + + if (wdev->iftype != NL80211_IFTYPE_WLAN_SENSE) + return -EOPNOTSUPP; + + if (!rdev->ops->stop_wlan_sense) + return -EOPNOTSUPP; + + cfg80211_stop_wlan_sense(rdev, wdev); + + return 0; +} + static int validate_nan_filter(struct nlattr *filter_attr) { struct nlattr *attr; @@ -18755,6 +18804,20 @@ static const struct genl_small_ops nl80211_small_ops[] = { .flags = GENL_UNS_ADMIN_PERM, .internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP), }, + { + .cmd = NL80211_CMD_START_WLAN_SENSE, + .doit = nl80211_start_wlan_sense, + .flags = GENL_UNS_ADMIN_PERM, + .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV | + NL80211_FLAG_NEED_RTNL), + }, + { + .cmd = NL80211_CMD_STOP_WLAN_SENSE, + .doit = nl80211_stop_wlan_sense, + .flags = GENL_UNS_ADMIN_PERM, + .internal_flags = IFLAGS(NL80211_FLAG_NEED_WDEV_UP | + NL80211_FLAG_NEED_RTNL), + }, }; static struct genl_family nl80211_fam __ro_after_init = { diff --git a/net/wireless/rdev-ops.h 
b/net/wireless/rdev-ops.h index ac6884bacf3f..eb599b9f33d1 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -1060,6 +1060,25 @@ rdev_nan_change_conf(struct cfg80211_registered_device *rdev, return ret; } +static inline int rdev_start_wlan_sense(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev) +{ + int ret; + + trace_rdev_start_wlan_sense(&rdev->wiphy, wdev); + ret = rdev->ops->start_wlan_sense(&rdev->wiphy, wdev); + trace_rdev_return_int(&rdev->wiphy, ret); + return ret; +} + +static inline void rdev_stop_wlan_sense(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev) +{ + trace_rdev_stop_wlan_sense(&rdev->wiphy, wdev); + rdev->ops->stop_wlan_sense(&rdev->wiphy, wdev); + trace_rdev_return_void(&rdev->wiphy); +} + static inline int rdev_set_mac_acl(struct cfg80211_registered_device *rdev, struct net_device *dev, struct cfg80211_acl_data *params) diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 9b6074155d59..91109c27e7a6 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -2372,6 +2372,16 @@ TRACE_EVENT(rdev_del_nan_func, WIPHY_PR_ARG, WDEV_PR_ARG, __entry->cookie) ); +DEFINE_EVENT(wiphy_wdev_evt, rdev_start_wlan_sense, + TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), + TP_ARGS(wiphy, wdev) +); + +DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_wlan_sense, + TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev), + TP_ARGS(wiphy, wdev) +); + TRACE_EVENT(rdev_set_mac_acl, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_acl_data *params), diff --git a/net/wireless/util.c b/net/wireless/util.c index d12d49134c88..6cffd01c4668 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1297,6 +1297,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, case NL80211_IFTYPE_P2P_DEVICE: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_NAN: + case NL80211_IFTYPE_WLAN_SENSE: WARN_ON(1); break; } -- 2.25.1 Introduce a mechanism to initiate WLAN 
Sensing request to collect the Channel State Information (CSI) from the WLAN device, by leveraging and extending the existing Peer Measurement Request (PMSR) infrastructure. PMSR so far supported only one type of peer measurement, which is FTM. * Allow the driver to register/advertise the WLAN Sensing Measurement Capabilities of the Device to the userspace through cfg80211. * Add support to parse the WLAN Sensing Measurement Request params passed by userspace and send it to the driver using the start_pmsr cfg80211_ops cb. And let driver return the collected CSI raw data to the userspace using the cfg80211_pmsr_report() notification. * Also have a provision for passing vendor-specific params while initiating the WLAN Sensing Measurement request to the driver. * Add the corresponding nl80211 attributes, struct and enum definitions to do the above mentioned operations. Signed-off-by: Gokul Sivakumar --- include/linux/ieee80211.h | 5 ++ include/net/cfg80211.h | 84 +++++++++++++++++++- include/uapi/linux/nl80211.h | 119 +++++++++++++++++++++++++++++ net/wireless/core.c | 93 +++++++++++++++-------- net/wireless/core.h | 3 + net/wireless/nl80211.c | 70 +++++++++++++++++ net/wireless/pmsr.c | 143 +++++++++++++++++++++++++++++++++++ 7 files changed, 485 insertions(+), 32 deletions(-) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index d350263f23f3..a3a2ac682fd4 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -3499,6 +3499,11 @@ ieee80211_bandwidth_indication_size_ok(const u8 *data, u8 len) #define WLAN_ERP_USE_PROTECTION (1<<1) #define WLAN_ERP_BARKER_PREAMBLE (1<<2) +/* IEEE Std 802.11bf, Sensing Capabilities */ +#define IEEE80211_SENSING_CAPA_MAX_LTF_REP 8 +#define IEEE80211_SENSING_CAPA_MAX_CHAINS 8 +#define IEEE80211_SENSING_CAPA_MAX_MEASUREMENT_INTERVAL 0x7FFFFF + /* WLAN_ERP_BARKER_PREAMBLE values */ enum { WLAN_ERP_PREAMBLE_SHORT = 0, diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 
d6145556fcf4..4a753d92c2e5 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4178,6 +4178,19 @@ struct cfg80211_pmsr_ftm_result { dist_spread_valid:1; }; +/** + * struct cfg80211_pmsr_sensing_result - Sensing measurement report + * @seq_number: sequence number of the sensing measurement report + * @data_len: length of @data + * @data: raw data of the sensing measurement report whose format is + * defined/parsed by user + */ +struct cfg80211_pmsr_sensing_result { + u16 seq_number; + u32 data_len; + u8 *data; +}; + /** * struct cfg80211_pmsr_result - peer measurement result * @addr: address of the peer @@ -4192,6 +4205,7 @@ struct cfg80211_pmsr_ftm_result { * one type at a time, but you can report multiple results separately and * they're all aggregated for userspace. * @ftm: FTM result + * @sensing: Sensing result */ struct cfg80211_pmsr_result { u64 host_time, ap_tsf; @@ -4206,6 +4220,7 @@ struct cfg80211_pmsr_result { union { struct cfg80211_pmsr_ftm_result ftm; + struct cfg80211_pmsr_sensing_result sensing; }; }; @@ -4252,18 +4267,60 @@ struct cfg80211_pmsr_ftm_request_peer { u8 bss_color; }; +/** + * struct cfg80211_pmsr_sensing_request_peer - Sensing request data + * @associated: indicates device is in associated state + * @interval: interval between two consecutive sensing exchange + * @duration: duration of a sensing exchange + * @expiry_exp: measurement session expiry exponent + * @tx_ltf_rep: TX LTF Repetition + * @rx_ltf_rep: RX LTF Repetition + * @tx_sts: number of TX space-time streams + * @rx_sts: number of RX space-time streams + * @num_rx_chains: number of RX chains + * @min_interval: minimum measurement interval between two consecutive + * non-TB sensing measurement exchanges + * @vendor_req_len: length of @vendor_req + * works only if vendor_mode of sensing capabilities is set + * @vendor_req: vendor request block, interpreted by vendor implementation + * works only if vendor_mode of sensing capabilities is set + * + * See also 
nl80211 for the respective attribute documentation + */ +struct cfg80211_pmsr_sensing_request_peer { + u8 associated:1; + u32 interval; + u16 duration; + u8 expiry_exp; + u8 tx_ltf_rep; + u8 rx_ltf_rep; + u8 tx_sts; + u8 rx_sts; + u8 num_rx_chains; + u32 min_interval; + u16 vendor_req_len; + u8 *vendor_req; +}; + /** * struct cfg80211_pmsr_request_peer - peer data for a peer measurement request * @addr: MAC address * @chandef: channel to use * @report_ap_tsf: report the associated AP's TSF + * @type: type of peer measurement, see &enum nl80211_peer_measurement_type * @ftm: FTM data, see &struct cfg80211_pmsr_ftm_request_peer + * @sensing: sensing data, see &struct cfg80211_pmsr_sensing_request_peer */ struct cfg80211_pmsr_request_peer { u8 addr[ETH_ALEN]; struct cfg80211_chan_def chandef; u8 report_ap_tsf:1; - struct cfg80211_pmsr_ftm_request_peer ftm; + + enum nl80211_peer_measurement_type type; + union { + struct cfg80211_pmsr_ftm_request_peer ftm; + struct cfg80211_pmsr_sensing_request_peer sensing; + }; }; /** @@ -5556,6 +5613,7 @@ cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type); * @max_peers: maximum number of peers in a single measurement * @report_ap_tsf: can report assoc AP's TSF for radio resource measurement * @randomize_mac_addr: can randomize MAC address for measurement + * * @ftm: FTM measurement data * @ftm.supported: FTM measurement is supported * @ftm.asap: ASAP-mode is supported @@ -5571,6 +5629,18 @@ cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type); * not limited) * @ftm.trigger_based: trigger based ranging measurement is supported * @ftm.non_trigger_based: non trigger based ranging measurement is supported + * + * @sensing.supported: Sensing measurement is supported + * @sensing.associated: support sensing under device associated + * @sensing.unassociated: support sensing under device un-associated + * @sensing.vendor_mode: indicating vendor sensing measurement procedure + * is adopted + * 
@sensing.bandwidths: bitmap of bandwidths supported (&enum nl80211_chan_width) + * @sensing.max_tx_ltf_rep: maximum number of TX LTF Repetition + * @sensing.max_rx_ltf_rep: maximum number of RX LTF Repetition + * @sensing.min_interval: minimum measurement interval between two consecutive + * non-TB sensing measurement exchanges + * @sensing.max_rx_chains: maximum number of RX chains */ struct cfg80211_pmsr_capabilities { unsigned int max_peers; @@ -5590,6 +5660,18 @@ struct cfg80211_pmsr_capabilities { trigger_based:1, non_trigger_based:1; } ftm; + + struct { + u8 supported:1, + associated:1, + unassociated:1, + vendor_mode:1; + u32 bandwidths; + u8 max_tx_ltf_rep; + u8 max_rx_ltf_rep; + u32 min_interval; + u8 max_rx_chains; + } sensing; }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 5599fb9d2f08..36152192e09b 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -7439,6 +7439,7 @@ enum nl80211_preamble { * these numbers also for attributes * * @NL80211_PMSR_TYPE_FTM: flight time measurement + * @NL80211_PMSR_TYPE_SENSING: WLAN sensing measurement * * @NUM_NL80211_PMSR_TYPES: internal * @NL80211_PMSR_TYPE_MAX: highest type number @@ -7447,6 +7448,7 @@ enum nl80211_peer_measurement_type { NL80211_PMSR_TYPE_INVALID, NL80211_PMSR_TYPE_FTM, + NL80211_PMSR_TYPE_SENSING, NUM_NL80211_PMSR_TYPES, NL80211_PMSR_TYPE_MAX = NUM_NL80211_PMSR_TYPES - 1 @@ -7831,6 +7833,123 @@ enum nl80211_peer_measurement_ftm_resp { NL80211_PMSR_FTM_RESP_ATTR_MAX = NUM_NL80211_PMSR_FTM_RESP_ATTR - 1 }; +/** + * enum nl80211_peer_measurement_sensing_capa - Sensing capabilities + * @__NL80211_PMSR_SENSING_CAPA_ATTR_INVALID: invalid + * + * @NL80211_PMSR_SENSING_CAPA_ATTR_ASSOCIATED: flag attribute represents + * supporting sensing under device associated + * @NL80211_PMSR_SENSING_CAPA_ATTR_UNASSOCIATED: flag attribute represents + * supporting sensing under device unassociated + * @NL80211_PMSR_SENSING_CAPA_ATTR_VENDOR_MODE: flag 
attribute indicating + * support vendor sensing measurement procedure + * @NL80211_PMSR_SENSING_CAPA_ATTR_BANDWIDTHS: bitmap of values from + * &enum nl80211_chan_width indicating the supported channel + * bandwidths for sensing + * @NL80211_PMSR_SENSING_CAPA_ATTR_MAX_TX_LTF_REP: set to maximum number of + * TX LTF Repetition minus 1 (u8, 0-7) + * @NL80211_PMSR_SENSING_CAPA_ATTR_MAX_RX_LTF_REP: set to maximum number of + * RX LTF Repetition minus 1 (u8, 0-7) + * @NL80211_PMSR_SENSING_CAPA_ATTR_MIN_INTERVAL: indicating minimum + * measurement interval between two consecutive non-TB sensing + * measurement exchanges (u32, 100us, 0-8388607) + * @NL80211_PMSR_SENSING_CAPA_ATTR_MAX_RX_CHAINS: set to maximum number of + * RX chains minus 1 (u8, 0-7) + * + * @NUM_NL80211_PMSR_SENSING_CAPA_ATTR: internal + * @NL80211_PMSR_SENSING_CAPA_ATTR_MAX: highest attribute number + */ +enum nl80211_peer_measurement_sensing_capa { + __NL80211_PMSR_SENSING_CAPA_ATTR_INVALID, + + NL80211_PMSR_SENSING_CAPA_ATTR_ASSOCIATED, + NL80211_PMSR_SENSING_CAPA_ATTR_UNASSOCIATED, + NL80211_PMSR_SENSING_CAPA_ATTR_VENDOR_MODE, + NL80211_PMSR_SENSING_CAPA_ATTR_BANDWIDTHS, + NL80211_PMSR_SENSING_CAPA_ATTR_MAX_TX_LTF_REP, + NL80211_PMSR_SENSING_CAPA_ATTR_MAX_RX_LTF_REP, + NL80211_PMSR_SENSING_CAPA_ATTR_MIN_INTERVAL, + NL80211_PMSR_SENSING_CAPA_ATTR_MAX_RX_CHAINS, + + /* keep last */ + NUM_NL80211_PMSR_SENSING_CAPA_ATTR, + NL80211_PMSR_SENSING_CAPA_ATTR_MAX = NUM_NL80211_PMSR_SENSING_CAPA_ATTR - 1 +}; + +/** + * enum nl80211_peer_measurement_sensing_req - Sensing request attributes + * @__NL80211_PMSR_SENSING_REQ_ATTR_INVALID: invalid + * + * @NL80211_PMSR_SENSING_REQ_ATTR_ASSOCIATED: device is in associated state + * (flag) + * @NL80211_PMSR_SENSING_REQ_ATTR_INTERVAL: interval between two consecutive + * sensing exchange (u32, optional, ms) + * @NL80211_PMSR_SENSING_REQ_ATTR_DURATION: duration of a sensing exchange + * (u16, optional, ms) + * @NL80211_PMSR_SENSING_REQ_ATTR_EXPIRY_EXP: measurement 
session expiry + * exponent (u8, 0-15) + * @NL80211_PMSR_SENSING_REQ_ATTR_TX_LTF_REP: set to the number of LTF + * repetitions minus 1 (u8, 0-7) + * @NL80211_PMSR_SENSING_REQ_ATTR_RX_LTF_REP: set to the number of LTF + * repetitions minus 1 (u8, 0-7) + * @NL80211_PMSR_SENSING_REQ_ATTR_TX_STS: set to the number of TX space-time + * streams minus 1 (u8, 0-7) + * @NL80211_PMSR_SENSING_REQ_ATTR_RX_STS: set to the number of RX space-time + * streams minus 1 (u8, 0-7) + * @NL80211_PMSR_SENSING_REQ_ATTR_NUM_RX_CHAINS: requested number of RX chains + * (u8, 0-7) + * @NL80211_PMSR_SENSING_REQ_ATTR_MIN_INTERVAL: minimum measurement interval + * between two consecutive non-TB sensing measurement exchanges + * (u32, 100us, 0-8388607) + * @NL80211_PMSR_SENSING_REQ_ATTR_VENDOR_REQ: vendor request block if vendor + * procedure is adopted (binary, optional) + * + * @NUM_NL80211_PMSR_SENSING_REQ_ATTR: internal + * @NL80211_PMSR_SENSING_REQ_ATTR_MAX: highest attribute number + */ +enum nl80211_peer_measurement_sensing_req { + __NL80211_PMSR_SENSING_REQ_ATTR_INVALID, + + NL80211_PMSR_SENSING_REQ_ATTR_ASSOCIATED, + NL80211_PMSR_SENSING_REQ_ATTR_INTERVAL, + NL80211_PMSR_SENSING_REQ_ATTR_DURATION, + NL80211_PMSR_SENSING_REQ_ATTR_EXPIRY_EXP, + NL80211_PMSR_SENSING_REQ_ATTR_TX_LTF_REP, + NL80211_PMSR_SENSING_REQ_ATTR_RX_LTF_REP, + NL80211_PMSR_SENSING_REQ_ATTR_TX_STS, + NL80211_PMSR_SENSING_REQ_ATTR_RX_STS, + NL80211_PMSR_SENSING_REQ_ATTR_NUM_RX_CHAINS, + NL80211_PMSR_SENSING_REQ_ATTR_MIN_INTERVAL, + NL80211_PMSR_SENSING_REQ_ATTR_VENDOR_REQ, + + /* keep last */ + NUM_NL80211_PMSR_SENSING_REQ_ATTR, + NL80211_PMSR_SENSING_REQ_ATTR_MAX = NUM_NL80211_PMSR_SENSING_REQ_ATTR - 1 +}; + +/** + * enum nl80211_peer_measurement_sensing_req - Sensing request attributes + * @__NL80211_PMSR_SENSING_RESP_ATTR_INVALID: invalid + * + * @NL80211_PMSR_SENSING_RESP_ATTR_REPORT_INDEX: index of the sensing measurement + * report (u8) + * @NL80211_PMSR_SENSING_RESP_ATTR_DATA: raw data of the sensing measurement 
+ * report (binary) + * + * @NUM_NL80211_PMSR_SENSING_RESP_ATTR: internal + * @NL80211_PMSR_SENSING_RESP_ATTR_MAX: highest attribute number + */ +enum nl80211_peer_measurement_sensing_resp { + __NL80211_PMSR_SENSING_RESP_ATTR_INVALID, + + NL80211_PMSR_SENSING_RESP_ATTR_REPORT_INDEX, + NL80211_PMSR_SENSING_RESP_ATTR_DATA, + + /* keep last */ + NUM_NL80211_PMSR_SENSING_RESP_ATTR, + NL80211_PMSR_SENSING_RESP_ATTR_MAX = NUM_NL80211_PMSR_SENSING_RESP_ATTR - 1 +}; + /** * enum nl80211_obss_pd_attributes - OBSS packet detection attributes * @__NL80211_HE_OBSS_PD_ATTR_INVALID: Invalid diff --git a/net/wireless/core.c b/net/wireless/core.c index a8e982ae8628..0692469dca60 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -786,39 +786,70 @@ int wiphy_register(struct wiphy *wiphy) if (WARN_ON(wiphy->interface_modes & BIT(NL80211_IFTYPE_WDS))) return -EINVAL; - if (WARN_ON(wiphy->pmsr_capa && !wiphy->pmsr_capa->ftm.supported)) + if (WARN_ON(wiphy->pmsr_capa && + !(wiphy->pmsr_capa->ftm.supported || + wiphy->pmsr_capa->sensing.supported))) return -EINVAL; - if (wiphy->pmsr_capa && wiphy->pmsr_capa->ftm.supported) { - if (WARN_ON(!wiphy->pmsr_capa->ftm.asap && - !wiphy->pmsr_capa->ftm.non_asap)) - return -EINVAL; - if (WARN_ON(!wiphy->pmsr_capa->ftm.preambles || - !wiphy->pmsr_capa->ftm.bandwidths)) - return -EINVAL; - if (WARN_ON(wiphy->pmsr_capa->ftm.preambles & - ~(BIT(NL80211_PREAMBLE_LEGACY) | - BIT(NL80211_PREAMBLE_HT) | - BIT(NL80211_PREAMBLE_VHT) | - BIT(NL80211_PREAMBLE_HE) | - BIT(NL80211_PREAMBLE_DMG)))) - return -EINVAL; - if (WARN_ON((wiphy->pmsr_capa->ftm.trigger_based || - wiphy->pmsr_capa->ftm.non_trigger_based) && - !(wiphy->pmsr_capa->ftm.preambles & - BIT(NL80211_PREAMBLE_HE)))) - return -EINVAL; - if (WARN_ON(wiphy->pmsr_capa->ftm.bandwidths & - ~(BIT(NL80211_CHAN_WIDTH_20_NOHT) | - BIT(NL80211_CHAN_WIDTH_20) | - BIT(NL80211_CHAN_WIDTH_40) | - BIT(NL80211_CHAN_WIDTH_80) | - BIT(NL80211_CHAN_WIDTH_80P80) | - BIT(NL80211_CHAN_WIDTH_160) | - 
BIT(NL80211_CHAN_WIDTH_320) | - BIT(NL80211_CHAN_WIDTH_5) | - BIT(NL80211_CHAN_WIDTH_10)))) - return -EINVAL; + if (wiphy->pmsr_capa) { + if (wiphy->pmsr_capa->ftm.supported) { + if (WARN_ON(!wiphy->pmsr_capa->ftm.asap && + !wiphy->pmsr_capa->ftm.non_asap)) + return -EINVAL; + if (WARN_ON(!wiphy->pmsr_capa->ftm.preambles || + !wiphy->pmsr_capa->ftm.bandwidths)) + return -EINVAL; + if (WARN_ON(wiphy->pmsr_capa->ftm.preambles & + ~(BIT(NL80211_PREAMBLE_LEGACY) | + BIT(NL80211_PREAMBLE_HT) | + BIT(NL80211_PREAMBLE_VHT) | + BIT(NL80211_PREAMBLE_HE) | + BIT(NL80211_PREAMBLE_DMG)))) + return -EINVAL; + if (WARN_ON((wiphy->pmsr_capa->ftm.trigger_based || + wiphy->pmsr_capa->ftm.non_trigger_based) && + !(wiphy->pmsr_capa->ftm.preambles & + BIT(NL80211_PREAMBLE_HE)))) + return -EINVAL; + if (WARN_ON(wiphy->pmsr_capa->ftm.bandwidths & + ~(BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_80P80) | + BIT(NL80211_CHAN_WIDTH_160) | + BIT(NL80211_CHAN_WIDTH_320) | + BIT(NL80211_CHAN_WIDTH_5) | + BIT(NL80211_CHAN_WIDTH_10)))) + return -EINVAL; + } + + if (wiphy->pmsr_capa->sensing.supported) { + if (WARN_ON(!wiphy->pmsr_capa->sensing.associated && + !wiphy->pmsr_capa->sensing.unassociated)) + return -EINVAL; + if (WARN_ON((wiphy->pmsr_capa->sensing.max_tx_ltf_rep > + IEEE80211_SENSING_CAPA_MAX_LTF_REP - 1) || + (wiphy->pmsr_capa->sensing.max_rx_ltf_rep > + IEEE80211_SENSING_CAPA_MAX_LTF_REP - 1) || + (wiphy->pmsr_capa->sensing.max_rx_chains > + IEEE80211_SENSING_CAPA_MAX_CHAINS - 1))) + return -EINVAL; + if (WARN_ON(wiphy->pmsr_capa->sensing.min_interval > + IEEE80211_SENSING_CAPA_MAX_MEASUREMENT_INTERVAL)) + return -EINVAL; + if (WARN_ON(wiphy->pmsr_capa->sensing.bandwidths & + ~(BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_80P80) | + BIT(NL80211_CHAN_WIDTH_160) | + 
BIT(NL80211_CHAN_WIDTH_320) | + BIT(NL80211_CHAN_WIDTH_5) | + BIT(NL80211_CHAN_WIDTH_10)))) + return -EINVAL; + } } if (WARN_ON((wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) && diff --git a/net/wireless/core.h b/net/wireless/core.h index b6bd7f4d6385..9ddf4075e969 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -544,6 +544,9 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, void cfg80211_stop_nan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev); +void cfg80211_stop_wlan_sense(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev); + struct cfg80211_internal_bss * cfg80211_bss_update(struct cfg80211_registered_device *rdev, struct cfg80211_internal_bss *tmp, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 70717af1fbcd..5ce75034cf3c 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -343,10 +343,28 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = { [NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR] = { .type = NLA_U8 }, }; +static const struct nla_policy +nl80211_pmsr_sensing_req_attr_policy[NL80211_PMSR_SENSING_REQ_ATTR_MAX + 1] = { + [NL80211_PMSR_SENSING_REQ_ATTR_ASSOCIATED] = { .type = NLA_FLAG }, + [NL80211_PMSR_SENSING_REQ_ATTR_INTERVAL] = { .type = NLA_U32 }, + [NL80211_PMSR_SENSING_REQ_ATTR_DURATION] = { .type = NLA_U16 }, + [NL80211_PMSR_SENSING_REQ_ATTR_EXPIRY_EXP] = { .type = NLA_U8 }, + [NL80211_PMSR_SENSING_REQ_ATTR_TX_LTF_REP] = { .type = NLA_U8 }, + [NL80211_PMSR_SENSING_REQ_ATTR_RX_LTF_REP] = { .type = NLA_U8 }, + [NL80211_PMSR_SENSING_REQ_ATTR_TX_STS] = { .type = NLA_U8 }, + [NL80211_PMSR_SENSING_REQ_ATTR_RX_STS] = { .type = NLA_U8 }, + [NL80211_PMSR_SENSING_REQ_ATTR_NUM_RX_CHAINS] = { .type = NLA_U8 }, + [NL80211_PMSR_SENSING_REQ_ATTR_MIN_INTERVAL] = { .type = NLA_U32 }, + [NL80211_PMSR_SENSING_REQ_ATTR_VENDOR_REQ] = { .type = NLA_BINARY, + .len = U8_MAX }, +}; + static const struct nla_policy 
nl80211_pmsr_req_data_policy[NL80211_PMSR_TYPE_MAX + 1] = { [NL80211_PMSR_TYPE_FTM] = NLA_POLICY_NESTED(nl80211_pmsr_ftm_req_attr_policy), + [NL80211_PMSR_TYPE_SENSING] = + NLA_POLICY_NESTED(nl80211_pmsr_sensing_req_attr_policy), }; static const struct nla_policy @@ -2267,6 +2285,55 @@ nl80211_send_pmsr_ftm_capa(const struct cfg80211_pmsr_capabilities *cap, return 0; } +static int +nl80211_send_pmsr_sensing_capa(const struct cfg80211_pmsr_capabilities *cap, + struct sk_buff *msg) +{ + struct nlattr *sensing; + + if (!cap->sensing.supported) + return 0; + + sensing = nla_nest_start_noflag(msg, NL80211_PMSR_TYPE_SENSING); + if (!sensing) + return -ENOBUFS; + + if (cap->sensing.associated && + nla_put_flag(msg, NL80211_PMSR_SENSING_CAPA_ATTR_ASSOCIATED)) + return -ENOBUFS; + + if (cap->sensing.unassociated && + nla_put_flag(msg, NL80211_PMSR_SENSING_CAPA_ATTR_UNASSOCIATED)) + return -ENOBUFS; + + if (cap->sensing.vendor_mode && + nla_put_flag(msg, NL80211_PMSR_SENSING_CAPA_ATTR_VENDOR_MODE)) + return -ENOBUFS; + + if (nla_put_u32(msg, NL80211_PMSR_SENSING_CAPA_ATTR_BANDWIDTHS, + cap->sensing.bandwidths)) + return -ENOBUFS; + + if (nla_put_u8(msg, NL80211_PMSR_SENSING_CAPA_ATTR_MAX_TX_LTF_REP, + cap->sensing.max_tx_ltf_rep)) + return -ENOBUFS; + + if (nla_put_u8(msg, NL80211_PMSR_SENSING_CAPA_ATTR_MAX_RX_LTF_REP, + cap->sensing.max_rx_ltf_rep)) + return -ENOBUFS; + + if (nla_put_u32(msg, NL80211_PMSR_SENSING_CAPA_ATTR_MIN_INTERVAL, + cap->sensing.min_interval)) + return -ENOBUFS; + + if (nla_put_u8(msg, NL80211_PMSR_SENSING_CAPA_ATTR_MAX_RX_CHAINS, + cap->sensing.max_rx_chains)) + return -ENOBUFS; + + nla_nest_end(msg, sensing); + return 0; +} + static int nl80211_send_pmsr_capa(struct cfg80211_registered_device *rdev, struct sk_buff *msg) { @@ -2303,6 +2370,9 @@ static int nl80211_send_pmsr_capa(struct cfg80211_registered_device *rdev, if (nl80211_send_pmsr_ftm_capa(cap, msg)) return -ENOBUFS; + if (nl80211_send_pmsr_sensing_capa(cap, msg)) + return -ENOBUFS; + 
nla_nest_end(msg, caps); nla_nest_end(msg, pmsr); diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c index a117f5093ca2..6eddb7a75356 100644 --- a/net/wireless/pmsr.c +++ b/net/wireless/pmsr.c @@ -189,6 +189,114 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev, return 0; } +static int pmsr_parse_sensing(struct cfg80211_registered_device *rdev, + struct nlattr *sensingreq, + struct cfg80211_pmsr_request_peer *out, + struct genl_info *info) +{ + const struct cfg80211_pmsr_capabilities *capa = rdev->wiphy.pmsr_capa; + struct nlattr *tb[NL80211_PMSR_SENSING_REQ_ATTR_MAX + 1]; + + nla_parse_nested(tb, NL80211_PMSR_SENSING_REQ_ATTR_MAX, + sensingreq, NULL, NULL); + + out->sensing.associated = !!tb[NL80211_PMSR_SENSING_REQ_ATTR_ASSOCIATED]; + if (out->sensing.associated && !capa->sensing.associated) { + NL_SET_ERR_MSG(info->extack, "SENSING: associated mode not supported"); + return -EINVAL; + } + if (!out->sensing.associated && !capa->sensing.unassociated) { + NL_SET_ERR_MSG(info->extack, "SENSING: unassociated mode not supported"); + return -EINVAL; + } + + out->sensing.interval = 10; + if (tb[NL80211_PMSR_SENSING_REQ_ATTR_INTERVAL]) + out->sensing.interval = nla_get_u32(tb[NL80211_PMSR_SENSING_REQ_ATTR_INTERVAL]); + + out->sensing.duration = 10; + if (tb[NL80211_PMSR_SENSING_REQ_ATTR_DURATION]) + out->sensing.duration = nla_get_u16(tb[NL80211_PMSR_SENSING_REQ_ATTR_DURATION]); + + out->sensing.expiry_exp = 0; + if (tb[NL80211_PMSR_SENSING_REQ_ATTR_EXPIRY_EXP]) + out->sensing.expiry_exp = nla_get_u8(tb[NL80211_PMSR_SENSING_REQ_ATTR_EXPIRY_EXP]); + if (out->sensing.expiry_exp > 15) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[NL80211_PMSR_SENSING_REQ_ATTR_EXPIRY_EXP], + "SENSING: invalid Session Expiry Exponent"); + return -EINVAL; + } + + out->sensing.tx_ltf_rep = 0; + if (tb[NL80211_PMSR_SENSING_REQ_ATTR_TX_LTF_REP]) + out->sensing.tx_ltf_rep = nla_get_u8(tb[NL80211_PMSR_SENSING_REQ_ATTR_TX_LTF_REP]); + if (out->sensing.tx_ltf_rep > 
capa->sensing.max_tx_ltf_rep) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[NL80211_PMSR_SENSING_REQ_ATTR_TX_LTF_REP], + "SENSING: invalid TX LTF Repetition"); + return -EINVAL; + } + + out->sensing.rx_ltf_rep = 0; + if (tb[NL80211_PMSR_SENSING_REQ_ATTR_RX_LTF_REP]) + out->sensing.rx_ltf_rep = nla_get_u8(tb[NL80211_PMSR_SENSING_REQ_ATTR_RX_LTF_REP]); + if (out->sensing.rx_ltf_rep > capa->sensing.max_rx_ltf_rep) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[NL80211_PMSR_SENSING_REQ_ATTR_RX_LTF_REP], + "SENSING: invalid RX LTF Repetition"); + return -EINVAL; + } + + out->sensing.tx_sts = 0; + if (tb[NL80211_PMSR_SENSING_REQ_ATTR_TX_STS]) + out->sensing.tx_sts = nla_get_u8(tb[NL80211_PMSR_SENSING_REQ_ATTR_TX_STS]); + if (out->sensing.tx_sts > 7) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[NL80211_PMSR_SENSING_REQ_ATTR_TX_STS], + "SENSING: invalid TX STS"); + return -EINVAL; + } + + out->sensing.rx_sts = 0; + if (tb[NL80211_PMSR_SENSING_REQ_ATTR_RX_STS]) + out->sensing.rx_sts = nla_get_u8(tb[NL80211_PMSR_SENSING_REQ_ATTR_RX_STS]); + if (out->sensing.rx_sts > 7) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[NL80211_PMSR_SENSING_REQ_ATTR_RX_STS], + "SENSING: invalid RX STS"); + return -EINVAL; + } + + out->sensing.num_rx_chains = 0; + if (tb[NL80211_PMSR_SENSING_REQ_ATTR_NUM_RX_CHAINS]) + out->sensing.num_rx_chains = + nla_get_u8(tb[NL80211_PMSR_SENSING_REQ_ATTR_NUM_RX_CHAINS]); + if (out->sensing.num_rx_chains > capa->sensing.max_rx_chains) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[NL80211_PMSR_SENSING_REQ_ATTR_NUM_RX_CHAINS], + "SENSING: invalid number of RX Chains"); + return -EINVAL; + } + + out->sensing.min_interval = 0; + if (tb[NL80211_PMSR_SENSING_REQ_ATTR_MIN_INTERVAL]) + out->sensing.min_interval = + nla_get_u32(tb[NL80211_PMSR_SENSING_REQ_ATTR_MIN_INTERVAL]); + if (out->sensing.min_interval > capa->sensing.min_interval) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[NL80211_PMSR_SENSING_REQ_ATTR_MIN_INTERVAL], + "SENSING: invalid min Measurement Interval"); + return 
-EINVAL; + } + + /* NL80211_PMSR_SENSING_REQ_ATTR_VENDOR_REQ is optional; guard the + * nla_data()/nla_len() calls to avoid a NULL dereference when it is + * absent from a vendor-mode-capable request. + */ + if (capa->sensing.vendor_mode && + tb[NL80211_PMSR_SENSING_REQ_ATTR_VENDOR_REQ]) { + out->sensing.vendor_req = nla_data(tb[NL80211_PMSR_SENSING_REQ_ATTR_VENDOR_REQ]); + out->sensing.vendor_req_len = nla_len(tb[NL80211_PMSR_SENSING_REQ_ATTR_VENDOR_REQ]); + } + return 0; +} + static int pmsr_parse_peer(struct cfg80211_registered_device *rdev, struct nlattr *peer, struct cfg80211_pmsr_request_peer *out, @@ -250,8 +358,13 @@ static int pmsr_parse_peer(struct cfg80211_registered_device *rdev, nla_for_each_nested(treq, req[NL80211_PMSR_REQ_ATTR_DATA], rem) { switch (nla_type(treq)) { case NL80211_PMSR_TYPE_FTM: + out->type = NL80211_PMSR_TYPE_FTM; err = pmsr_parse_ftm(rdev, treq, out, info); break; + case NL80211_PMSR_TYPE_SENSING: + out->type = NL80211_PMSR_TYPE_SENSING; + err = pmsr_parse_sensing(rdev, treq, out, info); + break; default: NL_SET_ERR_MSG_ATTR(info->extack, treq, "unsupported measurement type"); @@ -328,6 +441,17 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info) err = pmsr_parse_peer(rdev, peer, &req->peers[idx], info); if (err) goto out_err; + + /* + * Sensing operation is restricted to run in sense interface only, + * so it will not mix with normal data + */ + if (req->peers[idx].type == NL80211_PMSR_TYPE_SENSING && + wdev->iftype != NL80211_IFTYPE_WLAN_SENSE) { + err = -EOPNOTSUPP; + goto out_err; + } + idx++; } req->cookie = cfg80211_assign_cookie(rdev); @@ -399,6 +523,23 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev, } EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete); +static int nl80211_pmsr_send_sensing_res(struct sk_buff *msg, + struct cfg80211_pmsr_result *res) +{ + if (nla_put_u16(msg, NL80211_PMSR_SENSING_RESP_ATTR_REPORT_INDEX, + res->sensing.seq_number)) + goto error; + + if (res->sensing.data_len && res->sensing.data) + if ((nla_put(msg, NL80211_PMSR_SENSING_RESP_ATTR_DATA, + res->sensing.data_len, res->sensing.data))) + goto error; + + return 0; +error: + return -ENOSPC; +} + static int nl80211_pmsr_send_ftm_res(struct sk_buff *msg, struct cfg80211_pmsr_result *res) { @@
-537,6 +676,10 @@ static int nl80211_pmsr_send_result(struct sk_buff *msg, if (nl80211_pmsr_send_ftm_res(msg, res)) goto error; break; + case NL80211_PMSR_TYPE_SENSING: + if (nl80211_pmsr_send_sensing_res(msg, res)) + goto error; + break; default: WARN_ON(1); } -- 2.25.1 Add the MACRO definitions for Radio Measurement (RM) Enabled Capabilities defined in IEEE Std 802.11-2024, 9.4.2.43, Table 9-218. Signed-off-by: Gokul Sivakumar --- include/linux/ieee80211.h | 42 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a3a2ac682fd4..2ab5ab2bee2b 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -4028,6 +4028,48 @@ enum ieee80211_s1g_actioncode { WLAN_S1G_TWT_INFORMATION = 11, }; +/* RM Enabled Capabilities (IEEE Std 802.11-2024, 9.4.2.43, Table 9-218) */ +#define IEEE80211_RM_ENAB_CAP0_LINK_MSR BIT(0) +#define IEEE80211_RM_ENAB_CAP0_NEIGHBOR_REPORT BIT(1) +#define IEEE80211_RM_ENAB_CAP0_PARALLEL_MSR BIT(2) +#define IEEE80211_RM_ENAB_CAP0_REPEATED_MSR BIT(3) +#define IEEE80211_RM_ENAB_CAP0_BCN_PASSIVE_MSR BIT(4) +#define IEEE80211_RM_ENAB_CAP0_BCN_ACTIVE_MSR BIT(5) +#define IEEE80211_RM_ENAB_CAP0_BCN_TABLE_MSR BIT(6) +#define IEEE80211_RM_ENAB_CAP0_BCN_MSR_REPORTING_COND BIT(7) + +#define IEEE80211_RM_ENAB_CAP1_FRAME_MSR BIT(0) +#define IEEE80211_RM_ENAB_CAP1_CHANNEL_LOAD_MSR BIT(1) +#define IEEE80211_RM_ENAB_CAP1_NOISE_HISTOGRAM_MSR BIT(2) +#define IEEE80211_RM_ENAB_CAP1_STATISTICS_MSR BIT(3) +#define IEEE80211_RM_ENAB_CAP1_LCI_MSR BIT(4) +#define IEEE80211_RM_ENAB_CAP1_LCI_AZIMUTH BIT(5) +#define IEEE80211_RM_ENAB_CAP1_TRANSMIT_STREAM_MSR BIT(6) +#define IEEE80211_RM_ENAB_CAP1_TRIGGERED_TRANSMIT_STREAM_MSR BIT(7) + +#define IEEE80211_RM_ENAB_CAP2_AP_CHANNEL_REPORT BIT(0) +#define IEEE80211_RM_ENAB_CAP2_RM_MIB BIT(1) +#define IEEE80211_RM_ENAB_CAP2_OP_CH_MAX_MSR_DUR_SHIFT 2 +#define IEEE80211_RM_ENAB_CAP2_OP_CH_MAX_MSR_DUR_MASK \ + (7 << 
IEEE80211_RM_ENAB_CAP2_OP_CH_MAX_MSR_DUR_SHIFT) +#define IEEE80211_RM_ENAB_CAP2_NONOP_CH_MAX_MSR_DUR_SHIFT 5 +#define IEEE80211_RM_ENAB_CAP2_NONOP_CH_MAX_MSR_DUR_MASK \ + (7 << IEEE80211_RM_ENAB_CAP2_NONOP_CH_MAX_MSR_DUR_SHIFT) + +#define IEEE80211_RM_ENAB_CAP3_MSR_PILOT_SHIFT 0 +#define IEEE80211_RM_ENAB_CAP3_MSR_PILOT_MASK \ + (7 << IEEE80211_RM_ENAB_CAP3_MSR_PILOT_SHIFT) +#define IEEE80211_RM_ENAB_CAP3_MSR_PILOT_TRANSMISSION_INFO BIT(3) +#define IEEE80211_RM_ENAB_CAP3_NEIGHBOR_REPORT_TSF_OFFSET BIT(4) +#define IEEE80211_RM_ENAB_CAP3_RCPI_MSR BIT(5) +#define IEEE80211_RM_ENAB_CAP3_RSNI_MSR BIT(6) +#define IEEE80211_RM_ENAB_CAP3_BSS_AVERAGE_ACCESS_DELAY BIT(7) + +#define IEEE80211_RM_ENAB_CAP4_BSS_AVAIL_ADMISSION_CAPACITY BIT(0) +#define IEEE80211_RM_ENAB_CAP4_ANTENNA BIT(1) +#define IEEE80211_RM_ENAB_CAP4_FTM_RANGE_REPORT BIT(2) +#define IEEE80211_RM_ENAB_CAP4_CIVIC_LOCATION_MSR BIT(3) + /* Radio measurement action codes as defined in IEEE 802.11-2024 - Table 9-470 */ enum ieee80211_radio_measurement_actioncode { WLAN_RM_ACTION_RADIO_MEASUREMENT_REQUEST = 0, -- 2.25.1 Add the SDIO Device IDs of new Generation Infineon(Cypress) SDIO chipsets, which get enumerated with the Cypress SDIO Vendor ID (0x04b4).
Signed-off-by: Gokul Sivakumar --- include/linux/mmc/sdio_ids.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 673cbdf43453..44ed76e89539 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -81,6 +81,10 @@ #define SDIO_VENDOR_ID_CYPRESS 0x04b4 #define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43439 0xbd3d +#define SDIO_DEVICE_ID_CYPRESS_55572 0xbd31 +#define SDIO_DEVICE_ID_CYPRESS_55500 0xbd3e +#define SDIO_DEVICE_ID_CYPRESS_43022 0xbd3f +#define SDIO_DEVICE_ID_CYPRESS_55900 0xbd40 #define SDIO_VENDOR_ID_MARVELL 0x02df #define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103 -- 2.25.1 Update the wireless driver common Kconfig and Makefile to include the new WLAN vendor directory for Infineon(Cypress). Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/Kconfig | 1 + drivers/net/wireless/Makefile | 1 + drivers/net/wireless/infineon/Kconfig | 36 ++++++++++++++++++++++++++ drivers/net/wireless/infineon/Makefile | 12 +++++++++ 4 files changed, 50 insertions(+) create mode 100644 drivers/net/wireless/infineon/Kconfig create mode 100644 drivers/net/wireless/infineon/Makefile diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index c6599594dc99..c85c085741db 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -22,6 +22,7 @@ source "drivers/net/wireless/admtek/Kconfig" source "drivers/net/wireless/ath/Kconfig" source "drivers/net/wireless/atmel/Kconfig" source "drivers/net/wireless/broadcom/Kconfig" +source "drivers/net/wireless/infineon/Kconfig" source "drivers/net/wireless/intel/Kconfig" source "drivers/net/wireless/intersil/Kconfig" source "drivers/net/wireless/marvell/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index e1c4141c6004..c934578aefe8 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_WLAN_VENDOR_ADMTEK) += admtek/ 
obj-$(CONFIG_WLAN_VENDOR_ATH) += ath/ obj-$(CONFIG_WLAN_VENDOR_ATMEL) += atmel/ obj-$(CONFIG_WLAN_VENDOR_BROADCOM) += broadcom/ +obj-$(CONFIG_WLAN_VENDOR_INFINEON) += infineon/ obj-$(CONFIG_WLAN_VENDOR_INTEL) += intel/ obj-$(CONFIG_WLAN_VENDOR_INTERSIL) += intersil/ obj-$(CONFIG_WLAN_VENDOR_MARVELL) += marvell/ diff --git a/drivers/net/wireless/infineon/Kconfig b/drivers/net/wireless/infineon/Kconfig new file mode 100644 index 000000000000..f2d56d15a741 --- /dev/null +++ b/drivers/net/wireless/infineon/Kconfig @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: ISC + +config WLAN_VENDOR_INFINEON + bool "Infineon devices" + default y + help + If you have a wireless card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_INFINEON + +source "drivers/net/wireless/infineon/inffmac/Kconfig" + +config INF_TRACING + bool "Infineon device tracing" + depends on INFFMAC + help + If you say Y here, the Infineon wireless drivers will register + with ftrace to dump event information into the trace ringbuffer. + Tracing can be enabled at runtime to aid in debugging wireless + issues. This option adds a small amount of overhead when tracing + is disabled. If unsure, say Y to allow developers to better help + you when wireless problems occur. + +config INF_DEBUG + bool "Infineon driver debug functions" + depends on INFFMAC + select WANT_DEV_COREDUMP + help + Selecting this enables additional code for debug purposes. 
+ +endif # WLAN_VENDOR_INFINEON diff --git a/drivers/net/wireless/infineon/Makefile b/drivers/net/wireless/infineon/Makefile new file mode 100644 index 000000000000..1f1872569a3c --- /dev/null +++ b/drivers/net/wireless/infineon/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: ISC +# +# Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. +# All rights reserved. +# +# Makefile fragment for Infineon 802.11 Networking Device Driver +# + +# common flags +subdir-ccflags-$(CONFIG_INF_DEBUG) += -DDEBUG + +obj-$(CONFIG_INFFMAC) += inffmac/ -- 2.25.1 Driver implementation for Initating Peer Measurement (PMSR) Request and returning the collected result back to the userspace through cfg80211. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/pmsr.c | 282 +++++++++++++++++++ drivers/net/wireless/infineon/inffmac/pmsr.h | 35 +++ 2 files changed, 317 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/pmsr.c create mode 100644 drivers/net/wireless/infineon/inffmac/pmsr.h diff --git a/drivers/net/wireless/infineon/inffmac/pmsr.c b/drivers/net/wireless/infineon/inffmac/pmsr.c new file mode 100644 index 000000000000..049b39c984cc --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/pmsr.c @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include +#include +#include "cfg80211.h" +#include "debug.h" +#include "pmsr.h" + +void inff_wiphy_pmsr_params(struct wiphy *wiphy, struct inff_if *ifp) +{ + struct cfg80211_pmsr_capabilities *pmsr_capa; + + if (!inff_feat_is_enabled(ifp, INFF_FEAT_FTM) && + !inff_feat_is_enabled(ifp, INFF_FEAT_WLAN_SENSE)) + return; + + pmsr_capa = kzalloc(sizeof(*pmsr_capa), GFP_KERNEL); + if (!pmsr_capa) + return; + + pmsr_capa->max_peers = INFF_PMSR_PEER_MAX; + pmsr_capa->report_ap_tsf = 0; + pmsr_capa->randomize_mac_addr = 1; + + if (inff_feat_is_enabled(ifp, INFF_FEAT_FTM)) { + pmsr_capa->ftm.preambles = BIT(NL80211_PREAMBLE_HT) | + BIT(NL80211_PREAMBLE_VHT); + pmsr_capa->ftm.bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20); + pmsr_capa->ftm.max_bursts_exponent = -1; /* all supported */ + pmsr_capa->ftm.max_ftms_per_burst = 0; /* no limits */ + pmsr_capa->ftm.supported = 1; + pmsr_capa->ftm.asap = 1; + pmsr_capa->ftm.non_asap = 1; + pmsr_capa->ftm.request_lci = 1; + pmsr_capa->ftm.request_civicloc = 1; + pmsr_capa->ftm.trigger_based = 0; + pmsr_capa->ftm.non_trigger_based = 0; + } + + if (inff_feat_is_enabled(ifp, INFF_FEAT_WLAN_SENSE)) { + pmsr_capa->sensing.supported = 1; + pmsr_capa->sensing.associated = 1; + pmsr_capa->sensing.unassociated = 1; + pmsr_capa->sensing.vendor_mode = 1; + pmsr_capa->sensing.bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20); + pmsr_capa->sensing.max_tx_ltf_rep = 0; + pmsr_capa->sensing.max_rx_ltf_rep = 0; + pmsr_capa->sensing.min_interval = 0; + pmsr_capa->sensing.max_rx_chains = 0; + } + + wiphy->pmsr_capa = pmsr_capa; +} + +int inff_cfg80211_start_pmsr(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_pmsr_request *request) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct cfg80211_pmsr_request_peer *peer = &request->peers[0]; + struct inff_ftm_info *ftm_info = cfg->pmsr_info->ftm_info; + struct inff_wlan_sense_info *wlan_sense_info = 
cfg->pmsr_info->wlan_sense_info; + struct inff_wlan_sense_cfg wlan_sense_cfg = {0}; + s32 err = 0; + + if (request->n_peers > INFF_PMSR_PEER_MAX) { + inff_err("PMSR: n_peers %d exceeds max peers %d!\n", + request->n_peers, INFF_PMSR_PEER_MAX); + err = -EINVAL; + goto exit; + } + + switch (peer->type) { + case NL80211_PMSR_TYPE_FTM: + if (!ftm_info || !ftm_info->vif) { + inff_err("FTM: context not created!\n"); + err = -EACCES; + goto exit; + } + + if (wdev != &ftm_info->vif->wdev) { + inff_err("FTM: get request from invalid wdev interface!\n"); + err = -EINVAL; + goto exit; + } + + if (ftm_info->ftm_req) { + inff_err("FTM: task is running!\n"); + err = -EBUSY; + goto exit; + } + + inff_dbg(TRACE, "FTM: FTM request\n" + "peer_mac : %pM\n" + "center_freq : %d\n" + "bandwidth : %d\n" + "preamble : %d\n" + "burst_period : %d\n" + "requested : %d\n" + "asap : %d\n" + "request_lci : %d\n" + "request_civicloc : %d\n" + "trigger_based : %d\n" + "non_trigger_based : %d\n" + "num_bursts_exp : %d\n" + "burst_duration : %d\n" + "ftms_per_burst : %d\n" + "ftmr_retries : %d\n", + peer->addr, peer->chandef.chan->center_freq, peer->chandef.width, + peer->ftm.preamble, peer->ftm.burst_period, peer->ftm.requested, + peer->ftm.asap, peer->ftm.request_lci, peer->ftm.request_civicloc, + peer->ftm.trigger_based, peer->ftm.non_trigger_based, + peer->ftm.num_bursts_exp, peer->ftm.burst_duration, + peer->ftm.ftms_per_burst, peer->ftm.ftmr_retries); + + /* FTM global/session configure */ + err = inff_ftm_set_global_config(ftm_info, peer); + if (err) + goto exit; + err = inff_ftm_set_session_config(ftm_info, peer, + INFF_PROXD_SESSION_ID_DEFAULT_FTM); + if (err) + goto exit; + + ftm_info->ftm_req = request; + break; + case NL80211_PMSR_TYPE_SENSING: + if (!wlan_sense_info || !wlan_sense_info->vif) { + inff_err("WLAN SENSE: interface not created!\n"); + err = -EACCES; + goto exit; + } + + if (wdev != &wlan_sense_info->vif->wdev) { + inff_err("WLAN SENSE: get request from invalid wdev 
interface!\n"); + err = -EINVAL; + goto exit; + } + + if (wlan_sense_info->sense_req) { + inff_err("WLAN SENSE: task is running!\n"); + err = -EBUSY; + goto exit; + } + + err = inff_wlan_sense_parse_req(peer, &wlan_sense_cfg); + if (err) + goto exit; + + inff_dbg(TRACE, "WLAN SENSE: SENSING request\n" + "interval : %d\n" + "duration : %d\n" + "mode_flags : %d\n" + "bss_scope : %d\n" + "ignore_fcs : %d\n" + "ta[0] : %pM\n" + "ta[1] : %pM\n" + "ta[2] : %pM\n" + "ta[3] : %pM\n" + "frmtyp_subtyp[0]: 0x%x\n" + "frmtyp_subtyp[1]: 0x%x\n", + wlan_sense_cfg.interval, wlan_sense_cfg.duration, + wlan_sense_cfg.mode_flags, wlan_sense_cfg.filter.bss_scope, + wlan_sense_cfg.filter.ignore_fcs, + wlan_sense_cfg.filter.ta[0].octet, wlan_sense_cfg.filter.ta[1].octet, + wlan_sense_cfg.filter.ta[2].octet, wlan_sense_cfg.filter.ta[3].octet, + wlan_sense_cfg.filter.frmtyp_subtyp[0], + wlan_sense_cfg.filter.frmtyp_subtyp[1]); + + err = inff_wlan_sense_oper_handler(wiphy, wdev, + INFF_WLAN_SENSE_OPER_CONFIGURE, + wlan_sense_cfg); + if (err) + goto exit; + err = inff_wlan_sense_oper_handler(wiphy, wdev, + INFF_WLAN_SENSE_OPER_ENABLE, + wlan_sense_cfg); + if (err) + goto exit; + + wlan_sense_info->sense_req = request; + break; + default: + inff_err("PMSR: type %d not support!\n", peer->type); + err = -EINVAL; + break; + } + +exit: + return err; +} + +void inff_cfg80211_abort_pmsr(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_pmsr_request *request) +{ + s32 err = 0; + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_ftm_info *ftm_info = cfg->pmsr_info->ftm_info; + struct inff_wlan_sense_info *wlan_sense_info = cfg->pmsr_info->wlan_sense_info; + struct inff_wlan_sense_cfg wlan_sense_cfg = {0}; + + if (ftm_info && ftm_info->vif && + &ftm_info->vif->wdev == wdev) { + /* FTM case */ + if (!ftm_info->ftm_req) { + inff_err("FTM: task not running!\n"); + return; + } + + inff_err("FTM: not support abort currently!\n"); + return; + } else if (wlan_sense_info && 
wlan_sense_info->vif && + &wlan_sense_info->vif->wdev == wdev) { + /* WLAN_SENSE case */ + if (!wlan_sense_info->sense_req || !wlan_sense_info->sensing) { + inff_err("WLAN SENSE: task not running!\n"); + return; + } + + err = inff_wlan_sense_oper_handler(wiphy, wdev, + INFF_WLAN_SENSE_OPER_DISABLE, + wlan_sense_cfg); + inff_dbg(TRACE, "WLAN SENSE: err %d\n", err); + } else { + inff_err("PMSR: no matching interface!\n"); + } +} + +void inff_pmsr_debugfs_create(struct inff_pub *drvr) +{ + struct wiphy *wiphy = drvr->wiphy; + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_ftm_info *ftm_info = cfg->pmsr_info->ftm_info; + struct dentry *dentry = inff_debugfs_get_devdir(drvr); + + debugfs_create_u8("ftm_partial_report", 0644, dentry, + &ftm_info->ftm_partial_report); + debugfs_create_u32("ftm_debug_mask", 0644, dentry, + &ftm_info->ftm_debug_mask); + inff_debugfs_add_entry(drvr, "wlan_sense_stats", + inff_wlan_sense_stats_read); +} + +s32 inff_pmsr_attach(struct inff_cfg80211_info *cfg) +{ + struct net_device *ndev = cfg_to_ndev(cfg); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pmsr_info *pmsr_info; + s32 err = 0; + + pmsr_info = kzalloc(sizeof(*pmsr_info), GFP_KERNEL); + if (!pmsr_info) + return -ENOMEM; + cfg->pmsr_info = pmsr_info; + + if (inff_feat_is_enabled(ifp, INFF_FEAT_FTM)) { + err = inff_ftm_attach(cfg); + if (err) + return err; + } + if (inff_feat_is_enabled(ifp, INFF_FEAT_WLAN_SENSE)) { + err = inff_wlan_sense_attach(cfg); + if (err) + return err; + } + + return 0; +} + +void inff_pmsr_detach(struct inff_cfg80211_info *cfg) +{ + inff_wlan_sense_detach(cfg); + inff_ftm_detach(cfg); + + kfree(cfg->pmsr_info); + cfg->pmsr_info = NULL; +} diff --git a/drivers/net/wireless/infineon/inffmac/pmsr.h b/drivers/net/wireless/infineon/inffmac/pmsr.h new file mode 100644 index 000000000000..83d88eb2c229 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/pmsr.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright 
(c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_PMSR_H +#define INFF_PMSR_H + +#include "ftm.h" +#include "wlan_sense.h" + +#define INFF_PMSR_PEER_MAX 1 + +/** + * struct inff_pmsr_info - context for each kind of PMSR measurement procedure + * + * @ftm_info: FTM context + * @wlan_sense_info: WLAN Sensing context + */ +struct inff_pmsr_info { + struct inff_ftm_info *ftm_info; + struct inff_wlan_sense_info *wlan_sense_info; +}; + +void inff_wiphy_pmsr_params(struct wiphy *wiphy, struct inff_if *ifp); +int inff_cfg80211_start_pmsr(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_pmsr_request *request); +void inff_cfg80211_abort_pmsr(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_pmsr_request *request); +void inff_pmsr_debugfs_create(struct inff_pub *drvr); +s32 inff_pmsr_attach(struct inff_cfg80211_info *cfg); +void inff_pmsr_detach(struct inff_cfg80211_info *cfg); + +#endif /* INFF_PMSR_H */ -- 2.25.1 Driver implementation of Fine timing measurement (FTM) support, which helps in finding the accurate round-trip time (RTT) between another supported peer WLAN device using the Peer Measurement Request (PMSR) infrastructure currently available in nl80211/cfg80211. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/ftm.c | 605 ++++++++++++++++++++ drivers/net/wireless/infineon/inffmac/ftm.h | 382 ++++++++++++ 2 files changed, 987 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/ftm.c create mode 100644 drivers/net/wireless/infineon/inffmac/ftm.h diff --git a/drivers/net/wireless/infineon/inffmac/ftm.c b/drivers/net/wireless/infineon/inffmac/ftm.c new file mode 100644 index 000000000000..ec82e3adbb31 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/ftm.c @@ -0,0 +1,605 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. 
+ * All rights reserved. + */ + +#include +#include +#include "cfg80211.h" +#include "debug.h" +#include "fwil.h" +#include "pmsr.h" + +/* Widen to s64 before multiplying: the TU/SEC/... scale factors overflow + * a 32-bit intvl.intvl if the product is evaluated in the operand's type. + */ +static s64 +intvl_to_picosecond(struct inff_proxd_intvl intvl) +{ + switch (intvl.tmu) { + case INFF_PROXD_TMU_TU: + return ((s64)intvl.intvl * 1024 * 1000 * 1000); + case INFF_PROXD_TMU_SEC: + return ((s64)intvl.intvl * 1000 * 1000 * 1000 * 1000); + case INFF_PROXD_TMU_MILLI_SEC: + return ((s64)intvl.intvl * 1000 * 1000 * 1000); + case INFF_PROXD_TMU_MICRO_SEC: + return ((s64)intvl.intvl * 1000 * 1000); + case INFF_PROXD_TMU_NANO_SEC: + return ((s64)intvl.intvl * 1000); + case INFF_PROXD_TMU_PICO_SEC: + return (intvl.intvl); + default: + return (-1); + } +} + +static void +inff_print_rtt_info(struct inff_proxd_rtt_result_v2 *rtt_result, + struct inff_proxd_rtt_sample_v2 *rtt_sample) +{ + inff_dbg(TRACE, "FTM: RTT result (%d)\n" + "version : 0x%x\n" + "length : %d\n" + "sid : %d\n" + "flags : 0x%x\n" + "status : %d\n" + "peer_mac : %pM\n" + "state : %d\n" + "burst_duration : %d / %d\n" + "avg_dist : %d\n" + "sd_rtt : %d\n" + "num_valid_rtt : %d\n" + "num_ftm : %d\n" + "burst_num : %d\n" + "num_rtt : %d\n" + "num_meas : %d\n", + rtt_result->burst_num, // show burst_num in the first line as result index + rtt_result->version, rtt_result->length, rtt_result->sid, rtt_result->flags, + rtt_result->status, rtt_result->peer, rtt_result->state, + rtt_result->u.burst_duration.intvl, rtt_result->u.burst_duration.tmu, + rtt_result->avg_dist, rtt_result->sd_rtt, rtt_result->num_valid_rtt, + rtt_result->num_ftm, rtt_result->burst_num, rtt_result->num_rtt, + rtt_result->num_meas); + inff_dbg(TRACE, "FTM: RTT sample\n" + "version : 0x%x\n" + "length : %d\n" + "id : %d\n" + "flags : 0x%x\n" + "rssi : %d\n" + "rtt : %d / %d\n" + "ratespec : 0x%x\n" + "snr : %d\n" + "bitflips : 0x%x\n" + "status : %d\n" + "distance : %d\n" + "tof_phy_error : %d\n" + "tof_tgt_phy_error : %d\n" + "tof_tgt_snr : %d\n" + "tof_tgt_bitflips : 0x%x\n" + "coreid : %d\n" + "chanspec : 0x%x\n",
rtt_sample->version, rtt_sample->length, rtt_sample->id, rtt_sample->flags, + rtt_sample->rssi, rtt_sample->rtt.intvl, rtt_sample->rtt.tmu, + rtt_sample->ratespec, rtt_sample->snr, rtt_sample->bitflips, rtt_sample->status, + rtt_sample->distance, rtt_sample->tof_phy_error, rtt_sample->tof_tgt_phy_error, + rtt_sample->tof_tgt_snr, rtt_sample->tof_tgt_bitflips, rtt_sample->coreid, + rtt_sample->chanspec); +} + +/** + * inff_ftm_get_pmsr_result_status() - Mapping rtt_result status to pmsr_result status. + * + * @status: rtt_result status + * + * return: pmsr_result status + */ +static u8 +inff_ftm_get_pmsr_result_status(s32 status) +{ + u8 ret = 0; + + switch (status) { + case INFF_PROXD_E_OK: + ret = NL80211_PMSR_STATUS_SUCCESS; + break; + case INFF_PROXD_E_TIMEOUT: + ret = NL80211_PMSR_STATUS_TIMEOUT; + break; + case INFF_PROXD_E_ERROR: + ret = NL80211_PMSR_STATUS_FAILURE; + break; + default: + ret = NL80211_PMSR_STATUS_REFUSED; + break; + } + + return ret; +} + +static s32 +inff_ftm_set_op(struct inff_if *ifp, enum inff_proxd_cmd cmd_id, + enum inff_proxd_session_id session_id) +{ + struct inff_proxd_iov *ftm_buf = NULL; + u8 len = 0; + s32 err = 0; + + len = sizeof(struct inff_proxd_iov) + sizeof(struct inff_xtlv); + ftm_buf = kzalloc(len, GFP_KERNEL); + if (!ftm_buf) + return -ENOMEM; + + ftm_buf->version = cpu_to_le16(INFF_PROXD_API_VERSION); + ftm_buf->len = cpu_to_le16(len); + ftm_buf->cmd = cpu_to_le16(cmd_id); + ftm_buf->method = cpu_to_le16(INFF_PROXD_METHOD_FTM); + ftm_buf->sid = cpu_to_le16(session_id); + ftm_buf->tlvs[0].id = cpu_to_le16(INFF_PROXD_TLV_ID_NONE); + ftm_buf->tlvs[0].len = 0; + err = inff_fil_iovar_data_set(ifp, "proxd", (char *)ftm_buf, len); + if (err) + inff_err("proxd ftm cmd %d error: %d\n", cmd_id, err); + + kfree(ftm_buf); + return err; +} + +static s32 +inff_ftm_get_op(struct inff_if *ifp, enum inff_proxd_cmd cmd_id, + enum inff_proxd_session_id session_id, + u8 *data, u16 data_len) +{ + struct inff_proxd_iov *ftm_buf = (struct 
inff_proxd_iov *)data; + s32 err = 0; + + ftm_buf->version = cpu_to_le16(INFF_PROXD_API_VERSION); + ftm_buf->len = cpu_to_le16(sizeof(struct inff_proxd_iov)); + ftm_buf->cmd = cpu_to_le16(cmd_id); + ftm_buf->method = cpu_to_le16(INFF_PROXD_METHOD_FTM); + ftm_buf->sid = cpu_to_le16(session_id); + ftm_buf->tlvs[0].id = cpu_to_le16(INFF_PROXD_TLV_ID_NONE); + ftm_buf->tlvs[0].len = 0; + err = inff_fil_iovar_data_get(ifp, "proxd", data, data_len); + if (err) + inff_err("proxd ftm cmd %d error: %d\n", cmd_id, err); + + return err; +} + +static s32 +inff_ftm_set_config_op(struct inff_if *ifp, + enum inff_proxd_session_id session_id, + u8 *data, u16 data_len) +{ + struct inff_proxd_iov *ftm_buf = NULL; + u16 ftm_buf_len = 0; + s32 err = 0; + + ftm_buf_len = offsetof(struct inff_proxd_iov, tlvs) + data_len; + ftm_buf = kzalloc(ftm_buf_len, GFP_KERNEL); + if (!ftm_buf) + return -ENOMEM; + + ftm_buf->version = cpu_to_le16(INFF_PROXD_API_VERSION); + ftm_buf->len = cpu_to_le16(ftm_buf_len); + ftm_buf->cmd = cpu_to_le16(INFF_PROXD_CMD_CONFIG); + ftm_buf->method = cpu_to_le16(INFF_PROXD_METHOD_FTM); + ftm_buf->sid = cpu_to_le16(session_id); + memcpy(&ftm_buf->tlvs[0], data, data_len); + err = inff_fil_iovar_data_set(ifp, "proxd", ftm_buf, ftm_buf_len); + if (err) + inff_err("proxd ftm cmd %d error: %d\n", INFF_PROXD_CMD_CONFIG, err); + + kfree(ftm_buf); + return err; +} + +static void +inff_ftm_update_burst_report(struct inff_ftm_info *ftm_info, + struct inff_proxd_event *p_event) +{ + struct cfg80211_pmsr_result result = {0}; + struct inff_xtlv *proxd_tlv = NULL; + struct inff_proxd_rtt_result_v2 *rtt_result = NULL; + struct inff_proxd_rtt_sample_v2 *rtt_sample = NULL; + + proxd_tlv = (struct inff_xtlv *)((u8 *)p_event + offsetof(struct inff_proxd_iov, tlvs)); + if (proxd_tlv->id != INFF_PROXD_TLV_ID_RTT_RESULT_V2) { + inff_dbg(TRACE, "FTM: wrong len/id in rtt result!\n"); + goto fail_result; + } + + rtt_result = (struct inff_proxd_rtt_result_v2 *)((u8 *)p_event + + 
offsetof(struct inff_proxd_iov, tlvs) + + offsetof(struct inff_xtlv, data)); + rtt_sample = rtt_result->rtt; + inff_print_rtt_info(rtt_result, rtt_sample); + + /* update result to upper layer + * please refer the unit of cfg80211_pmsr_result from include/uapi/linux/nl80211.h + */ + result.host_time = ftm_info->host_time; + result.status = inff_ftm_get_pmsr_result_status(rtt_result->status); + if (rtt_result) + memcpy(result.addr, rtt_result->peer, ETH_ALEN); + if (rtt_result->burst_num == (1 << ftm_info->ftm_req->peers[0].ftm.num_bursts_exp)) { + /* final report */ + result.final = 1; + } else { + result.final = 0; + } + result.ap_tsf_valid = 0; + result.type = NL80211_PMSR_TYPE_FTM; + result.ftm.burst_index = rtt_result->burst_num; + result.ftm.num_bursts_exp = ftm_info->ftm_req->peers[0].ftm.num_bursts_exp; + result.ftm.burst_duration = ftm_info->ftm_req->peers[0].ftm.burst_duration; + result.ftm.ftms_per_burst = rtt_result->num_ftm; + result.ftm.rssi_avg = (2 * rtt_sample->rssi); // unit: 1/2dBm + result.ftm.rtt_avg = intvl_to_picosecond(rtt_sample->rtt); // unit: picosecond + result.ftm.rtt_variance = (rtt_result->sd_rtt * rtt_result->sd_rtt); + result.ftm.dist_avg = ((rtt_result->avg_dist * 1000) / 256); // unit: mm + result.ftm.rssi_avg_valid = 1; + result.ftm.rtt_avg_valid = 1; + result.ftm.rtt_variance_valid = 1; + result.ftm.dist_avg_valid = 1; + cfg80211_pmsr_report(&ftm_info->vif->wdev, ftm_info->ftm_req, &result, GFP_KERNEL); + + return; + +fail_result: + result.status = NL80211_PMSR_STATUS_FAILURE; + if (rtt_result) + memcpy(result.addr, rtt_result->peer, ETH_ALEN); + cfg80211_pmsr_report(&ftm_info->vif->wdev, ftm_info->ftm_req, &result, GFP_KERNEL); +} + +static void +inff_ftm_update_final_report(struct inff_ftm_info *ftm_info, + struct inff_proxd_event *p_event) +{ + struct cfg80211_pmsr_result result = {0}; + s32 err = 0; + u8 proxd_buf[512] = {0}; + struct inff_proxd_iov *proxd_iov = NULL; + struct inff_xtlv *proxd_tlv = NULL; + struct 
inff_proxd_rtt_result_v2 *rtt_result = NULL; + struct inff_proxd_rtt_sample_v2 *rtt_sample = NULL; + + /* FTM get result */ + err = inff_ftm_get_op(ftm_info->ifp, INFF_PROXD_CMD_GET_RESULT, + INFF_PROXD_SESSION_ID_DEFAULT_FTM, + (u8 *)proxd_buf, sizeof(proxd_buf)); + if (err) { + inff_dbg(TRACE, "FTM: get rtt result fail!\n"); + goto fail_result; + } + + proxd_iov = (struct inff_proxd_iov *)proxd_buf; + proxd_tlv = (struct inff_xtlv *)(proxd_buf + offsetof(struct inff_proxd_iov, tlvs)); + if ((proxd_iov->len < + sizeof(struct inff_proxd_iov) + sizeof(struct inff_xtlv) + + sizeof(struct inff_proxd_rtt_result_v2) - 2) || + proxd_tlv->id != INFF_PROXD_TLV_ID_RTT_RESULT_V2) { + inff_dbg(TRACE, "FTM: wrong len/id in rtt result!\n"); + goto fail_result; + } + rtt_result = (struct inff_proxd_rtt_result_v2 *)(proxd_buf + + offsetof(struct inff_proxd_iov, tlvs) + + offsetof(struct inff_xtlv, data)); + rtt_sample = rtt_result->rtt; + inff_print_rtt_info(rtt_result, rtt_sample); + + /* update result to upper layer + * please refer the unit of cfg80211_pmsr_result from include/uapi/linux/nl80211.h + */ + result.host_time = ftm_info->host_time; + result.status = inff_ftm_get_pmsr_result_status(rtt_result->status); + if (rtt_result) + memcpy(result.addr, rtt_result->peer, ETH_ALEN); + result.final = 1; + result.ap_tsf_valid = 0; + result.type = NL80211_PMSR_TYPE_FTM; + result.ftm.burst_index = -1; + result.ftm.num_bursts_exp = ilog2(rtt_result->burst_num); + result.ftm.burst_duration = ftm_info->ftm_req->peers[0].ftm.burst_duration; + result.ftm.ftms_per_burst = rtt_result->num_ftm; + result.ftm.rssi_avg = (2 * rtt_sample->rssi); // unit: 1/2dBm + result.ftm.rtt_avg = intvl_to_picosecond(rtt_sample->rtt); // unit: picosecond + result.ftm.rtt_variance = (rtt_result->sd_rtt * rtt_result->sd_rtt); + result.ftm.dist_avg = ((rtt_result->avg_dist * 1000) / 256); // unit: mm + result.ftm.rssi_avg_valid = 1; + result.ftm.rtt_avg_valid = 1; + result.ftm.rtt_variance_valid = 1; + 
result.ftm.dist_avg_valid = 1; + cfg80211_pmsr_report(&ftm_info->vif->wdev, ftm_info->ftm_req, &result, GFP_KERNEL); + + return; + +fail_result: + result.status = NL80211_PMSR_STATUS_FAILURE; + if (rtt_result) + memcpy(result.addr, rtt_result->peer, ETH_ALEN); + cfg80211_pmsr_report(&ftm_info->vif->wdev, ftm_info->ftm_req, &result, GFP_KERNEL); +} + +s32 +inff_notify_ftm_evt(struct inff_if *ifp, + const struct inff_event_msg *e, void *data) +{ + struct inff_cfg80211_info *cfg = ifp->drvr->config; + struct inff_ftm_info *ftm_info = cfg->pmsr_info->ftm_info; + struct inff_proxd_event *p_event; + u16 event_type; + struct inff_xtlv *proxd_tlv = NULL; + struct inff_proxd_ftm_session_status *proxd_status = NULL; + s32 err = 0; + + p_event = (struct inff_proxd_event *)data; + event_type = p_event->type; + inff_dbg(INFO, "FTM: event %s (%d), status=%d, proxd_type=%d\n", + inff_fweh_event_name(e->event_code), e->event_code, + e->status, event_type); + + /* Currently do not handle event for softap mode */ + if (cfg->num_softap || !(ftm_info->ftm_req) || + !inff_feat_is_enabled(ifp, INFF_FEAT_FTM)) { + inff_dbg(TRACE, "FTM: condition check fail!"); + return 0; + } + + /* check session status tlv */ + proxd_tlv = (struct inff_xtlv *)((u8 *)p_event + offsetof(struct inff_proxd_iov, tlvs)); + if (proxd_tlv->id == INFF_PROXD_TLV_ID_SESSION_STATUS) { + proxd_status = (struct inff_proxd_ftm_session_status *)proxd_tlv->data; + inff_dbg(TRACE, "FTM: session status tlv\n" + "sid : %d\n" + "state : %d\n" + "status : %d\n" + "burst_num : %d\n", + proxd_status->sid, proxd_status->state, proxd_status->status, + proxd_status->burst_num); + } + + switch (event_type) { + case INFF_PROXD_EVENT_SESSION_CREATE: + /* FTM session start */ + err = inff_ftm_set_op(ifp, INFF_PROXD_CMD_START_SESSION, + INFF_PROXD_SESSION_ID_DEFAULT_FTM); + if (err) { + inff_err("FTM: start session fail!\n"); + ftm_info->ftm_req = NULL; + ftm_info->host_time = 0; + } + break; + case INFF_PROXD_EVENT_SESSION_START: + 
/* update host_time for non partial report mode*/ + if (!ftm_info->ftm_partial_report) + ftm_info->host_time = (u64)ktime_to_ns(ktime_get_boottime()); + break; + case INFF_PROXD_EVENT_BURST_START: + /* update host_time for partial report mode*/ + if (ftm_info->ftm_partial_report) + ftm_info->host_time = (u64)ktime_to_ns(ktime_get_boottime()); + break; + case INFF_PROXD_EVENT_BURST_END: + if (ftm_info->ftm_partial_report) + inff_ftm_update_burst_report(ftm_info, p_event); + break; + case INFF_PROXD_EVENT_SESSION_END: + if (!ftm_info->ftm_partial_report) + inff_ftm_update_final_report(ftm_info, p_event); + + /* FTM session delete */ + err = inff_ftm_set_op(ifp, INFF_PROXD_CMD_DELETE_SESSION, + INFF_PROXD_SESSION_ID_DEFAULT_FTM); + if (err) + inff_err("FTM: delete session fail!\n"); + + break; + case INFF_PROXD_EVENT_SESSION_DESTROY: + cfg80211_pmsr_complete(&ifp->vif->wdev, ftm_info->ftm_req, GFP_KERNEL); + ftm_info->ftm_req = NULL; + ftm_info->host_time = 0; + break; + default: + break; + } + + return 0; +} + +s32 +inff_ftm_set_global_config(struct inff_ftm_info *ftm_info, + struct cfg80211_pmsr_request_peer *peer) +{ + u8 buf[512] = {0}, *bufp = NULL; + u16 buf_len = 0, buf_len_start = 0; + u32 param32 = 0; + + buf_len_start = sizeof(buf); + buf_len = sizeof(buf); + bufp = &buf[0]; + + /* ex: proxd ftm config options +mburst-followup */ + param32 = cpu_to_le32(INFF_PROXD_FLAG_MBURST_FOLLOWUP); + inff_pack_xtlv(INFF_PROXD_TLV_ID_FLAGS_MASK, (u8 *)¶m32, sizeof(u32), + (char **)&bufp, &buf_len); + param32 = cpu_to_le32(peer->ftm.num_bursts_exp > 0 ? 
+ INFF_PROXD_FLAG_MBURST_FOLLOWUP : 0x00); + inff_pack_xtlv(INFF_PROXD_TLV_ID_FLAGS, (u8 *)¶m32, sizeof(u32), + (char **)&bufp, &buf_len); + + buf_len = buf_len_start - buf_len; + return inff_ftm_set_config_op(ftm_info->ifp, INFF_PROXD_SESSION_ID_GLOBAL, buf, buf_len); +} + +s32 +inff_ftm_set_session_config(struct inff_ftm_info *ftm_info, + struct cfg80211_pmsr_request_peer *peer, + enum inff_proxd_session_id session_id) +{ + u8 buf[512] = {0}, *bufp = NULL; + u16 buf_len = 0, buf_len_start = 0; + u16 param16 = 0; + u32 param32 = 0; + struct inff_proxd_intvl param_intvl = {0}; + u8 burst_no_pref = 0; + + buf_len_start = sizeof(buf); + buf_len = sizeof(buf); + bufp = &buf[0]; + + /* ex: proxd ftm $SID config debug-mask 0xffffffde */ + param32 = cpu_to_le32(ftm_info->ftm_debug_mask); + inff_pack_xtlv(INFF_PROXD_TLV_ID_DEBUG_MASK, (u8 *)¶m32, sizeof(u32), + (char **)&bufp, &buf_len); + + /* ex: proxd ftm $SID config burst-duration 128ms */ + param_intvl.intvl = cpu_to_le32(0); + param_intvl.tmu = cpu_to_le16(INFF_PROXD_TMU_TU); + switch (peer->ftm.burst_duration) { + case 2: + case 3: + param_intvl.intvl = cpu_to_le32(250 * (peer->ftm.burst_duration - 1)); + param_intvl.tmu = cpu_to_le16(INFF_PROXD_TMU_MICRO_SEC); + break; + case 4: + case 5: + case 6: + case 7: + case 8: + case 9: + case 10: + case 11: + param_intvl.intvl = cpu_to_le32(1 << (peer->ftm.burst_duration - 4)); + param_intvl.tmu = cpu_to_le16(INFF_PROXD_TMU_MILLI_SEC); + break; + default: + burst_no_pref = 1; + break; + } + if (!burst_no_pref) + inff_pack_xtlv(INFF_PROXD_TLV_ID_BURST_DURATION, (u8 *)¶m_intvl, + sizeof(param_intvl), (char **)&bufp, &buf_len); + + /* ex: proxd ftm $SID config options ..... 
+ * must set: +initiator -rtt-detail +rx-auto-burst +pre-scan -immediate + * set by user: +asap +tx-lci-req +tx-civic-req + */ + param32 = cpu_to_le32(INFF_PROXD_SESSION_FLAG_INITIATOR | + INFF_PROXD_SESSION_FLAG_RTT_DETAIL | + INFF_PROXD_SESSION_FLAG_RX_AUTO_BURST | + INFF_PROXD_SESSION_FLAG_ASAP | + INFF_PROXD_SESSION_FLAG_REQ_LCI | + INFF_PROXD_SESSION_FLAG_REQ_CIV | + INFF_PROXD_SESSION_FLAG_PRE_SCAN | + INFF_PROXD_SESSION_FLAG_BDUR_NOPREF | + INFF_PROXD_SESSION_FLAG_MBURST_NODELAY); + inff_pack_xtlv(INFF_PROXD_TLV_ID_SESSION_FLAGS_MASK, (u8 *)¶m32, sizeof(u32), + (char **)&bufp, &buf_len); + param32 = cpu_to_le32(INFF_PROXD_SESSION_FLAG_INITIATOR | + INFF_PROXD_SESSION_FLAG_RX_AUTO_BURST | + (peer->ftm.asap ? INFF_PROXD_SESSION_FLAG_ASAP : 0x00) | + (peer->ftm.request_lci ? INFF_PROXD_SESSION_FLAG_REQ_LCI : 0x00) | + (peer->ftm.request_civicloc ? + INFF_PROXD_SESSION_FLAG_REQ_CIV : 0x00) | + (burst_no_pref ? INFF_PROXD_SESSION_FLAG_BDUR_NOPREF : 0x00) | + INFF_PROXD_SESSION_FLAG_PRE_SCAN); + inff_pack_xtlv(INFF_PROXD_TLV_ID_SESSION_FLAGS, (u8 *)¶m32, sizeof(u32), + (char **)&bufp, &buf_len); + + /* ex: proxd ftm $SID config peer $PEER_MAC */ + inff_pack_xtlv(INFF_PROXD_TLV_ID_PEER_MAC, &peer->addr[0], ETH_ALEN, + (char **)&bufp, &buf_len); + + /* ex: proxd ftm $SID config chanspec 36/20 */ + param32 = cpu_to_le32(chandef_to_chanspec(&ftm_info->cfg80211_info->d11inf, + &peer->chandef)); + inff_pack_xtlv(INFF_PROXD_TLV_ID_CHANSPEC, (u8 *)¶m32, sizeof(u32), + (char **)&bufp, &buf_len); + + /* ex: proxd ftm $SID config num-burst 1 */ + param16 = cpu_to_le16(1 << peer->ftm.num_bursts_exp); + inff_pack_xtlv(INFF_PROXD_TLV_ID_NUM_BURST, (u8 *)¶m16, sizeof(u16), + (char **)&bufp, &buf_len); + + /* ex: proxd ftm $SID config num-ftm 6 */ + param16 = cpu_to_le16(peer->ftm.ftms_per_burst); + inff_pack_xtlv(INFF_PROXD_TLV_ID_BURST_NUM_FTM, (u8 *)¶m16, sizeof(u16), + (char **)&bufp, &buf_len); + + /* ex: proxd ftm $SID config burst-period 5000ms */ + param_intvl.intvl = 
cpu_to_le32(peer->ftm.burst_period * 100); + param_intvl.tmu = cpu_to_le16(INFF_PROXD_TMU_MILLI_SEC); + inff_pack_xtlv(INFF_PROXD_TLV_ID_BURST_PERIOD, (u8 *)¶m_intvl, sizeof(param_intvl), + (char **)&bufp, &buf_len); + + /* ex: proxd ftm $SID config req-retries 3 */ + inff_pack_xtlv(INFF_PROXD_TLV_ID_FTM_REQ_RETRIES, (u8 *)&peer->ftm.ftmr_retries, sizeof(u8), + (char **)&bufp, &buf_len); + + /* ex: proxd ftm $SID config ftm-sep 3ms */ + param_intvl.intvl = cpu_to_le32(INFF_PROXD_TLV_FTM_SEP_VAL); + param_intvl.tmu = cpu_to_le16(INFF_PROXD_TMU_MILLI_SEC); + inff_pack_xtlv(INFF_PROXD_TLV_ID_BURST_FTM_SEP, (u8 *)¶m_intvl, sizeof(param_intvl), + (char **)&bufp, &buf_len); + + buf_len = buf_len_start - buf_len; + return inff_ftm_set_config_op(ftm_info->ifp, session_id, buf, buf_len); +} + +s32 inff_ftm_attach(struct inff_cfg80211_info *cfg) +{ + struct net_device *ndev = cfg_to_ndev(cfg); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_ftm_info *ftm_info; + s32 err = 0; + u8 rrm_cap[RM_ENAB_CAP_SIZE] = { 0 }; + + ftm_info = kzalloc(sizeof(*ftm_info), GFP_KERNEL); + if (!ftm_info) + return -ENOMEM; + + ftm_info->cfg80211_info = cfg; + ftm_info->vif = ifp->vif; + ftm_info->ifp = ifp; + ftm_info->ftm_req = NULL; + ftm_info->ftm_debug_mask = 0xFFFFFFDE; + ftm_info->ftm_partial_report = 1; + ftm_info->host_time = 0; + cfg->pmsr_info->ftm_info = ftm_info; + + /* FTM enable */ + err = inff_ftm_set_op(ifp, INFF_PROXD_CMD_ENABLE, INFF_PROXD_SESSION_ID_GLOBAL); + if (err) { + inff_err("FTM: enable fail!\n"); + /* disable feature */ + ifp->drvr->feat_flags[INFF_FEAT_FTM / 8] &= ~BIT(INFF_FEAT_FTM % 8); + return err; + } + + /* set rrm capabilities */ + err = inff_fil_iovar_data_get(ifp, "rrm", rrm_cap, sizeof(rrm_cap)); + if (err) { + inff_err("get rrm error: %d\n", err); + } else { + rrm_cap[4] |= IEEE80211_RM_ENAB_CAP4_FTM_RANGE_REPORT; + rrm_cap[4] |= IEEE80211_RM_ENAB_CAP4_CIVIC_LOCATION_MSR; + + err = inff_fil_iovar_data_set(ifp, "rrm", rrm_cap, sizeof(rrm_cap)); + 
if (err) + inff_err("set rrm error: %d\n", err); + } + + return err; +} + +void inff_ftm_detach(struct inff_cfg80211_info *cfg) +{ + struct inff_ftm_info *ftm_info = cfg->pmsr_info->ftm_info; + + if (!ftm_info || !ftm_info->vif) + return; + + kfree(ftm_info); + cfg->pmsr_info->ftm_info = NULL; +} diff --git a/drivers/net/wireless/infineon/inffmac/ftm.h b/drivers/net/wireless/infineon/inffmac/ftm.h new file mode 100644 index 000000000000..3d7ca9ff401a --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/ftm.h @@ -0,0 +1,382 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_FTM_H +#define INFF_FTM_H + +#include "xtlv.h" + +#define RM_ENAB_CAP_SIZE 5 + +/** tlv IDs - data length 4 bytes unless overridden by type, alignment 32 bits */ +enum inff_proxd_tlv_id { + INFF_PROXD_TLV_ID_NONE = 0, + INFF_PROXD_TLV_ID_METHOD = 1, + INFF_PROXD_TLV_ID_FLAGS = 2, + INFF_PROXD_TLV_ID_CHANSPEC = 3, /* note: uint32 */ + INFF_PROXD_TLV_ID_TX_POWER = 4, + INFF_PROXD_TLV_ID_RATESPEC = 5, + INFF_PROXD_TLV_ID_BURST_DURATION = 6, /* intvl - length of burst */ + INFF_PROXD_TLV_ID_BURST_PERIOD = 7, /* intvl - between bursts */ + INFF_PROXD_TLV_ID_BURST_FTM_SEP = 8, /* intvl - between FTMs */ + INFF_PROXD_TLV_ID_BURST_NUM_FTM = 9, /* uint16 - per burst */ + INFF_PROXD_TLV_ID_NUM_BURST = 10, /* uint16 */ + INFF_PROXD_TLV_ID_FTM_RETRIES = 11, /* uint16 at FTM level */ + INFF_PROXD_TLV_ID_BSS_INDEX = 12, /* uint8 */ + INFF_PROXD_TLV_ID_BSSID = 13, + INFF_PROXD_TLV_ID_INIT_DELAY = 14, /* intvl - optional,non-standalone only */ + INFF_PROXD_TLV_ID_BURST_TIMEOUT = 15, /* expect response within - intvl */ + INFF_PROXD_TLV_ID_EVENT_MASK = 16, /* interested events - in/out */ + INFF_PROXD_TLV_ID_FLAGS_MASK = 17, /* interested flags - in only */ + INFF_PROXD_TLV_ID_PEER_MAC = 18, /* mac address of peer */ + INFF_PROXD_TLV_ID_FTM_REQ = 19, /* dot11_ftm_req */ + 
INFF_PROXD_TLV_ID_LCI_REQ = 20, + INFF_PROXD_TLV_ID_LCI = 21, + INFF_PROXD_TLV_ID_CIVIC_REQ = 22, + INFF_PROXD_TLV_ID_CIVIC = 23, + INFF_PROXD_TLV_ID_AVAIL24 = 24, /* ROM compatibility */ + INFF_PROXD_TLV_ID_SESSION_FLAGS = 25, + INFF_PROXD_TLV_ID_SESSION_FLAGS_MASK = 26, /* in only */ + INFF_PROXD_TLV_ID_RX_MAX_BURST = 27, /* uint16 - limit bursts per session */ + INFF_PROXD_TLV_ID_RANGING_INFO = 28, /* ranging info */ + INFF_PROXD_TLV_ID_RANGING_FLAGS = 29, /* uint16 */ + INFF_PROXD_TLV_ID_RANGING_FLAGS_MASK = 30, /* uint16, in only */ + INFF_PROXD_TLV_ID_NAN_MAP_ID = 31, + INFF_PROXD_TLV_ID_DEV_ADDR = 32, + INFF_PROXD_TLV_ID_AVAIL = 33, /* wl_proxd_avail_t */ + INFF_PROXD_TLV_ID_TLV_ID = 34, /* uint16 tlv-id */ + INFF_PROXD_TLV_ID_FTM_REQ_RETRIES = 35, /* uint16 FTM request retries */ + INFF_PROXD_TLV_ID_TPK = 36, /* 32byte TPK */ + INFF_PROXD_TLV_ID_RI_RR = 36, /* RI_RR */ + INFF_PROXD_TLV_ID_TUNE = 37, /* wl_proxd_pararms_tof_tune_t */ + INFF_PROXD_TLV_ID_CUR_ETHER_ADDR = 38, /* Source Address used for Tx */ + + /* output - 512 + x */ + INFF_PROXD_TLV_ID_STATUS = 512, + INFF_PROXD_TLV_ID_COUNTERS = 513, + INFF_PROXD_TLV_ID_INFO = 514, + INFF_PROXD_TLV_ID_RTT_RESULT = 515, + INFF_PROXD_TLV_ID_AOA_RESULT = 516, + INFF_PROXD_TLV_ID_SESSION_INFO = 517, + INFF_PROXD_TLV_ID_SESSION_STATUS = 518, + INFF_PROXD_TLV_ID_SESSION_ID_LIST = 519, + INFF_PROXD_TLV_ID_RTT_RESULT_V2 = 520, + + /* debug tlvs can be added starting 1024 */ + INFF_PROXD_TLV_ID_DEBUG_MASK = 1024, + INFF_PROXD_TLV_ID_COLLECT = 1025, /**< output only */ + INFF_PROXD_TLV_ID_STRBUF = 1026, + + INFF_PROXD_TLV_ID_COLLECT_HEADER = 1025, /* wl_proxd_collect_header_t */ + INFF_PROXD_TLV_ID_COLLECT_INFO = 1028, /* wl_proxd_collect_info_t */ + INFF_PROXD_TLV_ID_COLLECT_DATA = 1029, /* wl_proxd_collect_data_t */ + INFF_PROXD_TLV_ID_COLLECT_CHAN_DATA = 1030, /* wl_proxd_collect_data_t */ + INFF_PROXD_TLV_ID_MF_STATS_DATA = 1031, /* mf_stats_buffer */ + + INFF_PROXD_TLV_ID_MAX +}; + +/** commands that can apply 
to proxd, method or a session */ +enum inff_proxd_cmd { + INFF_PROXD_CMD_NONE = 0, + INFF_PROXD_CMD_GET_VERSION = 1, + INFF_PROXD_CMD_ENABLE = 2, + INFF_PROXD_CMD_DISABLE = 3, + INFF_PROXD_CMD_CONFIG = 4, + INFF_PROXD_CMD_START_SESSION = 5, + INFF_PROXD_CMD_BURST_REQUEST = 6, + INFF_PROXD_CMD_STOP_SESSION = 7, + INFF_PROXD_CMD_DELETE_SESSION = 8, + INFF_PROXD_CMD_GET_RESULT = 9, + INFF_PROXD_CMD_GET_INFO = 10, + INFF_PROXD_CMD_GET_STATUS = 11, + INFF_PROXD_CMD_GET_SESSIONS = 12, + INFF_PROXD_CMD_GET_COUNTERS = 13, + INFF_PROXD_CMD_CLEAR_COUNTERS = 14, + INFF_PROXD_CMD_COLLECT = 15, /* not supported, see 'wl proxd_collect' */ + INFF_PROXD_CMD_TUNE = 16, /* not supported, see 'wl proxd_tune' */ + INFF_PROXD_CMD_DUMP = 17, + INFF_PROXD_CMD_START_RANGING = 18, + INFF_PROXD_CMD_STOP_RANGING = 19, + INFF_PROXD_CMD_GET_RANGING_INFO = 20, + INFF_PROXD_CMD_IS_TLV_SUPPORTED = 21, + + INFF_PROXD_CMD_MAX +}; + +/** proximity detection methods */ +enum inff_proxd_method { + INFF_PROXD_METHOD_NONE = 0, + INFF_PROXD_METHOD_RSVD1 = 1, /* backward compatibility - RSSI, not supported */ + INFF_PROXD_METHOD_TOF = 2, + INFF_PROXD_METHOD_RSVD2 = 3, /* 11v only - if needed */ + INFF_PROXD_METHOD_FTM = 4, /* IEEE rev mc/2014 */ + INFF_PROXD_METHOD_MAX +}; + +/** global and method configuration flags */ +enum inff_proxd_global_flag { + INFF_PROXD_FLAG_NONE = 0x00000000, + INFF_PROXD_FLAG_RX_ENABLED = 0x00000001, /* respond to requests, per bss */ + INFF_PROXD_FLAG_RX_RANGE_REQ = 0x00000002, /* 11mc range requests enabled */ + INFF_PROXD_FLAG_TX_LCI = 0x00000004, /* tx lci, if known */ + INFF_PROXD_FLAG_TX_CIVIC = 0x00000008, /* tx civic, if known */ + INFF_PROXD_FLAG_RX_AUTO_BURST = 0x00000010, /* auto respond w/o host action */ + INFF_PROXD_FLAG_TX_AUTO_BURST = 0x00000020, /* continue tx w/o host action */ + INFF_PROXD_FLAG_AVAIL_PUBLISH = 0x00000040, /* publish availability */ + INFF_PROXD_FLAG_AVAIL_SCHEDULE = 0x00000080, /* schedule using availability */ + INFF_PROXD_FLAG_ASAP_CAPABLE 
= 0x00000100, /* ASAP capable */ + INFF_PROXD_FLAG_MBURST_FOLLOWUP = 0x00000200, /* new multi-burst algorithm */ + INFF_PROXD_FLAG_SECURE = 0x00000400, /* per bsscfg option */ + INFF_PROXD_FLAG_NO_TSF_SYNC = 0x00000800, /* disable tsf sync */ + INFF_PROXD_FLAG_ALL = 0xffffffff +}; + +/* session ids: + * id 0 is reserved + * ids 1..0x7fff - allocated by host/app + * 0x8000-0xffff - allocated by firmware, used for auto/rx + */ +/* typedef u16 wl_proxd_session_id_t; */ +enum inff_proxd_session_id { + INFF_PROXD_SESSION_ID_GLOBAL = 0, + INFF_PROXD_SESSION_ID_DEFAULT_FTM = 1 +}; + +/** session flags */ +enum inff_proxd_session_flag { + INFF_PROXD_SESSION_FLAG_NONE = 0x00000000, /* no flags */ + INFF_PROXD_SESSION_FLAG_INITIATOR = 0x00000001, /* local device is initiator */ + INFF_PROXD_SESSION_FLAG_TARGET = 0x00000002, /* local device is target */ + INFF_PROXD_SESSION_FLAG_ONE_WAY = 0x00000004, /* (initiated) 1-way rtt */ + INFF_PROXD_SESSION_FLAG_AUTO_BURST = 0x00000008, /* created w/ rx_auto_burst */ + INFF_PROXD_SESSION_FLAG_PERSIST = 0x00000010, /* good until cancelled */ + INFF_PROXD_SESSION_FLAG_RTT_DETAIL = 0x00000020, /* rtt detail in results */ + INFF_PROXD_SESSION_FLAG_SECURE = 0x00000040, /* sessionis secure */ + INFF_PROXD_SESSION_FLAG_AOA = 0x00000080, /* AOA along w/ RTT */ + INFF_PROXD_SESSION_FLAG_RX_AUTO_BURST = 0x00000100, /* Same as proxd flags above */ + INFF_PROXD_SESSION_FLAG_TX_AUTO_BURST = 0x00000200, /* Same as proxd flags above */ + INFF_PROXD_SESSION_FLAG_NAN_BSS = 0x00000400, /* Use NAN BSS, if applicable */ + INFF_PROXD_SESSION_FLAG_TS1 = 0x00000800, /* e.g. 
FTM1 - ASAP-capable */ + INFF_PROXD_SESSION_FLAG_REPORT_FAILURE = 0x00002000, /* report failure to target */ + INFF_PROXD_SESSION_FLAG_INITIATOR_RPT = 0x00004000, /* report distance to target */ + INFF_PROXD_SESSION_FLAG_NOCHANSWT = 0x00008000, + INFF_PROXD_SESSION_FLAG_NETRUAL = 0x00010000, /* netrual mode */ + INFF_PROXD_SESSION_FLAG_SEQ_EN = 0x00020000, /* Toast */ + INFF_PROXD_SESSION_FLAG_NO_PARAM_OVRD = 0x00040000, /* no param override */ + INFF_PROXD_SESSION_FLAG_ASAP = 0x00080000, /* ASAP session */ + INFF_PROXD_SESSION_FLAG_REQ_LCI = 0x00100000, /* transmit LCI req */ + INFF_PROXD_SESSION_FLAG_REQ_CIV = 0x00200000, /* transmit civic loc req */ + INFF_PROXD_SESSION_FLAG_PRE_SCAN = 0x00400000, /* enable pre-scan for asap=1 */ + INFF_PROXD_SESSION_FLAG_AUTO_VHTACK = 0x00800000, /* use vhtack based on ie */ + INFF_PROXD_SESSION_FLAG_VHTACK = 0x01000000, /* vht ack is in use */ + INFF_PROXD_SESSION_FLAG_BDUR_NOPREF = 0x02000000, /* burst-duration: no pref */ + INFF_PROXD_SESSION_FLAG_NUM_FTM_NOPREF = 0x04000000, /* num of FTM frames: no pref */ + INFF_PROXD_SESSION_FLAG_FTM_SEP_NOPREF = 0x08000000, /* time btw FTM frams: no pref */ + INFF_PROXD_SESSION_FLAG_NUM_BURST_NOPREF = 0x10000000, /* num of bursts: no pref */ + INFF_PROXD_SESSION_FLAG_BURST_PERIOD_NOPREF = 0x20000000, /* burst period: no pref */ + INFF_PROXD_SESSION_FLAG_MBURST_FOLLOWUP = 0x40000000, /* new mburst algo - reserved */ + INFF_PROXD_SESSION_FLAG_MBURST_NODELAY = 0x80000000, /* good until cancelled */ + INFF_PROXD_SESSION_FLAG_ALL = 0xffffffff +}; + +/** time units - mc supports up to 0.1ns resolution */ +enum inff_proxd_tmu { + INFF_PROXD_TMU_TU = 0, /* 1024us */ + INFF_PROXD_TMU_SEC = 1, + INFF_PROXD_TMU_MILLI_SEC = 2, + INFF_PROXD_TMU_MICRO_SEC = 3, + INFF_PROXD_TMU_NANO_SEC = 4, + INFF_PROXD_TMU_PICO_SEC = 5 +}; + +/** result flags */ +enum inff_proxd_result { + INFF_PRXOD_RESULT_FLAG_NONE = 0x0000, + INFF_PROXD_RESULT_FLAG_NLOS = 0x0001, /* LOS - if available */ + 
INFF_PROXD_RESULT_FLAG_LOS = 0x0002, /* NLOS - if available */ + INFF_PROXD_RESULT_FLAG_FATAL = 0x0004, /* Fatal error during burst */ + INFF_PROXD_RESULT_FLAG_VHTACK = 0x0008, /* VHTACK or Legacy ACK used */ + INFF_PROXD_REQUEST_SENT = 0x0010, /* FTM request was sent */ + INFF_PROXD_REQUEST_ACKED = 0x0020, /* FTM request was acked */ + INFF_PROXD_LTFSEQ_STARTED = 0x0040, /* LTF sequence started */ + INFF_PROXD_RESULT_FLAG_ALL = 0xffff +}; + +/** status */ +enum inff_proxd_status { + INFF_PROXD_E_LAST = -1056, + INFF_PROXD_E_NOAVAIL = -1056, + INFF_PROXD_E_EXT_SCHED = -1055, + INFF_PROXD_E_NOT_INF = -1054, + INFF_PROXD_E_FRAME_TYPE = -1053, + INFF_PROXD_E_VERNOSUPPORT = -1052, + INFF_PROXD_E_SEC_NOKEY = -1051, + INFF_PROXD_E_SEC_POLICY = -1050, + INFF_PROXD_E_SCAN_INPROCESS = -1049, + INFF_PROXD_E_BAD_PARTIAL_TSF = -1048, + INFF_PROXD_E_SCANFAIL = -1047, + INFF_PROXD_E_NOTSF = -1046, + INFF_PROXD_E_POLICY = -1045, + INFF_PROXD_E_INCOMPLETE = -1044, + INFF_PROXD_E_OVERRIDDEN = -1043, + INFF_PROXD_E_ASAP_FAILED = -1042, + INFF_PROXD_E_NOTSTARTED = -1041, + INFF_PROXD_E_INVALIDMEAS = -1040, + INFF_PROXD_E_INCAPABLE = -1039, + INFF_PROXD_E_MISMATCH = -1038, + INFF_PROXD_E_DUP_SESSION = -1037, + INFF_PROXD_E_REMOTE_FAIL = -1036, + INFF_PROXD_E_REMOTE_INCAPABLE = -1035, + INFF_PROXD_E_SCHED_FAIL = -1034, + INFF_PROXD_E_PROTO = -1033, + INFF_PROXD_E_EXPIRED = -1032, + INFF_PROXD_E_TIMEOUT = -1031, + INFF_PROXD_E_NOACK = -1030, + INFF_PROXD_E_DEFERRED = -1029, + INFF_PROXD_E_INVALID_SID = -1028, + INFF_PROXD_E_REMOTE_CANCEL = -1027, + INFF_PROXD_E_CANCELED = -1026, /* local */ + INFF_PROXD_E_INVALID_SESSION = -1025, + INFF_PROXD_E_BAD_STATE = -1024, + INFF_PROXD_E_START = -1024, + INFF_PROXD_E_ERROR = -1, + INFF_PROXD_E_OK = 0 +}; + +/* typedef u16 wl_proxd_event_type_t; */ +enum inff_proxd_event_type { + INFF_PROXD_EVENT_NONE = 0, /* not an event, reserved */ + INFF_PROXD_EVENT_SESSION_CREATE = 1, + INFF_PROXD_EVENT_SESSION_START = 2, + INFF_PROXD_EVENT_FTM_REQ = 3, + 
INFF_PROXD_EVENT_BURST_START = 4, + INFF_PROXD_EVENT_BURST_END = 5, + INFF_PROXD_EVENT_SESSION_END = 6, + INFF_PROXD_EVENT_SESSION_RESTART = 7, + INFF_PROXD_EVENT_BURST_RESCHED = 8, /* burst rescheduled-e.g. partial TSF */ + INFF_PROXD_EVENT_SESSION_DESTROY = 9, + INFF_PROXD_EVENT_RANGE_REQ = 10, + INFF_PROXD_EVENT_FTM_FRAME = 11, + INFF_PROXD_EVENT_DELAY = 12, + INFF_PROXD_EVENT_VS_INITIATOR_RPT = 13, /* (target) rx initiator-report */ + INFF_PROXD_EVENT_RANGING = 14, + INFF_PROXD_EVENT_LCI_MEAS_REP = 15, /* LCI measurement report */ + INFF_PROXD_EVENT_CIVIC_MEAS_REP = 16, /* civic measurement report */ + INFF_PROXD_EVENT_COLLECT = 17, + INFF_PROXD_EVENT_START_WAIT = 18, /* waiting to start */ + INFF_PROXD_EVENT_MF_STATS = 19, /* mf stats event */ + INFF_PROXD_EVENT_MAX +}; + +#define INFF_PROXD_API_VERSION 0x0300 /* version 3.0 */ +#define INFF_PROXD_RTT_RESULT_VERSION_2 2 +#define INFF_PROXD_RTT_SAMPLE_VERSION_2 2 +#define INFF_PROXD_TLV_FTM_SEP_VAL 3 /* 3ms */ + +/** proxd iovar - applies to proxd, method or session */ +struct inff_proxd_iov { + u16 version; + u16 len; + u16 cmd; + u16 method; + u16 sid; + u8 PAD[2]; + struct inff_xtlv tlvs[]; /* variable */ +}; + +/** time interval e.g. 
10ns */ +struct inff_proxd_intvl { + u32 intvl; + u16 tmu; + u8 pad[2]; +}; + +struct inff_proxd_rtt_sample_v2 { + u16 version; + u16 length; + u8 id; /* id for the sample - non-zero */ + u8 flags; + s16 rssi; + struct inff_proxd_intvl rtt; /* round trip time */ + u32 ratespec; + u16 snr; + u16 bitflips; + s32 status; + s32 distance; + u32 tof_phy_error; + u32 tof_tgt_phy_error; /* target phy error bit map */ + u16 tof_tgt_snr; + u16 tof_tgt_bitflips; + u8 coreid; + u8 pad[3]; + u32 chanspec; +}; + +/** rtt measurement result */ +struct inff_proxd_rtt_result_v2 { + u16 version; + u16 length; /* up to rtt[] */ + u16 sid; + u16 flags; + s32 status; + u8 peer[ETH_ALEN]; + s16 state; /* current state */ + union { + struct inff_proxd_intvl retry_after; /* hint for errors */ + struct inff_proxd_intvl burst_duration; /* burst duration */ + } u; + u32 avg_dist; /* 1/256m units */ + u16 sd_rtt; /* RTT standard deviation */ + u8 num_valid_rtt; /* valid rtt cnt */ + u8 num_ftm; /* actual num of ftm cnt (Configured) */ + u16 burst_num; /* in a session */ + u16 num_rtt; /* 0 if no detail */ + u16 num_meas; /* number of ftm frames seen OTA */ + u8 pad[2]; + struct inff_proxd_rtt_sample_v2 rtt[1]; /* variable, first element is avg_rtt */ +}; + +/** proxd event - applies to proxd, method or session */ +struct inff_proxd_event { + u16 version; + u16 len; + u16 type; + u16 method; + u16 sid; + u8 pad[2]; + struct inff_xtlv tlvs[1]; /* variable */ +}; + +struct inff_proxd_ftm_session_status { + u16 sid; + s16 state; + s32 status; + u16 burst_num; + u16 pad; +}; + +struct inff_ftm_info { + struct inff_cfg80211_info *cfg80211_info; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + struct cfg80211_pmsr_request *ftm_req; + u32 ftm_debug_mask; + u8 ftm_partial_report; /* set to 1 if each burst report is need */ + u64 host_time; +}; + +s32 inff_ftm_set_global_config(struct inff_ftm_info *ftm_info, + struct cfg80211_pmsr_request_peer *peer); +s32 inff_ftm_set_session_config(struct 
inff_ftm_info *ftm_info, + struct cfg80211_pmsr_request_peer *peer, + enum inff_proxd_session_id session_id); +s32 inff_notify_ftm_evt(struct inff_if *ifp, + const struct inff_event_msg *e, void *data); +s32 inff_ftm_attach(struct inff_cfg80211_info *cfg); +void inff_ftm_detach(struct inff_cfg80211_info *cfg); + +#endif /* INFF_FTM_H */ -- 2.25.1 Driver implementation of WLAN sensing (802.11bf) Measurement. Handles the WLAN Sensing PMSR requests from cfg80211 and sends the configs to the Device firmware. Also collects Channel State Information (CSI) from the WLAN device and notifies it to the userspace through the cfg80211 driver. Signed-off-by: Gokul Sivakumar --- .../wireless/infineon/inffmac/wlan_sense.c | 916 ++++++++++++++++++ .../wireless/infineon/inffmac/wlan_sense.h | 177 ++++ 2 files changed, 1093 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/wlan_sense.c create mode 100644 drivers/net/wireless/infineon/inffmac/wlan_sense.h diff --git a/drivers/net/wireless/infineon/inffmac/wlan_sense.c b/drivers/net/wireless/infineon/inffmac/wlan_sense.c new file mode 100644 index 000000000000..083586767980 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/wlan_sense.c @@ -0,0 +1,916 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2024-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include + +#include "core.h" +#include "cfg80211.h" +#include "debug.h" +#include "fwil.h" +#include "feature.h" +#include "bus.h" +#include "pmsr.h" + +/** + * inff_wlan_sense_stats_read() - Read the contents of the debugfs file "wlan_sense_stats". + * + * @seq: sequence for debugfs entry. + * @data: raw data pointer. + * + * return: 0. 
+ */ +int +inff_wlan_sense_stats_read(struct seq_file *seq, void *data) +{ + struct inff_bus *bus_if = dev_get_drvdata(seq->private); + struct inff_pub *drvr = bus_if->drvr; + struct inff_if *ifp = NULL; + struct inff_cfg80211_info *cfg80211_info = NULL; + struct inff_wlan_sense_info *wlan_sense = NULL; + struct inff_wlan_sense_cfg *cfg = NULL; + struct inff_wlan_sense_counters *counters = NULL; + int i, j; + + if (!inff_feat_is_enabled(inff_get_ifp(drvr, 0), INFF_FEAT_WLAN_SENSE)) { + inff_err("the low layer not support WLAN SENSE\n"); + return -EOPNOTSUPP; + } + + /* Iterate the interface list in struct inff_pub */ + for (i = 0; i < INFF_MAX_IFS; i++) { + ifp = drvr->iflist[i]; + + if (!ifp || !ifp->vif || + ifp->vif->wdev.iftype != NL80211_IFTYPE_WLAN_SENSE) + continue; + + cfg80211_info = ifp->drvr->config; + wlan_sense = cfg80211_info->pmsr_info->wlan_sense_info; + if (!wlan_sense) + continue; + cfg = &wlan_sense->cfg; + counters = &wlan_sense->counters; + + seq_printf(seq, "ifname: %s, ifidx: %u, bsscfgidx: %d: MAC: %pM\n", + inff_ifname(ifp), ifp->ifidx, ifp->bsscfgidx, + wlan_sense->dev_addr.octet); + + /* WLAN Sensing Started */ + seq_printf(seq, "\tSensing State: %s\n", + wlan_sense->sensing ? "ENABLED" : "DISABLED"); + + /* WLAN Sensing Schedule Configuration */ + seq_puts(seq, "\tSensing Schedule Config:\n"); + seq_printf(seq, "\t\tInterval : %u ms%s\n", cfg->interval, + cfg->interval ? "" : " (Non-Periodic)"); + seq_printf(seq, "\t\tDuration : %u ms\n", cfg->duration); + + /* WLAN Sensing Mode Configuration */ + seq_puts(seq, "\tSensing Mode Config:\n"); + seq_printf(seq, "\t\t%s Mode\n", + cfg->mode_flags & INFF_WLAN_SENSE_MODE_SOLICITED ? + "Solicited" : "Un-Soclited"); + seq_printf(seq, "\t\t%s Mode\n", + cfg->mode_flags & INFF_WLAN_SENSE_MODE_ASSOCIATED ? 
+ "Associated" : "Un-Associated"); + + /* WLAN Sensing Filter Configuration */ + seq_puts(seq, "\tSensing Filter Config:\n"); + seq_printf(seq, "\t\tBSS Scope :%u\n", cfg->filter.bss_scope); + seq_printf(seq, "\t\tIgnore FCS:%u\n", cfg->filter.ignore_fcs); + + seq_puts(seq, "\t\tFrame Transmitter ADDR:\n"); + for (j = 0; j < INFF_WLAN_SENSE_FILTER_FRM_RA_NUM; j++) + seq_printf(seq, "\t\t\t#%u: %pM\n", j, + cfg->filter.ta[j].octet); + + seq_puts(seq, "\t\tFrame Type & Subtype:\n"); + for (j = 0; j < INFF_WLAN_SENSE_FILTER_FRM_TYP_SUBTYP_NUM; j++) + seq_printf(seq, "\t\t\t#%u: 0x%x\n", j, + cfg->filter.frmtyp_subtyp[j]); + + seq_puts(seq, "\tCSI Fragment & De-fragemented Data realted Counters:\n"); + seq_printf(seq, "\t\tFragment avail FW Event Total : %u\n", + counters->csi_frag_fw_evt_tot_ct); + seq_printf(seq, "\t\tFragment avail FW Event handle Success : %u\n", + counters->csi_frag_fw_evt_handle_succ_ct); + seq_printf(seq, "\t\tFragment avail FW Event handle Failure : %u\n", + counters->csi_frag_fw_evt_handle_fail_ct); + } + + return 0; +} + +static inline void +inff_wlan_sense_dump_fil_cfg(struct inff_wlan_sense_fil_cfg fil_cfg) +{ + inff_dbg(WLAN_SENSE, "FIL CONFIG :\n" + "csi_enable : %u\n" + "capture_interval_ms : %d\n" + "capture_duration_ms : %u\n" + "solicit_mode : %u\n" + "assoc_mode : %u\n" + "bss_mode : %u\n" + "ignore_fcs : %u\n" + "macaddr[0] : %pM\n" + "macaddr[1] : %pM\n" + "macaddr[2] : %pM\n" + "macaddr[3] : %pM\n" + "chanspec : %u\n" + "multi_csi_per_mac : %u\n" + "link_protection : %u\n" + "subcarriers : %u\n" + "frmtyp_subtyp[0] : %u\n" + "frmtyp_subtyp[1] : %u\n", + fil_cfg.csi_enable, + fil_cfg.capture_interval_ms, + fil_cfg.capture_duration_ms, + fil_cfg.solicit_mode, + fil_cfg.assoc_mode, + fil_cfg.bss_mode, + fil_cfg.ignore_fcs, + fil_cfg.macaddr[0].octet, + fil_cfg.macaddr[1].octet, + fil_cfg.macaddr[2].octet, + fil_cfg.macaddr[3].octet, + fil_cfg.chanspec, + fil_cfg.multi_csi_per_mac, + fil_cfg.link_protection, + fil_cfg.subcarriers, + 
fil_cfg.frmtyp_subtyp[0], + fil_cfg.frmtyp_subtyp[1]); +} + +/** + * inff_wlan_sense_generate_vif_mac() - derive mac addresses for WLAN Sensing. + * + * @wlan_sense: WLAN Sensing specific data. + * @dev_addr: optional device address. + * + * WLAN Sensing interface needs mac address. If no device + * address it specified, these are derived from a random ethernet + * address. + */ +static void +inff_wlan_sense_generate_vif_mac(struct inff_wlan_sense_info *wlan_sense, u8 *dev_addr) +{ + struct inff_cfg80211_info *cfg80211_info = wlan_sense->cfg80211_info; + struct inff_if *pri_ifp; + + pri_ifp = netdev_priv(cfg_to_ndev(cfg80211_info)); + + if (!dev_addr || is_zero_ether_addr(dev_addr)) { + /* Generate the WLAN Sensing Device Address obtaining a random ethernet + * address with the locally administered bit set. + */ + eth_random_addr(wlan_sense->dev_addr.octet); + } else { + memcpy(wlan_sense->dev_addr.octet, dev_addr, ETH_ALEN); + } + + wlan_sense->dev_addr.octet[0] |= 0x02; +} + +/** + * inff_wlan_sense_add_vif() - create a new WLAN Sensing virtual interface. + * + * @wiphy: wiphy device of new interface. + * @name: name of the new interface. + * @name_assign_type: origin of the interface name + * @iftype: nl80211 interface type. + * @params: contains mac address for WLAN Sensing device. 
+ */
+struct wireless_dev *
+inff_wlan_sense_add_vif(struct wiphy *wiphy, const char *name,
+			unsigned char name_assign_type,
+			enum nl80211_iftype iftype,
+			struct vif_params *params)
+{
+	struct inff_cfg80211_info *cfg80211_info = wiphy_to_cfg(wiphy);
+	struct inff_pub *drvr = cfg80211_info->pub;
+	struct inff_if *pri_ifp;
+	struct inff_if *wlan_sense_ifp;
+	struct inff_wlan_sense_info *wlan_sense = cfg80211_info->pmsr_info->wlan_sense_info;
+	struct inff_cfg80211_vif *wlan_sense_vif;
+	struct inff_fil_wlan_sense_if_le if_request;
+	int err = 0;
+
+	pri_ifp = netdev_priv(cfg_to_ndev(cfg80211_info));
+
+	if (!inff_feat_is_enabled(pri_ifp, INFF_FEAT_WLAN_SENSE) ||
+	    iftype != NL80211_IFTYPE_WLAN_SENSE)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	if (inff_cfg80211_vif_event_armed(cfg80211_info))
+		return ERR_PTR(-EBUSY);
+
+	inff_dbg(INFO, "adding vif \"%s\" (type=%d)\n", name, iftype);
+
+	wlan_sense_vif = inff_alloc_vif(wlan_sense->cfg80211_info, NL80211_IFTYPE_WLAN_SENSE);
+	if (IS_ERR(wlan_sense_vif)) {
+		iphy_err(drvr, "could not create wlan_sense vif\n");
+		return (struct wireless_dev *)wlan_sense_vif;
+	}
+
+	/* firmware requires unique mac address for wlan_sensedev interface */
+	if (params && ether_addr_equal(params->macaddr, pri_ifp->mac_addr)) {
+		iphy_err(drvr, "wlan_sense vif must be different from primary interface\n");
+		err = -EINVAL;
+		goto fail;
+	}
+
+	/* Generate MAC. @params is optional (checked above), so do not
+	 * dereference it unconditionally; a NULL/zero address makes the
+	 * helper derive a random locally administered address.
+	 */
+	inff_wlan_sense_generate_vif_mac(wlan_sense, params ? params->macaddr : NULL);
+
+	/* fill the firmware request */
+	ether_addr_copy(if_request.addr.octet, wlan_sense->dev_addr.octet);
+	inff_fweh_wlan_sensedev_setup(pri_ifp, true);
+
+	inff_cfg80211_arm_vif_event(wlan_sense->cfg80211_info, wlan_sense_vif);
+	/* Create WLAN Sensing interface in the firmware */
+	err = inff_fil_iovar_data_set(pri_ifp, "csi_ifadd", &if_request,
+				      sizeof(struct inff_fil_wlan_sense_if_le));
+	if (err < 0) {
+		iphy_err(drvr, "set csi_ifadd error\n");
+		inff_fweh_wlan_sensedev_setup(pri_ifp, false);
+
inff_cfg80211_arm_vif_event(wlan_sense->cfg80211_info, NULL); + goto fail; + } + + /* wait for firmware event */ + err = inff_cfg80211_wait_vif_event(wlan_sense->cfg80211_info, INFF_E_IF_ADD, + INFF_VIF_EVENT_TIMEOUT); + inff_cfg80211_arm_vif_event(wlan_sense->cfg80211_info, NULL); + inff_fweh_wlan_sensedev_setup(pri_ifp, false); + if (!err) { + iphy_err(drvr, "timeout occurred\n"); + err = -EIO; + goto fail; + } + + /* WLAN Sensing interface created */ + wlan_sense_ifp = wlan_sense_vif->ifp; + wlan_sense->vif = wlan_sense_vif; + ether_addr_copy(wlan_sense_ifp->mac_addr, wlan_sense->dev_addr.octet); + ether_addr_copy(wlan_sense_vif->wdev.address, wlan_sense->dev_addr.octet); + + return &wlan_sense_vif->wdev; +fail: + inff_free_vif(wlan_sense_vif); + return ERR_PTR(err); +} + +void inff_wlan_sense_ifp_removed(struct inff_if *ifp, bool locked) +{ + struct inff_cfg80211_info *cfg; + struct inff_cfg80211_vif *vif; + + inff_dbg(INFO, "WLAN Sense: device interface removed\n"); + vif = ifp->vif; + cfg = wdev_to_cfg(&vif->wdev); + cfg->pmsr_info->wlan_sense_info->vif = NULL; + if (!locked) { + rtnl_lock(); + wiphy_lock(cfg->wiphy); + cfg80211_unregister_wdev(&vif->wdev); + wiphy_unlock(cfg->wiphy); + rtnl_unlock(); + } else { + cfg80211_unregister_wdev(&vif->wdev); + } + inff_free_vif(vif); +} + +/** + * inff_wlan_sense_del_vif() - delete a WLAN sensing virtual interface. + * + * @wiphy: wiphy device of interface. + * @wdev: wireless device of interface. 
+ */
+int
+inff_wlan_sense_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	struct inff_cfg80211_info *cfg80211_info = wiphy_to_cfg(wiphy);
+	struct inff_pub *drvr = cfg80211_info->pub;
+	struct inff_if *wlan_sense_ifp = NULL;
+	struct inff_wlan_sense_info *wlan_sense = cfg80211_info->pmsr_info->wlan_sense_info;
+	struct inff_cfg80211_vif *vif;
+	enum nl80211_iftype iftype;
+	int err;
+
+	vif = wdev_to_vif(wdev);
+	if (!vif) {
+		err = -EIO;
+		goto fail;
+	}
+
+	wlan_sense_ifp = vif->ifp;
+	if (!wlan_sense_ifp) {
+		err = -EIO;
+		goto fail;
+	}
+
+	if (!inff_feat_is_enabled(wlan_sense_ifp, INFF_FEAT_WLAN_SENSE))
+		return -EOPNOTSUPP;
+
+	vif = container_of(wdev, struct inff_cfg80211_vif, wdev);
+	iftype = vif->wdev.iftype;
+
+	if (iftype != NL80211_IFTYPE_WLAN_SENSE)
+		return -EOPNOTSUPP;
+
+	inff_dbg(WLAN_SENSE, "delete WLAN Sensing vif wlan_sense_ifp=0x%p\n",
+		 wlan_sense_ifp);
+
+	inff_cfg80211_arm_vif_event(cfg80211_info, vif);
+	err = inff_fil_iovar_data_set(wlan_sense_ifp, "csi_ifdel", wlan_sense->dev_addr.octet,
+				     ETH_ALEN);
+	if (err) {
+		iphy_err(drvr, "set csi_ifdel error\n");
+		/* disarm the vif event armed above so a later add/del does
+		 * not trip over a stale armed event
+		 */
+		inff_cfg80211_arm_vif_event(cfg80211_info, NULL);
+		goto fail;
+	}
+
+	/* wait for firmware event; the wait helper returns 0 on timeout
+	 * and non-zero (remaining time) on success — same convention as
+	 * in inff_wlan_sense_add_vif
+	 */
+	err = inff_cfg80211_wait_vif_event(cfg80211_info, INFF_E_IF_DEL,
+					   INFF_VIF_EVENT_TIMEOUT);
+	if (!err)
+		err = -EIO;
+	else
+		err = 0;
+
+	inff_remove_interface(drvr->iflist[wlan_sense_ifp->bsscfgidx], true);
+
+	inff_cfg80211_arm_vif_event(cfg80211_info, NULL);
+fail:
+	return err;
+}
+
+/**
+ * inff_wlan_sense_enabled_event_handler() - Handle the WLAN Sensing enabled Event notification
+ * from the Firmware.
+ * + */ +static s32 +inff_wlan_sense_enabled_event_handler(struct inff_if *ifp, const struct inff_event_msg *e, + void *data) +{ + struct inff_cfg80211_info *cfg80211_info = ifp->drvr->config; + struct inff_wlan_sense_info *wlan_sense = cfg80211_info->pmsr_info->wlan_sense_info; + struct inff_wlan_sense_cfg *cfg = &wlan_sense->cfg; + struct inff_wlan_sense_fil_cfg fil_cfg = { 0 }; + int i = 0; + s32 ret = 0; + + ret = inff_fil_iovar_data_get(ifp, "csi", &fil_cfg, + sizeof(struct inff_wlan_sense_fil_cfg)); + if (ret) { + inff_err("WLAN SENSE: WLAN Sensing Config Fetch failed, Firmware error (%d)", + ret); + goto fail; + } + + /* WLAN Sensing Started */ + wlan_sense->sensing = fil_cfg.csi_enable ? true : false; + + /* WLAN Sensing Schedule Configuration */ + if (le32_to_cpu(fil_cfg.capture_interval_ms) == -1) + cfg->interval = 0; + else + cfg->interval = le32_to_cpu(fil_cfg.capture_interval_ms); + + cfg->duration = le16_to_cpu(fil_cfg.capture_duration_ms); + + /* WLAN Sensing Mode Configuration */ + cfg->mode_flags |= (fil_cfg.solicit_mode ? INFF_WLAN_SENSE_MODE_SOLICITED : 0); + cfg->mode_flags |= (fil_cfg.assoc_mode ? INFF_WLAN_SENSE_MODE_ASSOCIATED : 0); + + /* WLAN Sensing Filter Configuration */ + cfg->filter.bss_scope = fil_cfg.bss_mode; + cfg->filter.ignore_fcs = fil_cfg.ignore_fcs ? true : false; + + for (i = 0; i < INFF_WLAN_SENSE_FILTER_FRM_RA_NUM; i++) + ether_addr_copy(cfg->filter.ta[i].octet, fil_cfg.macaddr[i].octet); + + for (i = 0; i < INFF_WLAN_SENSE_FILTER_FRM_TYP_SUBTYP_NUM; i++) { + cfg->filter.frmtyp_subtyp[i] = 0; + + cfg->filter.frmtyp_subtyp[i] |= ((fil_cfg.frmtyp_subtyp[i] & 0x3) << 4); + cfg->filter.frmtyp_subtyp[i] |= ((fil_cfg.frmtyp_subtyp[i] >> 2) & 0xF); + } + +fail: + return ret; +} + +/** + * inff_wlan_sense_data_avail_event_handler() - Handle the new CSI data available event + * notification from the Firmware. 
+ * + */ +static s32 +inff_wlan_sense_data_avail_event_handler(struct inff_if *ifp, + const struct inff_event_msg *emsg_hdr, + void *emsg) +{ + struct inff_cfg80211_info *cfg80211_info = ifp->drvr->config; + struct inff_wlan_sense_info *wlan_sense = cfg80211_info->pmsr_info->wlan_sense_info; + struct inff_csi_data_frag_hdr *frag_hdr = (struct inff_csi_data_frag_hdr *)emsg; + struct cfg80211_pmsr_result result = {0}; + u32 frag_hdr_len = sizeof(struct inff_csi_data_frag_hdr); + void *data_frag = NULL; + u32 data_frag_len = 0, data_buf_old_len = 0, data_buf_new_len = 0; + struct inff_wlan_sense_counters *counters; + unsigned char *data_buf = NULL; + s32 ret = 0; + + /* Check if WLAN Sensing is in progress, before handling data available event */ + if (!wlan_sense || !wlan_sense->sensing) + return -EINVAL; + + counters = &wlan_sense->counters; + + data_frag_len = emsg_hdr->datalen - frag_hdr_len; + data_frag = emsg + frag_hdr_len; + inff_dbg(TRACE, "CSI Data frag len: %u frag num: %d frag ct: %d seq: %d hdr: %d\n", + data_frag_len, frag_hdr->fragment_num, frag_hdr->total_fragments, + frag_hdr->sequence_num, frag_hdr->hdr_version); + + counters->csi_frag_fw_evt_tot_ct++; + + /* + * TODO: Need to handle sequence number, fragment number mismatches. + * Also need check fragment header version. 
+ */
+
+	if (frag_hdr->fragment_num == 0) {
+		/* Handling Head fragment of CSI Data */
+		wlan_sense->data_buf = kcalloc(data_frag_len, sizeof(char), GFP_KERNEL);
+		if (!wlan_sense->data_buf)
+			goto fail;
+
+		data_buf = wlan_sense->data_buf;
+		wlan_sense->data_buf_len = data_frag_len;
+	} else {
+		/* Handling Body fragment of CSI Data */
+		data_buf_old_len = wlan_sense->data_buf_len;
+		data_buf_new_len = data_buf_old_len + data_frag_len;
+
+		if (data_buf_new_len > INFF_WLAN_SENSE_DATA_LEN_MAX) {
+			counters->csi_frag_fw_evt_handle_fail_ct++;
+			goto skip_data_frag;
+		}
+
+		data_buf = wlan_sense->data_buf;
+		wlan_sense->data_buf = krealloc(data_buf, data_buf_new_len,
+						GFP_KERNEL);
+		if (!wlan_sense->data_buf) {
+			kfree(data_buf);
+			/* old buffer is gone; clear the stale length so later
+			 * fragments do not account freed memory
+			 */
+			wlan_sense->data_buf_len = 0;
+			goto fail;
+		}
+
+		data_buf = wlan_sense->data_buf + data_buf_old_len;
+		wlan_sense->data_buf_len = data_buf_new_len;
+	}
+
+	/* Copy CSI Data Fragment into the CSI Data buf */
+	memcpy(data_buf, data_frag, data_frag_len);
+
+	counters->csi_frag_fw_evt_handle_succ_ct++;
+
+skip_data_frag:
+	if (frag_hdr->fragment_num == (frag_hdr->total_fragments - 1)) {
+		inff_dbg(TRACE, "CSI Data len: %u", wlan_sense->data_buf_len);
+		result.host_time = (u64)ktime_to_ns(ktime_get_boottime());
+		result.status = NL80211_PMSR_STATUS_SUCCESS;
+		result.final = 0;
+		result.type = NL80211_PMSR_TYPE_SENSING;
+		result.sensing.seq_number = frag_hdr->sequence_num;
+		result.sensing.data_len = wlan_sense->data_buf_len;
+		result.sensing.data = wlan_sense->data_buf;
+
+		cfg80211_pmsr_report(&ifp->vif->wdev, wlan_sense->sense_req,
+				     &result, GFP_KERNEL);
+		kfree(wlan_sense->data_buf);
+		/* NULL the pointer and reset the length: the next head
+		 * fragment reallocates, and a dangling pointer here would be
+		 * a use-after-free / double-free on the next event or in
+		 * inff_wlan_sense_detach()
+		 */
+		wlan_sense->data_buf = NULL;
+		wlan_sense->data_buf_len = 0;
+	}
+
+	return ret;
+fail:
+	counters->csi_frag_fw_evt_handle_fail_ct++;
+	inff_err("WLAN SENSE: Failed to allocate buffer for CSI Fragment Data\n");
+	return -ENOMEM;
+}
+
+/**
+ * inff_wlan_sense_disabled_event_handler() - Handle the WLAN Sensing disabled Event
+ * notification from the Firmware.
+ *
+ */
+static s32
+inff_wlan_sense_disabled_event_handler(struct inff_if *ifp, const struct inff_event_msg *e,
+				       void *data)
+{
+	struct inff_cfg80211_info *cfg80211_info = ifp->drvr->config;
+	struct inff_wlan_sense_info *wlan_sense = cfg80211_info->pmsr_info->wlan_sense_info;
+	s32 ret = 0;
+
+	if (!wlan_sense)
+		return ret;
+
+	/* WLAN Sensing Stopped */
+	wlan_sense->sensing = false;
+
+	/* complete the PMSR request only if one is still outstanding; the
+	 * stop path may already have consumed and cleared sense_req
+	 */
+	if (wlan_sense->sense_req) {
+		cfg80211_pmsr_complete(&ifp->vif->wdev, wlan_sense->sense_req, GFP_KERNEL);
+		wlan_sense->sense_req = NULL;
+	}
+
+	return ret;
+}
+
+/**
+ * inff_notify_wlan_sense_event() - Handle the WLAN SENSE Event notifications from Firmware.
+ *
+ * @ifp: interface instance.
+ * @e: event message.
+ * @data: CSI data
+ *
+ * return: 0 on success, value < 0 on failure.
+ */
+s32
+inff_notify_wlan_sense_event(struct inff_if *ifp, const struct inff_event_msg *e,
+			     void *data)
+{
+	s32 ret = 0;
+
+	inff_dbg(WLAN_SENSE, "WLAN SENSE: EVENT from firmware\n");
+
+	if (!ifp) {
+		ret = -EIO;
+		goto exit;
+	}
+
+	switch (e->event_code) {
+	case INFF_E_WLAN_SENSE_ENABLED:
+		ret = inff_wlan_sense_enabled_event_handler(ifp, e, data);
+		if (ret) {
+			inff_err("WLAN_SENSE: EVENT: Failed to handle ENABLED event ret=%d\n",
+				 ret);
+			goto exit;
+		}
+		inff_dbg(WLAN_SENSE, "WLAN SENSE: ENABLED\n");
+		break;
+	case INFF_E_WLAN_SENSE_DATA:
+		ret = inff_wlan_sense_data_avail_event_handler(ifp, e, data);
+		if (ret) {
+			inff_err("WLAN_SENSE: EVENT: Failed to handle Data event ret=%d\n", ret);
+			goto exit;
+		}
+		break;
+	case INFF_E_WLAN_SENSE_DISABLED:
+		ret = inff_wlan_sense_disabled_event_handler(ifp, e, data);
+		if (ret) {
+			inff_err("WLAN_SENSE: EVENT: Failed to handle DISABLED event ret=%d\n",
+				 ret);
+			goto exit;
+		}
+		inff_dbg(WLAN_SENSE, "WLAN SENSE: DISABLED\n");
+		break;
+	default:
+		inff_err("WLAN_SENSE: Received event %d not handled", e->event_code);
+		ret = -EOPNOTSUPP;
+		goto exit;
+	}
+
+exit:
+	return ret;
+}
+
+/**
+ * inff_wlan_sense_enable_oper_handler() -
Handle the WLAN Sense enable Operation request + * from Userspace. + * + * @ifp: interface instance. + * @cfg: WLAN Sense parameters. + * + * return: 0 on success, value < 0 on failure. + */ +static s32 +inff_wlan_sense_enable_oper_handler(struct inff_if *ifp, struct inff_wlan_sense_cfg cfg) +{ + struct inff_wlan_sense_fil_cfg fil_cfg; + int i = 0; + s32 ret = 0; + + memset(&fil_cfg, 0, sizeof(struct inff_wlan_sense_fil_cfg)); + + /* Enable WLAN Sensing Functionality */ + fil_cfg.csi_enable = 1; + + /* WLAN Sensing Schedule Configuration */ + if (cfg.interval == 0) + fil_cfg.capture_interval_ms = cpu_to_le32(-1); + else + fil_cfg.capture_interval_ms = cpu_to_le32(cfg.interval); + + fil_cfg.capture_duration_ms = cpu_to_le16(cfg.duration); + + /* WLAN Sensing Mode Configuration */ + fil_cfg.solicit_mode = (cfg.mode_flags & INFF_WLAN_SENSE_MODE_SOLICITED) ? 1 : 0; + fil_cfg.assoc_mode = (cfg.mode_flags & INFF_WLAN_SENSE_MODE_ASSOCIATED) ? 1 : 0; + + /* WLAN Sensing Filter Configuration */ + fil_cfg.bss_mode = cfg.filter.bss_scope; + fil_cfg.ignore_fcs = cfg.filter.ignore_fcs ? 1 : 0; + + for (i = 0; i < INFF_WLAN_SENSE_FILTER_FRM_RA_NUM; i++) + ether_addr_copy(fil_cfg.macaddr[i].octet, cfg.filter.ta[i].octet); + + for (i = 0; i < INFF_WLAN_SENSE_FILTER_FRM_TYP_SUBTYP_NUM; i++) { + fil_cfg.frmtyp_subtyp[i] |= ((cfg.filter.frmtyp_subtyp[i] & 0xF) << 2); + fil_cfg.frmtyp_subtyp[i] |= ((cfg.filter.frmtyp_subtyp[i] >> 4) & 0x3); + } + + fil_cfg.multi_csi_per_mac = 1; + fil_cfg.link_protection = 0; + fil_cfg.chanspec = 255; + fil_cfg.subcarriers = 0; + + ret = inff_fil_iovar_data_set(ifp, "csi", &fil_cfg, + sizeof(struct inff_wlan_sense_fil_cfg)); + if (ret) { + inff_err("WLAN SENSE: ENABLE: Failed, Firmware error (%d)", ret); + goto fail; + } + + inff_wlan_sense_dump_fil_cfg(fil_cfg); +fail: + return ret; +} + +/** + * inff_wlan_sense_disable_oper_handler() - Handle the WLAN Sense disable Operation request + * from Userspace. + * + * @ifp: interface instance. 
+ *
+ * return: 0 on success, value < 0 on failure.
+ */
+static s32
+inff_wlan_sense_disable_oper_handler(struct inff_if *ifp)
+{
+	struct inff_wlan_sense_fil_cfg fil_cfg;
+	s32 ret = 0;
+
+	memset(&fil_cfg, 0, sizeof(struct inff_wlan_sense_fil_cfg));
+
+	/* Disable WLAN Sensing Functionality */
+	fil_cfg.csi_enable = 0;
+
+	ret = inff_fil_iovar_data_set(ifp, "csi", &fil_cfg,
+				      sizeof(struct inff_wlan_sense_fil_cfg));
+	if (ret) {
+		inff_err("WLAN SENSE: DISABLE: Failed, Firmware error (%d)", ret);
+		goto fail;
+	}
+
+	inff_wlan_sense_dump_fil_cfg(fil_cfg);
+fail:
+	return ret;
+}
+
+/**
+ * inff_wlan_sense_configure_oper_handler() - Handle the WLAN Sense configure Operation
+ * request from Userspace.
+ *
+ * @ifp: interface instance.
+ * @cfg: WLAN Sense parameters.
+ *
+ * return: 0 on success, value < 0 on failure.
+ */
+static s32
+inff_wlan_sense_configure_oper_handler(struct inff_if *ifp, struct inff_wlan_sense_cfg cfg)
+{
+	struct inff_cfg80211_info *cfg80211_info = ifp->drvr->config;
+	struct inff_wlan_sense_info *wlan_sense = cfg80211_info->pmsr_info->wlan_sense_info;
+	s32 ret = 0;
+
+	if (!wlan_sense) {
+		inff_dbg(WLAN_SENSE, "WLAN_SENSE: no data structure\n");
+		return ret;
+	}
+
+	if (!memcmp(&wlan_sense->cfg, &cfg, sizeof(struct inff_wlan_sense_cfg))) {
+		inff_dbg(WLAN_SENSE, "WLAN_SENSE: Skipping new Duplicate configuration request\n");
+		return ret;
+	}
+
+	if (wlan_sense->sensing) {
+		/* If WLAN Sensing is already running, send IOVAR request
+		 * to Firmware with the new configurations
+		 */
+		ret = inff_wlan_sense_enable_oper_handler(ifp, cfg);
+		if (!ret)
+			/* keep the cached configuration in sync with the
+			 * firmware, otherwise the duplicate check above
+			 * compares against stale data
+			 */
+			memcpy(&wlan_sense->cfg, &cfg, sizeof(struct inff_wlan_sense_cfg));
+	} else {
+		/* If WLAN Sensing is not running, store the new configurations
+		 * in memory and wait for WLAN Sensing enable request from the user.
+		 */
+		memcpy(&wlan_sense->cfg, &cfg, sizeof(struct inff_wlan_sense_cfg));
+	}
+
+	return ret;
+}
+
+/**
+ * inff_wlan_sense_oper_handler() - Handle the WLAN Sense Operation requests from Userspace.
+ *
+ * @wiphy: wiphy object for cfg80211 interface.
+ * @wdev: wireless device. + * @oper: WLAN sensing operation + * @cfg: WLAN Sensing Configuration + * + * return: 0 on success, value < 0 on failure. + */ +s32 +inff_wlan_sense_oper_handler(struct wiphy *wiphy, struct wireless_dev *wdev, + enum inff_wlan_sense_oper oper, + struct inff_wlan_sense_cfg cfg) +{ + struct inff_cfg80211_info *cfg80211_info = wiphy_to_cfg(wiphy); + struct inff_wlan_sense_info *wlan_sense = cfg80211_info->pmsr_info->wlan_sense_info; + struct inff_cfg80211_vif *vif = NULL; + struct inff_if *ifp = NULL; + s32 ret = 0; + + vif = wdev_to_vif(wdev); + if (!vif) { + ret = -EIO; + goto exit; + } + + ifp = vif->ifp; + if (!ifp) { + ret = -EIO; + goto exit; + } + + /* Check if WLAN Sense feature is supported in the Firmware */ + if (!inff_feat_is_enabled(ifp, INFF_FEAT_WLAN_SENSE)) { + inff_err("WLAN SENSE: Operation(%d) can't be handled, WLAN Sense not enabled on VIF(%s)", + oper, inff_ifname(ifp)); + ret = -EOPNOTSUPP; + goto exit; + } + + switch (oper) { + case INFF_WLAN_SENSE_OPER_CONFIGURE: + ret = inff_wlan_sense_configure_oper_handler(ifp, cfg); + break; + case INFF_WLAN_SENSE_OPER_ENABLE: + ret = inff_wlan_sense_enable_oper_handler(ifp, wlan_sense->cfg); + break; + case INFF_WLAN_SENSE_OPER_DISABLE: + ret = inff_wlan_sense_disable_oper_handler(ifp); + break; + default: + inff_err("WLAN SENSE: Operation(%d) not supported on VIF(%s)", + oper, inff_ifname(ifp)); + ret = -EOPNOTSUPP; + } +exit: + return ret; +} + +s32 +inff_wlan_sense_parse_req(struct cfg80211_pmsr_request_peer *peer, + struct inff_wlan_sense_cfg *wlan_sense_cfg) +{ + char *token; + char delim[] = ",\n"; + char *buf = peer->sensing.vendor_req; + unsigned long val; + s32 err = 0; + + inff_dbg(TRACE, "WLAN SENSE: vendor_req(%d): %s\n", peer->sensing.vendor_req_len, buf); + wlan_sense_cfg->interval = peer->sensing.interval; + wlan_sense_cfg->duration = peer->sensing.duration; + wlan_sense_cfg->mode_flags = 0; + if (peer->sensing.associated) + wlan_sense_cfg->mode_flags |= 
INFF_WLAN_SENSE_MODE_ASSOCIATED; + ether_addr_copy(wlan_sense_cfg->filter.ta[0].octet, peer->addr); + + /* parse vendor data */ + token = strsep(&buf, delim); + while (token) { + if (!strncmp(token, "solicit_mode=", 13)) { + err = kstrtoul(token + 13, 0, &val); + if (err) + break; + if (val) + wlan_sense_cfg->mode_flags |= INFF_WLAN_SENSE_MODE_SOLICITED; + } + if (!strncmp(token, "bss_scope=", 10)) { + err = kstrtoul(token + 10, 0, &val); + if (err) + break; + wlan_sense_cfg->filter.bss_scope = val; + } + if (!strncmp(token, "ignore_fcs=", 11)) { + err = kstrtoul(token + 11, 0, &val); + if (err) + break; + wlan_sense_cfg->filter.ignore_fcs = !!val; + } + if (!strncmp(token, "frmtyp_subtyp0=", 15)) { + err = kstrtoul(token + 15, 0, &val); + if (err) + break; + wlan_sense_cfg->filter.frmtyp_subtyp[0] = val; + } + if (!strncmp(token, "frmtyp_subtyp1=", 15)) { + err = kstrtoul(token + 15, 0, &val); + if (err) + break; + wlan_sense_cfg->filter.frmtyp_subtyp[1] = val; + } + token = strsep(&buf, delim); + } + + if (err) + inff_err("WLAN SENSE: Parse fail %d!\n", err); + + return err; +} + +/** + * inff_wlan_sense_attach() - attach for WLAN Sense. + * + * @cfg80211_info: driver private data for cfg80211 interface. + */ +s32 +inff_wlan_sense_attach(struct inff_cfg80211_info *cfg80211_info) +{ + struct inff_wlan_sense_info *wlan_sense; + struct inff_if *pri_ifp; + s32 err = 0; + + pri_ifp = netdev_priv(cfg_to_ndev(cfg80211_info)); + + wlan_sense = kzalloc(sizeof(*wlan_sense), GFP_KERNEL); + if (!wlan_sense) { + err = -ENOMEM; + inff_err("WLAN SENSE: Failed to allocate memory for wlan_sense\n"); + goto fail; + } + + wlan_sense->sensing = false; + wlan_sense->sense_req = NULL; + wlan_sense->cfg80211_info = cfg80211_info; + cfg80211_info->pmsr_info->wlan_sense_info = wlan_sense; + +fail: + return err; +} + +/** + * inff_wlan_sense_detach() - detach WLAN Sense. + * + * @cfg80211_info: driver private data for cfg80211 interface. 
+ */
+void
+inff_wlan_sense_detach(struct inff_cfg80211_info *cfg80211_info)
+{
+	struct inff_wlan_sense_info *wlan_sense = cfg80211_info->pmsr_info->wlan_sense_info;
+
+	if (!wlan_sense)
+		return;
+
+	/* free the state even when the sensing vif is already gone
+	 * (e.g. torn down via inff_wlan_sense_ifp_removed); bailing out
+	 * on a NULL vif would leak the wlan_sense allocation
+	 */
+	kfree(wlan_sense->data_buf);
+	kfree(wlan_sense);
+	cfg80211_info->pmsr_info->wlan_sense_info = NULL;
+}
+
+int inff_wlan_sense_start(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	inff_dbg(TRACE, "WLAN SENSE: enter\n");
+
+	/* do nothing for now */
+	return 0;
+}
+
+void inff_wlan_sense_stop(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	s32 err = 0;
+	struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct inff_wlan_sense_info *wlan_sense_info = cfg->pmsr_info->wlan_sense_info;
+	struct inff_wlan_sense_cfg wlan_sense_cfg = {0};
+
+	inff_dbg(TRACE, "WLAN SENSE: enter\n");
+
+	/* abort running sensing process if we get interface stop command;
+	 * wlan_sense_info may already be NULL after detach
+	 */
+	if (!wlan_sense_info || !wlan_sense_info->sense_req || !wlan_sense_info->sensing)
+		return;
+
+	err = inff_wlan_sense_oper_handler(wiphy, wdev,
+					   INFF_WLAN_SENSE_OPER_DISABLE,
+					   wlan_sense_cfg);
+	inff_dbg(TRACE, "WLAN SENSE: err %d\n", err);
+}
diff --git a/drivers/net/wireless/infineon/inffmac/wlan_sense.h b/drivers/net/wireless/infineon/inffmac/wlan_sense.h
new file mode 100644
index 000000000000..1345ff33b7ea
--- /dev/null
+++ b/drivers/net/wireless/infineon/inffmac/wlan_sense.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2024-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG.
+ * All rights reserved.
+ */ + +#ifndef INFF_WLAN_SENSE_H +#define INFF_WLAN_SENSE_H + +#define INFF_WLAN_SENSE_FILTER_FRM_RA_NUM 4 +#define INFF_WLAN_SENSE_FILTER_FRM_TYP_SUBTYP_NUM 2 +#define INFF_WLAN_SENSE_METADATA 64 +/* 16k + Meta(64) for 2x2 80Mhz QoS HE */ +#define INFF_WLAN_SENSE_DATA_LEN_MAX (16384 + INFF_WLAN_SENSE_METADATA) + +struct inff_csi_data_frag_hdr { + u8 hdr_version; + u8 sequence_num; + u8 fragment_num; + u8 total_fragments; +} __packed; + +enum inff_wlan_sense_oper { + INFF_WLAN_SENSE_OPER_UNSPECIFIED, + INFF_WLAN_SENSE_OPER_CONFIGURE, + INFF_WLAN_SENSE_OPER_ENABLE, + INFF_WLAN_SENSE_OPER_DISABLE, +}; + +enum inff_wlan_sense_mode_flags { + INFF_WLAN_SENSE_MODE_SOLICITED = 1 << 0, + INFF_WLAN_SENSE_MODE_ASSOCIATED = 1 << 1, +}; + +enum inff_wlan_sense_filter_bss_scope { + INFF_WLAN_SENSE_FILTER_BSS_SCOPE_OWN_RA, + INFF_WLAN_SENSE_FILTER_BSS_SCOPE_CURR_BSS, + INFF_WLAN_SENSE_FILTER_BSS_SCOPE_ALL_BSS, +}; + +struct inff_wlan_sense_filter { + enum inff_wlan_sense_filter_bss_scope bss_scope; + bool ignore_fcs; + struct ether_addr ta[INFF_WLAN_SENSE_FILTER_FRM_RA_NUM]; + u8 frmtyp_subtyp[INFF_WLAN_SENSE_FILTER_FRM_TYP_SUBTYP_NUM]; +}; + +struct inff_wlan_sense_cfg { + u32 interval; + u16 duration; + u8 mode_flags; + struct inff_wlan_sense_filter filter; +}; + +struct inff_wlan_sense_fil_cfg { + /* 1: Enable CSI capture 0: Disable CSI capture */ + u8 csi_enable; + + /* -1: Disable periodic CSI capture */ + s32 capture_interval_ms; + u16 capture_duration_ms; + + /* 0: Unsolicited Mode 1: Solicited Mode */ + u8 solicit_mode; + + /* 0: Unassociated Mode 1: Associated Mode */ + u8 assoc_mode; + + /* 0: Allow all Rx 1: My BSS 2: Other BSS */ + u8 bss_mode; + + /* Also capture badfcs packets */ + u8 ignore_fcs; + + /* Only capture pkts from specified macaddr (Unassociated Mode) */ + struct ether_addr + macaddr[INFF_WLAN_SENSE_FILTER_FRM_RA_NUM]; + + /* Capture CSI only for specified chanspec */ + u16 chanspec; + + /* Capture multiple CSI per mac address (Unsolicited Mode) */ + 
u8 multi_csi_per_mac;
+
+	/* Enable PM indication before CSI window (Associated Mode) */
+	u8 link_protection;
+
+	/* Capture CSI only from selected subcarriers (Not Implemented) */
+	u8 subcarriers;
+
+	/* FrameType & SubType */
+	u8 frmtyp_subtyp[INFF_WLAN_SENSE_FILTER_FRM_TYP_SUBTYP_NUM];
+};
+
+struct inff_fil_wlan_sense_if_le {
+	struct ether_addr addr;
+};
+
+/**
+ * struct wlan_sense_bss - WLAN Sensing bss related information.
+ *
+ * @vif: virtual interface of this WLAN Sensing bss.
+ * @private_data: TBD
+ */
+struct wlan_sense_bss {
+	struct inff_cfg80211_vif *vif;
+	void *private_data;
+};
+
+/**
+ * struct inff_wlan_sense_counters - WLAN Sensing debug counters
+ *
+ * @csi_frag_fw_evt_tot_ct: CSI Data Fragment Firmware event total count.
+ * @csi_frag_fw_evt_handle_succ_ct: CSI Data Fragment Firmware event handled successfully count.
+ * @csi_frag_fw_evt_handle_fail_ct: CSI Data Fragment Firmware event handle failure count.
+ */
+struct inff_wlan_sense_counters {
+	u32 csi_frag_fw_evt_tot_ct;
+	u32 csi_frag_fw_evt_handle_succ_ct;
+	u32 csi_frag_fw_evt_handle_fail_ct;
+};
+
+/**
+ * struct inff_wlan_sense_info - wlan_sense specific driver information.
+ *
+ * @cfg80211_info: driver private data for cfg80211 interface.
+ * @vif: WLAN Sensing vif structure
+ * @dev_addr: WLAN Sensing device address.
+ * @cfg: WLAN Sensing Configuration.
+ * @sense_req: the pmsr request sent from cfg80211
+ * @data_buf: CSI Data buffer pointer.
+ * @data_buf_len: CSI Data buffer allocated memory size.
+ * @counters: CSI Data Debug counters.
+ * @sensing: WLAN Sensing in progress.
+ */ +struct inff_wlan_sense_info { + struct inff_cfg80211_info *cfg80211_info; + struct inff_cfg80211_vif *vif; + struct ether_addr dev_addr; + struct inff_wlan_sense_cfg cfg; + struct cfg80211_pmsr_request *sense_req; + char *data_buf; + u32 data_buf_len; + struct inff_wlan_sense_counters counters; + bool sensing; +}; + +int inff_wlan_sense_start(struct wiphy *wiphy, struct wireless_dev *wdev); +void inff_wlan_sense_stop(struct wiphy *wiphy, struct wireless_dev *wdev); + +struct wireless_dev * +inff_wlan_sense_add_vif(struct wiphy *wiphy, const char *name, + unsigned char name_assign_type, + enum nl80211_iftype type, + struct vif_params *params); +void +inff_wlan_sense_ifp_removed(struct inff_if *ifp, bool locked); +int +inff_wlan_sense_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev); +int +inff_wlan_sense_stats_read(struct seq_file *seq, void *data); +s32 +inff_notify_wlan_sense_event(struct inff_if *ifp, const struct inff_event_msg *e, + void *data); +s32 +inff_wlan_sense_oper_handler(struct wiphy *wiphy, struct wireless_dev *wdev, + enum inff_wlan_sense_oper oper, + struct inff_wlan_sense_cfg wlan_sense_cfg); +s32 +inff_wlan_sense_parse_req(struct cfg80211_pmsr_request_peer *peer, + struct inff_wlan_sense_cfg *wlan_sense_cfg); +s32 +inff_wlan_sense_attach(struct inff_cfg80211_info *cfg); +void +inff_wlan_sense_detach(struct inff_cfg80211_info *cfg); + +#endif /* INFF_WLAN_SENSE_H */ -- 2.25.1 Driver implementation of Packet offload. The device driver provides user option to configure a wide range of packet offloads in the device as per the end use case, like MQTT Keepalive Ping Request offload, ICMP Echo Ping Request offload, Multicast DNS Query Response offload, and ICMP Echo Response offload. 
+
Signed-off-by: Gokul Sivakumar
---
 .../net/wireless/infineon/inffmac/offload.c   | 429 ++++++++++++++++++
 .../net/wireless/infineon/inffmac/offload.h   | 215 +++++++++
 2 files changed, 644 insertions(+)
 create mode 100644 drivers/net/wireless/infineon/inffmac/offload.c
 create mode 100644 drivers/net/wireless/infineon/inffmac/offload.h

diff --git a/drivers/net/wireless/infineon/inffmac/offload.c b/drivers/net/wireless/infineon/inffmac/offload.c
new file mode 100644
index 000000000000..f0551ae0a26c
--- /dev/null
+++ b/drivers/net/wireless/infineon/inffmac/offload.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG.
+ * All rights reserved.
+ */
+
+#include
+#include "offload.h"
+#include "feature.h"
+#include "fwil.h"
+#include "cfg80211.h"
+#include "debug.h"
+#include "vendor_inf.h"
+
+unsigned int inff_offload_prof = INFF_OFFLOAD_PROF_TYPE_LOW_PWR;
+module_param_named(offload_prof, inff_offload_prof, uint, 0400);
+MODULE_PARM_DESC(offload_prof,
+		 "Offload power profile: 1:low 2:mid 3:high (default:1)");
+
+unsigned int inff_offload_feat = INFF_OFFLOAD_ARP |
+				 INFF_OFFLOAD_ND |
+				 INFF_OFFLOAD_BDO |
+				 INFF_OFFLOAD_ICMP |
+				 INFF_OFFLOAD_TKO |
+				 INFF_OFFLOAD_DLTRO |
+				 INFF_OFFLOAD_PNO |
+				 INFF_OFFLOAD_KEEPALIVE |
+				 INFF_OFFLOAD_GTKOE |
+				 INFF_OFFLOAD_WOWLPF;
+module_param_named(offload_feat, inff_offload_feat, uint, 0400);
+MODULE_PARM_DESC(offload_feat,
+		 "Offload feat bitmap: 0:arp 1:nd 2:mdns 3:icmp 4:tcp-keepalive 5:dhcp-renewal 6:pno 7:keepalive 8:gtk 9:wowlpf (default: 0x3FF)");
+
+/* Offload features to firmware based on a user based power profile using module param
+ * offload_prof and offload_feat (provides flag list of all offloads).
+ * Default power profile : LowPwr with all offloads enabled.
+ */ +void +inff_offload_config(struct inff_if *ifp, unsigned int ol_feat, + unsigned int ol_profile, bool reset) +{ + struct inff_ol_cfg_v1 ol_cfg = {0}; + u32 ol_feat_skip = ~ol_feat; + int err = 0; + + ol_cfg.ver = INFF_OFFLOAD_CFG_VER_1; + ol_cfg.len = sizeof(ol_cfg); + ol_cfg.id = INFF_OFFLOAD_CFG_ID_PROF; + ol_cfg.offload_skip = ol_feat_skip; + ol_cfg.u.ol_profile.reset = reset; + ol_cfg.u.ol_profile.type = ol_profile; + + err = inff_fil_iovar_data_set(ifp, "offload_config", &ol_cfg, + sizeof(ol_cfg)); + if (err < 0) + inff_err("failed to %s generic offload profile:%u feat:0x%x, err = %d", + reset ? "reset" : "set", ol_profile, ol_feat, err); + else + inff_info("successfully %s generic offload profile:%u feat:0x%x", + reset ? "reset" : "set", ol_profile, ol_feat); +} + +/* Enable specific offloads that are not enabled in a power profile but have + * to be enabled in suspend state as host goes to sleep. + */ +void +inff_offload_enable(struct inff_if *ifp, unsigned int ol_feat, + bool enable) +{ + struct inff_ol_cfg_v1 ol_cfg = {0}; + u32 ol_feat_skip = ~ol_feat; + int err = 0; + + ol_cfg.ver = INFF_OFFLOAD_CFG_VER_1; + ol_cfg.len = sizeof(ol_cfg); + ol_cfg.id = INFF_OFFLOAD_CFG_ID_ACTIVATE; + ol_cfg.u.ol_activate.enable = enable; + ol_cfg.offload_skip = ol_feat_skip; + + err = inff_fil_iovar_data_set(ifp, "offload_config", &ol_cfg, + sizeof(ol_cfg)); + if (err < 0) + inff_err("failed to %s generic offload feat:0x%x, err = %d", + enable ? "enable" : "disable", ol_feat, err); + else + inff_info("successfully %s generic offload feat:0x%x", + enable ? 
"enabled" : "disabled", ol_feat); +} + +void +inff_offload_host_ipv4_update(struct inff_if *ifp, unsigned int ol_feat, + u32 ipaddr, bool is_add) +{ + struct inff_ol_cfg_v1 ol_cfg = {0}; + u32 ol_feat_skip = ~ol_feat; + int err = 0; + + ol_cfg.ver = INFF_OFFLOAD_CFG_VER_1; + ol_cfg.len = sizeof(ol_cfg); + ol_cfg.id = INFF_OFFLOAD_CFG_ID_INET_V4; + ol_cfg.u.ol_inet_v4.del = !is_add; + memcpy(ol_cfg.u.ol_inet_v4.host_ipv4.addr, &ipaddr, sizeof(struct ipv4_addr)); + ol_cfg.offload_skip = ol_feat_skip; + + err = inff_fil_iovar_data_set(ifp, "offload_config", &ol_cfg, + sizeof(ol_cfg)); + if (err < 0) + inff_err("failed to %s generic offload host address %pI4, err = %d", + is_add ? "add" : "del", &ipaddr, err); + else + inff_dbg(TRACE, "successfully %s generic offload host address %pI4", + is_add ? "added" : "deleted", &ipaddr); +} + +int +inff_offload_host_ipv6_update(struct inff_if *ifp, unsigned int ol_feat, + void *ptr, u8 type, bool is_add) +{ + struct inff_ol_cfg_v1 ol_cfg = {0}; + u32 ol_feat_skip = ~ol_feat; + int err = 0; + + ol_cfg.ver = INFF_OFFLOAD_CFG_VER_1; + ol_cfg.len = sizeof(ol_cfg); + ol_cfg.id = INFF_OFFLOAD_CFG_ID_INET_V6; + ol_cfg.u.ol_inet_v6.del = !is_add; + ol_cfg.u.ol_inet_v6.type = type; + memcpy(ol_cfg.u.ol_inet_v6.host_ipv6.addr, ptr, sizeof(struct ipv6_addr)); + ol_cfg.offload_skip = ol_feat_skip; + + err = inff_fil_iovar_data_set(ifp, "offload_config", &ol_cfg, + sizeof(ol_cfg)); + if (err < 0) + inff_err("failed to %s host address %pI6 err = %d", + is_add ? "add" : "del", ptr, err); + else + inff_dbg(TRACE, "successfully %s host address %pI6", + is_add ? 
"add" : "del", ptr); + + return err; +} + +void +inff_offload_configure_arp_nd(struct inff_if *ifp, bool enable) +{ + s32 err; + u32 mode; + + if (enable && inff_is_apmode_operating(ifp->drvr->wiphy)) { + inff_dbg(TRACE, "Skip ARP/ND offload enable when soft AP is running\n"); + return; + } + + if (enable) + mode = INFF_OFFLOAD_ARP_AGENT | INFF_OFFLOAD_ARP_PEER_AUTO_REPLY; + else + mode = 0; + + if (inff_feat_is_enabled(ifp, INFF_FEAT_OFFLOADS)) { + u32 feat_set = inff_offload_feat & (INFF_OFFLOAD_ARP | INFF_OFFLOAD_ND); + + if (!feat_set) + return; + + if (enable) + inff_offload_config(ifp, feat_set, inff_offload_prof, false); + else + inff_offload_config(ifp, feat_set, inff_offload_prof, true); + } else { + /* Try to set and enable ARP offload feature, this may fail, then it */ + /* is simply not supported and err 0 will be returned */ + err = inff_fil_iovar_int_set(ifp, "arp_ol", mode); + if (err) { + inff_dbg(TRACE, "failed to set ARP offload mode to 0x%x, err = %d\n", + mode, err); + } else { + err = inff_fil_iovar_int_set(ifp, "arpoe", enable); + if (err) { + inff_dbg(TRACE, "failed to configure (%d) ARP offload err = %d\n", + enable, err); + } else { + inff_dbg(TRACE, "successfully configured (%d) ARP offload to 0x%x\n", + enable, mode); + } + } + + err = inff_fil_iovar_int_set(ifp, "ndoe", enable); + if (err) { + inff_dbg(TRACE, "failed to configure (%d) ND offload err = %d\n", + enable, err); + } else { + inff_dbg(TRACE, "successfully configured (%d) ND offload to 0x%x\n", + enable, mode); + } + } +} + +int +inff_offload_configure_mkeep_alive(struct inff_if *ifp, bool immed_flag, + long *param0, long *param1, long *param2, + int max_param_ct) +{ + u8 buf[150] = {0}; + struct inff_mkeep_alive *mkeep_alive; + int ret = 0, i = 0, j = 0; + + mkeep_alive = (struct inff_mkeep_alive *)buf; + + mkeep_alive->period_msec = (u32)(*param1); + + if (immed_flag) { + if (mkeep_alive->period_msec & WL_MKEEP_ALIVE_IMMEDIATE) { + inff_err("Period %d too large\n", 
mkeep_alive->period_msec); + ret = -EINVAL; + goto exit; + } + if (mkeep_alive->period_msec) + mkeep_alive->period_msec |= WL_MKEEP_ALIVE_IMMEDIATE; + } + mkeep_alive->version = WL_MKEEP_ALIVE_VERSION; + mkeep_alive->keep_alive_id = (u8)(*param0); + mkeep_alive->length = offsetof(struct inff_mkeep_alive, data); + + /* If there is no hex value for pkt data, it is treated as NULL KA. + * If there is hex value for pkt data, then copy hex as data and is + * treated as NAT KA. + */ + if (mkeep_alive->period_msec > 0) { + if (param2[j] < 0) { + mkeep_alive->len_bytes = 0; + } else if (param2[j + 14] < 0) { + inff_err("Invalid pkt data. Required len bytes >= 14.\n"); + ret = -EINVAL; + goto exit; + } else { + while (param2[j] != ' ') { + if (j <= max_param_ct) { + mkeep_alive->data[i] = param2[j]; + j++; + } + i++; + } + mkeep_alive->len_bytes = i; + } + } + ret = inff_fil_iovar_data_set(ifp, "mkeep_alive", buf, sizeof(buf)); + if (ret) + inff_err("Failed to set mkeeplive params: %d\n", ret); + +exit: + return ret; +} + +int +inff_offload_configure_tko(struct inff_if *ifp, long tko_subcmd_id, long *param0, + long *param1, long *param2, long *param3) +{ + struct inff_tko *tko; + struct inff_tko_param *tko_param; + struct inff_tko_enable *tko_enable; + u8 buf[128] = {0}; + int length; + int ret = 0; + + tko = (struct inff_tko *)buf; + + tko->subcmd_id = tko_subcmd_id; + switch (tko->subcmd_id) { + case WL_TKO_SUBCMD_ENABLE: + tko_enable = (struct inff_tko_enable *)tko->data; + tko->len = sizeof(*tko_enable); + + tko_enable->enable = (u8)(*param0); + break; + case WL_TKO_SUBCMD_PARAM: + tko_param = (struct inff_tko_param *)tko->data; + tko->len = sizeof(*tko_param); + + tko_param->interval = (u16)(*param0); + tko_param->retry_interval = (u16)(*param1); + tko_param->retry_count = (u16)(*param2); + tko_param->rst_delay = (s16)(*param3); + break; + default: + inff_err("offload tko subcmd id %d not recognized", tko->subcmd_id); + ret = -EOPNOTSUPP; + goto exit; + } + + length = 
offsetof(struct inff_tko, data) + tko->len;
+	ret = inff_fil_iovar_data_set(ifp, "tko", buf, length);
+	if (ret)
+		inff_err("Failed to configure tko: %d\n", ret);
+exit:
+	return ret;
+}
+
+/**
+ * inff_offload_configure_icmp_echo_req() - Prepare ICMP Echo Request IOVAR
+ * based on the ICMP Echo Request Parameters.
+ *
+ * @ifp: Pointer to inff_if structure.
+ * @cmd_type: Sub Command Type.
+ * @enable: Enable.
+ * @ip_addr: IP Address to be filled.
+ * @ip_ver: IP version.
+ * @mac_addr: MAC Address to be filled.
+ * @periodicity: Periodicity of ping in sec.
+ * @duration: Duration in sec.
+ *
+ * Return: 0 on success, non-zero otherwise.
+ */
+int
+inff_offload_configure_icmp_echo_req(struct inff_if *ifp, u8 cmd_type,
+				     u8 enable, u8 *ip_addr, u8 ip_ver,
+				     u8 *mac_addr, u32 periodicity, u32 duration)
+{
+	struct inff_cfg80211_info *cfg = ifp->drvr->config;
+	struct inff_icmp_echo_req_cmd *icmp_echo_req_cmd;
+	struct inff_icmp_echo_req_peer_config *icmp_echo_req_peer_config;
+	struct inff_icmp_echo_req_peer_ip *icmp_echo_req_peer_ip;
+	int ret = 0;
+
+	memset(cfg->extra_buf, '\0', WL_EXTRA_BUF_MAX);
+	icmp_echo_req_cmd = (struct inff_icmp_echo_req_cmd *)cfg->extra_buf;
+
+	icmp_echo_req_cmd->version = INFF_OFFLOAD_ICMP_ECHO_REQ_VER;
+	icmp_echo_req_cmd->cmd_type = cmd_type;
+
+	switch (icmp_echo_req_cmd->cmd_type) {
+	case INFF_OFFLOAD_ICMP_ECHO_REQ_ENAB:
+		icmp_echo_req_cmd->data[0] = enable;
+		icmp_echo_req_cmd->length = sizeof(struct inff_icmp_echo_req_cmd) +
+					    sizeof(u8);
+		break;
+	case INFF_OFFLOAD_ICMP_ECHO_REQ_ADD:
+		icmp_echo_req_peer_config = (struct inff_icmp_echo_req_peer_config *)
+					    icmp_echo_req_cmd->data;
+		icmp_echo_req_cmd->length = sizeof(*icmp_echo_req_peer_config) +
+					    sizeof(struct inff_icmp_echo_req_cmd);
+		icmp_echo_req_peer_config->version = INFF_OFFLOAD_ICMP_ECHO_REQ_VER;
+		icmp_echo_req_peer_config->ip_ver = ip_ver;
+		memcpy(icmp_echo_req_peer_config->u.ipv6.addr, ip_addr,
+		       (icmp_echo_req_peer_config->ip_ver ==
INFF_OFFLOAD_ICMP_ECHO_REQ_IP_V6) ? + INFF_IPV6_ADDR_LEN : INFF_IPV4_ADDR_LEN); + memcpy(icmp_echo_req_peer_config->mac_addr, mac_addr, ETH_ALEN); + icmp_echo_req_peer_config->periodicity = periodicity; + icmp_echo_req_peer_config->duration = duration; + icmp_echo_req_peer_config->length = sizeof(struct inff_icmp_echo_req_peer_config); + break; + case INFF_OFFLOAD_ICMP_ECHO_REQ_DEL: + case INFF_OFFLOAD_ICMP_ECHO_REQ_START: + case INFF_OFFLOAD_ICMP_ECHO_REQ_STOP: + icmp_echo_req_peer_ip = (struct inff_icmp_echo_req_peer_ip *) + icmp_echo_req_cmd->data; + icmp_echo_req_cmd->length = sizeof(*icmp_echo_req_peer_ip) + + sizeof(struct inff_icmp_echo_req_cmd); + icmp_echo_req_peer_ip->version = INFF_OFFLOAD_ICMP_ECHO_REQ_VER; + icmp_echo_req_peer_ip->ip_ver = ip_ver; + memcpy(icmp_echo_req_peer_ip->u.ipv6.addr, ip_addr, + (icmp_echo_req_peer_ip->ip_ver == INFF_OFFLOAD_ICMP_ECHO_REQ_IP_V6) ? + INFF_IPV6_ADDR_LEN : INFF_IPV4_ADDR_LEN); + icmp_echo_req_peer_ip->length = sizeof(struct inff_icmp_echo_req_peer_ip); + break; + case INFF_OFFLOAD_ICMP_ECHO_REQ_INFO: + icmp_echo_req_peer_ip = (struct inff_icmp_echo_req_peer_ip *) + icmp_echo_req_cmd->data; + icmp_echo_req_cmd->length = sizeof(*icmp_echo_req_peer_ip) + + sizeof(struct inff_icmp_echo_req_cmd); + icmp_echo_req_peer_ip->version = INFF_OFFLOAD_ICMP_ECHO_REQ_VER; + icmp_echo_req_peer_ip->ip_ver = ip_ver; + if (ip_ver != INFF_OFFLOAD_ICMP_ECHO_REQ_IP_BOTH) { + memcpy(icmp_echo_req_peer_ip->u.ipv6.addr, ip_addr, + (icmp_echo_req_peer_ip->ip_ver == INFF_OFFLOAD_ICMP_ECHO_REQ_IP_V6) ? 
+ INFF_IPV6_ADDR_LEN : INFF_IPV4_ADDR_LEN); + } + icmp_echo_req_peer_ip->length = sizeof(struct inff_icmp_echo_req_peer_ip); + break; + default: + inff_err("offload icmp_echo_req subcmd id %d not recognized", + icmp_echo_req_cmd->cmd_type); + return -EOPNOTSUPP; + } + + if (icmp_echo_req_cmd->cmd_type == INFF_OFFLOAD_ICMP_ECHO_REQ_INFO) + ret = inff_fil_iovar_data_get(ifp, "icmp_echo_req", cfg->extra_buf, + WL_EXTRA_BUF_MAX); + else + ret = inff_fil_iovar_data_set(ifp, "icmp_echo_req", (u8 *)icmp_echo_req_cmd, + icmp_echo_req_cmd->length); + + if (ret) + inff_err("Failed to get icmp_echo_req info: %d\n", ret); + + return ret; +} + +s32 +inff_notify_icmp_echo_req_event(struct inff_if *ifp, const struct inff_event_msg *e, + void *data) +{ + struct inff_cfg80211_info *cfg = ifp->drvr->config; + struct wiphy *wiphy = cfg_to_wiphy(cfg); + struct inff_icmp_echo_req_event *echo_req_event; + + echo_req_event = (struct inff_icmp_echo_req_event *)data; + + inff_dbg(INFO, "Enter: event %s (%d), status=%d\n", + inff_fweh_event_name(e->event_code), e->event_code, + e->status); + + inff_dbg(INFO, "icmp_echo_req_event reason = %d icmp_echo_req_event count = %d\n", + echo_req_event->reason, echo_req_event->echo_req_cnt); + + if (echo_req_event->ip_ver == INFF_OFFLOAD_ICMP_ECHO_REQ_IP_V6) { + inff_dbg(INFO, "icmp_echo_req_event IPv6 address = %pI6", + &echo_req_event->u.ipv6.addr); + } else if (echo_req_event->ip_ver == INFF_OFFLOAD_ICMP_ECHO_REQ_IP_V4) { + inff_dbg(INFO, "icmp_echo_req_event IPv4 address = %pI4", + &echo_req_event->u.ipv6.addr); + } else { + inff_err("Invalid IP address\n"); + return -EINVAL; + } + + return inff_cfg80211_vndr_evt_icmp_echo_req(wiphy, &ifp->vif->wdev, data, + echo_req_event->length); +} diff --git a/drivers/net/wireless/infineon/inffmac/offload.h b/drivers/net/wireless/infineon/inffmac/offload.h new file mode 100644 index 000000000000..65b8711a99ed --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/offload.h @@ -0,0 +1,215 @@ +/* 
SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_OFFLOAD_H +#define INFF_OFFLOAD_H + +#include "core.h" +#include "fwil_types.h" + +/* enum inff_offload_feats - Packet types to be offloaded to firmware for processing */ +enum inff_offload_feats { + INFF_OFFLOAD_ARP = BIT(0), + INFF_OFFLOAD_ND = BIT(1), + INFF_OFFLOAD_BDO = BIT(2), + INFF_OFFLOAD_ICMP = BIT(3), + INFF_OFFLOAD_TKO = BIT(4), + INFF_OFFLOAD_DLTRO = BIT(5), + INFF_OFFLOAD_PNO = BIT(6), + INFF_OFFLOAD_KEEPALIVE = BIT(7), + INFF_OFFLOAD_GTKOE = BIT(8), + INFF_OFFLOAD_WOWLPF = BIT(9) +}; + +enum inff_ol_cfg_id { + INFF_OFFLOAD_CFG_ID_PROF = 1, /* Offload Profile Update */ + INFF_OFFLOAD_CFG_ID_INET_V4, /* ADD/DEL IPv4 Address */ + INFF_OFFLOAD_CFG_ID_INET_V6, /* ADD/DEL IPv6 Address */ + INFF_OFFLOAD_CFG_ID_ACTIVATE, /* Activate/Deactivate Offload */ + /* Add new type before this line */ + INFF_OFFLOAD_CFG_ID_MAX /* Max Offload Config ID */ +}; + +enum inff_ol_prof_type { + INFF_OFFLOAD_PROF_TYPE_LOW_PWR = 1, /* Low Power Profile */ + INFF_OFFLOAD_PROF_TYPE_MID_PWR = 2, /* Mid Power Profile */ + INFF_OFFLOAD_PROF_TYPE_HIGH_PWR = 3, /* High Power Profile */ + /* Add new type before this line */ + INFF_OFFLOAD_PROF_TYPE_MAX /* Max Offload Profile */ +}; + +/* Offload profile configuration */ +struct inff_ol_cfg_v1 { + u16 ver; /* version of this structure */ + u16 len; /* length of structure in bytes */ + enum inff_ol_cfg_id id; /* Offload Config ID */ + + union { + struct { + enum inff_ol_prof_type type; /* offload profile type */ + bool reset; /* Remove profile configuration */ + u8 pad[3]; + } ol_profile; + struct { + struct ipv4_addr host_ipv4; + bool del; /* 1:del 0:add host ipv4 address */ + u8 pad[3]; + } ol_inet_v4; + struct { + struct ipv6_addr host_ipv6; + u8 type; /* 0:unicast 1:anycast */ + bool del; /* 1:del 0:add host ipv6 address */ + u8 pad[2]; + } ol_inet_v6; + 
struct { + bool enable; /* enable/disable offload feature */ + u8 pad[3]; + } ol_activate; + } u; + + u32 offload_skip; /* Bitmap of offload to be skipped */ +}; + +/* ARP Offload feature flags for arp_ol iovar */ +#define INFF_OFFLOAD_ARP_AGENT 0x00000001 +#define INFF_OFFLOAD_ARP_SNOOP 0x00000002 +#define INFF_OFFLOAD_ARP_HOST_AUTO_REPLY 0x00000004 +#define INFF_OFFLOAD_ARP_PEER_AUTO_REPLY 0x00000008 + +#define WL_MKEEP_ALIVE_VERSION 1 +#define WL_MKEEP_ALIVE_IMMEDIATE 0x80000000 + +struct inff_mkeep_alive { + u16 version; /* Version for mkeep_alive */ + u16 length; /* length of fixed parameters in the structure */ + u32 period_msec; /* high bit on means immediate send */ + u16 len_bytes; + u8 keep_alive_id; /* 0 - 3 for N = 4 */ + u8 data[]; +}; + +struct inff_tko { + u16 subcmd_id; /* subcommand id */ + u16 len; /* total length of data[] */ + u8 data[]; /* subcommand data */ +}; + +/* subcommand ids */ +#define WL_TKO_SUBCMD_PARAM 1 /* configure offload common parameters */ +#define WL_TKO_SUBCMD_ENABLE 3 /* enable/disable */ + +/* WL_TKO_SUBCMD_PARAM subcommand data */ +struct inff_tko_param { + u16 interval; /* keepalive tx interval (secs) */ + u16 retry_interval; /* keepalive retry interval (secs) */ + u16 retry_count; /* retry_count */ + s16 rst_delay; /* delay to delay a RST frame from reaching the host */ +}; + +struct inff_tko_enable { + u8 enable; /* 1 - enable, 0 - disable */ + u8 pad[3]; /* 4-byte struct alignment */ +}; + +#define INFF_OFFLOAD_ICMP_ECHO_REQ_VER 1 + +#define INFF_OFFLOAD_ICMP_ECHO_REQ_IP_BOTH 0 +#define INFF_OFFLOAD_ICMP_ECHO_REQ_IP_V4 1 +#define INFF_OFFLOAD_ICMP_ECHO_REQ_IP_V6 2 + +/* ICMP Echo Request Sub commands */ +enum { + INFF_OFFLOAD_ICMP_ECHO_REQ_ENAB, + INFF_OFFLOAD_ICMP_ECHO_REQ_ADD, + INFF_OFFLOAD_ICMP_ECHO_REQ_DEL, + INFF_OFFLOAD_ICMP_ECHO_REQ_START, + INFF_OFFLOAD_ICMP_ECHO_REQ_STOP, + INFF_OFFLOAD_ICMP_ECHO_REQ_INFO +}; + +struct inff_icmp_echo_req_peer_ip { + u16 version; + u16 length; + u8 ip_ver; /* IP Version 
IPv4:1 IPv6:2 */ + u8 pad[3]; + union { + struct ipv4_addr ipv4; /* Peer IPV4 Address */ + struct ipv6_addr ipv6; /* Peer IPV6 Address */ + } u; +}; + +struct inff_icmp_echo_req_peer_config { + u16 version; + u16 length; + u8 ip_ver; /* IP Version IPv4:1 IPv6:2 */ + u8 pad[3]; + u32 periodicity; /* Periodicty of Ping in sec */ + u32 duration; /* Duration in sec */ + union { + struct ipv4_addr ipv4; /* Peer IPv4 Address */ + struct ipv6_addr ipv6; /* Peer IPv6 Address */ + } u; + u8 mac_addr[ETH_ALEN]; /* Peer Mac Address */ +}; + +/* ICMP Echo Req IOVAR Struct */ +struct inff_icmp_echo_req_cmd { + u16 version; + u16 length; + u8 cmd_type; /* ICMP Echo Req Cmd Type */ + u8 pad[3]; + u8 data[]; /* Data Pointing to Sub cmd structure */ +}; + +/* ICMP Echo Request IOVAR INFO Struct */ +struct inff_icmp_echo_req_get_peer_info { + u32 state; /* State of the Peer */ + struct inff_icmp_echo_req_peer_config config; /* Configuration of Peer */ +}; + +struct inff_icmp_echo_req_get_info { + u16 version; + u16 length; + u8 enable; /* Offload Enable */ + u8 count; /* Peer Count */ + u8 pad[2]; + u8 data[]; /* Data Pointing to get peer info structure */ +}; + +struct inff_icmp_echo_req_event { + u16 version; + u16 length; + u8 ip_ver; /* Peer IP Version IPv4:1 IPv6:2 */ + u8 reason; /* Event reason */ + u8 pad[2]; + u32 echo_req_cnt; /* ICMP Echo Req Count */ + union { + struct ipv4_addr ipv4; /* Peer IPV4 Address */ + struct ipv6_addr ipv6; /* Peer IPV6 Address */ + } u; +}; + +void inff_offload_config(struct inff_if *ifp, unsigned int ol_feat, + unsigned int ol_profile, bool reset); +void inff_offload_enable(struct inff_if *ifp, unsigned int ol_feat, + bool enable); +void inff_offload_host_ipv4_update(struct inff_if *ifp, unsigned int ol_feat, + u32 ipaddr, bool is_add); +int inff_offload_host_ipv6_update(struct inff_if *ifp, unsigned int ol_feat, + void *ptr, u8 type, bool is_add); +void inff_offload_configure_arp_nd(struct inff_if *ifp, bool enable); +int 
inff_offload_configure_mkeep_alive(struct inff_if *ifp, bool immediate, + long *param0, long *param1, long *param2, + int max_param_ct); +int inff_offload_configure_tko(struct inff_if *ifp, long tko_subcmd_id, long *param0, + long *param1, long *param2, long *param3); +int inff_offload_configure_icmp_echo_req(struct inff_if *ifp, u8 cmd_type, + u8 enable, u8 *ip_addr, u8 ip_ver, + u8 *mac_addr, u32 periodicity, u32 duration); +s32 inff_notify_icmp_echo_req_event(struct inff_if *ifp, const struct inff_event_msg *e, + void *data); + +#endif /* INFF_OFFLOAD_H */ -- 2.25.1 Driver implementation to collect the HE capabilities of the Device and then get it registered as part of the wiphy capabilities in the cfg80211 driver. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/he.c | 239 +++++++++++++++++++++ drivers/net/wireless/infineon/inffmac/he.h | 58 +++++ 2 files changed, 297 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/he.c create mode 100644 drivers/net/wireless/infineon/inffmac/he.h diff --git a/drivers/net/wireless/infineon/inffmac/he.c b/drivers/net/wireless/infineon/inffmac/he.c new file mode 100644 index 000000000000..e1bb0665c136 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/he.c @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include +#include "he.h" +#include "feature.h" +#include "fwil.h" +#include "cfg80211.h" +#include "debug.h" +#include "xtlv.h" + +struct ieee80211_sband_iftype_data sdata[NUM_NL80211_BANDS]; + +int +inff_he_get_enable(struct inff_if *ifp, u8 *param, int param_len) +{ + s32 ret = 0; + + ret = inff_fil_xtlv_data_get(ifp, "he", INFF_HE_CMD_ENAB, param, param_len); + if (unlikely(ret)) + iphy_err(ifp->drvr, "failed to check if HE is enabled"); + + return ret; +} + +int +inff_he_get_bss_color(struct inff_if *ifp, u8 *param, int param_len) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_xtlv *he_tlv; + int err = 0; + + he_tlv = (struct inff_xtlv *)param; + he_tlv->id = cpu_to_le16(INFF_HE_CMD_BSSCOLOR); + + err = inff_fil_iovar_data_get(ifp, "he", param, param_len); + if (err) + iphy_err(drvr, "get he bss_color error:%d\n", err); + + return err; +} + +int +inff_he_set_bss_color(struct inff_if *ifp, u8 color) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_xtlv *he_tlv; + u8 param[8] = {0}; + int err = 0; + + he_tlv = (struct inff_xtlv *)param; + he_tlv->id = cpu_to_le16(INFF_HE_CMD_BSSCOLOR); + he_tlv->len = cpu_to_le16(1); + memcpy(he_tlv->data, &color, sizeof(u8)); + + err = inff_fil_iovar_data_set(ifp, "he", param, sizeof(param)); + if (err) + iphy_err(drvr, "set he bss_color error:%d\n", err); + + return err; +} + +int +inff_he_get_muedca_opt(struct inff_if *ifp, u8 *param, int param_len) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_xtlv *he_tlv; + int err = 0; + + he_tlv = (struct inff_xtlv *)param; + he_tlv->id = cpu_to_le16(INFF_HE_CMD_MUEDCA_OPT); + + err = inff_fil_iovar_data_get(ifp, "he", param, param_len); + if (err) + iphy_err(drvr, "get he muedca_opt_enable error:%d\n", err); + + return err; +} + +int +inff_he_set_muedca_opt(struct inff_if *ifp, u8 val) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_xtlv *he_tlv; + u8 param[8] = {0}; + int err = 0; + + he_tlv = (struct inff_xtlv *)param; + he_tlv->id = 
cpu_to_le16(INFF_HE_CMD_MUEDCA_OPT);
+	he_tlv->len = cpu_to_le16(1);
+	he_tlv->data[0] = val;
+
+	err = inff_fil_iovar_data_set(ifp, "he", param, sizeof(param));
+	if (err)
+		iphy_err(drvr, "set he muedca_opt_enable error:%d\n", err);
+
+	return err;
+}
+
+int
+inff_he_set_bitrate(struct inff_if *ifp, const struct cfg80211_bitrate_mask *mask, u8 band)
+{
+	struct inff_pub *drvr = ifp->drvr;
+	uint hegi;
+	u16 mcs_mask;
+	u32 rspec = 0;
+	s32 ret = 0;
+	u8 mcs = 0;
+
+	mcs_mask = mask->control[band].he_mcs[0];
+	mcs_mask = (mcs_mask ^ ((mcs_mask - 1) & mcs_mask));
+	if (mcs_mask != mask->control[band].he_mcs[0])
+		return ret;
+
+	while (mcs_mask) {
+		mcs++;
+		mcs_mask >>= 1;
+	}
+
+	rspec = WL_RSPEC_ENCODE_HE;	/* 11ax HE */
+	rspec |= (WL_RSPEC_HE_NSS_UNSPECIFIED << WL_RSPEC_HE_NSS_SHIFT) | (mcs - 1);
+	/* set the other rspec fields */
+	hegi = mask->control[band].he_gi + 1;
+	rspec |= ((hegi != 0xFF) ? HE_GI_TO_RSPEC(hegi) : 0);
+
+	switch (band) {
+	case NL80211_BAND_2GHZ:
+		ret = inff_fil_iovar_data_set(ifp, "2g_rate", (char *)&rspec, 4);
+		break;
+	case NL80211_BAND_5GHZ:
+		ret = inff_fil_iovar_data_set(ifp, "5g_rate", (char *)&rspec, 4);
+		break;
+	case NL80211_BAND_6GHZ:
+		ret = inff_fil_iovar_data_set(ifp, "6g_rate", (char *)&rspec, 4);
+		break;
+	default:
+		iphy_err(drvr, "Setting bitrate unsupported on band %d\n", band);
+		ret = -EOPNOTSUPP;
+	}
+
+	if (unlikely(ret))
+		iphy_err(drvr, "set rate failed, retcode = %d\n", ret);
+
+	return ret;
+}
+
+void
+inff_he_update_wiphy_cap(struct inff_if *ifp)
+{
+	struct inff_pub *drvr = ifp->drvr;
+	struct wiphy *wiphy = drvr->wiphy;
+	struct ieee80211_supported_band *band;
+	struct ieee80211_sband_iftype_data *data;
+	struct ieee80211_sta_he_cap *he_cap;
+	struct ieee80211_he_cap_elem *he_cap_elem;
+	struct ieee80211_he_mcs_nss_supp *he_mcs;
+	u8 mac_cap_info[HE_MAC_CAP_INFO_SIZE] = { 0 };
+	u8 phy_cap_info[HE_PHY_CAP_INFO_SIZE] = { 0 };
+	u16 capa = 0;
+	u8 hemode = 0;
+	int idx = 1, i = 0, j = 0;
+
+	/* HE mode */
+	
inff_he_get_enable(ifp, &hemode, sizeof(hemode)); + if (!hemode) + return; + + inff_dbg(INFO, "HE Enabled\n"); + + /* HE MAC Capabilities Information */ + if (inff_fil_xtlv_data_get(ifp, "he", INFF_HE_CMD_MACCAP, mac_cap_info, + HE_MAC_CAP_INFO_SIZE)) + iphy_err(drvr, "HE MACCAP error\n"); + + /* HE PHY Capabilities Information */ + if (inff_fil_xtlv_data_get(ifp, "he", INFF_HE_CMD_PHYCAP, phy_cap_info, + HE_PHY_CAP_INFO_SIZE)) + iphy_err(drvr, "HE PHYCAP error\n"); + + /* Update HE Capab for each Band */ + for (i = 0; i < ARRAY_SIZE(wiphy->bands); i++) { + band = wiphy->bands[i]; + if (!band) + continue; + + data = &sdata[band->band]; + he_cap = &data->he_cap; + he_cap_elem = &he_cap->he_cap_elem; + he_mcs = &he_cap->he_mcs_nss_supp; + + switch (band->band) { + case NL80211_BAND_6GHZ: + if (!inff_feat_is_6ghz_enabled(ifp)) + break; + + /* HE 6 GHz band capabilities */ + capa = (FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START, + IEEE80211_HT_MPDU_DENSITY_8) | + FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP, + IEEE80211_VHT_MAX_AMPDU_1024K) | + FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN, + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454)); + data->he_6ghz_capa.capa = cpu_to_le16(capa); + + /* Band 6GHz supports HE, so */ + fallthrough; + + case NL80211_BAND_5GHZ: + /* Band 5GHz supports HE, so */ + fallthrough; + + case NL80211_BAND_2GHZ: + /* Band 2GHz supports HE */ + data->types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP); + data->he_cap.has_he = true; + + for (j = 0; j < HE_MAC_CAP_INFO_SIZE; j++) + he_cap_elem->mac_cap_info[j] = le32_to_cpu(mac_cap_info[j]); + + for (j = 0; j < HE_PHY_CAP_INFO_SIZE; j++) + he_cap_elem->phy_cap_info[j] = le32_to_cpu(phy_cap_info[j]); + + /* HE Supported MCS and NSS Set */ + he_mcs->rx_mcs_80 = cpu_to_le16(0xfffa); + he_mcs->tx_mcs_80 = cpu_to_le16(0xfffa); + + band->n_iftype_data = idx; + band->iftype_data = data; + + break; + + default: + break; + } + } +} diff --git a/drivers/net/wireless/infineon/inffmac/he.h 
b/drivers/net/wireless/infineon/inffmac/he.h new file mode 100644 index 000000000000..4b5feb36b4e0 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/he.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_HE_H +#define INFF_HE_H + +#include "core.h" + +#define HE_MAC_CAP_INFO_SIZE 6 +#define HE_PHY_CAP_INFO_SIZE 11 + +/** + * enum inff_he_cmd - HE iovar subcmds handled by firmware HE module + */ +enum inff_he_cmd { + INFF_HE_CMD_ENAB = 0, + INFF_HE_CMD_FEATURES = 1, + INFF_HE_CMD_TWT_SETUP = 2, + INFF_HE_CMD_TWT_TEARDOWN = 3, + INFF_HE_CMD_TWT_INFO = 4, + INFF_HE_CMD_BSSCOLOR = 5, + INFF_HE_CMD_PARTIAL_BSSCOLOR = 6, + INFF_HE_CMD_CAP = 7, + INFF_HE_CMD_STAID = 8, + INFF_HE_CMD_RTSDURTHRESH = 10, + INFF_HE_CMD_PEDURATION = 11, + INFF_HE_CMD_TESTBED_MODE = 12, + INFF_HE_CMD_OMI = 13, + INFF_HE_CMD_MAC_PAD_DUR = 14, + INFF_HE_CMD_MUEDCA = 15, + INFF_HE_CMD_MACCAP = 16, + INFF_HE_CMD_PHYCAP = 17, + INFF_HE_CMD_DISPLAY = 18, + INFF_HE_CMD_ACTION = 19, + INFF_HE_CMD_OFDMATX = 20, + INFF_HE_CMD_20IN80_MODE = 21, + INFF_HE_CMD_SMPS = 22, + INFF_HE_CMD_PPETHRESH = 23, + INFF_HE_CMD_HTC_OMI_EN = 24, + INFF_HE_CMD_ERSU_EN = 25, + INFF_HE_CMD_PREPUNCRX_EN = 26, + INFF_HE_CMD_MIMOCAP_EN = 27, + INFF_HE_CMD_MUEDCA_OPT = 28, + INFF_HE_CMD_LAST +}; + +int inff_he_get_enable(struct inff_if *ifp, u8 *param, int param_len); +int inff_he_get_bss_color(struct inff_if *ifp, u8 *param, int param_len); +int inff_he_set_bss_color(struct inff_if *ifp, u8 color); +int inff_he_get_muedca_opt(struct inff_if *ifp, u8 *param, int param_len); +int inff_he_set_muedca_opt(struct inff_if *ifp, u8 val); +int inff_he_set_bitrate(struct inff_if *ifp, const struct cfg80211_bitrate_mask *mask, u8 band); +void inff_he_update_wiphy_cap(struct inff_if *ifp); + +#endif /* INFF_HE_H */ -- 2.25.1 Driver implementation for initiating and teardown an 
Individual Target Wake Time (iTWT) session with supported TWT Responder Device. The requests from the userspace for the sessions are handled by the driver through Infineon's Vendor NL80211 commands. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/twt.c | 1200 +++++++++++++++++++ drivers/net/wireless/infineon/inffmac/twt.h | 334 ++++++ 2 files changed, 1534 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/twt.c create mode 100644 drivers/net/wireless/infineon/inffmac/twt.h diff --git a/drivers/net/wireless/infineon/inffmac/twt.c b/drivers/net/wireless/infineon/inffmac/twt.c new file mode 100644 index 000000000000..0e5d598a6765 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/twt.c @@ -0,0 +1,1200 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2023-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include "twt.h" +#include "debug.h" +#include "fwil.h" +#include "feature.h" +#include "bus.h" +#include "cfg80211.h" + +/** + * inff_twt_oper_str - array of twt operations in string + */ +const char *inff_twt_oper_str[INFF_TWT_OPER_MAX] = { + "Setup", + "Teardown" +}; + +/** + * inff_twt_nego_type_str - array of twt Negotiation types in string + */ +const char *inff_twt_nego_type_str[INFF_TWT_PARAM_NEGO_TYPE_MAX] = { + "iTWT", + "Wake TBTT", + "bTWT IE BCN", + "bTWT" +}; + +/** + * inff_twt_setup_cmd_str - array of twt setup commands in string + */ +const char *inff_twt_setup_cmd_str[INFF_TWT_OPER_SETUP_CMD_TYPE_MAX] = { + "Request", + "Suggest", + "Demand", + "Grouping", + "Accept", + "Alternate", + "Dictate", + "Reject" +}; + +/** + * inff_twt_sess_state_str - array of twt session states in string + */ +const char *inff_twt_sess_state_str[INFF_TWT_SESS_STATE_MAX] = { + "Unspec", + "Setup inprogress", + "Setup incomplete", + "Setup complete", + "Teardown inprogress", + "Teardown incomplete", + "Teardown complete" +}; + +/** + * 
inff_twt_wake_dur_to_min_twt() - Nominal Minimum Wake Duration derivation from Wake Duration + * + * @wake_dur: Wake Duration input. + * @min_twt_unit: Nomial Minimum Wake Duration Unit input. + * + * return: Nominal Minimum Wake Duration in units of min_twt_unit. + */ +static inline u8 +inff_twt_wake_dur_to_min_twt(u32 wake_dur, u8 min_twt_unit) +{ + u8 min_twt; + + if (min_twt_unit) { + /* + * If min_twt_unit is 1, then min_twt is + * in units of TUs (i.e) 1024 uS. + */ + min_twt = wake_dur / WAKE_DUR_UNIT_TU; + } else { + /* + * If min_twt_unit is 0, then min_twt is + * in units of 256 uS. + */ + min_twt = wake_dur / WAKE_DUR_UNIT_DEF; + } + + return min_twt; +} + +/** + * inff_twt_min_twt_to_wake_dur() - Derive Wake Duration from the + * Nominal Minimum Wake Duration + * + * @min_twt: Nominal Minimum Wake Duration input. + * @min_twt_unit: Nomial Minimum Wake Duration Unit input. + * 0 - 256 uS + * 1 - 1TU (or) 1024 uS + * + * return: Wake Duration in unit of microseconds. + */ +static inline u32 +inff_twt_min_twt_to_wake_dur(u8 min_twt, u8 min_twt_unit) +{ + u32 wake_dur; + + if (min_twt_unit) { + /* + * If min_twt_unit is 1, then min_twt is + * in units of TUs (i.e) 1024 uS. + */ + wake_dur = (u32)min_twt * WAKE_DUR_UNIT_TU; + } else { + /* + * If min_twt_unit is 0, then min_twt is + * in units of 256 uS. + */ + wake_dur = (u32)min_twt * WAKE_DUR_UNIT_DEF; + } + + return wake_dur; +} + +/** + * inff_twt_u32_to_float() - Derive Wake Interval Mantissa and Exponent + * from the Wake Interval + * + * @wake_int: Wake Interval input in microseconds. + * @exponent: pointer to Wake Interval Exponent output. + * @mantissa: pointer to Wake Interval Mantissa output. + */ +static inline void +inff_twt_u32_to_float(u32 wake_int, u8 *exponent, u16 *mantissa) +{ + u8 lzs = (u8)__builtin_clz(wake_int); /* leading 0's */ + u8 shift = lzs < 16 ? 
16 - lzs : 0; + + *mantissa = (u16)(wake_int >> shift); + *exponent = shift; +} + +/** + * inff_twt_float_to_u32() - Derive Wake Interval derivation from + * Wake Interval Mantissa & Exponent. + * + * @exponent: Wake Interval Exponent input. + * @mantissa: Wake Interval Mantissa input. + * + * return: Wake interval in unit of microseconds. + */ +static inline u32 +inff_twt_float_to_u32(u8 exponent, u16 mantissa) +{ + return (u32)mantissa << exponent; +} + +/** + * inff_twt_get_next_dialog_token() - Return the next available Dialog token. + * + * return: Dialog token in u8. + */ +static inline u8 +inff_twt_get_next_dialog_token(void) +{ + static u8 dialog_token; + + /* Continuous iteratation in the range 1-255 */ + dialog_token = ((dialog_token + 0x1) % 0x100) ? : 1; + + return dialog_token; +} + +/** + * inff_twt_stats_read() - Read the contents of the debugfs file "twt_stats". + * + * @seq: sequence for debugfs entry. + * @data: raw data pointer. + * + * return: 0. + */ +static int +inff_twt_stats_read(struct seq_file *seq, void *data) +{ + struct inff_bus *bus_if = dev_get_drvdata(seq->private); + struct inff_pub *drvr = bus_if->drvr; + int i; + + /* Return if the if TWT is not supported by Firmware */ + if (!(drvr->feat_flags[INFF_FEAT_TWT / 8] & BIT(INFF_FEAT_TWT % 8))) + return 0; + + /* Iterate the interface list in struct inff_pub */ + for (i = 0; i < INFF_MAX_IFS; i++) { + struct inff_if *ifp = drvr->iflist[i]; + struct inff_twt_session *twt_sess; + + /* Skip interface if TWT session list in struct inff_if is empty */ + if (!ifp || list_empty(&ifp->twt_sess_list)) + continue; + + seq_printf(seq, "ifname: %s, ifidx: %u, bsscfgidx: %d\n", + inff_ifname(ifp), ifp->ifidx, ifp->bsscfgidx); + + /* Iterate the TWT session list in struct inff_if */ + list_for_each_entry(twt_sess, &ifp->twt_sess_list, list) { + struct inff_twt_params *twt_params; + u32 wake_dur, wake_int; + + twt_params = &twt_sess->twt_params; + + wake_dur = 
inff_twt_min_twt_to_wake_dur(twt_params->min_twt, + twt_params->min_twt_unit); + wake_int = inff_twt_float_to_u32(twt_params->exponent, + twt_params->mantissa); + + if (twt_params->negotiation_type == INFF_TWT_PARAM_NEGO_TYPE_ITWT) + seq_printf(seq, "\tiTWT, Flow ID: %u, Dialog Token: %u\n", + twt_params->flow_id, twt_params->dialog_token); + else if (twt_params->negotiation_type == INFF_TWT_PARAM_NEGO_TYPE_BTWT) + seq_printf(seq, "\tbTWT, Bcast TWT ID: %u, Dialog Token: %u\n", + twt_params->bcast_twt_id, twt_params->dialog_token); + else + continue; + + seq_printf(seq, "\t\tSession state : %s\n", + inff_twt_sess_state_str[twt_sess->state]); + seq_printf(seq, "\t\tTWT peer : %pM\n", + twt_sess->peer_addr.octet); + seq_printf(seq, "\t\tTarget Wake Time : %llu uS\n", + twt_params->twt); + seq_printf(seq, "\t\tWake Duration : %u uS\n", + wake_dur); + seq_printf(seq, "\t\tWake Interval : %u uS\n", + wake_int); + seq_printf(seq, "\t\tSession type : %s, %s, %s\n\n", + twt_params->implicit ? "Implicit" : "Explicit", + twt_params->trigger ? "Trigger based" : "Non-Trigger based", + twt_params->flow_type ? "Un-Announced" : "Announced"); + } + } + return 0; +} + +/** + * inff_twt_debugfs_create() - create debugfs entries. + * + * @drvr: driver instance. + */ +void +inff_twt_debugfs_create(struct inff_pub *drvr) +{ + inff_debugfs_add_entry(drvr, "twt_stats", inff_twt_stats_read); +} + +/** + * inff_twt_cleanup_all_sess - Cleanup all TWT sessions from the driver list. + * + * @ifp: interface instatnce. + * + * return: 0 on success, value < 0 on failure. 
+ */
+s32
+inff_twt_cleanup_all_sess(struct inff_if *ifp)
+{
+	struct inff_twt_session *entry = NULL, *next = NULL;
+	s32 ret = 0;
+
+	if (!ifp) {
+		inff_err("TWT: Failed to cleanup sessions");
+		return -EIO;
+	}
+
+	spin_lock(&ifp->twt_sess_list_lock);
+
+	list_for_each_entry_safe(entry, next, &ifp->twt_sess_list, list) {
+		inff_dbg(TWT, "TWT: Deleting session(%u) with peer %pM",
+			 entry->twt_params.flow_id, entry->peer_addr.octet);
+		list_del(&entry->list);
+		kfree(entry);
+	}
+
+	spin_unlock(&ifp->twt_sess_list_lock);
+
+	return ret;
+}
+
+/**
+ * inff_twt_lookup_sess_by_dialog_token() - Lookup a TWT session information from
+ * the driver list based on the Dialog Token.
+ *
+ * @ifp: interface instance
+ * @dialog_token: TWT session Dialog Token
+ *
+ * return: Pointer to a TWT session instance if lookup is successful, NULL on failure.
+ */
+static struct inff_twt_session *
+inff_twt_lookup_sess_by_dialog_token(struct inff_if *ifp, u8 dialog_token)
+{
+	struct inff_twt_session *iter = NULL;
+
+	if (list_empty(&ifp->twt_sess_list))
+		return NULL;
+
+	list_for_each_entry(iter, &ifp->twt_sess_list, list)
+		if (iter->twt_params.dialog_token == dialog_token)
+			return iter;
+
+	return NULL;
+}
+
+/**
+ * inff_itwt_lookup_sess_by_flowid() - Lookup an iTWT session information from
+ * the driver list based on the Flow ID.
+ *
+ * @ifp: interface instance
+ * @flow_id: iTWT session Flow ID
+ *
+ * return: Pointer to a TWT session instance if lookup is successful, NULL on failure.
+ */ +static struct inff_twt_session * +inff_itwt_lookup_sess_by_flowid(struct inff_if *ifp, u8 flow_id) +{ + struct inff_twt_session *iter = NULL; + + if (list_empty(&ifp->twt_sess_list)) + return NULL; + + list_for_each_entry(iter, &ifp->twt_sess_list, list) { + if (iter->twt_params.negotiation_type != INFF_TWT_PARAM_NEGO_TYPE_ITWT) + continue; + + if (iter->twt_params.flow_id == flow_id) + return iter; + } + + return NULL; +} + +/** + * inff_twt_update_sess_state() - Update the state of the TWT Session in the driver list + * + * @ifp: interface instance. + * @twt_sess: TWT session to be updated. + * @state: TWT session state, Refer enum inff_twt_session_state. + * @err_msg: print this message if error happened. + * @flow_id: print flow id if error happened. + * + * return: 0 on successful updation, value < 0 on failure. + */ +static s32 +inff_twt_update_sess_state(struct inff_if *ifp, struct inff_twt_session *twt_sess, + enum inff_twt_session_state state, u8 *err_msg, u8 flow_id) +{ + s32 ret = 0; + + if (!twt_sess) { + inff_err("TWT: %s: Failed to update session(%u) with state(%s)", + err_msg, flow_id, + inff_twt_sess_state_str[state]); + ret = -EINVAL; + goto exit; + } + + spin_lock(&ifp->twt_sess_list_lock); + + twt_sess->state = state; + inff_dbg(TWT, "TWT: updated session(%u) with peer %pM, state(%s)", + twt_sess->twt_params.flow_id, twt_sess->peer_addr.octet, + inff_twt_sess_state_str[twt_sess->state]); + + spin_unlock(&ifp->twt_sess_list_lock); +exit: + return ret; +} + +/** + * inff_twt_update_sess() - Update TWT session info in the driver list. + * + * @ifp: interface instance. + * @twt_sess: TWT session to be updated. + * @peer_addr: TWT peer address. + * @state: TWT session state, Refer enum inff_twt_session_state. + * @twt_params: TWT session parameters. + * + * return: 0 on successful updation, value < 0 on failure. 
+ */ +static s32 +inff_twt_update_sess(struct inff_if *ifp, struct inff_twt_session *twt_sess, + const u8 *peer_addr, enum inff_twt_session_state state, + struct inff_twt_params *twt_params) +{ + s32 ret = 0; + + if (!twt_sess) { + inff_dbg(TWT, "TWT: session is not available to update"); + ret = -EINVAL; + goto exit; + } + + spin_lock(&ifp->twt_sess_list_lock); + + memcpy(twt_sess->peer_addr.octet, peer_addr, ETH_ALEN); + twt_sess->state = state; + memcpy(&twt_sess->twt_params, twt_params, + sizeof(struct inff_twt_params)); + + inff_dbg(TWT, "TWT: updated session(%u) with peer %pM, state(%s)", + twt_sess->twt_params.flow_id, + twt_sess->peer_addr.octet, + inff_twt_sess_state_str[twt_sess->state]); + + spin_unlock(&ifp->twt_sess_list_lock); +exit: + return ret; +} + +/** + * inff_twt_del_sess() - Delete a TWT sesssion info from the driver list. + * + * @ifp: interface instance. + * @twt_sess: TWT session to be deleted. + * + * return: 0 on successful deletion, value < 0 on failure. + */ +static s32 +inff_twt_del_sess(struct inff_if *ifp, struct inff_twt_session *twt_sess) +{ + s32 ret = 0; + u8 flow_id; + u8 peer_addr[ETH_ALEN]; + + if (!twt_sess) { + inff_dbg(TWT, "TWT: session is not available to delete"); + ret = -EINVAL; + goto exit; + } + + spin_lock(&ifp->twt_sess_list_lock); + + flow_id = twt_sess->twt_params.flow_id; + memcpy(peer_addr, twt_sess->peer_addr.octet, ETH_ALEN); + + list_del(&twt_sess->list); + kfree(twt_sess); + + inff_dbg(TWT, "TWT: Deleted session(%u) with peer %pM", + flow_id, peer_addr); + + spin_unlock(&ifp->twt_sess_list_lock); +exit: + return ret; +} + +/** + * inff_twt_add_sess() - Add a TWT session info to the driver list. + * + * @ifp: interface instance. + * @peer_addr: TWT peer address. + * @state: TWT session state, Refer enum inff_twt_session_state. + * @twt_params: TWT session parameters. + * + * return: 0 on successful addition, value < 0 on failure. 
+ */ +static s32 +inff_twt_add_sess(struct inff_if *ifp, const u8 *peer_addr, + enum inff_twt_session_state state, + struct inff_twt_params *twt_params) +{ + struct inff_twt_session *new_twt_sess; + s32 ret = 0; + + new_twt_sess = kzalloc(sizeof(*new_twt_sess), GFP_ATOMIC); + if (!new_twt_sess) { + ret = -ENOMEM; + goto exit; + } + + new_twt_sess->ifidx = ifp->ifidx; + new_twt_sess->bsscfgidx = ifp->bsscfgidx; + new_twt_sess->state = state; + + memcpy(new_twt_sess->peer_addr.octet, peer_addr, ETH_ALEN); + memcpy(&new_twt_sess->twt_params, twt_params, + sizeof(struct inff_twt_params)); + + spin_lock(&ifp->twt_sess_list_lock); + + list_add_tail(&new_twt_sess->list, &ifp->twt_sess_list); + inff_dbg(TWT, "TWT: Added session(%u) with peer %pM, state (%s)", + new_twt_sess->twt_params.flow_id, + new_twt_sess->peer_addr.octet, + inff_twt_sess_state_str[new_twt_sess->state]); + + spin_unlock(&ifp->twt_sess_list_lock); +exit: + return ret; +} + +/** + * inff_twt_event_timeout_handler - Iterate the session list and handle stale + * TWT session entries which are failed to move to next state in FSM. + * + * @t: timer instance. + */ +void inff_twt_event_timeout_handler(struct timer_list *t) +{ + struct inff_if *ifp = timer_container_of(ifp, t, twt_evt_timeout); + struct inff_twt_session *twt_sess = NULL, *next = NULL; + unsigned long curr_ts = jiffies; + s32 ret = 0; + + list_for_each_entry_safe(twt_sess, next, &ifp->twt_sess_list, list) { + /* For this session entry, Skip if the time since the TWT cmd sent to the + * Firmware does not exceed the Event timeout configured. 
+ */ + if (time_after(twt_sess->oper_start_ts + INFF_TWT_EVENT_TIMEOUT, curr_ts)) + continue; + + switch (twt_sess->state) { + case INFF_TWT_SESS_STATE_SETUP_INPROGRESS: + ret = inff_twt_update_sess_state(ifp, twt_sess, + INFF_TWT_SESS_STATE_SETUP_INCOMPLETE, + "Setup TIMEOUT", + twt_sess->twt_params.flow_id); + if (ret) + continue; + + break; + case INFF_TWT_SESS_STATE_TEARDOWN_INPROGRESS: + ret = inff_twt_update_sess_state(ifp, twt_sess, + INFF_TWT_SESS_STATE_TEARDOWN_INCOMPLETE, + "Teardown TIMEOUT", + twt_sess->twt_params.flow_id); + if (ret) + continue; + + break; + default: + continue; + } + + ret = inff_twt_del_sess(ifp, twt_sess); + if (ret) { + inff_err("TWT: Failed to Delete session(%u) from list", + twt_sess->twt_params.flow_id); + break; + } + + inff_dbg(TWT, "TWT: Cleared stale session(%u) with peer %pM, state(%s)", + twt_sess->twt_params.flow_id, twt_sess->peer_addr.octet, + inff_twt_sess_state_str[twt_sess->state]); + } +} + +/** + * inff_twt_setup_event_handler() - Handle the TWT Setup Event notification from Firmware. + * + * @ifp: interface instatnce. + * @e: event message. + * @data: payload of message, contains TWT session data. + * + * return: 0 on success, value < 0 on failure. 
+ */ +static s32 +inff_twt_setup_event_handler(struct inff_if *ifp, const struct inff_event_msg *e, + void *data) +{ + struct inff_twt_setup_event *setup_event; + struct inff_twt_sdesc *setup_desc; + struct inff_twt_session *twt_sess = NULL; + struct inff_twt_params twt_params; + bool unsolicited_setup = false; + s32 ret = 0; + + setup_event = (struct inff_twt_setup_event *)data; + setup_desc = (struct inff_twt_sdesc *) + (data + sizeof(struct inff_twt_setup_event)); + + /* TWT Negotiation_type */ + twt_params.negotiation_type = setup_desc->negotiation_type; + + /* Dialog Token */ + twt_params.dialog_token = setup_event->dialog; + + switch (twt_params.negotiation_type) { + case INFF_TWT_PARAM_NEGO_TYPE_ITWT: + /* Flow ID */ + twt_params.flow_id = setup_desc->flow_id; + + /* Lookup the session list for the flow ID in the Setup Response */ + twt_sess = inff_itwt_lookup_sess_by_flowid(ifp, twt_params.flow_id); + if (!twt_sess) + twt_sess = inff_twt_lookup_sess_by_dialog_token(ifp, + twt_params.dialog_token); + + /* If this device requested for session setup, a session entry with + * state(setup inprogess) would be already available, else this is an + * Unsolicited Setup Response from the peer TWT device. + */ + if (!twt_sess || twt_sess->state != INFF_TWT_SESS_STATE_SETUP_INPROGRESS) + unsolicited_setup = true; + + break; + case INFF_TWT_PARAM_NEGO_TYPE_BTWT: + /* Broadcast TWT ID */ + twt_params.bcast_twt_id = setup_desc->bid; + + /* TODO: Handle the Broadcast TWT Setup Event */ + fallthrough; + default: + inff_err("TWT: Setup EVENT: Negotiation Type(%s) not handled", + inff_twt_nego_type_str[twt_params.negotiation_type]); + ret = -EOPNOTSUPP; + goto exit; + } + + /* Setup Event */ + twt_params.setup_cmd = setup_desc->setup_cmd; + + /* Flowflags */ + twt_params.implicit = (setup_desc->flow_flags & INFF_TWT_FLOW_FLAG_IMPLICIT) ? 1 : 0; + twt_params.flow_type = (setup_desc->flow_flags & INFF_TWT_FLOW_FLAG_UNANNOUNCED) ? 
1 : 0; + twt_params.trigger = (setup_desc->flow_flags & INFF_TWT_FLOW_FLAG_TRIGGER) ? 1 : 0; + twt_params.requestor = (setup_desc->flow_flags & INFF_TWT_FLOW_FLAG_REQUEST) ? 1 : 0; + twt_params.protection = (setup_desc->flow_flags & INFF_TWT_FLOW_FLAG_PROTECT) ? 1 : 0; + + /* Target Wake Time */ + twt_params.twt = le64_to_cpu((u64)setup_desc->wake_time_h << 32) | + le64_to_cpu((u64)setup_desc->wake_time_l); + + /* Wake Duration or Service Period */ + twt_params.min_twt_unit = 0; + twt_params.min_twt = + inff_twt_wake_dur_to_min_twt(le32_to_cpu(setup_desc->wake_dur), + twt_params.min_twt_unit); + + /* Wake Interval or Service Interval */ + inff_twt_u32_to_float(le32_to_cpu(setup_desc->wake_int), + &twt_params.exponent, &twt_params.mantissa); + + inff_dbg(TWT, "TWT: Setup EVENT: %sResponse with cmd(%s) from peer %pM", + unsolicited_setup ? "Un-Solicited " : "", + inff_twt_setup_cmd_str[setup_desc->setup_cmd], e->addr); + + switch (setup_desc->setup_cmd) { + case TWT_SETUP_CMD_REQUEST: + fallthrough; + case TWT_SETUP_CMD_SUGGEST: + fallthrough; + case TWT_SETUP_CMD_DEMAND: + fallthrough; + case TWT_SETUP_CMD_GROUPING: + ret = -EOPNOTSUPP; + goto exit; + case TWT_SETUP_CMD_ACCEPT: + if (!twt_sess) + ret = inff_twt_add_sess(ifp, e->addr, + INFF_TWT_SESS_STATE_SETUP_COMPLETE, + &twt_params); + else + ret = inff_twt_update_sess(ifp, twt_sess, e->addr, + INFF_TWT_SESS_STATE_SETUP_COMPLETE, + &twt_params); + break; + case TWT_SETUP_CMD_ALTERNATE: + fallthrough; + case TWT_SETUP_CMD_DICTATE: + ret = -EOPNOTSUPP; + goto exit; + case TWT_SETUP_CMD_REJECT: + if (!twt_sess) + /* Bail out, since nothing to handle on receiving Un-Solicited + * Reject from the TWT peer for an un-available TWT session. 
+ */ + break; + + ret = inff_twt_update_sess_state(ifp, twt_sess, + INFF_TWT_SESS_STATE_SETUP_INCOMPLETE, + "Setup EVENT", twt_params.flow_id); + if (ret) + goto exit; + + ret = inff_twt_del_sess(ifp, twt_sess); + + break; + default: + ret = -EOPNOTSUPP; + goto exit; + } + + if (ret) { + inff_err("TWT: Setup EVENT: Failed to add/update/del session(%u) with peer %pM", + twt_params.flow_id, e->addr); + goto exit; + } + + inff_dbg(TWT, "TWT: Setup EVENT: Session %s\n" + "Dialog Token : %u\n" + "Setup command : %s\n" + "Flow flags : 0x %02x\n" + "Flow ID : %u\n" + "Broadcast TWT ID : %u\n" + "Wake Time H,L : 0x %08x %08x\n" + "Wake Type : %u\n" + "Wake Duration : %u uS\n" + "Wake Interval : %u uS\n" + "Negotiation type : %s\n", + inff_twt_sess_state_str[twt_sess->state], setup_event->dialog, + inff_twt_setup_cmd_str[setup_desc->setup_cmd], setup_desc->flow_flags, + setup_desc->flow_id, setup_desc->bid, setup_desc->wake_time_h, + setup_desc->wake_time_l, setup_desc->wake_type, setup_desc->wake_dur, + setup_desc->wake_int, inff_twt_nego_type_str[setup_desc->negotiation_type]); +exit: + return ret; +} + +/** + * inff_twt_teardown_event_handler() - Handle the TWT Teardown Event notification from Firmware. + * + * @ifp: interface instatnce. + * @e: event message. + * @data: payload of message, contains TWT session data. + * + * return: 0 on success, value < 0 on failure. 
+ */ +static s32 +inff_twt_teardown_event_handler(struct inff_if *ifp, const struct inff_event_msg *e, + void *data) +{ + struct inff_twt_teardown_event *teardown_event; + struct inff_twt_teardesc *teardown_desc; + struct inff_twt_session *twt_sess = NULL; + struct inff_twt_params twt_params; + bool unsolicited_teardown = false; + s32 ret = 0; + + teardown_event = (struct inff_twt_teardown_event *)data; + teardown_desc = (struct inff_twt_teardesc *) + (data + sizeof(struct inff_twt_teardown_event)); + + /* TWT Negotiation_type */ + twt_params.negotiation_type = teardown_desc->negotiation_type; + + /* Teardown all Negotiated TWT */ + twt_params.teardown_all_twt = teardown_desc->alltwt; + if (twt_params.teardown_all_twt) { + ret = inff_twt_cleanup_all_sess(ifp); + goto exit; + } + + switch (twt_params.negotiation_type) { + case INFF_TWT_PARAM_NEGO_TYPE_ITWT: + /* Flow ID */ + twt_params.flow_id = teardown_desc->flow_id; + + /* Lookup the session list for the received flow ID */ + twt_sess = inff_itwt_lookup_sess_by_flowid(ifp, twt_params.flow_id); + + /* If this device requested for session Teardown, a session entry with + * state(setup inprogess) would be already available, else this is an + * Unsolicited Teardown Response from the peer TWT device. + */ + if (!twt_sess || twt_sess->state != INFF_TWT_SESS_STATE_SETUP_INPROGRESS) + unsolicited_teardown = true; + + break; + case INFF_TWT_PARAM_NEGO_TYPE_BTWT: + /* Broadcast TWT ID */ + twt_params.bcast_twt_id = teardown_desc->bid; + + /* TODO: Handle the Broadcast TWT Teardown Event */ + fallthrough; + default: + inff_err("TWT: Teardown EVENT: Negotiation Type(%s) not handled\n", + inff_twt_nego_type_str[twt_params.negotiation_type]); + ret = -EOPNOTSUPP; + goto exit; + } + + inff_dbg(TWT, "TWT: Teardown EVENT: %sResponse from peer %pM", + unsolicited_teardown ? 
"Un-Solicited " : "", e->addr); + + if (!twt_sess) { + inff_dbg(TWT, "TWT: Teardown EVENT: Un-available session(%u) for deletion", + twt_params.flow_id); + ret = -EINVAL; + goto exit; + } + + ret = inff_twt_update_sess_state(ifp, twt_sess, + INFF_TWT_SESS_STATE_TEARDOWN_COMPLETE, + "Teardown EVENT", twt_params.flow_id); + if (ret) + goto exit; + + ret = inff_twt_del_sess(ifp, twt_sess); + if (ret) { + inff_err("TWT: Teardown EVENT: Failed to Delete session from list"); + goto exit; + } + + inff_dbg(TWT, "TWT: Teardown EVENT: Session %s\n" + "Flow ID : %u\n" + "Broadcast TWT ID : %u\n" + "Negotiation type : %s\n" + "Teardown all TWT : %u\n", + inff_twt_sess_state_str[twt_sess->state], teardown_desc->flow_id, + teardown_desc->bid, inff_twt_nego_type_str[teardown_desc->negotiation_type], + teardown_desc->alltwt); +exit: + return ret; +} + +/** + * inff_notify_twt_event() - Handle the TWT Event notifications from Firmware. + * + * @ifp: interface instatnce. + * @e: event message. + * @data: payload of message, contains TWT session data. + * + * return: 0 on success, value < 0 on failure. + */ +s32 +inff_notify_twt_event(struct inff_if *ifp, const struct inff_event_msg *e, void *data) +{ + s32 ret; + + if (!ifp) { + ret = -EIO; + goto exit; + } + + switch (e->event_code) { + case INFF_E_TWT_SETUP: + ret = inff_twt_setup_event_handler(ifp, e, data); + if (ret) { + inff_err("TWT: EVENT: Failed to handle TWT Setup event"); + goto exit; + } + break; + case INFF_E_TWT_TEARDOWN: + ret = inff_twt_teardown_event_handler(ifp, e, data); + if (ret) { + inff_err("TWT: EVENT: Failed to handle TWT Teardown event"); + goto exit; + } + break; + default: + inff_err("TWT: EVENT: Received event %d not handeled", e->event_code); + ret = -EOPNOTSUPP; + goto exit; + } + +exit: + return ret; +} + +/** + * inff_twt_setup_oper_handler() - Handle the TWT Setup Operation request from Userspace. + * + * @ifp: interface instance. + * @twt_params: TWT session parameters. 
+ * + * return: 0 on success, value < 0 on failure. + */ +static s32 +inff_twt_setup_oper_handler(struct inff_if *ifp, struct inff_twt_params twt_params) +{ + struct inff_cfg80211_vif *vif = ifp->vif; + struct inff_twt_setup_oper val; + struct inff_twt_session *twt_sess = NULL; + s32 ret; + + memset(&val, 0, sizeof(val)); + val.version = INFF_TWT_SETUP_VER; + val.length = sizeof(val.version) + sizeof(val.length); + + /* Default values, Override Below */ + val.sdesc.flow_flags = 0x0; + val.sdesc.wake_dur = 0xFFFFFFFF; + val.sdesc.wake_int = 0xFFFFFFFF; + val.sdesc.wake_int_max = 0xFFFFFFFF; + + /* TWT Negotiation_type */ + val.sdesc.negotiation_type = (u8)twt_params.negotiation_type; + + switch (val.sdesc.negotiation_type) { + case INFF_TWT_PARAM_NEGO_TYPE_ITWT: + /* Flow ID */ + if ((twt_params.flow_id >= 0x0 && twt_params.flow_id <= 0x7)) { + /* Lookup the session list for the requested flow ID */ + val.sdesc.flow_id = twt_params.flow_id; + twt_sess = inff_itwt_lookup_sess_by_flowid(ifp, twt_params.flow_id); + if (twt_sess) { + inff_err("TWT: Setup REQ: Skipping"); + inff_err(" session(%u) entry is already available with state(%s)", + twt_params.flow_id, + inff_twt_sess_state_str[twt_sess->state]); + ret = -EINVAL; + goto exit; + } + } else if (twt_params.flow_id == 0xFF) { + /* Let the Firmware choose the Flow ID */ + val.sdesc.flow_id = twt_params.flow_id; + } else { + inff_err("TWT: Setup REQ: flow ID: %d is invalid", + twt_params.flow_id); + ret = -EINVAL; + goto exit; + } + break; + case INFF_TWT_PARAM_NEGO_TYPE_BTWT: + /* Broadcast TWT ID */ + val.sdesc.bid = twt_params.bcast_twt_id; + + /* TODO: Handle the Broadcast TWT Setup REQ */ + fallthrough; + default: + inff_err("TWT: Setup REQ: Negotiation Type(%s) not handled", + inff_twt_nego_type_str[twt_params.negotiation_type]); + ret = -EOPNOTSUPP; + goto exit; + } + + /* Setup command */ + val.sdesc.setup_cmd = twt_params.setup_cmd; + + /* Flow flags */ + val.sdesc.flow_flags |= ((twt_params.negotiation_type 
& 0x02) >> 1 ? + INFF_TWT_FLOW_FLAG_BROADCAST : 0); + val.sdesc.flow_flags |= (twt_params.implicit ? INFF_TWT_FLOW_FLAG_IMPLICIT : 0); + val.sdesc.flow_flags |= (twt_params.flow_type ? INFF_TWT_FLOW_FLAG_UNANNOUNCED : 0); + val.sdesc.flow_flags |= (twt_params.trigger ? INFF_TWT_FLOW_FLAG_TRIGGER : 0); + val.sdesc.flow_flags |= ((twt_params.negotiation_type & 0x01) ? + INFF_TWT_FLOW_FLAG_WAKE_TBTT_NEGO : 0); + val.sdesc.flow_flags |= (twt_params.requestor ? INFF_TWT_FLOW_FLAG_REQUEST : 0); + val.sdesc.flow_flags |= (twt_params.protection ? INFF_TWT_FLOW_FLAG_PROTECT : 0); + + if (twt_params.twt) { + /* Target Wake Time parameter */ + val.sdesc.wake_time_h = cpu_to_le32((u32)(twt_params.twt >> 32)); + val.sdesc.wake_time_l = cpu_to_le32((u32)(twt_params.twt)); + val.sdesc.wake_type = INFF_TWT_WAKE_TIME_TYPE_BSS; + } else if (twt_params.twt_offset) { + /* Target Wake Time offset parameter */ + val.sdesc.wake_time_h = cpu_to_le32((u32)(twt_params.twt_offset >> 32)); + val.sdesc.wake_time_l = cpu_to_le32((u32)(twt_params.twt_offset)); + val.sdesc.wake_type = INFF_TWT_WAKE_TIME_TYPE_OFFSET; + } else { + /* Let the Firmware choose the Target Wake Time */ + val.sdesc.wake_time_h = 0x0; + val.sdesc.wake_time_l = 0x0; + val.sdesc.wake_type = INFF_TWT_WAKE_TIME_TYPE_AUTO; + } + + /* Wake Duration or Service Period */ + val.sdesc.wake_dur = cpu_to_le32(inff_twt_min_twt_to_wake_dur(twt_params.min_twt, + twt_params.min_twt_unit)); + + /* Wake Interval or Service Interval */ + val.sdesc.wake_int = cpu_to_le32(inff_twt_float_to_u32(twt_params.exponent, + twt_params.mantissa)); + + /* Override Dialog Token passed from userpace with next available value in Driver */ + twt_params.dialog_token = inff_twt_get_next_dialog_token(); + val.dialog = cpu_to_le16((u16)twt_params.dialog_token); + + /* Send the TWT Setup request to Firmware */ + ret = inff_fil_xtlv_data_set(ifp, "twt", INFF_TWT_CMD_SETUP, + (void *)&val, sizeof(val)); + if (ret < 0) { + inff_err("TWT: Setup REQ: Failed, 
Firmware error(%d)", ret); + goto exit; + } + + /* Add an entry setup with progress state */ + ret = inff_twt_add_sess(ifp, vif->profile.bssid, + INFF_TWT_SESS_STATE_SETUP_INPROGRESS, + &twt_params); + if (ret < 0) { + inff_err("TWT: Setup REQ: Failed to add session"); + goto exit; + } + + /* Schedule the Cleanup timer to handle Setup Completion timeout */ + mod_timer(&ifp->twt_evt_timeout, jiffies + INFF_TWT_EVENT_TIMEOUT); + + inff_dbg(TWT, "TWT: Setup REQ: Session %s\n" + "Dialog Token : %u\n" + "Setup command : %s\n" + "Flow flags : 0x %02x\n" + "Flow ID : %u\n" + "Broadcast TWT ID : %u\n" + "Wake Time H,L : 0x %08x %08x\n" + "Wake Type : %u\n" + "Wake Duration : %u uS\n" + "Wake Interval : %u uS\n" + "Negotiation type : %s\n", + inff_twt_sess_state_str[INFF_TWT_SESS_STATE_SETUP_INPROGRESS], + val.dialog, inff_twt_setup_cmd_str[val.sdesc.setup_cmd], + val.sdesc.flow_flags, val.sdesc.flow_id, val.sdesc.bid, + val.sdesc.wake_time_h, val.sdesc.wake_time_l, val.sdesc.wake_type, + val.sdesc.wake_dur, val.sdesc.wake_int, + inff_twt_nego_type_str[val.sdesc.negotiation_type]); +exit: + return ret; +} + +/** + * inff_twt_teardown_oper_handler() - Handle the TWT Teardown Operation request from Userspace. + * + * @ifp: interface instance. + * @twt_params: TWT session parameters. + * + * return: 0 on success, value < 0 on failure. 
+ */ +static s32 +inff_twt_teardown_oper_handler(struct inff_if *ifp, struct inff_twt_params twt_params) +{ + struct inff_twt_teardown_oper val; + struct inff_twt_session *twt_sess = NULL; + s32 ret; + + memset(&val, 0, sizeof(val)); + val.version = INFF_TWT_TEARDOWN_VER; + val.length = sizeof(val.version) + sizeof(val.length); + + /* TWT Negotiation_type */ + val.teardesc.negotiation_type = (u8)twt_params.negotiation_type; + + /* Teardown All TWT */ + val.teardesc.alltwt = twt_params.teardown_all_twt; + if (val.teardesc.alltwt) { + /* If Teardown all TWT is set, then check if the TWT session is not empty */ + if (list_empty(&ifp->twt_sess_list)) { + inff_err("TWT: Teardown REQ: No active TWT sessions"); + ret = -EINVAL; + goto exit; + } + + /* Reset Flow ID & Bcast TWT ID with a placeholder value */ + twt_params.flow_id = 0xFF; + twt_params.bcast_twt_id = 0xFF; + } + + switch (val.teardesc.negotiation_type) { + case INFF_TWT_PARAM_NEGO_TYPE_ITWT: + /* Flow ID */ + if ((twt_params.flow_id >= 0x0 && twt_params.flow_id <= 0x7)) { + val.teardesc.flow_id = twt_params.flow_id; + + /* Lookup the session list for the requested flow ID */ + twt_sess = inff_itwt_lookup_sess_by_flowid(ifp, twt_params.flow_id); + if (!twt_sess || + twt_sess->state != INFF_TWT_SESS_STATE_SETUP_COMPLETE) { + inff_err("TWT: Teardown REQ: session(%u) is not active", + twt_params.flow_id); + ret = -EINVAL; + goto exit; + } + } else if (twt_params.flow_id == 0xFF) { + val.teardesc.flow_id = twt_params.flow_id; + } else { + inff_err("TWT: Teardown REQ: session(%u) is invalid", + twt_params.flow_id); + ret = -EINVAL; + goto exit; + } + break; + case INFF_TWT_PARAM_NEGO_TYPE_BTWT: + /* Broadcast TWT ID */ + val.teardesc.bid = twt_params.bcast_twt_id; + + /* TODO: Handle the Broadcast TWT Teardown REQ */ + fallthrough; + default: + inff_err("TWT: Teardown REQ: Negotiation Type(%s) not handled", + inff_twt_nego_type_str[twt_params.negotiation_type]); + ret = -EOPNOTSUPP; + goto exit; + } + + /* Send the 
TWT Teardown request to Firmware */ + ret = inff_fil_xtlv_data_set(ifp, "twt", INFF_TWT_CMD_TEARDOWN, + (void *)&val, sizeof(val)); + if (ret < 0) { + inff_err("TWT: Teardown REQ: Failed, Firmware error(%d)", ret); + goto exit; + } + + list_for_each_entry(twt_sess, &ifp->twt_sess_list, list) { + /* Skip updating the state of this session to "Teardown inprogress" + * on one of the following cases + * 1. The "Teardown all" session action is not requested by userspace. + * 2. This session's Flow ID is not explcitly requested for Teardown. + * 3. This session's state is not "setup complete". + * i.e, it is not already active to teardown. + */ + if (!twt_params.teardown_all_twt || + twt_params.flow_id != twt_sess->twt_params.flow_id || + twt_sess->state != INFF_TWT_SESS_STATE_SETUP_COMPLETE) + continue; + + ret = inff_twt_update_sess_state(ifp, twt_sess, + INFF_TWT_SESS_STATE_TEARDOWN_INPROGRESS, + "Teardown REQ", twt_params.flow_id); + if (ret) + goto exit; + } + + /* Schedule the Cleanup timer to handle Teardown Completion timeout */ + mod_timer(&ifp->twt_evt_timeout, jiffies + INFF_TWT_EVENT_TIMEOUT); + + inff_dbg(TWT, "TWT: Teardown REQ: Session %s\n" + "Flow ID : %u\n" + "Broadcast TWT ID : %u\n" + "Negotiation type : %s\n" + "Teardown all TWT : %u\n", + inff_twt_sess_state_str[INFF_TWT_SESS_STATE_TEARDOWN_INPROGRESS], + val.teardesc.flow_id, val.teardesc.bid, + inff_twt_nego_type_str[val.teardesc.negotiation_type], + val.teardesc.alltwt); +exit: + return ret; +} + +/** + * inff_twt_oper() - Handle the TWT Operation requests from Userspace. + * + * @wiphy: wiphy object for cfg80211 interface. + * @wdev: wireless device. + * @twt_params: TWT session parameters. + * + * return: 0 on success, value < 0 on failure. 
 */
s32
inff_twt_oper(struct wiphy *wiphy, struct wireless_dev *wdev,
	      struct inff_twt_params twt_params)
{
	struct inff_cfg80211_vif *vif = NULL;
	struct inff_if *ifp = NULL;
	s32 ret;

	vif = wdev_to_vif(wdev);
	if (!vif) {
		ret = -EIO;
		goto exit;
	}

	ifp = vif->ifp;
	if (!ifp) {
		ret = -EIO;
		goto exit;
	}

	/* Check if TWT feature is supported in the Firmware */
	if (!inff_feat_is_enabled(ifp, INFF_FEAT_TWT)) {
		inff_err("TWT: REQ: Operation(%s) can't be handled, TWT not enabled on VIF(%s)",
			 inff_twt_oper_str[twt_params.twt_oper], inff_ifname(ifp));
		ret = -EOPNOTSUPP;
		goto exit;
	}

	/* Check VIF operating Mode: only a connected STA interface may
	 * negotiate TWT for now; AP mode is a TODO below.
	 */
	switch (wdev->iftype) {
	case NL80211_IFTYPE_STATION:
		if (!test_bit(INFF_VIF_STATUS_CONNECTED, &vif->sme_state)) {
			inff_err("TWT: REQ: Operation(%s) invalid when VIF(%s) not connected with WLAN peer",
				 inff_twt_oper_str[twt_params.twt_oper], inff_ifname(ifp));
			ret = -ENOTCONN;
			goto exit;
		}

		break;
	case NL80211_IFTYPE_AP:
		/* TODO: Handle the TWT operation requests for AP Mode */
		fallthrough;
	default:
		inff_err("TWT: REQ: Operation(%s) not supported on VIF(%s) mode(%u)",
			 inff_twt_oper_str[twt_params.twt_oper], inff_ifname(ifp),
			 wdev->iftype);
		ret = -EOPNOTSUPP;
		goto exit;
	}

	/* TWT Operation: dispatch to the per-operation handler */
	switch (twt_params.twt_oper) {
	case INFF_TWT_OPER_SETUP:
		ret = inff_twt_setup_oper_handler(ifp, twt_params);
		break;
	case INFF_TWT_OPER_TEARDOWN:
		ret = inff_twt_teardown_oper_handler(ifp, twt_params);
		break;
	default:
		inff_err("TWT: REQ: Operation(%s) not supported on VIF(%s)",
			 inff_twt_oper_str[twt_params.twt_oper], inff_ifname(ifp));
		ret = -EOPNOTSUPP;
		goto exit;
	}
exit:
	return ret;
}
diff --git a/drivers/net/wireless/infineon/inffmac/twt.h b/drivers/net/wireless/infineon/inffmac/twt.h
new file mode 100644
index 000000000000..a7aa672ceb75
--- /dev/null
+++ b/drivers/net/wireless/infineon/inffmac/twt.h
@@ -0,0 +1,334 @@
/* SPDX-License-Identifier: ISC */
/*
* Copyright (c) 2023-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_TWT_H +#define INFF_TWT_H + +#include +#include +#include "vendor_inf.h" +#include "core.h" + +/* Min TWT Default Unit */ +#define WAKE_DUR_UNIT_DEF 256 +/* Min TWT Unit in TUs */ +#define WAKE_DUR_UNIT_TU 1024 + +#define INFF_TWT_EVENT_TIMEOUT msecs_to_jiffies(3000) +/** + * enum inff_twt_cmd - TWT iovar subcmds handled by firmware TWT module + * + * @INFF_TWT_CMD_ENAB: Enable the firmware TWT module. + * @INFF_TWT_CMD_SETUP: Setup a TWT session with a TWT peer. + * @INFF_TWT_CMD_TEARDOWN: Teardown the active TWT session with a TWT peer. + */ +enum inff_twt_cmd { + INFF_TWT_CMD_ENAB, + INFF_TWT_CMD_SETUP, + INFF_TWT_CMD_TEARDOWN, +}; + +/* TWT iovar subcmd version */ +#define INFF_TWT_SETUP_VER 0u +#define INFF_TWT_TEARDOWN_VER 0u + +/** + * enum inff_twt_flow_flag - TWT flow flags to be used in TWT iovar setup subcmd + * + * @INFF_TWT_FLOW_FLAG_BROADCAST: Broadcast TWT Session. + * @INFF_TWT_FLOW_FLAG_IMPLICIT: Implcit TWT session type. + * @INFF_TWT_FLOW_FLAG_UNANNOUNCED: Unannounced TWT session type. + * @INFF_TWT_FLOW_FLAG_TRIGGER: Trigger based TWT Session type. + * @INFF_TWT_FLOW_FLAG_WAKE_TBTT_NEGO: Wake TBTT Negotiation type. + * @INFF_TWT_FLOW_FLAG_REQUEST: TWT Session setup requestor. + * @INFF_TWT_FLOW_FLAG_RESPONDER_PM: Not used. + * @INFF_TWT_FLOW_FLAG_UNSOLICITED: Unsolicited TWT Session Setup. + * @INFF_TWT_FLOW_FLAG_PROTECT: Specifies whether Tx within SP is protected, Not used. 
+ */ +enum inff_twt_flow_flag { + INFF_TWT_FLOW_FLAG_BROADCAST = BIT(0), + INFF_TWT_FLOW_FLAG_IMPLICIT = BIT(1), + INFF_TWT_FLOW_FLAG_UNANNOUNCED = BIT(2), + INFF_TWT_FLOW_FLAG_TRIGGER = BIT(3), + INFF_TWT_FLOW_FLAG_WAKE_TBTT_NEGO = BIT(4), + INFF_TWT_FLOW_FLAG_REQUEST = BIT(5), + INFF_TWT_FLOW_FLAG_RESPONDER_PM = BIT(6), + INFF_TWT_FLOW_FLAG_UNSOLICITED = BIT(7), + INFF_TWT_FLOW_FLAG_PROTECT = BIT(8) +}; + +/** + * enum inff_twt_session_state - TWT session state in the Host driver list + * + * @INFF_TWT_SESS_STATE_UNSPEC: Reserved value 0. + * @INFF_TWT_SESS_STATE_SETUP_INPROGRESS: TWT session setup request was sent + * to the Firmware. + * @INFF_TWT_SESS_STATE_SETUP_INCOMPLETE: TWT session setup is incomplete, + * because either the TWT peer did not send a response, or sent a Reject + * response driver received a Reject Setup event from the Firmware. + * @INFF_TWT_SESS_STATE_SETUP_COMPLETE: TWT session setup is complete and received + * setup event from the Firmware. + * @INFF_TWT_SESS_STATE_TEARDOWN_INPROGRESS: TWT session teardown request was sent + * to the Firmware. + * @INFF_TWT_SESS_STATE_TEARDOWN_INCOMPLETE: TWT session teardown event timed out. + * @INFF_TWT_SESS_STATE_TEARDOWN_COMPLETE: TWT session teardown is complete and + * received Teardown event from the Firmware. + * @INFF_TWT_SESS_STATE_MAX: This acts as a the tail of state list. + * Make sure it located at the end of the list. + */ +enum inff_twt_session_state { + INFF_TWT_SESS_STATE_UNSPEC, + INFF_TWT_SESS_STATE_SETUP_INPROGRESS, + INFF_TWT_SESS_STATE_SETUP_INCOMPLETE, + INFF_TWT_SESS_STATE_SETUP_COMPLETE, + INFF_TWT_SESS_STATE_TEARDOWN_INPROGRESS, + INFF_TWT_SESS_STATE_TEARDOWN_INCOMPLETE, + INFF_TWT_SESS_STATE_TEARDOWN_COMPLETE, + INFF_TWT_SESS_STATE_MAX +}; + +/** + * struct inff_twt_params - TWT session parameters + * + * @twt_oper: TWT operation, Refer enum inff_twt_oper. + * @negotiation_type: Negotiation Type, Refer enum inff_twt_param_nego_type. 
+ * @setup_cmd: Setup cmd, Refer enum inff_twt_oper_setup_cmd_type. + * @dialog_token: TWT Negotiation Dialog Token. + * @twt: Target Wake Time. + * @twt_offset: Target Wake Time Offset. + * @min_twt: Nominal Minimum Wake Duration. + * @exponent: Wake Interval Exponent. + * @mantissa: Wake Interval Mantissa. + * @requestor: TWT Session requestor or responder. + * @implicit: implicit or Explicit TWT session. + * @flow_type: Announced or Un-Announced TWT session. + * @flow_id: Flow ID. + * @bcast_twt_id: Broadcast TWT ID. + * @protection: Protection, Not used. + * @twt_channel: TWT Channel, Not used. + * @twt_info_frame_disabled: TWT information frame disabled, Not used. + * @min_twt_unit: Nominal Minimum Wake Duration Unit. + * @teardown_all_twt: Teardown All TWT. + */ +struct inff_twt_params { + enum inff_twt_oper twt_oper; + enum inff_twt_param_nego_type negotiation_type; + enum inff_twt_oper_setup_cmd_type setup_cmd; + u8 dialog_token; + u64 twt; + u64 twt_offset; + u8 min_twt; + u8 exponent; + u16 mantissa; + u8 requestor; + u8 trigger; + u8 implicit; + u8 flow_type; + u8 flow_id; + u8 bcast_twt_id; + u8 protection; + u8 twt_channel; + u8 twt_info_frame_disabled; + u8 min_twt_unit; + u8 teardown_all_twt; +}; + +/** + * struct inff_twt_session - TWT session structure. + * + * @ifidx: interface index. + * @bsscfgidx: bsscfg index. + * @peer: TWT peer address. + * @state: TWT session state, refer enum inff_twt_session_state. + * @twt_params: TWT session parameters. + * @oper_req_ts: TWT session operation (setup, teardown, etc..) start timestamp. + * @list: linked list. + */ +struct inff_twt_session { + u8 ifidx; + s32 bsscfgidx; + struct ether_addr peer_addr; + enum inff_twt_session_state state; + struct inff_twt_params twt_params; + unsigned long oper_start_ts; + struct list_head list; +}; + +/** + * enum inff_twt_wake_time_type - Type of the struct members wake_time_{h/l} in the + * TWT Setup descriptor struct inff_twt_sdesc. 
+ * + * @INFF_TWT_WAKE_TIME_TYPE_BSS: wake_time_{h/l} is the BSS TSF tiume. + * @INFF_TWT_WAKE_TIME_TYPE_OFFSET: wake_time_{h/l} is an offset of TSF time + * when the iovar is processed. + * @INFF_TWT_WAKE_TIME_TYPE_AUTO: The target wake time is chosen internally by the Firmware. + */ +enum inff_twt_wake_time_type { + INFF_TWT_WAKE_TIME_TYPE_BSS, + INFF_TWT_WAKE_TIME_TYPE_OFFSET, + INFF_TWT_WAKE_TIME_TYPE_AUTO +}; + +/** + * struct inff_twt_sdesc - TWT Setup Descriptor. + * + * @setup_cmd: Setup command and event type. Refer enum inff_twt_oper_setup_cmd_type. + * @flow_flags: Flow attributes, Refer enum inff_twt_flow_flag. + * @flow_id: Flow ID, Range 0-7. Set to 0xFF for auto assignment. + * @wake_type: wake_time_{h/l} type, Refer enum inff_twt_wake_time_type. + * @wake_time_h: Target Wake Time, high 32 bits. + * @wake_time_l: Target Wake Time, Low 32 bits. + * @wake_dur: Target Wake Duration in unit of uS. + * @wake_int: Target Wake Interval. + * @btwt_persistence: Broadcast TWT Persistence. + * @wake_int_max: Max Wake interval(uS) for TWT. + * @duty_cycle_min: Min Duty cycle for TWT(Percentage). + * @pad: 1 byte pad. + * @bid: Brodacst TWT ID, Range 0-31. Set to 0xFF for auto assignment. + * @channel: TWT channel - Not used. + * @negotiation_type: Negotiation Type, Refer enum inff_twt_param_nego_type. + * @frame_recomm: Frame recommendation for broadcast TWTs - Not used. + */ +struct inff_twt_sdesc { + u8 setup_cmd; + u8 flow_flags; + u8 flow_id; + u8 wake_type; + u32 wake_time_h; + u32 wake_time_l; + u32 wake_dur; + u32 wake_int; + u32 btwt_persistence; + u32 wake_int_max; + u8 duty_cycle_min; + u8 pad; + u8 bid; + u8 channel; + u8 negotiation_type; + u8 frame_recomm; +}; + +/** + * struct inff_twt_setup_event - TWT Setup Completion event data from firmware TWT module + * + * @version: Structure version. + * @length:the byte count of fields from 'dialog' onwards. + * @dialog: the dialog token user supplied to the TWT setup API. + * @pad: 3 byte Pad. 
+ * @status: Event status.
+ */
+struct inff_twt_setup_event {
+	u16 version;
+	u16 length;
+	u8 dialog;
+	u8 pad[3];
+	s32 status;
+	/* struct inff_twt_sdesc sdesc; */
+};
+
+/**
+ * struct inff_twt_setup_oper - TWT iovar Setup operation subcmd data to firmware TWT module
+ *
+ * @version: Structure version.
+ * @length: data length (starting after this field).
+ * @peer: TWT peer address.
+ * @pad: 2 byte Pad.
+ * @sdesc: TWT setup descriptor.
+ * @dialog: TWT Negotiation Dialog Token.
+ */
+struct inff_twt_setup_oper {
+	u16 version;
+	u16 length;
+	struct ether_addr peer;
+	u8 pad[2];
+	struct inff_twt_sdesc sdesc;
+	u16 dialog;
+};
+
+/**
+ * struct inff_twt_teardesc - TWT Teardown descriptor.
+ *
+ * @negotiation_type: Negotiation Type: Refer enum inff_twt_param_nego_type.
+ * @flow_id: Flow ID: Range 0-7. Set to 0xFF for auto assignment.
+ * @bid: Broadcast TWT ID: Range 0-31. Set to 0xFF for auto assignment.
+ * @alltwt: Teardown all TWT sessions: set to 0 or 1.
+ */
+struct inff_twt_teardesc {
+	u8 negotiation_type;
+	u8 flow_id;
+	u8 bid;
+	u8 alltwt;
+};
+
+/**
+ * struct inff_twt_teardown_event - TWT Teardown Completion event data from firmware TWT module.
+ *
+ * @version: structure version.
+ * @length: the byte count of fields from 'status' onwards.
+ * @status: Event status.
+ */
+struct inff_twt_teardown_event {
+	u16 version;
+	u16 length;
+	s32 status;
+	/* struct inff_twt_teardesc teardesc; */
+};
+
+/**
+ * struct inff_twt_teardown_oper - TWT iovar Teardown operation subcmd data to firmware TWT module.
+ *
+ * @version: structure version.
+ * @length: data length (starting after this field).
+ * @peer: TWT peer address.
+ * @teardesc: TWT Teardown descriptor.
+ */
+struct inff_twt_teardown_oper {
+	u16 version;
+	u16 length;
+	struct ether_addr peer;
+	struct inff_twt_teardesc teardesc;
+};
+
+/**
+ * inff_twt_debugfs_create() - create debugfs entries.
+ *
+ * @drvr: driver instance.
+ */
+void inff_twt_debugfs_create(struct inff_pub *drvr);
+
+/**
+ * inff_twt_cleanup_all_sess - Cleanup all TWT sessions from the driver list.
+ *
+ * @ifp: interface instance.
+ */
+s32 inff_twt_cleanup_all_sess(struct inff_if *ifp);
+
+/**
+ * inff_twt_event_timeout_handler - Iterate the session list and handle stale
+ * TWT session entries that failed to move to the next state in the FSM.
+ *
+ * @t: timer that fired.
+ */
+void inff_twt_event_timeout_handler(struct timer_list *t);
+
+/**
+ * inff_notify_twt_event() - Handle the TWT Event notifications from Firmware.
+ *
+ * @ifp: interface instance.
+ * @e: event message.
+ * @data: payload of message, contains TWT session data.
+ */
+int inff_notify_twt_event(struct inff_if *ifp, const struct inff_event_msg *e,
+			  void *data);
+
+/**
+ * inff_twt_oper() - Handle the TWT Operation requests from Userspace.
+ *
+ * @wiphy: wiphy object for cfg80211 interface.
+ * @wdev: wireless device.
+ * @twt_params: TWT session parameters.
+ */
+int inff_twt_oper(struct wiphy *wiphy, struct wireless_dev *wdev,
+		  struct inff_twt_params twt_params);
+
+#endif /* INFF_TWT_H */
--
2.25.1

Driver implementation of Bluetooth shared SDIO. This feature allows sharing
a common SDIO bus interface between the Bluetooth host Linux driver and the
WLAN host Linux driver for any operations to be done in Infineon's
Wi-Fi + Bluetooth combo chipsets.
Signed-off-by: Gokul Sivakumar
---
 .../infineon/inffmac/bt_shared_sdio.c | 1009 +++++++++++++++++
 .../infineon/inffmac/bt_shared_sdio.h |   35 +
 2 files changed, 1044 insertions(+)
 create mode 100644 drivers/net/wireless/infineon/inffmac/bt_shared_sdio.c
 create mode 100644 drivers/net/wireless/infineon/inffmac/bt_shared_sdio.h

diff --git a/drivers/net/wireless/infineon/inffmac/bt_shared_sdio.c b/drivers/net/wireless/infineon/inffmac/bt_shared_sdio.c
new file mode 100644
index 000000000000..763873083217
--- /dev/null
+++ b/drivers/net/wireless/infineon/inffmac/bt_shared_sdio.c
@@ -0,0 +1,1009 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2023-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG.
+ * All rights reserved.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "bus.h"
+#include "chipcommon.h"
+#include "core.h"
+#include "sdio.h"
+#include "fwil.h"
+#include "common.h"
+#include "bt_shared_sdio.h"
+
+/* make sure the BTS version is the same as the bt driver */
+#define BTS_VER_MAJOR 2
+#define BTS_VER_MINOR 0
+#define BTS_VER_PATCH 0
+#define BTS_VERSION (BTS_VER_MAJOR << 24 | BTS_VER_MINOR << 16 | BTS_VER_PATCH << 8)
+
+/* make sure bt_shared_info is the same as the bt driver */
+struct bt_shared_info {
+	/* bt info */
+	void *bt_data;
+	void (*bt_int_fun)(void *data);
+
+	/* wlan info */
+	void *wlan_bus_if;
+	u16 device_id;
+	u32 enum_addr;
+};
+
+/* list wlan private data below */
+#define SDIOD_ADDR_BOUND 0x1000
+#define SDIOD_ADDR_BOUND_MASK 0xfff
+
+struct inff_bus *glob_bus_if;
+
+#define BTS_MAX_ERR_RECORD_CNT 128
+
+enum bts_err_type {
+	ERR_REG_RB = 0, /* reg read 1 byte error */
+	ERR_REG_WB = 1, /* reg write 1 byte error */
+	ERR_REG_RL = 2, /* reg read 4 bytes error */
+	ERR_REG_WL = 3, /* reg write 4 bytes error */
+	ERR_BUF_RD = 4, /* receive buffer error */
+	ERR_BUF_WT = 5, /* send buffer error */
+	ERR_MEM_RW = 6, /* r/w memory error */
+	ERR_MAX,
+};
+
+struct bts_err_reg {
+	u8 fn;
+	u32 addr;
+	u32 val;
+};
+
+struct bts_err_buf {
+	u32 nbytes;
+};
+
+struct bts_err_mem {
+	bool set;
+	u32 addr;
+	u32 size;
+};
+
+struct bts_cmd_entity {
+	struct list_head list; /* link into bt_if->err_list */
+	enum bts_err_type type;
+	int err;
+	struct timespec64 time;
+	union {
+		struct bts_err_reg reg;
+		struct bts_err_buf buf;
+		struct bts_err_mem mem;
+	} u;
+};
+
+/**
+ * struct inff_bt_if - bt shared SDIO information.
+ *
+ * @bt_data: bt internal structure data
+ * @bt_sdio_int_cb: bt registered interrupt callback function
+ * @use_count: Counter that tracks whether BT is using the bus
+ * @set_bt_reset: set bt reset bit in wlan remove flow
+ * @cnt_attach: number of attach calls (debug)
+ * @cnt_detach: number of detach calls (debug)
+ * @cnt_total_err: number of recorded errors (debug)
+ * @err_list_lock: protects @err_list
+ * @err_list: list of recorded error entries (struct bts_cmd_entity)
+ */
+struct inff_bt_if {
+	void *bt_data;
+	void (*bt_sdio_int_cb)(void *data);
+	u32 use_count; /* Counter for tracking if BT is using the bus */
+	bool set_bt_reset; /* set bt reset bit in wlan remove flow */
+
+	/* debug purpose */
+	u32 cnt_attach; /* number of attach */
+	u32 cnt_detach; /* number of detach */
+	u32 cnt_total_err; /* number of error */
+	spinlock_t err_list_lock;
+	struct list_head err_list;
+};
+
+bool inff_btsdio_inited(struct inff_bus *bus_if)
+{
+	if (!bus_if) {
+		inff_err("bus_if is null\n");
+		return false;
+	}
+
+	if (!bus_if->bt_if)
+		return false;
+
+	return true;
+}
+
+static char *inff_btsdio_err_char(enum bts_err_type type)
+{
+	switch (type) {
+	case ERR_REG_RB:
+		return "REG_RB";
+	case ERR_REG_WB:
+		return "REG_WB";
+	case ERR_REG_RL:
+		return "REG_RL";
+	case ERR_REG_WL:
+		return "REG_WL";
+	case ERR_BUF_RD:
+		return "BUF_RD";
+	case ERR_BUF_WT:
+		return "BUF_WT";
+	case ERR_MEM_RW:
+		return "MEM_RW";
+	default:
+		return "unknown";
+	}
+}
+
+static void inff_btsdio_err_free_all(struct inff_bt_if *bt_if)
+{
+	struct bts_cmd_entity *cmd = NULL;
+	struct bts_cmd_entity *next = NULL;
+
+	if (!bt_if) {
+		inff_err("bt_if is null\n");
+		return;
+	}
+
+	spin_lock(&bt_if->err_list_lock);
+	list_for_each_entry_safe(cmd, next, &bt_if->err_list, list) {
+		list_del(&cmd->list);
+		kfree(cmd);
+	}
+	spin_unlock(&bt_if->err_list_lock);
+}
+ +static int inff_btsdio_cmd_alloc(struct inff_bt_if *bt_if, struct bts_cmd_entity **cmd, + enum bts_err_type type, int err) +{ + if (!bt_if || !cmd) { + inff_err("bt_if(%p) or cmd(%p) is null\n", bt_if, cmd); + return -EINVAL; + } + + if (++bt_if->cnt_total_err > BTS_MAX_ERR_RECORD_CNT) + return -EPERM; + + *cmd = kzalloc(sizeof(**cmd), GFP_KERNEL); + if (!*cmd) { + inff_err("alloc failed\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&(*cmd)->list); + (*cmd)->type = type; + (*cmd)->err = err; + ktime_get_ts64(&(*cmd)->time); + + return 0; +} + +static void inff_btsdio_err_enq(struct inff_bt_if *bt_if, struct bts_cmd_entity *cmd) +{ + if (!bt_if || !cmd) { + inff_err("bt_if(%p) or cmd(%p) is null\n", bt_if, cmd); + return; + } + + spin_lock(&bt_if->err_list_lock); + list_add_tail(&cmd->list, &bt_if->err_list); + spin_unlock(&bt_if->err_list_lock); +} + +static void inff_btsdio_err_reg_record(struct inff_bt_if *bt_if, enum bts_err_type type, + int err, u8 fn, u32 addr, u32 val) +{ + struct bts_cmd_entity *cmd = NULL; + struct bts_err_reg *reg = NULL; + + if (!bt_if) { + inff_err("bt_if is null\n"); + return; + } + + if (inff_btsdio_cmd_alloc(bt_if, &cmd, type, err)) + return; + + reg = &cmd->u.reg; + reg->fn = fn; + reg->addr = addr; + reg->val = val; + inff_err("[%5lld.%06ld] %8s err: %d\taddr: 0x%x\tval: 0x%x\n", + (long long)cmd->time.tv_sec, cmd->time.tv_nsec / NSEC_PER_USEC, + inff_btsdio_err_char(cmd->type), err, addr, val); + + inff_btsdio_err_enq(bt_if, cmd); +} + +static void inff_btsdio_err_buf_record(struct inff_bt_if *bt_if, enum bts_err_type type, + int err, u32 nbytes) +{ + struct bts_cmd_entity *cmd = NULL; + struct bts_err_buf *buf = NULL; + + if (!bt_if) { + inff_err("bt_if is null\n"); + return; + } + + if (inff_btsdio_cmd_alloc(bt_if, &cmd, type, err)) + return; + + buf = &cmd->u.buf; + buf->nbytes = nbytes; + inff_err("[%5lld.%06ld] %8s err: %d\tnbytes: %d\n", + (long long)cmd->time.tv_sec, cmd->time.tv_nsec / NSEC_PER_USEC, + 
inff_btsdio_err_char(cmd->type), err, nbytes); + + inff_btsdio_err_enq(bt_if, cmd); +} + +static void inff_btsdio_err_mem_record(struct inff_bt_if *bt_if, int err, bool set, + u32 addr, u32 size) +{ + struct bts_cmd_entity *cmd = NULL; + struct bts_err_mem *mem = NULL; + + if (!bt_if) { + inff_err("bt_if is null\n"); + return; + } + + if (inff_btsdio_cmd_alloc(bt_if, &cmd, ERR_MEM_RW, err)) + return; + + mem = &cmd->u.mem; + mem->set = set; + mem->addr = addr; + mem->size = size; + inff_err("[%5lld.%06ld] %8s err: %d\tset: %d\taddr: 0x%x\tsize: %d\n", + (long long)cmd->time.tv_sec, cmd->time.tv_nsec / NSEC_PER_USEC, + inff_btsdio_err_char(cmd->type), err, set, addr, size); + + inff_btsdio_err_enq(bt_if, cmd); +} + +static void inff_btsdio_err_dump(struct seq_file *seq, struct inff_bt_if *bt_if) +{ + struct bts_cmd_entity *cmd = NULL; + struct bts_err_reg *reg = NULL; + struct bts_err_buf *buf = NULL; + struct bts_err_mem *mem = NULL; + u8 idx = 0; + + if (!bt_if || !seq) { + inff_err("bt_if(%p) or seq(%p) is null\n", bt_if, seq); + return; + } + + if (bt_if->cnt_total_err > 0) + seq_printf(seq, "\ntotal error number: %d\n", bt_if->cnt_total_err); + + spin_lock(&bt_if->err_list_lock); + list_for_each_entry(cmd, &bt_if->err_list, list) { + seq_printf(seq, "%3d: [%5lld.%06ld] %8s err: %d\t", + ++idx, (long long)cmd->time.tv_sec, cmd->time.tv_nsec / NSEC_PER_USEC, + inff_btsdio_err_char(cmd->type), cmd->err); + switch (cmd->type) { + case ERR_REG_RB: + case ERR_REG_RL: + reg = &cmd->u.reg; + seq_printf(seq, "F%d addr: 0x%x", reg->fn, reg->addr); + break; + case ERR_REG_WB: + case ERR_REG_WL: + reg = &cmd->u.reg; + seq_printf(seq, "F%d addr: 0x%x\tval: 0x%x", + reg->fn, reg->addr, reg->val); + break; + case ERR_BUF_RD: + seq_puts(seq, "F3"); + break; + case ERR_BUF_WT: + buf = &cmd->u.buf; + seq_printf(seq, "F3 nbytes: %d", buf->nbytes); + break; + case ERR_MEM_RW: + mem = &cmd->u.mem; + seq_printf(seq, "F1 set: %d\taddr: %d\tsize: %d", + mem->set, mem->addr, 
mem->size); + break; + default: + break; + } + seq_puts(seq, "\n"); + } + spin_unlock(&bt_if->err_list_lock); +} + +static int inff_btsdio_debugfs_read(struct seq_file *seq, void *data) +{ + struct inff_bus *bus_if = NULL; + struct inff_sdio_dev *sdiodev = NULL; + struct inff_bt_if *bt_if = NULL; + struct mmc_host *host = NULL; + + if (!seq || !data) { + inff_err("seq(%p) or data(%p) is null\n", seq, data); + return 0; + } + bus_if = dev_get_drvdata(seq->private); + + if (!inff_btsdio_inited(bus_if)) { + seq_printf(seq, "Invalid bus_if (%p) or bt_if\n", bus_if); + return 0; + } + + sdiodev = bus_if->bus_priv.sdio; + bt_if = bus_if->bt_if; + + seq_printf(seq, + "chip: 0x%x\tversion (%d.%d.%d)\n" + "attach: %d\tdetach: %d\n" + "set_bt_reset: %d\n", + sdiodev->func1->device, BTS_VER_MAJOR, BTS_VER_MINOR, BTS_VER_PATCH, + bt_if->cnt_attach, bt_if->cnt_detach, + bt_if->set_bt_reset); + + if (bt_if->cnt_attach > bt_if->cnt_detach) + seq_printf(seq, "bt data: 0x%p\tbt cb: 0x%p\n", + bt_if->bt_data, bt_if->bt_sdio_int_cb); + + host = sdiodev->func2->card->host; + seq_printf(seq, "\nhost\n" + "%-5s: 0x%08x\t%-5s: 0x%08x\n" + "%-12s:%8d\t%-12s:%8d\t%-12s:%8d\t%-12s:%8d\n", + "caps", host->caps, "caps2", host->caps2, + "max blk cnt", host->max_blk_count, + "max req size", host->max_req_size, + "max seg", host->max_segs, + "max seg size", host->max_seg_size); + + seq_printf(seq, "\ndevice\n" + "%10s: %d\n" + "%-12s: %8d\t%-12s: %8d\t%-12s: %8d\n", + "sg_support", sdiodev->sg_support, + "max req size", sdiodev->max_request_size, + "max seg cnt", sdiodev->max_segment_count, + "max seq size", sdiodev->max_segment_size); + + seq_printf(seq, "\nblock size\n" + "%-3s:%4d\t%-3s:%4d\t%-3s:%4d\n", + "F1", sdiodev->func1->cur_blksize, + "F2", sdiodev->func2->cur_blksize, + "F3", sdiodev->func3->cur_blksize); + + inff_btsdio_err_dump(seq, bt_if); + + return 0; +} + +static void *inff_btsdio_get_func_entity(struct inff_sdio_dev *sdiodev, u8 fn) +{ + struct sdio_func *func = NULL; + + if 
(!sdiodev) { + inff_err("sdiodev is null\n"); + return NULL; + } + + if (fn == SDIO_FUNC_1) + func = sdiodev->func1; + else if (fn == SDIO_FUNC_2) + func = sdiodev->func2; + else if (fn == SDIO_FUNC_3) + func = sdiodev->func3; + + return func; +} + +static void inff_btsdio_int_handler(struct sdio_func *func) +{ + struct inff_bus *bus_if = NULL; + struct inff_sdio_dev *sdiodev = NULL; + struct inff_bt_if *bt_if = NULL; + + if (!func) { + inff_err("func is null\n"); + return; + } + bus_if = dev_get_drvdata(&func->dev); + + if (!bus_if) { + inff_err("bus_if is null\n"); + return; + } + sdiodev = bus_if->bus_priv.sdio; + bt_if = bus_if->bt_if; + + if (!bus_if->bt_if) + return; + + inff_dbg(INTR, "F%d IB intr triggered\n", func->num); + + if (bt_if->bt_sdio_int_cb) + bt_if->bt_sdio_int_cb(bt_if->bt_data); +} + +bool inff_btsdio_is_active(struct inff_bus *bus_if) +{ + struct inff_bt_if *bt_if = NULL; + + if (!bus_if) { + inff_err("bus_if is null\n"); + return false; + } + + if (!bus_if->bt_if) + return false; + + bt_if = bus_if->bt_if; + + if (bt_if->use_count == 0) + return false; + + return true; +} + +bool inff_btsdio_set_bt_reset(struct inff_bus *bus_if) +{ + if (!bus_if) { + inff_err("bus_if is null\n"); + return false; + } + + if (!bus_if->bt_if) + return false; + + return bus_if->bt_if->set_bt_reset; +} + +int inff_bus_attach(u32 ver, void *info) +{ + struct bt_shared_info *bts_info = NULL; + struct inff_sdio_dev *sdiodev = NULL; + struct inff_bt_if *bt_if = NULL; + + inff_dbg(INFO, "Enter\n"); + + if (!info) { + inff_err("info is null\n"); + return -EINVAL; + } + bts_info = (struct bt_shared_info *)info; + + if (!glob_bus_if) { + inff_err("btsdio is not initialized\n"); + return -EINVAL; + } + + if (!glob_bus_if->bt_if) { + inff_err("bt dev is not allocated\n"); + return -EINVAL; + } + + sdiodev = glob_bus_if->bus_priv.sdio; + + if (ver != BTS_VERSION) { + inff_err("version mismatch, bt 0x%x != wlan 0x%x\n", + ver, BTS_VERSION); + return -EINVAL; + } + + /* Get 
info from bt dev */ + bt_if = glob_bus_if->bt_if; + bt_if->bt_data = bts_info->bt_data; + bt_if->bt_sdio_int_cb = bts_info->bt_int_fun; + + /* Provide wlan info to bt dev */ + bts_info->wlan_bus_if = glob_bus_if; + bts_info->device_id = sdiodev->func1->device; + bts_info->enum_addr = inff_sdio_get_enum_addr(sdiodev->bus); + + bt_if->cnt_attach++; + inff_dbg(INFO, "Done: device: 0x%x, enum addr: 0x%08x\n", + sdiodev->func1->device, bts_info->enum_addr); + return 0; +} +EXPORT_SYMBOL(inff_bus_attach); + +void inff_bus_detach(struct inff_bus *bus_if) +{ + struct inff_bt_if *bt_if = NULL; + + inff_dbg(INFO, "Enter\n"); + + if (!bus_if) { + inff_err("bus_if is null\n"); + return; + } + + if (!bus_if->bt_if) + return; + + bt_if = bus_if->bt_if; + + if (bt_if->bt_data) + bt_if->bt_data = NULL; + if (bt_if->bt_sdio_int_cb) + bt_if->bt_sdio_int_cb = NULL; + + bt_if->cnt_detach++; + inff_dbg(INFO, "Done\n"); +} +EXPORT_SYMBOL(inff_bus_detach); + +u8 inff_bus_reg_readb(struct inff_bus *bus_if, u8 fn, u32 addr, int *err) +{ + struct inff_sdio_dev *sdiodev = NULL; + struct sdio_func *func = NULL; + u8 val = 0; + + if (!bus_if || !err) { + inff_err("bus_if(%p) or err(%p) is null\n", bus_if, err); + *err = -EINVAL; + return 0; + } + + if (!bus_if->bt_if) { + *err = -EINVAL; + return 0; + } + + sdiodev = bus_if->bus_priv.sdio; + + func = inff_btsdio_get_func_entity(sdiodev, fn); + if (fn > SDIO_FUNC_3 || (fn != SDIO_FUNC_0 && !func)) { + inff_err("invalid function number = %d\n", fn); + *err = -EINVAL; + return 0; + } + + sdio_claim_host(sdiodev->func1); + if (fn == SDIO_FUNC_0) + val = inff_sdiod_func0_rb(sdiodev, addr, err); + else + val = inff_sdiod_func_rb(sdiodev, func, addr, err); + sdio_release_host(sdiodev->func1); + + inff_dbg(SDIO, "F%d addr: 0x%08x, val: 0x%02x, err: %d\n", fn, addr, val, *err); + + if (*err) + inff_btsdio_err_reg_record(bus_if->bt_if, ERR_REG_RB, *err, fn, addr, val); + + return val; +} +EXPORT_SYMBOL(inff_bus_reg_readb); + +void 
inff_bus_reg_writeb(struct inff_bus *bus_if, u8 fn, u32 addr, u8 val, int *err) +{ + struct inff_sdio_dev *sdiodev = NULL; + struct sdio_func *func = NULL; + + if (!bus_if || !err) { + inff_err("bus_if(%p) or err(%p) is null\n", bus_if, err); + *err = -EINVAL; + return; + } + + if (!bus_if->bt_if) { + *err = -EINVAL; + return; + } + + sdiodev = bus_if->bus_priv.sdio; + + func = inff_btsdio_get_func_entity(sdiodev, fn); + if (fn > SDIO_FUNC_3 || (fn != SDIO_FUNC_0 && !func)) { + inff_err("invalid function number = %d\n", fn); + *err = -EINVAL; + return; + } + + sdio_claim_host(sdiodev->func1); + if (fn == SDIO_FUNC_0) + inff_sdiod_func0_wb(sdiodev, addr, val, err); + else + inff_sdiod_func_wb(sdiodev, func, addr, val, err); + sdio_release_host(sdiodev->func1); + + inff_dbg(SDIO, "F%d addr: 0x%08x, val: 0x%02x, err: %d\n", fn, addr, val, *err); + + if (*err) + inff_btsdio_err_reg_record(bus_if->bt_if, ERR_REG_WB, *err, fn, addr, val); +} +EXPORT_SYMBOL(inff_bus_reg_writeb); + +u32 inff_bus_reg_readl(struct inff_bus *bus_if, u32 addr, int *err) +{ + struct inff_sdio_dev *sdiodev = NULL; + u32 val = 0; + + if (!bus_if || !err) { + inff_err("bus_if(%p) or err(%p) is null\n", bus_if, err); + *err = -EINVAL; + return 0; + } + + if (!bus_if->bt_if) { + *err = -EINVAL; + return 0; + } + + sdiodev = bus_if->bus_priv.sdio; + + sdio_claim_host(sdiodev->func1); + val = inff_sdiod_readl(sdiodev, addr, err); + sdio_release_host(sdiodev->func1); + + inff_dbg(SDIO, "addr: 0x%08x, val: 0x%02x, err: %d\n", addr, val, *err); + + if (*err) + inff_btsdio_err_reg_record(bus_if->bt_if, ERR_REG_RL, *err, 1, addr, val); + + return val; +} +EXPORT_SYMBOL(inff_bus_reg_readl); + +void inff_bus_reg_writel(struct inff_bus *bus_if, u32 addr, u32 val, int *err) +{ + struct inff_sdio_dev *sdiodev = NULL; + + if (!bus_if || !err) { + inff_err("bus_if(%p) or err(%p) is null\n", bus_if, err); + *err = -EINVAL; + return; + } + + if (!bus_if->bt_if) { + *err = -EINVAL; + return; + } + + sdiodev = 
bus_if->bus_priv.sdio; + + sdio_claim_host(sdiodev->func1); + inff_sdiod_writel(sdiodev, addr, val, err); + sdio_release_host(sdiodev->func1); + + inff_dbg(SDIO, "addr: 0x%08x, val: 0x%08x, err: %d\n", addr, val, *err); + + if (*err) + inff_btsdio_err_reg_record(bus_if->bt_if, ERR_REG_WL, *err, 1, addr, val); +} +EXPORT_SYMBOL(inff_bus_reg_writel); + +int inff_bus_recv_buf(struct inff_bus *bus_if, u8 *buf, u32 nbytes) +{ + struct inff_sdio_dev *sdiodev = NULL; + int err = 0; + + if (!bus_if || !buf) { + inff_err("bus_if(%p) or buf(%p) is null\n", bus_if, buf); + return -EINVAL; + } + + if (!bus_if->bt_if) + return -EINVAL; + + sdiodev = bus_if->bus_priv.sdio; + + sdio_claim_host(sdiodev->func1); + err = inff_sdiod_recv_buf(sdiodev, SDIO_FUNC_3, buf, nbytes); + sdio_release_host(sdiodev->func1); + + inff_dbg(DATA, "F3 receive nbytes: %d, err: %d\n", nbytes, err); + + if (err) + inff_btsdio_err_buf_record(bus_if->bt_if, ERR_BUF_RD, err, 0); + + return err; +} EXPORT_SYMBOL(inff_bus_recv_buf); + +int inff_bus_send_buf(struct inff_bus *bus_if, u8 *buf, u32 nbytes) +{ + struct inff_sdio_dev *sdiodev = NULL; + int err = 0; + + if (!bus_if || !buf) { + inff_err("bus_if(%p) or buf(%p) is null\n", bus_if, buf); + return -EINVAL; + } + + if (!bus_if->bt_if) + return -EINVAL; + + sdiodev = bus_if->bus_priv.sdio; + + sdio_claim_host(sdiodev->func1); + err = inff_sdiod_send_buf(sdiodev, SDIO_FUNC_3, buf, nbytes); + sdio_release_host(sdiodev->func1); + + inff_dbg(DATA, "F3 send nbytes: %d, err: %d\n", nbytes, err); + + if (err) + inff_btsdio_err_buf_record(bus_if->bt_if, ERR_BUF_WT, err, nbytes); + + return err; +} EXPORT_SYMBOL(inff_bus_send_buf); + +int inff_bus_membytes(struct inff_bus *bus_if, bool set, u32 address, u8 *data, u32 size) +{ + struct inff_sdio_dev *sdiodev = NULL; + int err = 0; + u32 block1_offset = 0; + u32 block2_addr = 0; + u16 block1_size = 0; + u16 block2_size = 0; + u8 *block2_data = 0; + + if (!bus_if || !data) { + inff_err("bus_if(%p) or data(%p) is 
null\n", bus_if, data); + return -EINVAL; + } + + if (!bus_if->bt_if) + return -EINVAL; + + sdiodev = bus_if->bus_priv.sdio; + sdio_claim_host(sdiodev->func1); + do { + /* To avoid SDIO access crosses AXI 4k address boundaries crossing */ + if (((address & SDIOD_ADDR_BOUND_MASK) + size) > SDIOD_ADDR_BOUND) { + inff_dbg(SDIO, "data cross 4K boundary\n"); + /* The 1st 4k packet */ + block1_offset = address & SDIOD_ADDR_BOUND_MASK; + block1_size = (SDIOD_ADDR_BOUND - block1_offset); + + err = inff_sdiod_ramrw(sdiodev, set, address, + data, block1_size); + if (err) + break; + + /* The 2nd 4k packet */ + block2_addr = address + block1_size; + block2_size = size - block1_size; + block2_data = data + block1_size; + err = inff_sdiod_ramrw(sdiodev, set, block2_addr, + block2_data, block2_size); + } else { + err = inff_sdiod_ramrw(sdiodev, set, address, data, size); + } + } while (false); + sdio_release_host(sdiodev->func1); + + if (err) + inff_btsdio_err_mem_record(bus_if->bt_if, err, set, address, size); + + return err; +} +EXPORT_SYMBOL(inff_bus_membytes); + +int inff_bus_set_blocksz(struct inff_bus *bus_if, u16 blocksz) +{ + struct inff_sdio_dev *sdiodev = NULL; + int err = 0; + + if (!bus_if) { + inff_err("bus_if is null\n"); + return -EINVAL; + } + + if (!bus_if->bt_if) + return -EINVAL; + + inff_dbg(INFO, "set F3 block size to %d\n", blocksz); + + sdiodev = bus_if->bus_priv.sdio; + sdio_claim_host(sdiodev->func1); + err = sdio_set_block_size(sdiodev->func3, blocksz); + sdio_release_host(sdiodev->func1); + if (err) + inff_err("set F3 block size failed, err: %d\n", err); + + return err; +} +EXPORT_SYMBOL(inff_bus_set_blocksz); + +/* Function to enable the Bus Clock + * This function is not callable from non-sleepable context + */ +int inff_bus_clk_enable(struct inff_bus *bus_if) +{ + struct inff_sdio_dev *sdiodev = NULL; + struct inff_bt_if *bt_if = NULL; + int err = 0; + + if (!bus_if) { + inff_err("bus_if is null\n"); + return -EINVAL; + } + + if (!bus_if->bt_if) + 
return -EINVAL; + + bt_if = bus_if->bt_if; + sdiodev = bus_if->bus_priv.sdio; + + sdio_claim_host(sdiodev->func1); + bt_if->use_count++; + sdio_release_host(sdiodev->func1); + err = inff_sdio_sleep(sdiodev->bus, false); + + return err; +} +EXPORT_SYMBOL(inff_bus_clk_enable); + +/* Function to disable the Bus Clock + * This function is not callable from non-sleepable context + */ +int inff_bus_clk_disable(struct inff_bus *bus_if) +{ + struct inff_sdio_dev *sdiodev = NULL; + struct inff_bt_if *bt_if = NULL; + int err = 0; + + if (!bus_if) { + inff_err("bus_if is null\n"); + return -EINVAL; + } + + if (!bus_if->bt_if) + return -EINVAL; + + bt_if = bus_if->bt_if; + sdiodev = bus_if->bus_priv.sdio; + + sdio_claim_host(sdiodev->func1); + if (bt_if->use_count != 0) + bt_if->use_count--; + sdio_release_host(sdiodev->func1); + err = inff_sdio_sleep(sdiodev->bus, true); + + return err; +} +EXPORT_SYMBOL(inff_bus_clk_disable); + +static bool inff_btsdio_is_over_sdio(struct inff_bus *bus_if) +{ + struct inff_pub *drvr = NULL; + struct inff_if *ifp = NULL; + struct inff_sdio_dev *sdiodev = NULL; + u32 bt_over_sdio_hw = 0; + int err = 0; + + if (!bus_if) { + inff_err("bus_if is null\n"); + return -EINVAL; + } + drvr = bus_if->drvr; + ifp = inff_get_ifp(drvr, 0); + sdiodev = bus_if->bus_priv.sdio; + + switch (sdiodev->func1->device) { + case SDIO_DEVICE_ID_CYPRESS_43022: + /* cannot config in OTP */ + bt_over_sdio_hw = 1; + break; + case SDIO_DEVICE_ID_CYPRESS_55500: + /* should enable feature in OTP */ + err = inff_fil_iovar_int_get(ifp, "bt_over_sdio", &bt_over_sdio_hw); + if (err < 0) { + bt_over_sdio_hw = 0; + inff_err("failed to get bt_over_sdio\n"); + } + break; + default: + bt_over_sdio_hw = 0; + break; + } + + inff_dbg(INFO, "Device: %d (SW: %d, HW: %d)\n", + sdiodev->func1->device, sdiodev->settings->bt_over_sdio, + bt_over_sdio_hw); + + if (sdiodev->settings->bt_over_sdio & bt_over_sdio_hw) + return true; + else + return false; +} + +void inff_btsdio_init(struct 
inff_bus *bus_if) +{ + struct inff_sdio_dev *sdiodev = NULL; + struct inff_sdio_platform_data *pdata = NULL; + struct inff_bt_if *bt_if = NULL; + + inff_dbg(INFO, "Enter\n"); + + if (!bus_if) { + inff_err("bus_if is null\n"); + return; + } + sdiodev = bus_if->bus_priv.sdio; + pdata = &sdiodev->settings->bus.sdio; + + if (!inff_btsdio_is_over_sdio(bus_if)) { + inff_err("bt over uart\n"); + return; + } + + /* Allocate bt dev */ + bt_if = kzalloc(sizeof(*bt_if), GFP_ATOMIC); + if (!bt_if) + return; + + glob_bus_if = bus_if; + bus_if->bt_if = bt_if; + + /* Initialize error list */ + INIT_LIST_HEAD(&bt_if->err_list); + spin_lock_init(&bt_if->err_list_lock); + + /* 43022: set bt reset by bt driver + * 55500: set bt reset by wl driver if hw enable bt over sdio + */ + if (sdiodev->func1->device == SDIO_DEVICE_ID_CYPRESS_55500) + bt_if->set_bt_reset = true; + + sdio_claim_host(sdiodev->func1); + /* register interrupt */ + if (!pdata->oob_irq_supported) { + inff_dbg(INFO, "register F3 ib irq\n"); + sdio_claim_irq(sdiodev->func3, inff_btsdio_int_handler); + } + sdio_release_host(sdiodev->func1); + + inff_dbg(INFO, "init version (%d.%d.%d) done\n", + BTS_VER_MAJOR, BTS_VER_MINOR, BTS_VER_PATCH); +} + +void inff_btsdio_deinit(struct inff_bus *bus_if) +{ + struct inff_sdio_dev *sdiodev = NULL; + struct inff_sdio_platform_data *pdata = NULL; + + inff_dbg(INFO, "Enter\n"); + + if (!bus_if) { + inff_err("bus_if is null\n"); + return; + } + + if (!bus_if->bt_if) + return; + + sdiodev = bus_if->bus_priv.sdio; + pdata = &sdiodev->settings->bus.sdio; + + /* unregister interrupt */ + sdio_claim_host(sdiodev->func1); + if (!pdata->oob_irq_supported) { + inff_dbg(INFO, "release F3 ib irq\n"); + sdio_release_irq(sdiodev->func3); + } + sdio_release_host(sdiodev->func1); + + inff_bus_detach(bus_if); + + /* Free all error info */ + inff_btsdio_err_free_all(bus_if->bt_if); + + /* Free bt dev */ + kfree(bus_if->bt_if); + + bus_if->bt_if = NULL; + glob_bus_if = NULL; + + inff_dbg(INFO, "deinit 
done\n"); +} + +void inff_btsdio_debugfs_create(struct inff_pub *drvr) +{ + inff_debugfs_add_entry(drvr, "bts_info", inff_btsdio_debugfs_read); +} diff --git a/drivers/net/wireless/infineon/inffmac/bt_shared_sdio.h b/drivers/net/wireless/infineon/inffmac/bt_shared_sdio.h new file mode 100644 index 000000000000..0e5d3d7b8783 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/bt_shared_sdio.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2023-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_BT_SHARED_SDIO_H +#define INFF_BT_SHARED_SDIO_H + +#ifdef CONFIG_INFFMAC_BT_SHARED_SDIO + +bool inff_btsdio_inited(struct inff_bus *bus_if); +bool inff_btsdio_is_active(struct inff_bus *bus_if); +bool inff_btsdio_set_bt_reset(struct inff_bus *bus_if); + +int inff_bus_attach(u32 ver, void *info); +void inff_bus_detach(struct inff_bus *bus_if); +u8 inff_bus_reg_readb(struct inff_bus *bus_if, u8 fn, u32 addr, int *err); +void inff_bus_reg_writeb(struct inff_bus *bus_if, u8 fn, u32 addr, u8 val, int *err); +u32 inff_bus_reg_readl(struct inff_bus *bus_if, u32 addr, int *err); +void inff_bus_reg_writel(struct inff_bus *bus_if, u32 addr, u32 val, int *err); +int inff_bus_recv_buf(struct inff_bus *bus_if, u8 *buf, u32 nbytes); +int inff_bus_send_buf(struct inff_bus *bus_if, u8 *buf, u32 nbytes); +int inff_bus_membytes(struct inff_bus *bus_if, bool set, u32 address, u8 *data, u32 size); +int inff_bus_set_blocksz(struct inff_bus *bus_if, u16 blocksz); +int inff_bus_clk_enable(struct inff_bus *bus_if); +int inff_bus_clk_disable(struct inff_bus *bus_if); + +void inff_btsdio_init(struct inff_bus *bus_if); +void inff_btsdio_deinit(struct inff_bus *bus_if); +void inff_btsdio_debugfs_create(struct inff_pub *drvr); + +#endif /* CONFIG_INFFMAC_BT_SHARED_SDIO */ + +#endif /* INFF_BT_SHARED_SDIO_H */ -- 2.25.1 Definitions of the Metadata in the Device Firmware binary files. 
Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/trxhdr.h | 54 +++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/trxhdr.h diff --git a/drivers/net/wireless/infineon/inffmac/trxhdr.h b/drivers/net/wireless/infineon/inffmac/trxhdr.h new file mode 100644 index 000000000000..fe724406442d --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/trxhdr.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2020-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_TRXHDR_H +#define INFF_TRXHDR_H + +/* Bootloader makes special use of trx header "offsets" array */ +enum { + TRX_OFFSET_SIGN_INFO_IDX = 0, + TRX_OFFSET_DATA_FOR_SIGN1_IDX = 1, + TRX_OFFSET_DATA_FOR_SIGN2_IDX = 2, + TRX_OFFSET_ROOT_MODULUS_IDX = 3, + TRX_OFFSET_ROOT_EXPONENT_IDX = 67, + TRX_OFFSET_CONT_MODULUS_IDX = 68, + TRX_OFFSET_CONT_EXPONENT_IDX = 132, + TRX_OFFSET_HASH_FW_IDX = 133, + TRX_OFFSET_FW_LEN_IDX = 149, + TRX_OFFSET_TR_RST_IDX = 150, + TRX_OFFSET_FW_VER_FOR_ANTIROOLBACK_IDX = 151, + TRX_OFFSET_IV_IDX = 152, + TRX_OFFSET_NONCE_IDX = 160, + TRX_OFFSET_SIGN_INFO2_IDX = 168, + TRX_OFFSET_MAX_IDX +}; + +#define TRX_MAGIC 0x30524448 /* "HDR0" */ +#define TRX_VERSION 4 /* Version 4 */ +#define TRX_MAX_OFFSET TRX_OFFSET_MAX_IDX /* Max number of file offsets */ + +struct trx_header_le { + __le32 magic; /* "HDR0" */ + __le32 len; /* Length of file including header */ + __le32 crc32; /* CRC from flag_version to end of file */ + __le32 flag_version; /* 0:15 flags, 16:31 version */ + __le32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions */ +}; + +#define TRX_VERSION5 5 /* Version 5 */ + +struct trxv5_header_le { + __le32 magic; /* "HDR0" */ + __le32 len; /* Length of file including header */ + __le32 crc32; /* 32-bit CRC from flag_version to end of file */ + __le32 flag_version; /* 0:15 flags, 16:31 version */ + __le32 
root_cert_start_offset; /* Start Offset IDX for Root Certificate */ + __le32 content_cert_start_offset; /* Start Offset IDX for Content Certificate */ + __le32 fw_entry; /* Firmware Entry Point for CM mode */ + __le32 reserved; +}; + +#endif /* INFF_TRXHDR_H */ -- 2.25.1 Driver implementation for managing the chip and also do the chip operations needed for the Infineon-proprietary secured firmware architecture. This new secure firmware architecture demands a unique vendor-specific handshake between the host machine and new Infineon chipsets for both bring up and normal operation, because of firmware encryption and firmware signature validation. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/chip.c | 1507 ++++++++++++++++++ drivers/net/wireless/infineon/inffmac/chip.h | 223 +++ 2 files changed, 1730 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/chip.c create mode 100644 drivers/net/wireless/infineon/inffmac/chip.h diff --git a/drivers/net/wireless/infineon/inffmac/chip.c b/drivers/net/wireless/infineon/inffmac/chip.c new file mode 100644 index 000000000000..1770e1261060 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/chip.c @@ -0,0 +1,1507 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ +#include /* NOTE(review): the targets of these four system #include lines were lost during text extraction (angle-bracket contents stripped); restore them from the original patch */ +#include +#include +#include + +#include "defs.h" +#include "core.h" +#include "hw_ids.h" +#include "utils.h" +#include "chipcommon.h" +#include "debug.h" +#include "chip.h" +#include "chip_43022.h" +#include "chip_5551x.h" +#include "chip_5557x.h" +#include "chip_5591x.h" + +/* PL-368 DMP definitions */ +#define DMP_DESC_TYPE_MSK 0x0000000F +#define DMP_DESC_EMPTY 0x00000000 +#define DMP_DESC_VALID 0x00000001 +#define DMP_DESC_COMPONENT 0x00000001 +#define DMP_DESC_MASTER_PORT 0x00000003 +#define DMP_DESC_ADDRESS 0x00000005 +#define DMP_DESC_ADDRSIZE_GT32 0x00000008 +#define DMP_DESC_EOT 0x0000000F + +#define DMP_COMP_DESIGNER 0xFFF00000 +#define DMP_COMP_DESIGNER_S 20 +#define DMP_COMP_PARTNUM 0x000FFF00 +#define DMP_COMP_PARTNUM_S 8 +#define DMP_COMP_CLASS 0x000000F0 +#define DMP_COMP_CLASS_S 4 +#define DMP_COMP_REVISION 0xFF000000 +#define DMP_COMP_REVISION_S 24 +#define DMP_COMP_NUM_SWRAP 0x00F80000 +#define DMP_COMP_NUM_SWRAP_S 19 +#define DMP_COMP_NUM_MWRAP 0x0007C000 +#define DMP_COMP_NUM_MWRAP_S 14 +#define DMP_COMP_NUM_SPORT 0x00003E00 +#define DMP_COMP_NUM_SPORT_S 9 +#define DMP_COMP_NUM_MPORT 0x000001F0 +#define DMP_COMP_NUM_MPORT_S 4 + +#define DMP_MASTER_PORT_UID 0x0000FF00 +#define DMP_MASTER_PORT_UID_S 8 +#define DMP_MASTER_PORT_NUM 0x000000F0 +#define DMP_MASTER_PORT_NUM_S 4 + +#define DMP_SLAVE_ADDR_BASE 0xFFFFF000 +#define DMP_SLAVE_ADDR_BASE_S 12 +#define DMP_SLAVE_PORT_NUM 0x00000F00 +#define DMP_SLAVE_PORT_NUM_S 8 +#define DMP_SLAVE_TYPE 0x000000C0 +#define DMP_SLAVE_TYPE_S 6 +#define DMP_SLAVE_TYPE_SLAVE 0 +#define DMP_SLAVE_TYPE_BRIDGE 1 +#define DMP_SLAVE_TYPE_SWRAP 2 +#define DMP_SLAVE_TYPE_MWRAP 3 +#define DMP_SLAVE_SIZE_TYPE 0x00000030 +#define DMP_SLAVE_SIZE_TYPE_S 4 +#define DMP_SLAVE_SIZE_4K 0 +#define DMP_SLAVE_SIZE_8K 1 +#define DMP_SLAVE_SIZE_16K 2 +#define DMP_SLAVE_SIZE_DESC 3 + +/* ARM CR4 core specific control flag bits */ +#define ARMCR4_INFF_IOCTL_CPUHALT 0x0020 + +/* D11 core specific control flag bits */
+#define D11_INFF_IOCTL_PHYCLOCKEN 0x0004 +#define D11_INFF_IOCTL_PHYRESET 0x0008 + +/* chip core base & ramsize */ + +/* Max possibly supported memory size (limited by IO mapped memory) */ +#define INFF_CHIP_MAX_MEMSIZE (4 * 1024 * 1024) + +struct sbconfig { + u32 PAD[2]; + u32 sbipsflag; /* initiator port ocp slave flag */ + u32 PAD[3]; + u32 sbtpsflag; /* target port ocp slave flag */ + u32 PAD[11]; + u32 sbtmerrloga; /* (sonics >= 2.3) */ + u32 PAD; + u32 sbtmerrlog; /* (sonics >= 2.3) */ + u32 PAD[3]; + u32 sbadmatch3; /* address match3 */ + u32 PAD; + u32 sbadmatch2; /* address match2 */ + u32 PAD; + u32 sbadmatch1; /* address match1 */ + u32 PAD[7]; + u32 sbimstate; /* initiator agent state */ + u32 sbintvec; /* interrupt mask */ + u32 sbtmstatelow; /* target state */ + u32 sbtmstatehigh; /* target state */ + u32 sbbwa0; /* bandwidth allocation table0 */ + u32 PAD; + u32 sbimconfiglow; /* initiator configuration */ + u32 sbimconfighigh; /* initiator configuration */ + u32 sbadmatch0; /* address match0 */ + u32 PAD; + u32 sbtmconfiglow; /* target configuration */ + u32 sbtmconfighigh; /* target configuration */ + u32 sbbconfig; /* broadcast configuration */ + u32 PAD; + u32 sbbstate; /* broadcast state */ + u32 PAD[3]; + u32 sbactcnfg; /* activate configuration */ + u32 PAD[3]; + u32 sbflagst; /* current sbflags */ + u32 PAD[3]; + u32 sbidlow; /* identification */ + u32 sbidhigh; /* identification */ +}; + +#define INVALID_RAMBASE ((u32)(~0)) + +/* bankidx and bankinfo reg defines corerev >= 8 */ +#define SOCRAM_BANKINFO_RETNTRAM_MASK 0x00010000 +#define SOCRAM_BANKINFO_SZMASK 0x0000007f +#define SOCRAM_BANKIDX_ROM_MASK 0x00000100 + +#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8 +/* socram bankinfo memtype */ +#define SOCRAM_MEMTYPE_RAM 0 +#define SOCRAM_MEMTYPE_R0M 1 +#define SOCRAM_MEMTYPE_DEVRAM 2 + +#define SOCRAM_BANKINFO_SZBASE 8192 +#define SRCI_SRNB_MASK 0xf0 +#define SRCI_SRNB_MASK_EXT 0x100 +#define SRCI_SRNB_SHIFT 4 + +struct sbsocramregs { + u32 
coreinfo; + u32 bwalloc; + u32 extracoreinfo; + u32 biststat; + u32 bankidx; + u32 standbyctrl; + + u32 errlogstatus; /* rev 6 */ + u32 errlogaddr; /* rev 6 */ + /* used for patching rev 3 & 5 */ + u32 cambankidx; + u32 cambankstandbyctrl; + u32 cambankpatchctrl; + u32 cambankpatchtblbaseaddr; + u32 cambankcmdreg; + u32 cambankdatareg; + u32 cambankmaskreg; + u32 PAD[1]; + u32 bankinfo; /* corev 8 */ + u32 bankpda; + u32 PAD[14]; + u32 extmemconfig; + u32 extmemparitycsr; + u32 extmemparityerrdata; + u32 extmemparityerrcnt; + u32 extmemwrctrlandsize; + u32 PAD[84]; + u32 workaround; + u32 pwrctl; /* corerev >= 2 */ + u32 PAD[133]; + u32 sr_control; /* corerev >= 15 */ + u32 sr_status; /* corerev >= 15 */ + u32 sr_address; /* corerev >= 15 */ + u32 sr_data; /* corerev >= 15 */ +}; + +#define SOCRAMREGOFFS(_f) offsetof(struct sbsocramregs, _f) +#define SYSMEMREGOFFS(_f) offsetof(struct sbsocramregs, _f) + +#define ARMCR4_CAP (0x04) +#define ARMCR4_BANKIDX (0x40) +#define ARMCR4_BANKINFO (0x44) +#define ARMCR4_BANKPDA (0x4C) + +#define ARMCR4_TCBBNB_MASK 0xf0 +#define ARMCR4_TCBBNB_SHIFT 4 +#define ARMCR4_TCBANB_MASK 0xf +#define ARMCR4_TCBANB_SHIFT 0 + +#define ARMCR4_BSZ_MASK 0x7f +#define ARMCR4_BSZ_MULT 8192 +#define ARMCR4_BLK_1K_MASK 0x200 + +#define INFF_BLHS_POLL_INTERVAL 10 /* msec */ +#define INFF_BLHS_D2H_READY_TIMEOUT 100 /* msec */ +#define INFF_BLHS_D2H_TRXHDR_PARSE_DONE_TIMEOUT 50 /* msec */ + +#define INFF_BLHS_D2H_VALDN_DONE_TIMEOUT 1000 /* msec */ +#define INFF_BLHS_D2H_MV_NVRAM_DONE_TIMEOUT (100) /* msec */ +#define INFF_BLHS_D2H_BP_CLK_DISABLE_REQ_TIMEOUT (5 * 1000) /* msec */ + +/* Bootloader handshake flags - dongle to host */ +#define INFF_BLHS_D2H_START BIT(0) +#define INFF_BLHS_D2H_READY BIT(1) +#define INFF_BLHS_D2H_STEADY BIT(2) +#define INFF_BLHS_D2H_TRXHDR_PARSE_DONE BIT(3) +#define INFF_BLHS_D2H_VALDN_START BIT(4) +#define INFF_BLHS_D2H_VALDN_RESULT BIT(5) +#define INFF_BLHS_D2H_VALDN_DONE BIT(6) +#define INFF_BLHS_D2H_MV_NVRAM_DONE 
BIT(7) +#define INFF_BLHS_D2H_BP_CLK_DISABLE_REQ BIT(8) + +/* Bootloader handshake flags - host to dongle */ +#define INFF_BLHS_H2D_DL_FW_START BIT(0) +#define INFF_BLHS_H2D_DL_FW_DONE BIT(1) +#define INFF_BLHS_H2D_DL_NVRAM_DONE BIT(2) +#define INFF_BLHS_H2D_BL_RESET_ON_ERROR BIT(3) +#define INFF_BLHS_H2D_DL_NVRAM_START BIT(4) +#define INFF_BLHS_H2D_BP_CLK_DISABLE_ACK BIT(5) + +static bool inff_chip_ai_iscoreup(struct inff_core_priv *core) +{ + struct inff_chip_priv *ci; + u32 regdata; + bool ret; + + ci = core->chip; + regdata = ci->ops->read32(ci->ctx, core->wrapbase + INFF_IOCTL); + ret = (regdata & (INFF_IOCTL_FGC | INFF_IOCTL_CLK)) == INFF_IOCTL_CLK; + + regdata = ci->ops->read32(ci->ctx, core->wrapbase + INFF_RESET_CTL); + ret = ret && ((regdata & INFF_RESET_CTL_RESET) == 0); + + return ret; +} + +static void inff_chip_ai_coredisable(struct inff_core_priv *core, + u32 prereset, u32 reset) +{ + struct inff_chip_priv *ci; + u32 regdata; + + ci = core->chip; + + /* if core is already in reset, skip reset */ + regdata = ci->ops->read32(ci->ctx, core->wrapbase + INFF_RESET_CTL); + if ((regdata & INFF_RESET_CTL_RESET) != 0) + goto in_reset_configure; + + /* configure reset */ + ci->ops->write32(ci->ctx, core->wrapbase + INFF_IOCTL, + prereset | INFF_IOCTL_FGC | INFF_IOCTL_CLK); + ci->ops->read32(ci->ctx, core->wrapbase + INFF_IOCTL); + + /* put in reset */ + ci->ops->write32(ci->ctx, core->wrapbase + INFF_RESET_CTL, + INFF_RESET_CTL_RESET); + usleep_range(10, 20); + + /* wait till reset is 1 */ + SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + INFF_RESET_CTL) != + INFF_RESET_CTL_RESET, 300); + +in_reset_configure: + /* in-reset configure */ + ci->ops->write32(ci->ctx, core->wrapbase + INFF_IOCTL, + reset | INFF_IOCTL_FGC | INFF_IOCTL_CLK); + ci->ops->read32(ci->ctx, core->wrapbase + INFF_IOCTL); +} + +static void inff_chip_ai_resetcore(struct inff_core_priv *core, u32 prereset, + u32 reset, u32 postreset) +{ + struct inff_chip_priv *ci; + int count; + struct 
inff_core *d11core2 = NULL; + struct inff_core_priv *d11priv2 = NULL; + + ci = core->chip; + + /* special handle two D11 cores reset */ + if (core->pub.id == INF_CORE_80211) { + d11core2 = inff_chip_get_d11core(&ci->pub, 1); + if (d11core2) { + inff_dbg(INFO, "found two d11 cores, reset both\n"); + d11priv2 = container_of(d11core2, + struct inff_core_priv, pub); + } + } + + /* must disable first to work for arbitrary current core state */ + inff_chip_ai_coredisable(core, prereset, reset); + if (d11priv2) + inff_chip_ai_coredisable(d11priv2, prereset, reset); + + count = 0; + while (ci->ops->read32(ci->ctx, core->wrapbase + INFF_RESET_CTL) & + INFF_RESET_CTL_RESET) { + ci->ops->write32(ci->ctx, core->wrapbase + INFF_RESET_CTL, 0); + count++; + if (count > 50) + break; + usleep_range(40, 60); + } + + if (d11priv2) { + count = 0; + while (ci->ops->read32(ci->ctx, + d11priv2->wrapbase + INFF_RESET_CTL) & + INFF_RESET_CTL_RESET) { + ci->ops->write32(ci->ctx, + d11priv2->wrapbase + INFF_RESET_CTL, + 0); + count++; + if (count > 50) + break; + usleep_range(40, 60); + } + } + + ci->ops->write32(ci->ctx, core->wrapbase + INFF_IOCTL, + postreset | INFF_IOCTL_CLK); + ci->ops->read32(ci->ctx, core->wrapbase + INFF_IOCTL); + + if (d11priv2) { + ci->ops->write32(ci->ctx, d11priv2->wrapbase + INFF_IOCTL, + postreset | INFF_IOCTL_CLK); + ci->ops->read32(ci->ctx, d11priv2->wrapbase + INFF_IOCTL); + } +} + +char *inff_chip_name(u32 id, u32 rev, char *buf, uint len) +{ + const char *fmt; + + fmt = ((id > 0xa000) || (id < 0x4000)) ? 
"BCM%d/%u" : "BCM%x/%u"; + snprintf(buf, len, fmt, id, rev); + return buf; +} + +static bool inff_chip_find_coreid(struct inff_chip_priv *ci, u16 coreid) +{ + struct inff_core_priv *core; + + list_for_each_entry(core, &ci->cores, list) { + inff_dbg(TRACE, " core 0x%x:%-2d base 0x%08x wrap 0x%08x\n", + core->pub.id, core->pub.rev, core->pub.base, + core->wrapbase); + if (core->pub.id == coreid) + return true; + } + + return false; +} + +static struct inff_core *inff_chip_add_core(struct inff_chip_priv *ci, + u16 coreid, u32 base, + u32 wrapbase) +{ + struct inff_core_priv *core; + + core = kzalloc(sizeof(*core), GFP_KERNEL); + if (!core) + return ERR_PTR(-ENOMEM); + + core->pub.id = coreid; + core->pub.base = base; + core->chip = ci; + core->wrapbase = wrapbase; + + list_add_tail(&core->list, &ci->cores); + return &core->pub; +} + +/* safety check for chipinfo */ +static int inff_chip_cores_check(struct inff_chip_priv *ci) +{ + struct inff_core_priv *core; + bool need_socram = false; + bool has_socram = false; + bool cpu_found = false; + int idx = 1; + + list_for_each_entry(core, &ci->cores, list) { + inff_dbg(INFO, " [%-2d] core 0x%x:%-3d base 0x%08x wrap 0x%08x\n", + idx++, core->pub.id, core->pub.rev, core->pub.base, + core->wrapbase); + + switch (core->pub.id) { + case INF_CORE_ARM_CM3: + cpu_found = true; + need_socram = true; + break; + case INF_CORE_INTERNAL_MEM: + has_socram = true; + break; + case INF_CORE_ARM_CR4: + cpu_found = true; + break; + default: + break; + } + } + + if (!cpu_found) { + inff_err("CPU core not detected\n"); + return -ENXIO; + } + /* check RAM core presence for ARM CM3 core */ + if (need_socram && !has_socram) { + inff_err("RAM core not provided with ARM CM3 core\n"); + return -ENODEV; + } + return 0; +} + +static u32 inff_chip_core_read32(struct inff_core_priv *core, u16 reg) +{ + return core->chip->ops->read32(core->chip->ctx, core->pub.base + reg); +} + +static void inff_chip_core_write32(struct inff_core_priv *core, + u16 reg, u32 
val) +{ + core->chip->ops->write32(core->chip->ctx, core->pub.base + reg, val); +} + +static bool inff_chip_socram_banksize(struct inff_core_priv *core, u8 idx, + u32 *banksize) +{ + u32 bankinfo; + u32 bankidx = (SOCRAM_MEMTYPE_RAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT); + + bankidx |= idx; + inff_chip_core_write32(core, SOCRAMREGOFFS(bankidx), bankidx); + bankinfo = inff_chip_core_read32(core, SOCRAMREGOFFS(bankinfo)); + *banksize = (bankinfo & SOCRAM_BANKINFO_SZMASK) + 1; + *banksize *= SOCRAM_BANKINFO_SZBASE; + return !!(bankinfo & SOCRAM_BANKINFO_RETNTRAM_MASK); +} + +static void inff_chip_socram_ramsize(struct inff_core_priv *sr, u32 *ramsize, + u32 *srsize) +{ + u32 coreinfo; + uint nb, banksize; + bool retent; + int i; + + *ramsize = 0; + *srsize = 0; + + /* chip has its own specific pre-assigned ramsize. */ + if (sr->chip->pub.chip_spec.hw_chip_ramsize) { + *ramsize = sr->chip->pub.chip_spec.hw_chip_ramsize; + return; + } + + if (WARN_ON(sr->pub.rev < 4)) + return; + + if (!inff_chip_iscoreup(&sr->pub)) + inff_chip_resetcore(&sr->pub, 0, 0, 0); + + /* Get info for determining size */ + coreinfo = inff_chip_core_read32(sr, SOCRAMREGOFFS(coreinfo)); + + /* length of SRAM Banks increased for corerev greater than 23 */ + nb = (coreinfo & (SRCI_SRNB_MASK | SRCI_SRNB_MASK_EXT)) >> SRCI_SRNB_SHIFT; + for (i = 0; i < nb; i++) { + retent = inff_chip_socram_banksize(sr, i, &banksize); + *ramsize += banksize; + if (retent) + *srsize += banksize; + } +} + +/** Return the SYS MEM size */ +static u32 inff_chip_sysmem_ramsize(struct inff_core_priv *sysmem) +{ + u32 memsize = 0; + u32 coreinfo; + u32 idx; + u32 nb; + u32 banksize; + + if (!inff_chip_iscoreup(&sysmem->pub)) + inff_chip_resetcore(&sysmem->pub, 0, 0, 0); + + coreinfo = inff_chip_core_read32(sysmem, SYSMEMREGOFFS(coreinfo)); + nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + + for (idx = 0; idx < nb; idx++) { + inff_chip_socram_banksize(sysmem, idx, &banksize); + memsize += banksize; + } + + return memsize; 
+} + +/** Return the TCM-RAM size of the ARMCR4 core. */ +static u32 inff_chip_tcm_ramsize(struct inff_core_priv *cr4) +{ + u32 corecap; + u32 memsize = 0; + u32 nab; + u32 nbb; + u32 totb; + u32 bxinfo; + u32 blksize; + u32 idx; + + corecap = inff_chip_core_read32(cr4, ARMCR4_CAP); + + nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT; + nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT; + totb = nab + nbb; + + for (idx = 0; idx < totb; idx++) { + inff_chip_core_write32(cr4, ARMCR4_BANKIDX, idx); + bxinfo = inff_chip_core_read32(cr4, ARMCR4_BANKINFO); + blksize = ARMCR4_BSZ_MULT; + if (bxinfo & ARMCR4_BLK_1K_MASK) + blksize >>= 3; + + memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * blksize; + } + + return memsize; +} + +static u32 inff_chip_tcm_rambase(struct inff_chip_priv *ci) +{ + if (ci) { + if (ci->pub.chip_spec.hw_chip_rambase) + return ci->pub.chip_spec.hw_chip_rambase; + + inff_err("chip: %s hasn't assigned rambase.\n", ci->pub.name); + } + return INVALID_RAMBASE; +} + +int inff_chip_get_raminfo(struct inff_chip *pub) +{ + struct inff_chip_priv *ci = container_of(pub, struct inff_chip_priv, + pub); + struct inff_core_priv *mem_core; + struct inff_core *mem; + + mem = inff_chip_get_core(&ci->pub, INF_CORE_ARM_CR4); + if (mem) { + mem_core = container_of(mem, struct inff_core_priv, pub); + ci->pub.ramsize = inff_chip_tcm_ramsize(mem_core); + + ci->pub.ramsize -= (pub->chip_spec.hw_chip_tcam_size + + pub->chip_spec.hw_chip_trxhdr_size); + + ci->pub.rambase = inff_chip_tcm_rambase(ci); + if (ci->pub.rambase == INVALID_RAMBASE) { + inff_err("RAM base not provided with ARM CR4 core\n"); + return -EINVAL; + } + } else { + mem = inff_chip_get_core(&ci->pub, INF_CORE_SYS_MEM); + if (mem) { + mem_core = container_of(mem, struct inff_core_priv, + pub); + ci->pub.ramsize = inff_chip_sysmem_ramsize(mem_core); + ci->pub.rambase = inff_chip_tcm_rambase(ci); + if (ci->pub.rambase == INVALID_RAMBASE) { + inff_err("RAM base not provided with ARM CA7 
core\n"); + return -EINVAL; + } + } else { + mem = inff_chip_get_core(&ci->pub, + INF_CORE_INTERNAL_MEM); + if (!mem) { + inff_err("No memory cores found\n"); + return -ENOMEM; + } + mem_core = container_of(mem, struct inff_core_priv, + pub); + inff_chip_socram_ramsize(mem_core, &ci->pub.ramsize, + &ci->pub.srsize); + } + } + inff_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n", + ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize, + ci->pub.srsize, ci->pub.srsize); + + if (!ci->pub.ramsize) { + inff_err("RAM size is undetermined\n"); + return -ENOMEM; + } + + if (ci->pub.ramsize > INFF_CHIP_MAX_MEMSIZE) { + inff_err("RAM size is incorrect\n"); + return -ENOMEM; + } + + return 0; +} + +static u32 inff_chip_dmp_get_desc(struct inff_chip_priv *ci, u32 *eromaddr, + u8 *type) +{ + u32 val; + + /* read next descriptor */ + val = ci->ops->read32(ci->ctx, *eromaddr); + *eromaddr += 4; + + if (!type) + return val; + + /* determine descriptor type */ + *type = (val & DMP_DESC_TYPE_MSK); + if ((*type & ~DMP_DESC_ADDRSIZE_GT32) == DMP_DESC_ADDRESS) + *type = DMP_DESC_ADDRESS; + + return val; +} + +static int inff_chip_dmp_get_regaddr(struct inff_chip_priv *ci, u32 *eromaddr, + u32 *regbase, u32 *wrapbase) +{ + u8 desc; + u32 val, szdesc; + u8 stype, sztype, wraptype; + + *regbase = 0; + *wrapbase = 0; + + val = inff_chip_dmp_get_desc(ci, eromaddr, &desc); + if (desc == DMP_DESC_MASTER_PORT) { + wraptype = DMP_SLAVE_TYPE_MWRAP; + } else if (desc == DMP_DESC_ADDRESS) { + /* revert erom address */ + *eromaddr -= 4; + wraptype = DMP_SLAVE_TYPE_SWRAP; + } else { + *eromaddr -= 4; + return -EILSEQ; + } + + do { + /* locate address descriptor */ + do { + val = inff_chip_dmp_get_desc(ci, eromaddr, &desc); + /* unexpected table end */ + if (desc == DMP_DESC_EOT) { + *eromaddr -= 4; + return -EFAULT; + } + } while (desc != DMP_DESC_ADDRESS && + desc != DMP_DESC_COMPONENT); + + /* stop if we crossed current component border */ + if (desc == DMP_DESC_COMPONENT) { + *eromaddr -= 
4; + return 0; + } + + /* skip upper 32-bit address descriptor */ + if (val & DMP_DESC_ADDRSIZE_GT32) + inff_chip_dmp_get_desc(ci, eromaddr, NULL); + + sztype = (val & DMP_SLAVE_SIZE_TYPE) >> DMP_SLAVE_SIZE_TYPE_S; + + /* next size descriptor can be skipped */ + if (sztype == DMP_SLAVE_SIZE_DESC) { + szdesc = inff_chip_dmp_get_desc(ci, eromaddr, NULL); + /* skip upper size descriptor if present */ + if (szdesc & DMP_DESC_ADDRSIZE_GT32) + inff_chip_dmp_get_desc(ci, eromaddr, NULL); + } + + /* look for 4K or 8K register regions */ + if (sztype != DMP_SLAVE_SIZE_4K && + sztype != DMP_SLAVE_SIZE_8K) + continue; + + stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S; + + /* only regular slave and wrapper */ + if (*regbase == 0 && stype == DMP_SLAVE_TYPE_SLAVE) + *regbase = val & DMP_SLAVE_ADDR_BASE; + if (*wrapbase == 0 && stype == wraptype) + *wrapbase = val & DMP_SLAVE_ADDR_BASE; + } while (*regbase == 0 || *wrapbase == 0); + + return 0; +} + +static +int inff_chip_dmp_erom_scan(struct inff_chip_priv *ci) +{ + struct inff_core *core; + u32 eromaddr; + u8 desc_type = 0; + u32 val; + u16 id; + u8 nmw, nsw, rev; + u32 base, wrap; + int err; + + if (ci->pub.ccsec) + eromaddr = ci->pub.ccsec->erombase; + else + eromaddr = ci->ops->read32(ci->ctx, + CORE_CC_REG(ci->pub.enum_base, eromptr)); + + while (desc_type != DMP_DESC_EOT) { + val = inff_chip_dmp_get_desc(ci, &eromaddr, &desc_type); + if (!(val & DMP_DESC_VALID)) + continue; + + if (desc_type == DMP_DESC_EMPTY) + continue; + + /* need a component descriptor */ + if (desc_type != DMP_DESC_COMPONENT) + continue; + + id = (val & DMP_COMP_PARTNUM) >> DMP_COMP_PARTNUM_S; + + /* next descriptor must be component as well */ + val = inff_chip_dmp_get_desc(ci, &eromaddr, &desc_type); + if (WARN_ON((val & DMP_DESC_TYPE_MSK) != DMP_DESC_COMPONENT)) + return -EFAULT; + + /* only look at cores with master port(s) */ + nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S; + nsw = (val & DMP_COMP_NUM_SWRAP) >> 
DMP_COMP_NUM_SWRAP_S; + rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S; + + /* need core with ports */ + if (nmw + nsw == 0 && + id != INF_CORE_PMU && + id != INF_CORE_GCI && + id != INF_CORE_SR) + continue; + + /* try to obtain register address info */ + err = inff_chip_dmp_get_regaddr(ci, &eromaddr, &base, &wrap); + if (err) + continue; + + /* finally a core to be added */ + core = inff_chip_add_core(ci, id, base, wrap); + if (IS_ERR(core)) + return PTR_ERR(core); + + core->rev = rev; + } + + return 0; +} + +u32 inff_chip_enum_base(u16 devid) +{ + return INFF_SI_ENUM_BASE_DEFAULT; +} + +static void inff_blhs_init(struct inff_chip *pub) +{ + struct inff_chip_priv *chip; + u32 addr; + + chip = container_of(pub, struct inff_chip_priv, pub); + addr = pub->blhs->h2d; + inff_dbg(TRACE, + "h2d value before initing: 0x%08x (addr 0x%08x)\n", + pub->blhs->read(chip->ctx, addr), + addr); + pub->blhs->write(chip->ctx, addr, 0); +} + +static int inff_blhs_is_bootloader_ready(struct inff_chip_priv *chip) +{ + u32 regdata; + u32 addr; + + addr = chip->pub.blhs->d2h; + SPINWAIT_MS((chip->pub.blhs->read(chip->ctx, addr) & + INFF_BLHS_D2H_READY) == 0, + INFF_BLHS_D2H_READY_TIMEOUT, + INFF_BLHS_POLL_INTERVAL); + + regdata = chip->pub.blhs->read(chip->ctx, addr); + if (!(regdata & INFF_BLHS_D2H_READY)) { + inff_err("Timeout waiting for bootloader ready, waittime %d ms addr 0x%x\n", + INFF_BLHS_D2H_READY_TIMEOUT, + addr); + return -EPERM; + } + + inff_dbg(TRACE, "bootloader is ready\n"); + return 0; +} + +static int inff_blhs_pre_nvram_download(struct inff_chip *pub) +{ + struct inff_chip_priv *chip; + u32 addr; + int err; + + /* Host indication for bootloader to start the init */ + inff_blhs_init(pub); + + chip = container_of(pub, struct inff_chip_priv, pub); + err = inff_blhs_is_bootloader_ready(chip); + if (err) + return err; + + /* Host notification about NVRAM download start */ + addr = pub->blhs->h2d; + pub->blhs->write(chip->ctx, addr, INFF_BLHS_H2D_DL_NVRAM_START); + 
+ return 0; +} + +static int inff_blhs_ack_wait_dongle_access(struct inff_chip *pub) +{ + struct inff_chip_priv *chip; + u32 addr; + int err = 0; + + chip = container_of(pub, struct inff_chip_priv, pub); + + addr = pub->blhs->d2h; + SPINWAIT_MS((pub->blhs->read(chip->ctx, addr) & + INFF_BLHS_D2H_BP_CLK_DISABLE_REQ) == 0, + INFF_BLHS_D2H_BP_CLK_DISABLE_REQ_TIMEOUT, + INFF_BLHS_POLL_INTERVAL); + + addr = pub->blhs->h2d; + pub->blhs->write(chip->ctx, addr, INFF_BLHS_H2D_BP_CLK_DISABLE_ACK); + + if (chip->ops->get_intr_pend) + err = chip->ops->get_intr_pend(chip->ctx); + + return err; +} + +static int inff_blhs_prep_fw_download(struct inff_chip *pub) +{ + struct inff_chip_priv *chip; + u32 addr; + int err; + + chip = container_of(pub, struct inff_chip_priv, pub); + /* Host indication for bootloader to start the init */ + if (!(pub->chip == INF_CC_43022_CHIP_ID)) { + inff_blhs_init(pub); + + chip = container_of(pub, struct inff_chip_priv, pub); + err = inff_blhs_is_bootloader_ready(chip); + if (err) + return err; + } + /* Host notification about FW download start */ + addr = pub->blhs->h2d; + pub->blhs->write(chip->ctx, addr, INFF_BLHS_H2D_DL_FW_START); + + return 0; +} + +static int inff_blhs_post_fw_download(struct inff_chip *pub) +{ + struct inff_chip_priv *chip; + u32 addr; + u32 regdata; + int err = 0; + + chip = container_of(pub, struct inff_chip_priv, pub); + addr = pub->blhs->h2d; + pub->blhs->write(chip->ctx, addr, INFF_BLHS_H2D_DL_FW_DONE); + + if (pub->chip == INF_CC_43022_CHIP_ID) { + err = inff_blhs_ack_wait_dongle_access(pub); + } else { + addr = pub->blhs->d2h; + SPINWAIT_MS((pub->blhs->read(chip->ctx, addr) & + INFF_BLHS_D2H_TRXHDR_PARSE_DONE) == 0, + INFF_BLHS_D2H_TRXHDR_PARSE_DONE_TIMEOUT, + INFF_BLHS_POLL_INTERVAL); + + regdata = pub->blhs->read(chip->ctx, addr); + if (!(regdata & INFF_BLHS_D2H_TRXHDR_PARSE_DONE)) { + inff_err("TRX header parsing failed\n"); + + /* Host indication for bootloader to get reset on error */ + addr = pub->blhs->h2d; + 
regdata = pub->blhs->read(chip->ctx, addr); + regdata |= INFF_BLHS_H2D_BL_RESET_ON_ERROR; + pub->blhs->write(chip->ctx, addr, regdata); + + return -EPERM; + } + } + + return err; +} + +static int inff_blhs_post_nvram_download(struct inff_chip *pub) +{ + struct inff_chip_priv *chip; + u32 addr; + u32 regdata; + + chip = container_of(pub, struct inff_chip_priv, pub); + addr = pub->blhs->h2d; + if (pub->chip == INF_CC_43022_CHIP_ID) { + regdata = pub->blhs->read(chip->ctx, addr); + regdata |= INFF_BLHS_H2D_DL_NVRAM_DONE; + pub->blhs->write(chip->ctx, addr, regdata); + + addr = pub->blhs->d2h; + SPINWAIT_MS((pub->blhs->read(chip->ctx, addr) & + INFF_BLHS_D2H_MV_NVRAM_DONE) == 0, + INFF_BLHS_D2H_MV_NVRAM_DONE_TIMEOUT, + INFF_BLHS_POLL_INTERVAL); + /* check if the NVRAM move has been done */ + regdata = pub->blhs->read(chip->ctx, addr); + if ((regdata & INFF_BLHS_D2H_MV_NVRAM_DONE)) { + inff_dbg(INFO, + "NVRAM moved to the end of the RAM. regdata 0x%08x\n", + regdata); + } else { + /* Timeout waiting for the NVRAM to be moved to the end of the RAM. 
*/ + inff_err("Timeout: %dms for INFF_BLHS_D2H_MV_NVRAM_DONE regdata 0x%08x\n", + INFF_BLHS_D2H_MV_NVRAM_DONE_TIMEOUT, regdata); + return -EPERM; + } + } else { + regdata = pub->blhs->read(chip->ctx, addr); + regdata |= INFF_BLHS_H2D_DL_NVRAM_DONE; + pub->blhs->write(chip->ctx, addr, regdata); + } + return 0; +} + +static int inff_blhs_chk_validation(struct inff_chip *pub) +{ + struct inff_chip_priv *chip; + u32 addr; + u32 regdata; + + chip = container_of(pub, struct inff_chip_priv, pub); + addr = pub->blhs->d2h; + SPINWAIT_MS((pub->blhs->read(chip->ctx, addr) & + INFF_BLHS_D2H_VALDN_DONE) == 0, + INFF_BLHS_D2H_VALDN_DONE_TIMEOUT, + INFF_BLHS_POLL_INTERVAL); + + regdata = pub->blhs->read(chip->ctx, addr); + if (!(regdata & INFF_BLHS_D2H_VALDN_DONE) || + !(regdata & INFF_BLHS_D2H_VALDN_RESULT)) { + inff_err("TRX image validation check failed, timeout %d\n", + INFF_BLHS_D2H_VALDN_DONE_TIMEOUT); + + /* Host notification for bootloader to get reset on error */ + addr = pub->blhs->h2d; + regdata = pub->blhs->read(chip->ctx, addr); + regdata |= INFF_BLHS_H2D_BL_RESET_ON_ERROR; + pub->blhs->write(chip->ctx, addr, regdata); + + return -EPERM; + } + + inff_dbg(INFO, "TRX Image validation check completed successfully\n"); + return 0; +} + +static int inff_blhs_post_watchdog_reset(struct inff_chip *pub) +{ + struct inff_chip_priv *chip; + int err; + + /* Host indication for bootloader to start the init */ + inff_blhs_init(pub); + + chip = container_of(pub, struct inff_chip_priv, pub); + err = inff_blhs_is_bootloader_ready(chip); + + return err; +} + +static int inff_chip_recognition(struct inff_chip_priv *ci) +{ + u32 regdata; + int ret; + const u32 READ_FAILED = 0xFFFFFFFF; + + /* Get CC core rev + * Chipid is in bus core if CC space is protected or + * it is assume to be at offset 0 from SI_ENUM_BASE + * For different chiptypes or old sdio hosts w/o chipcommon, + * other ways of recognition should be added here. 
+ */ + if (ci->pub.ccsec) + regdata = ci->pub.ccsec->chipid; + else + regdata = ci->ops->read32(ci->ctx, + CORE_CC_REG(ci->pub.enum_base, chipid)); + if (regdata == READ_FAILED) { + inff_err("MMIO read failed: 0x%08x\n", regdata); + return -ENODEV; + } + + ci->pub.chip = regdata & CID_ID_MASK; + ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT; + ci->pub.socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT; + + inff_chip_name(ci->pub.chip, ci->pub.chiprev, + ci->pub.name, sizeof(ci->pub.name)); + inff_dbg(INFO, "found %s chip: %s\n", + ci->pub.socitype == SOCI_AI ? "AXI" : "N/A", ci->pub.name); + + switch (ci->pub.chip) { + case INF_CC_5557X_CHIP_ID: + inff_chip_5557x_init(&ci->pub); + break; + case INF_CC_5551X_CHIP_ID: + inff_chip_5551x_init(&ci->pub); + break; + case INF_CC_43022_CHIP_ID: + inff_chip_43022_init(&ci->pub); + break; + case INF_CC_5591X_CHIP_ID: + inff_chip_5591x_init(&ci->pub); + break; + default: + inff_err("chip %u is not supported\n", + ci->pub.chip); + return -ENODEV; + } + + switch (ci->pub.socitype) { + case SOCI_AI: + ci->iscoreup = inff_chip_ai_iscoreup; + ci->coredisable = inff_chip_ai_coredisable; + ci->resetcore = inff_chip_ai_resetcore; + break; + case SOCI_CP: + break; + default: + inff_err("chip backplane type %u is not supported\n", + ci->pub.socitype); + return -ENODEV; + } + + inff_chip_dmp_erom_scan(ci); + + ret = inff_chip_cores_check(ci); + if (ret) + return ret; + + /* assure chip is passive for core access */ + inff_chip_set_passive(&ci->pub); + + /* Call bus specific reset function now. Cores have been determined + * but further access may require a chip specific reset at this point. 
+ */ + if (ci->ops->reset) { + ci->ops->reset(ci->ctx, &ci->pub); + inff_chip_set_passive(&ci->pub); + } + + return inff_chip_get_raminfo(&ci->pub); +} + +static int inff_chip_setup(struct inff_chip_priv *chip) +{ + struct inff_chip *pub; + struct inff_core_priv *cc; + struct inff_core *pmu; + u32 base; + u32 val; + int ret = 0; + + pub = &chip->pub; + cc = list_first_entry(&chip->cores, struct inff_core_priv, list); + base = cc->pub.base; + + /* get chipcommon capabilities */ + pub->cc_caps = chip->ops->read32(chip->ctx, + CORE_CC_REG(base, capabilities)); + pub->cc_caps_ext = chip->ops->read32(chip->ctx, + CORE_CC_REG(base, + capabilities_ext)); + + /* get pmu caps & rev */ + pmu = inff_chip_get_pmu(pub); /* after reading cc_caps_ext */ + if (pub->cc_caps & CC_CAP_PMU) { + val = chip->ops->read32(chip->ctx, + CORE_CC_REG(pmu->base, pmucapabilities)); + pub->pmurev = val & PCAP_REV_MASK; + pub->pmucaps = val; + } + + inff_dbg(INFO, "ccrev=%d, pmurev=%d, pmucaps=0x%x\n", + cc->pub.rev, pub->pmurev, pub->pmucaps); + + /* execute bus core specific setup */ + if (chip->ops->setup) + ret = chip->ops->setup(chip->ctx, pub); + + return ret; +} + +struct inff_chip *inff_chip_attach(void *ctx, u16 devid, + const struct inff_buscore_ops *ops) +{ + struct inff_chip_priv *chip; + struct inff_blhs *blhs; + struct inff_ccsec *ccsec; + int err = 0; + + if (WARN_ON(!ops->read32)) + err = -EINVAL; + if (WARN_ON(!ops->write32)) + err = -EINVAL; + if (WARN_ON(!ops->prepare)) + err = -EINVAL; + if (WARN_ON(!ops->activate)) + err = -EINVAL; + if (err < 0) + return ERR_PTR(-EINVAL); + + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&chip->cores); + chip->num_cores = 0; + chip->ops = ops; + chip->ctx = ctx; + chip->pub.enum_base = inff_chip_enum_base(devid); + + err = ops->prepare(ctx); + if (err < 0) + goto fail; + + blhs = NULL; + ccsec = NULL; + if (chip->ops->sec_attach) { + err = chip->ops->sec_attach(chip->ctx, &blhs, &ccsec, 
+ INFF_BLHS_D2H_READY, + INFF_BLHS_D2H_READY_TIMEOUT, + INFF_BLHS_POLL_INTERVAL); + if (err < 0) + goto fail; + + chip->pub.blhs = blhs; + chip->pub.ccsec = ccsec; + if (blhs) { + blhs->init = inff_blhs_init; + blhs->pre_nvramdl = inff_blhs_pre_nvram_download; + blhs->prep_fwdl = inff_blhs_prep_fw_download; + blhs->post_fwdl = inff_blhs_post_fw_download; + blhs->post_nvramdl = inff_blhs_post_nvram_download; + blhs->chk_validation = inff_blhs_chk_validation; + blhs->post_wdreset = inff_blhs_post_watchdog_reset; + } else { + err = -EPERM; + inff_err("chip must support blhs!\n"); + goto fail; + } + } else { + err = -EPERM; + inff_err("Dongle didn't support Secure Attach!\n"); + goto fail; + } + + err = inff_chip_recognition(chip); + if (err < 0) + goto fail; + + err = inff_chip_setup(chip); + if (err < 0) + goto fail; + + return &chip->pub; + +fail: + inff_chip_detach(&chip->pub); + return ERR_PTR(err); +} + +void inff_chip_detach(struct inff_chip *pub) +{ + struct inff_chip_priv *chip; + struct inff_core_priv *core; + struct inff_core_priv *tmp; + + chip = container_of(pub, struct inff_chip_priv, pub); + list_for_each_entry_safe(core, tmp, &chip->cores, list) { + list_del(&core->list); + kfree(core); + } + + kfree(pub->blhs); + kfree(pub->ccsec); + kfree(chip); +} + +struct inff_core *inff_chip_get_d11core(struct inff_chip *pub, u8 unit) +{ + struct inff_chip_priv *chip; + struct inff_core_priv *core; + + chip = container_of(pub, struct inff_chip_priv, pub); + list_for_each_entry(core, &chip->cores, list) { + if (core->pub.id == INF_CORE_80211) { + if (unit-- == 0) + return &core->pub; + } + } + return NULL; +} + +struct inff_core *inff_chip_get_core(struct inff_chip *pub, u16 coreid) +{ + struct inff_chip_priv *chip; + struct inff_core_priv *core; + + chip = container_of(pub, struct inff_chip_priv, pub); + list_for_each_entry(core, &chip->cores, list) + if (core->pub.id == coreid) + return &core->pub; + + return NULL; +} + +struct inff_core 
*inff_chip_get_chipcommon(struct inff_chip *pub) +{ + struct inff_chip_priv *chip; + struct inff_core_priv *cc; + + chip = container_of(pub, struct inff_chip_priv, pub); + cc = list_first_entry(&chip->cores, struct inff_core_priv, list); + if (WARN_ON(!cc || cc->pub.id != INF_CORE_CHIPCOMMON)) + return inff_chip_get_core(pub, INF_CORE_CHIPCOMMON); + return &cc->pub; +} + +struct inff_core *inff_chip_get_pmu(struct inff_chip *pub) +{ + struct inff_core *cc = inff_chip_get_chipcommon(pub); + struct inff_core *pmu; + + /* See if there is separated PMU core available */ + if (cc->rev >= 35 && + pub->cc_caps_ext & INF_CC_CAP_EXT_AOB_PRESENT) { + pmu = inff_chip_get_core(pub, INF_CORE_PMU); + if (pmu) + return pmu; + } + + /* Fallback to ChipCommon core for older hardware */ + return cc; +} + +static struct inff_core *inff_chip_get_gci(struct inff_chip *pub) +{ + struct inff_core *gci; + + gci = inff_chip_get_core(pub, INF_CORE_GCI); + return gci; +} + +bool inff_chip_iscoreup(struct inff_core *pub) +{ + struct inff_core_priv *core; + + core = container_of(pub, struct inff_core_priv, pub); + return core->chip->iscoreup(core); +} + +void inff_chip_coredisable(struct inff_core *pub, u32 prereset, u32 reset) +{ + struct inff_core_priv *core; + + core = container_of(pub, struct inff_core_priv, pub); + core->chip->coredisable(core, prereset, reset); +} + +void inff_chip_resetcore(struct inff_core *pub, u32 prereset, u32 reset, + u32 postreset) +{ + struct inff_core_priv *core; + + core = container_of(pub, struct inff_core_priv, pub); + core->chip->resetcore(core, prereset, reset, postreset); +} + +static bool inff_chip_cm3_set_active(struct inff_chip_priv *chip) +{ + struct inff_core *core; + + core = inff_chip_get_core(&chip->pub, INF_CORE_INTERNAL_MEM); + if (!inff_chip_iscoreup(core)) { + inff_err("SOCRAM core is down after reset?\n"); + return false; + } + + chip->ops->activate(chip->ctx, &chip->pub, 0); + + core = inff_chip_get_core(&chip->pub, INF_CORE_ARM_CM3); + 
inff_chip_resetcore(core, 0, 0, 0); + + return true; +} + +static inline void +inff_chip_cr4_set_passive(struct inff_chip_priv *chip) +{ + int i; + struct inff_core *core; + + /* Disable the cores only and let the firmware enable them. + * Releasing reset ourselves breaks BCM4387 in weird ways. + */ + for (i = 0; (core = inff_chip_get_d11core(&chip->pub, i)); i++) + inff_chip_coredisable(core, D11_INFF_IOCTL_PHYRESET | + D11_INFF_IOCTL_PHYCLOCKEN, + D11_INFF_IOCTL_PHYCLOCKEN); +} + +static bool inff_chip_cr4_set_active(struct inff_chip_priv *chip, u32 rstvec) +{ + struct inff_core *core; + + chip->ops->activate(chip->ctx, &chip->pub, rstvec); + + /* restore ARM */ + core = inff_chip_get_core(&chip->pub, INF_CORE_ARM_CR4); + inff_chip_resetcore(core, ARMCR4_INFF_IOCTL_CPUHALT, 0, 0); + + return true; +} + +void inff_chip_set_passive(struct inff_chip *pub) +{ + struct inff_chip_priv *chip; + struct inff_core *arm; + + inff_dbg(TRACE, "Enter\n"); + + chip = container_of(pub, struct inff_chip_priv, pub); + arm = inff_chip_get_core(pub, INF_CORE_ARM_CR4); + if (arm) { + inff_chip_cr4_set_passive(chip); + return; + } +} + +bool inff_chip_set_active(struct inff_chip *pub, u32 rstvec) +{ + struct inff_chip_priv *chip; + struct inff_core *arm; + + inff_dbg(TRACE, "Enter\n"); + + chip = container_of(pub, struct inff_chip_priv, pub); + arm = inff_chip_get_core(pub, INF_CORE_ARM_CR4); + if (arm) + return inff_chip_cr4_set_active(chip, rstvec); + arm = inff_chip_get_core(pub, INF_CORE_ARM_CM3); + if (arm) + return inff_chip_cm3_set_active(chip); + + return false; +} + +bool inff_chip_sr_capable(struct inff_chip *pub) +{ + u32 base, addr, reg; + struct inff_chip_priv *chip; + struct inff_core *pmu = inff_chip_get_pmu(pub); + + inff_dbg(TRACE, "Enter\n"); + + /* old chips with PMU version less than 17 don't support save restore */ + if (pub->pmurev < 17) + return false; + + base = inff_chip_get_chipcommon(pub)->base; + chip = container_of(pub, struct inff_chip_priv, pub); + + 
switch (pub->chip) { + case INF_CC_43022_CHIP_ID: + addr = CORE_CC_REG(pmu->base, retention_ctl); + reg = chip->ops->read32(chip->ctx, addr); + return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK | + PMU_RCTL_LOGIC_DISABLE_MASK)) == 0; + case INF_CC_5551X_CHIP_ID: + case INF_CC_5557X_CHIP_ID: + return inff_chip_find_coreid(chip, INF_CORE_SR); + default: + inff_err("unsupported chip:0x%x\n", pub->chip); + } + return false; +} + +void inff_chip_reset_pmu_regs(struct inff_chip *pub) +{ + struct inff_chip_priv *chip; + u32 addr; + u32 base; + + inff_dbg(TRACE, "Enter\n"); + + chip = container_of(pub, struct inff_chip_priv, pub); + base = inff_chip_get_pmu(pub)->base; + + switch (pub->chip) { + case INF_CC_43022_CHIP_ID: + /* SW scratch */ + addr = CORE_CC_REG(base, swscratch); + chip->ops->write32(chip->ctx, addr, 0); + + /* PMU status */ + addr = CORE_CC_REG(base, pmustatus); + chip->ops->write32(chip->ctx, addr, + INF43022_PMU_STATUS_MASK); + + /* PMU control ext */ + addr = CORE_CC_REG(base, pmucontrol_ext); + chip->ops->write32(chip->ctx, addr, + INF43022_PMU_CONTROL_EXT_MASK); + break; + + default: + inff_err("Unsupported chip id\n"); + break; + } +} + +void inff_chip_ulp_reset_lhl_regs(struct inff_chip *pub) +{ + struct inff_chip_priv *chip; + u32 base; + u32 addr; + + inff_dbg(TRACE, "Enter\n"); + + chip = container_of(pub, struct inff_chip_priv, pub); + base = inff_chip_get_gci(pub)->base; + + /* LHL Top Level Power Sequence Control */ + addr = CORE_GCI_REG(base, lhl_top_pwrseq_ctl_adr); + chip->ops->write32(chip->ctx, addr, 0); + + /* GPIO Interrupt Enable0 */ + addr = CORE_GCI_REG(base, gpio_int_en_port_adr[0]); + chip->ops->write32(chip->ctx, addr, 0); + + /* GPIO Interrupt Status0 */ + addr = CORE_GCI_REG(base, gpio_int_st_port_adr[0]); + chip->ops->write32(chip->ctx, addr, ~0); + + /* WL ARM Timer0 Interrupt Mask */ + addr = CORE_GCI_REG(base, lhl_wl_armtim0_intrp_adr); + chip->ops->write32(chip->ctx, addr, 0); + + /* WL ARM Timer0 Interrupt Status */ + addr = 
CORE_GCI_REG(base, lhl_wl_armtim0_st_adr); + chip->ops->write32(chip->ctx, addr, ~0); + + /* WL ARM Timer */ + addr = CORE_GCI_REG(base, lhl_wl_armtim0_adr); + chip->ops->write32(chip->ctx, addr, 0); + + /* WL MAC Timer0 Interrupt Mask */ + addr = CORE_GCI_REG(base, lhl_wl_mactim0_intrp_adr); + chip->ops->write32(chip->ctx, addr, 0); + + /* WL MAC Timer0 Interrupt Status */ + addr = CORE_GCI_REG(base, lhl_wl_mactim0_st_adr); + chip->ops->write32(chip->ctx, addr, ~0); + + /* WL MAC TimerInt0 */ + addr = CORE_GCI_REG(base, lhl_wl_mactim_int0_adr); + chip->ops->write32(chip->ctx, addr, 0x0); +} + +void inff_chip_reset_watchdog(struct inff_chip *pub) +{ + inff_dbg(TRACE, "Enter\n"); + + /* FIXME: redundant function already. */ + + switch (pub->chip) { + case INF_CC_43022_CHIP_ID: + inff_dbg(INFO, "DO NOTHING FOR 43022 here... can't access PMU registers\n"); + break; + default: + break; + } +} diff --git a/drivers/net/wireless/infineon/inffmac/chip.h b/drivers/net/wireless/infineon/inffmac/chip.h new file mode 100644 index 000000000000..fe825a8eb05b --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/chip.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_CHIP_H +#define INFF_CHIP_H + +#include +#include +#include +#include "firmware.h" + +#define INFF_SI_ENUM_BASE_DEFAULT 0x18000000 + +#define CORE_CC_REG(base, field) \ + ((base) + offsetof(struct chipcregs, field)) + +#define CORE_GCI_REG(base, field) \ + ((base) + offsetof(struct chipgciregs, field)) + +#define INFF_IOCTL 0x0408 /* IO control */ +#define INFF_IOCTL_CLK 0x0001 +#define INFF_IOCTL_FGC 0x0002 +#define INFF_RESET_CTL 0x0800 +#define INFF_RESET_CTL_RESET 0x0001 + +struct inff_blhs; + +#define INFF_CHIP_COMMON_HW_CAPS_REPLAYCNTS 16 + +/* SOC Interconnect types (aka chip types) */ +#define SOCI_AI 1 +#define SOCI_CP 5 + +enum inff_fw_file_types { + INFF_FW_CODE = 0, + INFF_FW_NVRAM, + INFF_FW_CLM, + INFF_FW_BIN_MAX_TYPE, +}; + +struct inff_fw_dataset { + enum inff_fw_type type; + struct inff_fw_name fwnames; +}; + +struct inff_chip_specific { + u32 hw_caps_replaycnts; + u32 hw_reg_pmu_status_msk; + u32 hw_reg_pmu_ctrl_ext_msk; + u32 hw_chip_ramsize; + u32 hw_chip_rambase; + u32 hw_chip_tcam_size; + u32 hw_chip_trxhdr_size; + + struct inff_fw_dataset fwdata[INFF_FW_BIN_MAX_TYPE]; + char fw_name[INFF_FW_NAME_LEN]; + char nvram_name[INFF_FW_NAME_LEN]; + char clm_name[INFF_FW_NAME_LEN]; + const struct firmware *clm_fw; + + /* TODO: firmware download flow ops. */ +}; + +struct inff_chip_mlo_priv { + void (*get_eht_cap)(struct ieee80211_supported_band *band, + struct ieee80211_sband_iftype_data *data); +}; + +/** + * struct inff_chip - chip level information. + * + * @chip: chip identifier. + * @chiprev: chip revision. + * @enum_base: base address of core enumeration space. + * @cc_caps: chipcommon core capabilities. + * @cc_caps_ext: chipcommon core extended capabilities. + * @pmucaps: PMU capabilities. + * @pmurev: PMU revision. + * @rambase: RAM base address (only applicable for ARM CR4 chips). + * @ramsize: amount of RAM on chip including retention. + * @srsize: amount of retention RAM on chip. + * @socitype: SOC interconnect type. 
+ * @name: string representation of the chip identifier. + * @blhs: bootloader handshake handle. + */ +struct inff_chip { + u32 chip; + u32 chiprev; + u32 enum_base; + u32 cc_caps; + u32 cc_caps_ext; + u32 pmucaps; + u32 pmurev; + u32 rambase; + u32 ramsize; + u32 srsize; + u32 socitype; + char name[12]; + struct inff_blhs *blhs; + struct inff_ccsec *ccsec; + struct inff_chip_specific chip_spec; + struct inff_chip_mlo_priv chip_mlo_priv; +}; + +/** + * struct inff_core - core related information. + * + * @id: core identifier. + * @rev: core revision. + * @base: base address of core register space. + */ +struct inff_core { + u16 id; + u16 rev; + u32 base; +}; + +struct inff_core_priv { + struct inff_core pub; + u32 wrapbase; + struct list_head list; + struct inff_chip_priv *chip; +}; + +struct inff_chip_priv { + struct inff_chip pub; + const struct inff_buscore_ops *ops; + void *ctx; + /* assured first core is chipcommon, second core is buscore */ + struct list_head cores; + u16 num_cores; + + bool (*iscoreup)(struct inff_core_priv *core); + void (*coredisable)(struct inff_core_priv *core, u32 prereset, + u32 reset); + void (*resetcore)(struct inff_core_priv *core, u32 prereset, u32 reset, + u32 postreset); +}; + +/** + * struct inff_buscore_ops - buscore specific callbacks. + * + * @read32: read 32-bit value over bus. + * @write32: write 32-bit value over bus. + * @prepare: prepare bus for core configuration. + * @setup: bus-specific core setup. + * @activate: chip becomes active. + * The callback should use the provided @rstvec when non-zero. 
+ * @sec_attach: attach bootloader handshake handle + */ +struct inff_buscore_ops { + u32 (*read32)(void *ctx, u32 addr); + void (*write32)(void *ctx, u32 addr, u32 value); + int (*prepare)(void *ctx); + int (*reset)(void *ctx, struct inff_chip *chip); + int (*setup)(void *ctx, struct inff_chip *chip); + void (*activate)(void *ctx, struct inff_chip *chip, u32 rstvec); + int (*sec_attach)(void *ctx, struct inff_blhs **blhs, struct inff_ccsec **ccsec, + u32 flag, uint timeout, uint interval); + int (*get_intr_pend)(void *ctx); +}; + +/** + * struct inff_blhs - bootloader handshake handle related information. + * + * @d2h: offset of dongle to host register for the handshake. + * @h2d: offset of host to dongle register for the handshake. + * @init: bootloader handshake initialization. + * @prep_fwdl: handshake before firmware download. + * @post_fwdl: handshake after firmware download. + * @post_nvramdl: handshake after nvram download. + * @chk_validation: handshake for firmware validation check. + * @post_wdreset: handshake after watchdog reset. + * @read: read value with register offset for the handshake. + * @write: write value with register offset for the handshake. 
+ */ +struct inff_blhs { + u32 d2h; + u32 h2d; + void (*init)(struct inff_chip *pub); + int (*pre_nvramdl)(struct inff_chip *pub); + int (*prep_fwdl)(struct inff_chip *pub); + int (*post_fwdl)(struct inff_chip *pub); + int (*post_nvramdl)(struct inff_chip *pub); + int (*chk_validation)(struct inff_chip *pub); + int (*post_wdreset)(struct inff_chip *pub); + u32 (*read)(void *ctx, u32 addr); + void (*write)(void *ctx, u32 addr, u32 value); +}; + +struct inff_ccsec { + u32 bus_corebase; + u32 erombase; + u32 chipid; +}; + +int inff_chip_get_raminfo(struct inff_chip *pub); +struct inff_chip *inff_chip_attach(void *ctx, u16 devid, + const struct inff_buscore_ops *ops); +void inff_chip_detach(struct inff_chip *chip); +struct inff_core *inff_chip_get_core(struct inff_chip *chip, u16 coreid); +struct inff_core *inff_chip_get_d11core(struct inff_chip *pub, u8 unit); +struct inff_core *inff_chip_get_chipcommon(struct inff_chip *chip); +struct inff_core *inff_chip_get_pmu(struct inff_chip *pub); +bool inff_chip_iscoreup(struct inff_core *core); +void inff_chip_coredisable(struct inff_core *core, u32 prereset, u32 reset); +void inff_chip_resetcore(struct inff_core *core, u32 prereset, u32 reset, + u32 postreset); +void inff_chip_set_passive(struct inff_chip *ci); +bool inff_chip_set_active(struct inff_chip *ci, u32 rstvec); +bool inff_chip_sr_capable(struct inff_chip *pub); +char *inff_chip_name(u32 chipid, u32 chiprev, char *buf, uint len); +u32 inff_chip_enum_base(u16 devid); +void inff_chip_reset_watchdog(struct inff_chip *pub); +void inff_chip_ulp_reset_lhl_regs(struct inff_chip *pub); +void inff_chip_reset_pmu_regs(struct inff_chip *pub); + +#endif /* INFF_CHIP_H */ -- 2.25.1 The Chip family specific initialization for Infineon's CYW43022 chipset. 
Signed-off-by: Gokul Sivakumar --- .../wireless/infineon/inffmac/chip_43022.c | 30 +++++++++++++++++++ .../wireless/infineon/inffmac/chip_43022.h | 29 ++++++++++++++++++ 2 files changed, 59 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/chip_43022.c create mode 100644 drivers/net/wireless/infineon/inffmac/chip_43022.h diff --git a/drivers/net/wireless/infineon/inffmac/chip_43022.c b/drivers/net/wireless/infineon/inffmac/chip_43022.c new file mode 100644 index 000000000000..a03328d1e75c --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/chip_43022.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2024-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include "chip_43022.h" + +void inff_chip_43022_init(struct inff_chip *chip) +{ + struct inff_chip_specific *chip_spec = &chip->chip_spec; + struct inff_fw_dataset *fw_data = &chip_spec->fwdata[0]; + + chip_spec->hw_caps_replaycnts = INF43022_HW_CAPS_REPLAYCNTS; + chip_spec->hw_reg_pmu_status_msk = INF43022_PMU_STATUS_MASK; + chip_spec->hw_reg_pmu_ctrl_ext_msk = INF43022_PMU_CONTROL_EXT_MASK; + chip_spec->hw_chip_ramsize = INF43022_CHIP_RAMSIZE; + + fw_data[INFF_FW_CODE].fwnames.extension = ".trxs"; + fw_data[INFF_FW_CODE].fwnames.path = chip_spec->fw_name; + fw_data[INFF_FW_CODE].type = INFF_FW_TYPE_TRXS; + + fw_data[INFF_FW_NVRAM].fwnames.extension = ".txt"; + fw_data[INFF_FW_NVRAM].fwnames.path = chip_spec->nvram_name; + fw_data[INFF_FW_NVRAM].type = INFF_FW_TYPE_NVRAM; + + fw_data[INFF_FW_CLM].fwnames.extension = ".clm_blob"; + fw_data[INFF_FW_CLM].fwnames.path = chip_spec->clm_name; + fw_data[INFF_FW_CLM].type = INFF_FW_TYPE_CLM; +} diff --git a/drivers/net/wireless/infineon/inffmac/chip_43022.h b/drivers/net/wireless/infineon/inffmac/chip_43022.h new file mode 100644 index 000000000000..00715980653f --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/chip_43022.h @@ -0,0 +1,29 @@ +/* 
SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2024-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include "chip.h" + +/* PMU STATUS mask for 43022 */ +#define INF43022_PMU_STATUS_MASK 0x1AC + +/* PMU CONTROL EXT mask for 43022 */ +#define INF43022_PMU_CONTROL_EXT_MASK 0x11 + +/* Minimum PMU resource mask for 43022 */ +#define INF43022_PMU_MIN_RES_MASK 0xF8BFE77 + +#define INF43022_CHIP_RAMSIZE 0xA0000 + +/* chip specific settings */ +#define INF43022_HW_CAPS_REPLAYCNTS 4 + +/* INF43022 watermark expressed in number of words */ +#define INF43022_F2_WATERMARK 0x60 +#define INF43022_MES_WATERMARK 0x50 +#define INF43022_MESBUSYCTRL (INF43022_MES_WATERMARK | \ + SBSDIO_MESBUSYCTRL_ENAB) + +void inff_chip_43022_init(struct inff_chip *chip); -- 2.25.1 The Chip family specific initialization for Infineon's CYW5557x chipsets which have varying spatial stream (1x1/2x2) and bandwidth (20/40/80 MHz) capabilities. Signed-off-by: Gokul Sivakumar --- .../wireless/infineon/inffmac/chip_5557x.c | 31 +++++++++++++++++++ .../wireless/infineon/inffmac/chip_5557x.h | 24 ++++++++++++++ 2 files changed, 55 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/chip_5557x.c create mode 100644 drivers/net/wireless/infineon/inffmac/chip_5557x.h diff --git a/drivers/net/wireless/infineon/inffmac/chip_5557x.c b/drivers/net/wireless/infineon/inffmac/chip_5557x.c new file mode 100644 index 000000000000..b6c9edf31c40 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/chip_5557x.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2024-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include "chip_5557x.h" + +void inff_chip_5557x_init(struct inff_chip *chip) +{ + struct inff_chip_specific *chip_spec = &chip->chip_spec; + struct inff_fw_dataset *fw_data = &chip_spec->fwdata[0]; + + chip_spec->hw_caps_replaycnts = INFF_CHIP_COMMON_HW_CAPS_REPLAYCNTS; + + chip_spec->hw_chip_rambase = INF55572_RAM_BASE; + chip_spec->hw_chip_tcam_size = INF55572_TCAM_SIZE; + chip_spec->hw_chip_trxhdr_size = INF55572_TRXHDR_SIZE; + + fw_data[INFF_FW_CODE].fwnames.extension = ".trxse"; + fw_data[INFF_FW_CODE].fwnames.path = chip_spec->fw_name; + fw_data[INFF_FW_CODE].type = INFF_FW_TYPE_TRXSE; + + fw_data[INFF_FW_NVRAM].fwnames.extension = ".txt"; + fw_data[INFF_FW_NVRAM].fwnames.path = chip_spec->nvram_name; + fw_data[INFF_FW_NVRAM].type = INFF_FW_TYPE_NVRAM; + + fw_data[INFF_FW_CLM].fwnames.extension = ".clm_blob"; + fw_data[INFF_FW_CLM].fwnames.path = chip_spec->clm_name; + fw_data[INFF_FW_CLM].type = INFF_FW_TYPE_CLM; +} diff --git a/drivers/net/wireless/infineon/inffmac/chip_5557x.h b/drivers/net/wireless/infineon/inffmac/chip_5557x.h new file mode 100644 index 000000000000..644e81bd1c9c --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/chip_5557x.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2024-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include "chip.h" + +/* INF55572 dedicated space and RAM base */ +#define INF55572_TCAM_SIZE 0x800 +#define INF55572_TRXHDR_SIZE 0x2b4 +#define INF55572_RAM_BASE (0x370000 + \ + INF55572_TCAM_SIZE + INF55572_TRXHDR_SIZE) + +/* INF55572 SDIO func2 block size */ +#define SDIO_INF55572_FUNC2_BLOCKSIZE 256 + +/* INF55572 watermark expressed in number of words */ +#define INF55572_F2_WATERMARK 0x40 +#define INF55572_MES_WATERMARK 0x40 +#define INF55572_F1_MESBUSYCTRL (INF55572_MES_WATERMARK | \ + SBSDIO_MESBUSYCTRL_ENAB) + +void inff_chip_5557x_init(struct inff_chip *chip); -- 2.25.1 The Chip family specific initialization for Infineon's CYW5551x chipsets. Signed-off-by: Gokul Sivakumar --- .../wireless/infineon/inffmac/chip_5551x.c | 37 +++++++++++++++++++ .../wireless/infineon/inffmac/chip_5551x.h | 24 ++++++++++++ 2 files changed, 61 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/chip_5551x.c create mode 100644 drivers/net/wireless/infineon/inffmac/chip_5551x.h diff --git a/drivers/net/wireless/infineon/inffmac/chip_5551x.c b/drivers/net/wireless/infineon/inffmac/chip_5551x.c new file mode 100644 index 000000000000..38ae4b823b05 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/chip_5551x.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2024-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include "chip_5551x.h" + +void inff_chip_5551x_init(struct inff_chip *chip) +{ + struct inff_chip_specific *chip_spec = &chip->chip_spec; + struct inff_fw_dataset *fw_data = &chip_spec->fwdata[0]; + + chip_spec->hw_caps_replaycnts = INFF_CHIP_COMMON_HW_CAPS_REPLAYCNTS; + + if (chip->chiprev == 0) { + chip_spec->hw_chip_tcam_size = INF55500_TCAM_SIZE; + chip_spec->hw_chip_trxhdr_size = INF55500_TRXHDR_SIZE; + chip_spec->hw_chip_rambase = INF55500_RAM_BASE; + } else { + chip_spec->hw_chip_tcam_size = INF55500_A1_TCAM_SIZE; + chip_spec->hw_chip_trxhdr_size = INF55500_A1_TRXHDR_SIZE; + chip_spec->hw_chip_rambase = INF55500_A1_RAM_BASE; + } + + fw_data[INFF_FW_CODE].fwnames.extension = ".trxse"; + fw_data[INFF_FW_CODE].fwnames.path = chip_spec->fw_name; + fw_data[INFF_FW_CODE].type = INFF_FW_TYPE_TRXSE; + + fw_data[INFF_FW_NVRAM].fwnames.extension = ".txt"; + fw_data[INFF_FW_NVRAM].fwnames.path = chip_spec->nvram_name; + fw_data[INFF_FW_NVRAM].type = INFF_FW_TYPE_NVRAM; + + fw_data[INFF_FW_CLM].fwnames.extension = ".clm_blob"; + fw_data[INFF_FW_CLM].fwnames.path = chip_spec->clm_name; + fw_data[INFF_FW_CLM].type = INFF_FW_TYPE_CLM; +} diff --git a/drivers/net/wireless/infineon/inffmac/chip_5551x.h b/drivers/net/wireless/infineon/inffmac/chip_5551x.h new file mode 100644 index 000000000000..abf9d60a0b74 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/chip_5551x.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2024-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include "chip.h" + +/* 55500-A0, Dedicated space for TCAM_PATCH and TRX HDR area at RAMSTART */ +#define INF55500_RAM_START (0x3a0000) +#define INF55500_TCAM_SIZE (0x800) +#define INF55500_TRXHDR_SIZE (0x2b4) + +#define INF55500_RAM_BASE (INF55500_RAM_START + INF55500_TCAM_SIZE + \ + INF55500_TRXHDR_SIZE) + +/* 55500-A1, Dedicated space for TCAM_PATCH and TRX HDR area at RAMSTART */ +#define INF55500_A1_TCAM_SIZE (0x1000) +#define INF55500_A1_TRXHDR_SIZE (0x20) + +#define INF55500_A1_RAM_BASE (INF55500_RAM_START + INF55500_A1_TCAM_SIZE + \ + INF55500_A1_TRXHDR_SIZE) + +void inff_chip_5551x_init(struct inff_chip *chip); -- 2.25.1 The Chip family specific initialization for Infineon's CYW5591x chipsets. Signed-off-by: Gokul Sivakumar --- .../wireless/infineon/inffmac/chip_5591x.c | 24 ++++++++++++++ .../wireless/infineon/inffmac/chip_5591x.h | 33 +++++++++++++++++++ 2 files changed, 57 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/chip_5591x.c create mode 100644 drivers/net/wireless/infineon/inffmac/chip_5591x.h diff --git a/drivers/net/wireless/infineon/inffmac/chip_5591x.c b/drivers/net/wireless/infineon/inffmac/chip_5591x.c new file mode 100644 index 000000000000..8901ede943cf --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/chip_5591x.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2024-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include "chip_5591x.h" + +void inff_chip_5591x_init(struct inff_chip *chip) +{ + struct inff_chip_specific *chip_spec = &chip->chip_spec; + + chip_spec->hw_caps_replaycnts = INFF_CHIP_COMMON_HW_CAPS_REPLAYCNTS; + + if (chip->chiprev == 0) { + chip_spec->hw_chip_tcam_size = INF55900_TCAM_SIZE; + chip_spec->hw_chip_trxhdr_size = INF55900_TRXHDR_SIZE; + chip_spec->hw_chip_rambase = INF55900_RAM_BASE; + } else { + chip_spec->hw_chip_tcam_size = INF55900_A1_TCAM_SIZE; + chip_spec->hw_chip_trxhdr_size = INF55900_A1_TRXHDR_SIZE; + chip_spec->hw_chip_rambase = INF55900_A1_RAM_BASE; + } +} diff --git a/drivers/net/wireless/infineon/inffmac/chip_5591x.h b/drivers/net/wireless/infineon/inffmac/chip_5591x.h new file mode 100644 index 000000000000..c1ca76788004 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/chip_5591x.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2024-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include "chip.h" + +/* 55900-A0, Dedicated space for TCAM_PATCH and TRX HDR area at RAMSTART */ +#define INF55900_RAM_START (0x3a0000) +#define INF55900_TCAM_SIZE (0x800) +#define INF55900_TRXHDR_SIZE (0x2b4) + +#define INF55900_RAM_BASE (INF55900_RAM_START + INF55900_TCAM_SIZE + \ + INF55900_TRXHDR_SIZE) + +/* 55900-A1, Dedicated space for TCAM_PATCH and TRX HDR area at RAMSTART */ +#define INF55900_A1_TCAM_SIZE (0x1000) +#define INF55900_A1_TRXHDR_SIZE (0x20) + +#define INF55900_A1_RAM_BASE (INF55900_RAM_START + INF55900_A1_TCAM_SIZE + \ + INF55900_A1_TRXHDR_SIZE) + +/* INF55900 SDIO func2 block size */ +#define SDIO_INF55900_FUNC2_BLOCKSIZE 256 + +/* INF55900 watermark expressed in number of words */ +#define INF55900_F2_WATERMARK 0x40 +#define INF55900_MES_WATERMARK 0x40 +#define INF55900_F1_MESBUSYCTRL (INF55900_MES_WATERMARK | \ + SBSDIO_MESBUSYCTRL_ENAB) + +void inff_chip_5591x_init(struct inff_chip *chip); -- 2.25.1 Implements the specific bus logic for Infineon devices connected to the linux machine via a PCIe interface. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/pcie.c | 2909 ++++++++++++++++++ drivers/net/wireless/infineon/inffmac/pcie.h | 19 + 2 files changed, 2928 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/pcie.c create mode 100644 drivers/net/wireless/infineon/inffmac/pcie.h diff --git a/drivers/net/wireless/infineon/inffmac/pcie.c b/drivers/net/wireless/infineon/inffmac/pcie.c new file mode 100644 index 000000000000..5ec1bd7e3c52 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/pcie.c @@ -0,0 +1,2909 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "chipcommon.h" +#include "utils.h" +#include "hw_ids.h" + +/* Custom inff_err() that takes bus arg and passes it further */ +#define inff_err(bus, fmt, ...) \ + do { \ + if (IS_ENABLED(CONFIG_INF_DEBUG) || \ + IS_ENABLED(CONFIG_INF_TRACING) || \ + net_ratelimit()) \ + __inff_err(bus, __func__, fmt, ##__VA_ARGS__); \ + } while (0) + +#include "debug.h" +#include "bus.h" +#include "commonring.h" +#include "msgbuf.h" +#include "pcie.h" +#include "firmware.h" +#include "chip.h" +#include "core.h" +#include "common.h" +#include "cfg80211.h" +#include "trxhdr.h" + +enum inff_pcie_state { + INFFMAC_PCIE_STATE_DOWN, + INFFMAC_PCIE_STATE_UP +}; + +/* per-board firmware binaries */ +#define INFF_55572_FIRMWARE_BASENAME INFF_FW_DEFAULT_PATH "inffmac55572-pcie" + +MODULE_FIRMWARE(INFF_55572_FIRMWARE_BASENAME ".trxse"); + +static const struct inff_firmware_mapping inff_pcie_fwnames[] = { + INFF_FW_ENTRY(INF_CC_5557X_CHIP_ID, 0xFFFFFFFF, 55572), +}; + +#define INFF_PCIE_READ_SHARED_TIMEOUT 5000 /* msec */ +#define INFF_PCIE_FW_UP_TIMEOUT 5000 /* msec */ + +#define INFF_PCIE_REG_MAP_SIZE (32 * 1024) + +/* backplane address space accessed by BAR0 */ +#define INFF_PCIE_BAR0_WINDOW 0x80 +#define INFF_PCIE_BAR0_REG_SIZE 0x1000 +#define INFF_PCIE_BAR0_WRAPPERBASE 0x70 + +#define INFF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000 +#define INFF_PCIE_BAR0_PCIE_ENUM_OFFSET 0x2000 +#define INFF_CYW55572_PCIE_BAR0_PCIE_ENUM_OFFSET 0x3000 + +#define INFF_PCIE_BAR1_WINDOW 0x84 + +#define INFF_PCIE_ARMCR4REG_BANKIDX 0x40 +#define INFF_PCIE_ARMCR4REG_BANKPDA 0x4C + +#define INFF_PCIE_REG_INTSTATUS 0x90 +#define INFF_PCIE_REG_INTMASK 0x94 +#define INFF_PCIE_REG_SBMBX 0x98 + +#define INFF_PCIE_REG_LINK_STATUS_CTRL 0xBC + +#define INFF_PCIE_PCIE2REG_INTMASK 0x24 +#define INFF_PCIE_PCIE2REG_MAILBOXINT 0x48 +#define INFF_PCIE_PCIE2REG_MAILBOXMASK 0x4C +#define 
INFF_PCIE_PCIE2REG_CONFIGADDR		0x120
#define INFF_PCIE_PCIE2REG_CONFIGDATA		0x124
#define INFF_PCIE_PCIE2REG_H2D_MAILBOX_0	0x140
#define INFF_PCIE_PCIE2REG_H2D_MAILBOX_1	0x144
#define INFF_PCIE_PCIE2REG_DAR_D2H_MSG_0	0xA80
#define INFF_PCIE_PCIE2REG_DAR_H2D_MSG_0	0xA90

/* Register offsets for the alternate "64" PCIe core register layout */
#define INFF_PCIE_64_PCIE2REG_INTMASK		0xC14
#define INFF_PCIE_64_PCIE2REG_MAILBOXINT	0xC30
#define INFF_PCIE_64_PCIE2REG_MAILBOXMASK	0xC34
#define INFF_PCIE_64_PCIE2REG_H2D_MAILBOX_0	0xA20
#define INFF_PCIE_64_PCIE2REG_H2D_MAILBOX_1	0xA24

#define INFF_PCIE2_INTA			0x01
#define INFF_PCIE2_INTB			0x02

#define INFF_PCIE_INT_0			0x01
#define INFF_PCIE_INT_1			0x02
#define INFF_PCIE_INT_DEF		(INFF_PCIE_INT_0 | \
					 INFF_PCIE_INT_1)

/* MAILBOXINT bits: function-0 mailbox interrupt and D2H doorbells */
#define INFF_PCIE_MB_INT_FN0_0		0x0100
#define INFF_PCIE_MB_INT_FN0_1		0x0200
#define INFF_PCIE_MB_INT_D2H0_DB0	0x10000
#define INFF_PCIE_MB_INT_D2H0_DB1	0x20000
#define INFF_PCIE_MB_INT_D2H1_DB0	0x40000
#define INFF_PCIE_MB_INT_D2H1_DB1	0x80000
#define INFF_PCIE_MB_INT_D2H2_DB0	0x100000
#define INFF_PCIE_MB_INT_D2H2_DB1	0x200000
#define INFF_PCIE_MB_INT_D2H3_DB0	0x400000
#define INFF_PCIE_MB_INT_D2H3_DB1	0x800000

#define INFF_PCIE_MB_INT_FN0		(INFF_PCIE_MB_INT_FN0_0 | \
					 INFF_PCIE_MB_INT_FN0_1)
#define INFF_PCIE_MB_INT_D2H_DB		(INFF_PCIE_MB_INT_D2H0_DB0 | \
					 INFF_PCIE_MB_INT_D2H0_DB1 | \
					 INFF_PCIE_MB_INT_D2H1_DB0 | \
					 INFF_PCIE_MB_INT_D2H1_DB1 | \
					 INFF_PCIE_MB_INT_D2H2_DB0 | \
					 INFF_PCIE_MB_INT_D2H2_DB1 | \
					 INFF_PCIE_MB_INT_D2H3_DB0 | \
					 INFF_PCIE_MB_INT_D2H3_DB1)

/* D2H doorbell bits in the "64" register layout */
#define INFF_PCIE_64_MB_INT_D2H0_DB0	0x1
#define INFF_PCIE_64_MB_INT_D2H0_DB1	0x2
#define INFF_PCIE_64_MB_INT_D2H1_DB0	0x4
#define INFF_PCIE_64_MB_INT_D2H1_DB1	0x8
#define INFF_PCIE_64_MB_INT_D2H2_DB0	0x10
#define INFF_PCIE_64_MB_INT_D2H2_DB1	0x20
#define INFF_PCIE_64_MB_INT_D2H3_DB0	0x40
#define INFF_PCIE_64_MB_INT_D2H3_DB1	0x80
#define INFF_PCIE_64_MB_INT_D2H4_DB0	0x100
#define INFF_PCIE_64_MB_INT_D2H4_DB1	0x200
#define INFF_PCIE_64_MB_INT_D2H5_DB0	0x400
#define INFF_PCIE_64_MB_INT_D2H5_DB1	0x800
#define INFF_PCIE_64_MB_INT_D2H6_DB0	0x1000
#define INFF_PCIE_64_MB_INT_D2H6_DB1	0x2000
#define INFF_PCIE_64_MB_INT_D2H7_DB0	0x4000
#define INFF_PCIE_64_MB_INT_D2H7_DB1	0x8000

#define INFF_PCIE_64_MB_INT_D2H_DB	(INFF_PCIE_64_MB_INT_D2H0_DB0 | \
					 INFF_PCIE_64_MB_INT_D2H0_DB1 | \
					 INFF_PCIE_64_MB_INT_D2H1_DB0 | \
					 INFF_PCIE_64_MB_INT_D2H1_DB1 | \
					 INFF_PCIE_64_MB_INT_D2H2_DB0 | \
					 INFF_PCIE_64_MB_INT_D2H2_DB1 | \
					 INFF_PCIE_64_MB_INT_D2H3_DB0 | \
					 INFF_PCIE_64_MB_INT_D2H3_DB1 | \
					 INFF_PCIE_64_MB_INT_D2H4_DB0 | \
					 INFF_PCIE_64_MB_INT_D2H4_DB1 | \
					 INFF_PCIE_64_MB_INT_D2H5_DB0 | \
					 INFF_PCIE_64_MB_INT_D2H5_DB1 | \
					 INFF_PCIE_64_MB_INT_D2H6_DB0 | \
					 INFF_PCIE_64_MB_INT_D2H6_DB1 | \
					 INFF_PCIE_64_MB_INT_D2H7_DB0 | \
					 INFF_PCIE_64_MB_INT_D2H7_DB1)

/* Host/firmware shared-structure protocol versions and flags */
#define INFF_PCIE_SHARED_VERSION_6	6
#define INFF_PCIE_SHARED_VERSION_7	7
#define INFF_PCIE_MIN_SHARED_VERSION	5
#define INFF_PCIE_MAX_SHARED_VERSION	INFF_PCIE_SHARED_VERSION_7
#define INFF_PCIE_SHARED_VERSION_MASK	0x00FF
#define INFF_PCIE_SHARED_DMA_INDEX	0x10000
#define INFF_PCIE_SHARED_DMA_2B_IDX	0x100000
#define INFF_PCIE_SHARED_USE_MAILBOX	0x2000000
#define INFF_PCIE_SHARED_HOSTRDY_DB1	0x10000000

#define INFF_PCIE_FLAGS_HTOD_SPLIT	0x4000
#define INFF_PCIE_FLAGS_DTOH_SPLIT	0x8000

/* Byte offsets of fields inside the shared structure in device TCM */
#define INFF_SHARED_MAX_RXBUFPOST_OFFSET	34
#define INFF_SHARED_RING_BASE_OFFSET		52
#define INFF_SHARED_RX_DATAOFFSET_OFFSET	36
#define INFF_SHARED_CONSOLE_ADDR_OFFSET		20
#define INFF_SHARED_HTOD_MB_DATA_ADDR_OFFSET	40
#define INFF_SHARED_DTOH_MB_DATA_ADDR_OFFSET	44
#define INFF_SHARED_RING_INFO_ADDR_OFFSET	48
#define INFF_SHARED_DMA_SCRATCH_LEN_OFFSET	52
#define INFF_SHARED_DMA_SCRATCH_ADDR_OFFSET	56
#define INFF_SHARED_DMA_RINGUPD_LEN_OFFSET	64
#define INFF_SHARED_DMA_RINGUPD_ADDR_OFFSET	68
#define INFF_SHARED_HOST_CAP_OFFSET		84

#define INFF_RING_H2D_RING_COUNT_OFFSET	0
#define INFF_RING_D2H_RING_COUNT_OFFSET	1
#define INFF_RING_H2D_RING_MEM_OFFSET	4
#define INFF_RING_H2D_RING_STATE_OFFSET	8

#define INFF_RING_MEM_BASE_ADDR_OFFSET	8
#define INFF_RING_MAX_ITEM_OFFSET	4
#define INFF_RING_LEN_ITEMS_OFFSET	6
#define INFF_RING_MEM_SZ		16
#define INFF_RING_STATE_SZ		8

#define INFF_DEF_MAX_RXBUFPOST		255

#define INFF_HOSTCAP_H2D_ENABLE_HOSTRDY	0x400
#define INFF_HOSTCAP_DS_NO_OOB_DW	0x1000

#define INFF_CONSOLE_BUFADDR_OFFSET	8
#define INFF_CONSOLE_BUFSIZE_OFFSET	12
#define INFF_CONSOLE_WRITEIDX_OFFSET	16

#define INFF_DMA_D2H_SCRATCH_BUF_LEN	8
#define INFF_DMA_D2H_RINGUPD_BUF_LEN	1024

/* Device-to-host mailbox data bits */
#define INFF_D2H_DEV_D3_ACK		0x00000001
#define INFF_D2H_DEV_DS_ENTER_REQ	0x00000002
#define INFF_D2H_DEV_DS_EXIT_NOTE	0x00000004
#define INFF_D2H_DEV_FWHALT		0x10000000

/* Host-to-device mailbox data bits */
#define INFF_H2D_HOST_D3_INFORM		0x00000001
#define INFF_H2D_HOST_DS_ACK		0x00000002
#define INFF_H2D_HOST_D0_INFORM_IN_USE	0x00000008
#define INFF_H2D_HOST_D0_INFORM		0x00000010

#define INFF_PCIE_MBDATA_TIMEOUT	msecs_to_jiffies(2000)

/* PCIe config-space register offsets saved/restored around reset */
#define INFF_PCIE_CFGREG_STATUS_CMD		0x4
#define INFF_PCIE_CFGREG_PM_CSR			0x4C
#define INFF_PCIE_CFGREG_MSI_CAP		0x58
#define INFF_PCIE_CFGREG_MSI_ADDR_L		0x5C
#define INFF_PCIE_CFGREG_MSI_ADDR_H		0x60
#define INFF_PCIE_CFGREG_MSI_DATA		0x64
#define INFF_PCIE_CFGREG_REVID			0x6C
#define INFF_PCIE_CFGREG_LINK_STATUS_CTRL	0xBC
#define INFF_PCIE_CFGREG_LINK_STATUS_CTRL2	0xDC
#define INFF_PCIE_CFGREG_RBAR_CTRL		0x228
#define INFF_PCIE_CFGREG_PML1_SUB_CTRL1		0x248
#define INFF_PCIE_CFGREG_REG_BAR2_CONFIG	0x4E0
#define INFF_PCIE_CFGREG_REG_BAR3_CONFIG	0x4F4
#define INFF_PCIE_CFGREG_REVID_SECURE_MODE	BIT(31)
#define INFF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB	3

/* Magic number at a magic location to find RAM size */
#define INFF_RAMSIZE_MAGIC		0x534d4152	/* SMAR */
#define INFF_RAMSIZE_OFFSET		0x6c

#define INFF_ENTROPY_SEED_LEN		64u
#define INFF_ENTROPY_NONCE_LEN		16u
#define INFF_ENTROPY_HOST_LEN		(INFF_ENTROPY_SEED_LEN + \
					 INFF_ENTROPY_NONCE_LEN)
#define INFF_NVRAM_OFFSET_TCM		4u
#define INFF_NVRAM_COMPRS_FACTOR	4u
#define INFF_NVRAM_RNG_SIGNATURE	0xFEEDC0DEu

/* Header prepended to host-supplied random seed data */
struct inff_rand_metadata {
	u32 signature;
	u32 count;
};

/* Host-side view of the firmware console ring in device TCM */
struct inff_pcie_console {
	u32 base_addr;
	u32 buf_addr;
	u32 bufsize;
	u32 read_idx;
	u8 log_str[256];
	u8 log_idx;
};

/* Host copy of the firmware-published shared structure */
struct inff_pcie_shared_info {
	u32 tcm_base_address;
	u32 flags;
	struct inff_pcie_ringbuf *commonrings[INFF_NROF_COMMON_MSGRINGS];
	struct inff_pcie_ringbuf *flowrings;
	u16 max_rxbufpost;
	u16 max_flowrings;
	u16 max_submissionrings;
	u16 max_completionrings;
	u32 rx_dataoffset;
	u32 htod_mb_data_addr;
	u32 dtoh_mb_data_addr;
	u32 ring_info_addr;
	struct inff_pcie_console console;
	void *scratch;
	dma_addr_t scratch_dmahandle;
	void *ringupd;
	dma_addr_t ringupd_dmahandle;
	u8 version;
};

struct inff_pcie_core_info {
	u32 base;
	u32 wrapbase;
};

/* Per-device state for the PCIe bus layer */
struct inff_pciedev_info {
	enum inff_pcie_state state;
	bool in_irq;
	struct pci_dev *pdev;
	const struct inff_pcie_reginfo *reginfo;
	void __iomem *regs;
	void __iomem *tcm;
	u32 ram_base;
	u32 ram_size;
	struct inff_chip *ci;
	u32 coreid;
	struct inff_pcie_shared_info shared;
	u8 hostready;
	bool use_mailbox;
	bool use_d0_inform;
	wait_queue_head_t mbdata_resp_wait;
	bool mbdata_completed;
	bool irq_allocated;
	bool wowl_enabled;
	u8 dma_idx_sz;
	void *idxbuf;
	u32 idxbuf_sz;
	dma_addr_t idxbuf_dmahandle;
	/* indirection so rev-specific index storage (TCM vs host DMA buffer)
	 * can be selected once at setup time
	 */
	u16 (*read_ptr)(struct inff_pciedev_info *devinfo, u32 mem_offset);
	void (*write_ptr)(struct inff_pciedev_info *devinfo, u32 mem_offset,
			  u16 value);
	struct inff_mp_device *settings;
	ulong bar1_size;
#ifdef DEBUG
	u32 console_interval;
	bool console_active;
	struct timer_list timer;
#endif
};

/* One message ring plus the TCM locations of its read/write indices */
struct inff_pcie_ringbuf {
	struct inff_commonring commonring;
	dma_addr_t dma_handle;
	u32 w_idx_addr;
	u32 r_idx_addr;
	struct inff_pciedev_info *devinfo;
	u8 id;
};

/**
 * struct inff_pcie_dhi_ringinfo - dongle/host interface shared ring info
 *
 * @ringmem: dongle memory pointer to ring memory location
 * @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers
 * @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers
 * @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers
 * @d2h_r_idx_ptr: d2h ring read indices dongle memory pointers
 * @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers
 * @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers
 * @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers
 * @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers
 * @max_flowrings: maximum number of tx flow rings supported.
 * @max_submissionrings: maximum number of submission rings(h2d) supported.
 * @max_completionrings: maximum number of completion rings(d2h) supported.
 */
struct inff_pcie_dhi_ringinfo {
	__le32 ringmem;
	__le32 h2d_w_idx_ptr;
	__le32 h2d_r_idx_ptr;
	__le32 d2h_w_idx_ptr;
	__le32 d2h_r_idx_ptr;
	struct msgbuf_buf_addr h2d_w_idx_hostaddr;
	struct msgbuf_buf_addr h2d_r_idx_hostaddr;
	struct msgbuf_buf_addr d2h_w_idx_hostaddr;
	struct msgbuf_buf_addr d2h_r_idx_hostaddr;
	__le16 max_flowrings;
	__le16 max_submissionrings;
	__le16 max_completionrings;
};

/* Rev-dependent register offsets/bits, resolved once at probe */
struct inff_pcie_reginfo {
	u32 intmask;
	u32 mailboxint;
	u32 mailboxmask;
	u32 h2d_mailbox_0;
	u32 h2d_mailbox_1;
	u32 int_d2h_db;
	u32 int_fn0;
};

static const struct inff_pcie_reginfo inff_reginfo_default = {
	.intmask = INFF_PCIE_PCIE2REG_INTMASK,
	.mailboxint = INFF_PCIE_PCIE2REG_MAILBOXINT,
	.mailboxmask = INFF_PCIE_PCIE2REG_MAILBOXMASK,
	.h2d_mailbox_0 = INFF_PCIE_PCIE2REG_H2D_MAILBOX_0,
	.h2d_mailbox_1 = INFF_PCIE_PCIE2REG_H2D_MAILBOX_1,
	.int_d2h_db = INFF_PCIE_MB_INT_D2H_DB,
	.int_fn0 = INFF_PCIE_MB_INT_FN0,
};

static const struct inff_pcie_reginfo inff_reginfo_64 = {
	.intmask = INFF_PCIE_64_PCIE2REG_INTMASK,
	.mailboxint = INFF_PCIE_64_PCIE2REG_MAILBOXINT,
	.mailboxmask = INFF_PCIE_64_PCIE2REG_MAILBOXMASK,
	.h2d_mailbox_0 =
INFF_PCIE_PCIE2REG_H2D_MAILBOX_0, + .h2d_mailbox_1 = INFF_PCIE_PCIE2REG_H2D_MAILBOX_1, + .int_d2h_db = INFF_PCIE_64_MB_INT_D2H_DB, + .int_fn0 = INFF_PCIE_MB_INT_FN0, +}; + +static void inff_pcie_setup(struct device *dev, int ret, + struct inff_fw_request *fwreq); +static void +inff_pcie_fwcon_timer(struct inff_pciedev_info *devinfo, bool active); +static void inff_pcie_debugfs_create(struct device *dev); +static void inff_pcie_bus_console_init(struct inff_pciedev_info *devinfo); +static void inff_pcie_bus_console_read(struct inff_pciedev_info *devinfo, + bool error); +static void +inff_pcie_fwcon_timer(struct inff_pciedev_info *devinfo, bool active); +static void inff_pcie_debugfs_create(struct device *dev); + +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ +DEFINE_RAW_SPINLOCK(pcie_lock); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + +static u32 +inff_pcie_read_reg32(struct inff_pciedev_info *devinfo, u32 reg_offset) +{ + void __iomem *address = devinfo->regs + reg_offset; + + return ioread32(address); +} + +static void +inff_pcie_write_reg32(struct inff_pciedev_info *devinfo, u32 reg_offset, + u32 value) +{ + void __iomem *address = devinfo->regs + reg_offset; + + iowrite32(value, address); +} + +static u8 +inff_pcie_read_tcm8(struct inff_pciedev_info *devinfo, u32 mem_offset) +{ + void __iomem *address = devinfo->tcm + mem_offset; +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + unsigned long flags; + u8 value; + + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - devinfo->bar1_size; + } + value = ioread8(address); + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, 0x0); + raw_spin_unlock_irqrestore(&pcie_lock, flags); + + return value; +#else + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + inff_err(bus, + 
"mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return -EINVAL; + } + + return (ioread8(address)); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ +} + +static u16 +inff_pcie_read_tcm16(struct inff_pciedev_info *devinfo, u32 mem_offset) +{ + void __iomem *address = devinfo->tcm + mem_offset; +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + u16 value; + unsigned long flags; + + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - devinfo->bar1_size; + } + value = ioread16(address); + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, 0x0); + raw_spin_unlock_irqrestore(&pcie_lock, flags); + + return value; +#else + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + inff_err(bus, "mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return -EINVAL; + } + + return (ioread16(address)); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ +} + +static void +inff_pcie_write_tcm16(struct inff_pciedev_info *devinfo, u32 mem_offset, + u16 value) +{ + void __iomem *address = devinfo->tcm + mem_offset; +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + unsigned long flags; + + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - devinfo->bar1_size; + } + + iowrite16(value, address); + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, 0x0); + raw_spin_unlock_irqrestore(&pcie_lock, flags); +#else + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + inff_err(bus, "mem_offset:%d exceeds device size=%ld\n", + mem_offset, 
devinfo->bar1_size);
		return;
	}

	iowrite16(value, address);
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
}

/* Read a ring index from the host-side DMA index buffer. */
static u16
inff_pcie_read_idx(struct inff_pciedev_info *devinfo, u32 mem_offset)
{
	u16 *address = devinfo->idxbuf + mem_offset;

	return (*(address));
}

/* Write a ring index into the host-side DMA index buffer. */
static void
inff_pcie_write_idx(struct inff_pciedev_info *devinfo, u32 mem_offset,
		    u16 value)
{
	u16 *address = devinfo->idxbuf + mem_offset;

	*(address) = value;
}

/* Read a 32-bit value from device TCM at @mem_offset, moving the BAR1
 * window under pcie_lock when the offset lies beyond the mapped window.
 * NOTE(review): on the out-of-range error path -EINVAL is returned through
 * the u32 type and is indistinguishable from valid data — confirm intended.
 */
static u32
inff_pcie_read_tcm32(struct inff_pciedev_info *devinfo, u32 mem_offset)
{
	void __iomem *address = devinfo->tcm + mem_offset;
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
	u32 value;
	unsigned long flags;

	raw_spin_lock_irqsave(&pcie_lock, flags);
	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
		pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW,
				       devinfo->bar1_size);
		address = address - devinfo->bar1_size;
	}
	value = ioread32(address);
	pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, 0x0);
	raw_spin_unlock_irqrestore(&pcie_lock, flags);

	return value;
#else
	struct pci_dev *pdev = devinfo->pdev;
	struct inff_bus *bus = dev_get_drvdata(&pdev->dev);

	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
		inff_err(bus, "mem_offset:%d exceeds device size=%ld\n",
			 mem_offset, devinfo->bar1_size);
		return -EINVAL;
	}

	return (ioread32(address));
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
}

/* Write a 32-bit value to device TCM at @mem_offset. */
static void
inff_pcie_write_tcm32(struct inff_pciedev_info *devinfo, u32 mem_offset,
		      u32 value)
{
	void __iomem *address = devinfo->tcm + mem_offset;
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
	unsigned long flags;

	raw_spin_lock_irqsave(&pcie_lock, flags);
	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
		pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW,
				       devinfo->bar1_size);
		address = address - devinfo->bar1_size;
	}
	iowrite32(value, address);
	pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, 0x0);
	raw_spin_unlock_irqrestore(&pcie_lock, flags);
#else
	struct pci_dev *pdev = devinfo->pdev;
	struct inff_bus *bus = dev_get_drvdata(&pdev->dev);

	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
		inff_err(bus, "mem_offset:%d exceeds device size=%ld\n",
			 mem_offset, devinfo->bar1_size);
		return;
	}

	iowrite32(value, address);
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
}

/* Read a 32-bit value from device RAM (TCM offset by the chip rambase). */
static u32
inff_pcie_read_ram32(struct inff_pciedev_info *devinfo, u32 mem_offset)
{
	void __iomem *address = devinfo->tcm + devinfo->ci->rambase +
				mem_offset;
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
	u32 value;
	unsigned long flags;

	raw_spin_lock_irqsave(&pcie_lock, flags);
	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
		pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW,
				       devinfo->bar1_size);
		address = address - devinfo->bar1_size;
	}
	value = ioread32(address);
	pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, 0x0);
	raw_spin_unlock_irqrestore(&pcie_lock, flags);

	return value;
#else
	struct pci_dev *pdev = devinfo->pdev;
	struct inff_bus *bus = dev_get_drvdata(&pdev->dev);

	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
		inff_err(bus, "mem_offset:%d exceeds device size=%ld\n",
			 mem_offset, devinfo->bar1_size);
		return -EINVAL;
	}

	return (ioread32(address));
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
}

/* Write a 32-bit value to device RAM (TCM offset by the chip rambase). */
static void
inff_pcie_write_ram32(struct inff_pciedev_info *devinfo, u32 mem_offset,
		      u32 value)
{
	void __iomem *address = devinfo->tcm + devinfo->ci->rambase +
				mem_offset;
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
	unsigned long flags;

	raw_spin_lock_irqsave(&pcie_lock, flags);
	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
		pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW,
				       devinfo->bar1_size);
		address = address - devinfo->bar1_size;
	}
	iowrite32(value, address);
	pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, 0x0);
	raw_spin_unlock_irqrestore(&pcie_lock, flags);
#else
	struct pci_dev *pdev = devinfo->pdev;
	struct inff_bus *bus = dev_get_drvdata(&pdev->dev);

	if ((address - devinfo->tcm) >= devinfo->bar1_size) {
		inff_err(bus, "mem_offset:%d exceeds device size=%ld\n",
			 mem_offset, devinfo->bar1_size);
		return;
	}

	iowrite32(value, address);
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
}

/* Copy a host buffer into device TCM, selecting 8/16/32-bit accesses from
 * the mutual alignment of source, destination and length.
 */
static void
inff_pcie_copy_mem_todev(struct inff_pciedev_info *devinfo, u32 mem_offset,
			 void *srcaddr, u32 len)
{
	struct pci_dev *pdev = devinfo->pdev;
#ifndef CONFIG_INFFMAC_PCIE_BARWIN_SZ
	struct inff_bus *bus = dev_get_drvdata(&pdev->dev);
#endif
	void __iomem *address = devinfo->tcm + mem_offset;
	__le32 *src32;
	__le16 *src16;
	u8 *src8;
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
	unsigned long flags;
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */

	if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
		if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
			/* byte-wise copy */
			src8 = (u8 *)srcaddr;
			while (len) {
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
				raw_spin_lock_irqsave(&pcie_lock, flags);
				if ((address - devinfo->tcm) >=
				    devinfo->bar1_size) {
					pci_write_config_dword
						(pdev,
						 INFF_PCIE_BAR1_WINDOW,
						 devinfo->bar1_size);
					address = address -
						  devinfo->bar1_size;
				}
#else
				if ((address - devinfo->tcm) >=
				    devinfo->bar1_size) {
					inff_err(bus,
						 "mem_offset:%d exceeds device size=%ld\n",
						 mem_offset, devinfo->bar1_size);
					return;
				}
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
				iowrite8(*src8, address);
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
				raw_spin_unlock_irqrestore(&pcie_lock, flags);
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
				address++;
				src8++;
				len--;
			}
		} else {
			/* 16-bit copy */
			len = len / 2;
			src16 = (__le16 *)srcaddr;
			while (len) {
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
				raw_spin_lock_irqsave(&pcie_lock, flags);
				if ((address - devinfo->tcm) >=
				    devinfo->bar1_size) {
					pci_write_config_dword
						(pdev,
						 INFF_PCIE_BAR1_WINDOW,
devinfo->bar1_size);
					address = address -
						  devinfo->bar1_size;
				}
#else
				if ((address - devinfo->tcm) >=
				    devinfo->bar1_size) {
					inff_err(bus,
						 "mem_offset:%d exceeds device size=%ld\n",
						 mem_offset, devinfo->bar1_size);
					return;
				}
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
				iowrite16(le16_to_cpu(*src16), address);
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
				raw_spin_unlock_irqrestore(&pcie_lock, flags);
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
				address += 2;
				src16++;
				len--;
			}
		}
	} else {
		/* 32-bit copy */
		len = len / 4;
		src32 = (__le32 *)srcaddr;
		while (len) {
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
			raw_spin_lock_irqsave(&pcie_lock, flags);
			if ((address - devinfo->tcm) >=
			    devinfo->bar1_size) {
				pci_write_config_dword
					(pdev,
					 INFF_PCIE_BAR1_WINDOW,
					 devinfo->bar1_size);
				address = address - devinfo->bar1_size;
			}
#else
			if ((address - devinfo->tcm) >=
			    devinfo->bar1_size) {
				inff_err(bus,
					 "mem_offset:%d exceeds device size=%ld\n",
					 mem_offset, devinfo->bar1_size);
				return;
			}
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
			iowrite32(le32_to_cpu(*src32), address);
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
			raw_spin_unlock_irqrestore(&pcie_lock, flags);
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
			address += 4;
			src32++;
			len--;
		}
	}
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
	pci_write_config_dword(pdev, INFF_PCIE_BAR1_WINDOW, 0x0);
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
}

/* Copy from device TCM into a host buffer; mirror image of
 * inff_pcie_copy_mem_todev (same alignment-based width selection).
 */
static void
inff_pcie_copy_dev_tomem(struct inff_pciedev_info *devinfo, u32 mem_offset,
			 void *dstaddr, u32 len)
{
	struct pci_dev *pdev = devinfo->pdev;
#ifndef CONFIG_INFFMAC_PCIE_BARWIN_SZ
	struct inff_bus *bus = dev_get_drvdata(&pdev->dev);
#endif
	void __iomem *address = devinfo->tcm + mem_offset;
	__le32 *dst32;
	__le16 *dst16;
	u8 *dst8;
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
	unsigned long flags;
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */

	if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) {
		if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) {
			/* byte-wise copy */
			dst8 = (u8 *)dstaddr;
			while (len) {
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
				raw_spin_lock_irqsave(&pcie_lock, flags);
				if ((address - devinfo->tcm) >=
				    devinfo->bar1_size) {
					pci_write_config_dword
						(pdev,
						 INFF_PCIE_BAR1_WINDOW,
						 devinfo->bar1_size);
					address = address -
						  devinfo->bar1_size;
				}
#else
				if ((address - devinfo->tcm) >=
				    devinfo->bar1_size) {
					inff_err(bus,
						 "mem_offset:%d exceeds device size=%ld\n",
						 mem_offset, devinfo->bar1_size);
					return;
				}
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
				*dst8 = ioread8(address);
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
				raw_spin_unlock_irqrestore(&pcie_lock, flags);
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
				address++;
				dst8++;
				len--;
			}
		} else {
			/* 16-bit copy */
			len = len / 2;
			dst16 = (__le16 *)dstaddr;
			while (len) {
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
				raw_spin_lock_irqsave(&pcie_lock, flags);
				if ((address - devinfo->tcm) >=
				    devinfo->bar1_size) {
					pci_write_config_dword
						(pdev,
						 INFF_PCIE_BAR1_WINDOW,
						 devinfo->bar1_size);
					address = address -
						  devinfo->bar1_size;
				}
#else
				if ((address - devinfo->tcm) >=
				    devinfo->bar1_size) {
					inff_err(bus,
						 "mem_offset:%d exceeds device size=%ld\n",
						 mem_offset, devinfo->bar1_size);
					return;
				}
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
				*dst16 = cpu_to_le16(ioread16(address));
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
				raw_spin_unlock_irqrestore(&pcie_lock, flags);
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
				address += 2;
				dst16++;
				len--;
			}
		}
	} else {
		/* 32-bit copy */
		len = len / 4;
		dst32 = (__le32 *)dstaddr;
		while (len) {
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
			raw_spin_lock_irqsave(&pcie_lock, flags);
			if ((address - devinfo->tcm) >=
			    devinfo->bar1_size) {
				pci_write_config_dword
					(pdev,
					 INFF_PCIE_BAR1_WINDOW,
					 devinfo->bar1_size);
				address = address - devinfo->bar1_size;
			}
#else
			if ((address - devinfo->tcm) >=
			    devinfo->bar1_size) {
				inff_err(bus,
					 "mem_offset:%d exceeds device size=%ld\n",
					 mem_offset, devinfo->bar1_size);
				return;
			}
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
			*dst32 = cpu_to_le32(ioread32(address));
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
			raw_spin_unlock_irqrestore(&pcie_lock, flags);
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
			address += 4;
			dst32++;
			len--;
		}
	}
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
	pci_write_config_dword(pdev, INFF_PCIE_BAR1_WINDOW, 0x0);
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
}

/* Write a chipcommon register via BAR0 after selecting that core */
#define WRITECC32(devinfo, reg, value)	inff_pcie_write_reg32(devinfo, \
						CHIPCREGOFFS(reg), value)

/* Point the BAR0 window at @coreid's register space; re-checks the write
 * because the first config write may not stick on some devices.
 */
static void
inff_pcie_select_core(struct inff_pciedev_info *devinfo, u16 coreid)
{
	const struct pci_dev *pdev = devinfo->pdev;
	struct inff_bus *bus = dev_get_drvdata(&pdev->dev);
	struct inff_core *core;
	u32 bar0_win;

	core = inff_chip_get_core(devinfo->ci, coreid);
	if (core) {
		bar0_win = core->base;
		pci_write_config_dword(pdev, INFF_PCIE_BAR0_WINDOW, bar0_win);
		if (pci_read_config_dword(pdev, INFF_PCIE_BAR0_WINDOW,
					  &bar0_win) == 0) {
			if (bar0_win != core->base) {
				bar0_win = core->base;
				pci_write_config_dword(pdev,
						       INFF_PCIE_BAR0_WINDOW,
						       bar0_win);
			}
		}
	} else {
		inff_err(bus, "Unsupported core selected %x\n", coreid);
	}
}

/* Watchdog-reset the device, preserving ASPM state and (on old PCIe core
 * revs) rewriting a set of config registers afterwards.
 */
static void inff_pcie_reset_device(struct inff_pciedev_info *devinfo)
{
	struct inff_core *core;
	/* config registers to read-modify-write back after the reset */
	static const u16 cfg_offset[] = {
		INFF_PCIE_CFGREG_STATUS_CMD,
		INFF_PCIE_CFGREG_PM_CSR,
		INFF_PCIE_CFGREG_MSI_CAP,
		INFF_PCIE_CFGREG_MSI_ADDR_L,
		INFF_PCIE_CFGREG_MSI_ADDR_H,
		INFF_PCIE_CFGREG_MSI_DATA,
#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ
		INFF_PCIE_BAR1_WINDOW,
#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */
		INFF_PCIE_CFGREG_LINK_STATUS_CTRL2,
		INFF_PCIE_CFGREG_RBAR_CTRL,
		INFF_PCIE_CFGREG_PML1_SUB_CTRL1,
		INFF_PCIE_CFGREG_REG_BAR2_CONFIG,
		INFF_PCIE_CFGREG_REG_BAR3_CONFIG
	};
	u32 i;
	u32 val;
	u32 lsc;

	if (!devinfo->ci)
		return;

	/* Disable ASPM */
	inff_pcie_select_core(devinfo, INF_CORE_PCIE2);
pci_read_config_dword(devinfo->pdev, INFF_PCIE_REG_LINK_STATUS_CTRL,
			      &lsc);
	val = lsc & (~INFF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB);
	pci_write_config_dword(devinfo->pdev, INFF_PCIE_REG_LINK_STATUS_CTRL,
			       val);

	/* Watchdog reset */
	devinfo->ci->blhs->init(devinfo->ci);
	inff_pcie_select_core(devinfo, INF_CORE_CHIPCOMMON);
	WRITECC32(devinfo, watchdog, 4);
	msleep(100);
	if (devinfo->ci->blhs->post_wdreset(devinfo->ci))
		return;

	/* Restore ASPM */
	inff_pcie_select_core(devinfo, INF_CORE_PCIE2);
	pci_write_config_dword(devinfo->pdev, INFF_PCIE_REG_LINK_STATUS_CTRL,
			       lsc);

	core = inff_chip_get_core(devinfo->ci, INF_CORE_PCIE2);
	if (core->rev <= 13) {
		/* old core revs: rewrite saved config regs via the indirect
		 * CONFIGADDR/CONFIGDATA pair
		 */
		for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
			inff_pcie_write_reg32(devinfo,
					      INFF_PCIE_PCIE2REG_CONFIGADDR,
					      cfg_offset[i]);
			val = inff_pcie_read_reg32(devinfo,
						   INFF_PCIE_PCIE2REG_CONFIGDATA);
			inff_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
				 cfg_offset[i], val);
			inff_pcie_write_reg32(devinfo,
					      INFF_PCIE_PCIE2REG_CONFIGDATA,
					      val);
		}
	}
}

/* One-time attach fixups: rewrite the BAR1 config and enable wakeup. */
static void inff_pcie_attach(struct inff_pciedev_info *devinfo)
{
	u32 config;

	/* BAR1 window may not be sized properly */
	inff_pcie_select_core(devinfo, INF_CORE_PCIE2);
	inff_pcie_write_reg32(devinfo, INFF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
	config = inff_pcie_read_reg32(devinfo, INFF_PCIE_PCIE2REG_CONFIGDATA);
	inff_pcie_write_reg32(devinfo, INFF_PCIE_PCIE2REG_CONFIGDATA, config);

	device_wakeup_enable(&devinfo->pdev->dev);
}

/* Poll the last word of device RAM until firmware publishes the shared
 * structure address (anything other than 0 or the NVRAM checksum written
 * there by the host); validate it and init the firmware console.
 */
static int inff_pcie_bus_readshared(struct inff_pciedev_info *devinfo,
				    u32 nvram_csm)
{
	struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
	u32 loop_counter;
	u32 addr_le;
	u32 addr = 0;

	loop_counter = INFF_PCIE_READ_SHARED_TIMEOUT / 50;
	while ((addr == 0 || addr == nvram_csm) && (loop_counter)) {
		msleep(50);
		addr_le = inff_pcie_read_ram32(devinfo,
					       devinfo->ci->ramsize - 4);
		addr = le32_to_cpu(addr_le);
		loop_counter--;
	}
	if (addr == 0 || addr == nvram_csm || addr < devinfo->ci->rambase ||
	    addr >= devinfo->ci->rambase + devinfo->ci->ramsize) {
		inff_err(bus, "Invalid shared RAM address 0x%08x\n", addr);
		return -ENODEV;
	}
	devinfo->shared.tcm_base_address = addr;
	inff_dbg(PCIE, "Shared RAM addr: 0x%08x\n", addr);

	inff_pcie_bus_console_init(devinfo);
	return 0;
}

/* Prepare the bootloader handshake for firmware download. */
static int inff_pcie_enter_download_state(struct inff_pciedev_info *devinfo)
{
	struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
	int err = 0;

	err = devinfo->ci->blhs->prep_fwdl(devinfo->ci);
	if (err) {
		inff_err(bus, "FW download preparation failed");
		return err;
	}

	if (!inff_pcie_bus_readshared(devinfo, 0))
		inff_pcie_bus_console_read(devinfo, false);

	return err;
}

/* Finish the bootloader handshake after NVRAM download. */
static int inff_pcie_exit_download_state(struct inff_pciedev_info *devinfo,
					 u32 resetintr)
{
	inff_pcie_bus_console_read(devinfo, false);
	devinfo->ci->blhs->post_nvramdl(devinfo->ci);

	return 0;
}

/* Send host-to-device mailbox data, either through the msgbuf protocol
 * (shared version >= 6 without legacy mailbox) or via the TCM mailbox word
 * plus a sideband-mailbox config write.
 */
static int
inff_pcie_send_mb_data(struct inff_pciedev_info *devinfo, u32 htod_mb_data)
{
	struct inff_pcie_shared_info *shared;
	struct inff_bus *bus;
	int err;
	struct inff_core *core;
	u32 addr;
	u32 cur_htod_mb_data;
	u32 i;

	shared = &devinfo->shared;
	bus = dev_get_drvdata(&devinfo->pdev->dev);
	if (shared->version >= INFF_PCIE_SHARED_VERSION_6 &&
	    !devinfo->use_mailbox) {
		err = inff_msgbuf_tx_mbdata(bus->drvr, htod_mb_data);
		if (err) {
			inff_err(bus, "sendimg mbdata failed err=%d\n", err);
			return err;
		}
	} else {
		addr = shared->htod_mb_data_addr;
		cur_htod_mb_data = inff_pcie_read_tcm32(devinfo, addr);

		if (cur_htod_mb_data != 0)
			inff_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
				 cur_htod_mb_data);

		/* wait (up to ~1s) for the device to consume pending data */
		i = 0;
		while (cur_htod_mb_data != 0) {
			usleep_range(10000, 10001);
			i++;
			if (i > 100)
				return -EIO;
			cur_htod_mb_data = inff_pcie_read_tcm32(devinfo, addr);
		}

		inff_pcie_write_tcm32(devinfo, addr, htod_mb_data);
		pci_write_config_dword(devinfo->pdev, INFF_PCIE_REG_SBMBX, 1);

		/* Send mailbox interrupt twice as a hardware workaround */
		core = inff_chip_get_core(devinfo->ci, INF_CORE_PCIE2);
		if (core->rev <= 13)
			pci_write_config_dword(devinfo->pdev,
					       INFF_PCIE_REG_SBMBX, 1);
	}
	return 0;
}

/* Fetch and clear the device-to-host mailbox word in TCM. */
static u32 inff_pcie_read_mb_data(struct inff_pciedev_info *devinfo)
{
	struct inff_pcie_shared_info *shared;
	u32 addr;
	u32 dtoh_mb_data;

	shared = &devinfo->shared;
	addr = shared->dtoh_mb_data_addr;
	dtoh_mb_data = inff_pcie_read_tcm32(devinfo, addr);
	inff_pcie_write_tcm32(devinfo, addr, 0);
	return dtoh_mb_data;
}

/* Dispatch device-to-host mailbox events: deep-sleep request/exit,
 * D3 ack (wakes the waiter in the suspend path) and firmware halt.
 */
void inff_pcie_handle_mb_data(struct inff_bus *bus_if, u32 d2h_mb_data)
{
	struct inff_pciedev *buspub = bus_if->bus_priv.pcie;
	struct inff_pciedev_info *devinfo = buspub->devinfo;

	inff_dbg(INFO, "D2H_MB_DATA: 0x%04x\n", d2h_mb_data);

	if (d2h_mb_data & INFF_D2H_DEV_DS_ENTER_REQ) {
		inff_dbg(INFO, "D2H_MB_DATA: DEEP SLEEP REQ\n");
		inff_pcie_send_mb_data(devinfo, INFF_H2D_HOST_DS_ACK);
		inff_dbg(INFO, "D2H_MB_DATA: sent DEEP SLEEP ACK\n");
	}

	if (d2h_mb_data & INFF_D2H_DEV_DS_EXIT_NOTE)
		inff_dbg(INFO, "D2H_MB_DATA: DEEP SLEEP EXIT\n");
	if (d2h_mb_data & INFF_D2H_DEV_D3_ACK) {
		inff_dbg(INFO, "D2H_MB_DATA: D3 ACK\n");
		devinfo->mbdata_completed = true;
		wake_up(&devinfo->mbdata_resp_wait);
	}

	if (d2h_mb_data & INFF_D2H_DEV_FWHALT) {
		inff_dbg(INFO, "D2H_MB_DATA: FW HALT\n");
		inff_fw_crashed(&devinfo->pdev->dev);
	}
}

/* Locate the firmware console in TCM from the shared structure. */
static void inff_pcie_bus_console_init(struct inff_pciedev_info *devinfo)
{
	struct inff_pcie_shared_info *shared;
	struct inff_pcie_console *console;
	u32 buf_addr;
	u32 addr;

	shared = &devinfo->shared;
	console = &shared->console;
	addr = shared->tcm_base_address + INFF_SHARED_CONSOLE_ADDR_OFFSET;
	console->base_addr = inff_pcie_read_tcm32(devinfo, addr);

	addr = console->base_addr + INFF_CONSOLE_BUFADDR_OFFSET;
	buf_addr = inff_pcie_read_tcm32(devinfo, addr);
	/* reset console index when buffer address is
updated */
	if (console->buf_addr != buf_addr) {
		console->buf_addr = buf_addr;
		console->read_idx = 0;
	}
	addr = console->base_addr + INFF_CONSOLE_BUFSIZE_OFFSET;
	console->bufsize = inff_pcie_read_tcm32(devinfo, addr);

	inff_dbg(FWCON, "Console: base %x, buf %x, size %d\n",
		 console->base_addr, console->buf_addr, console->bufsize);
}

/**
 * inff_pcie_bus_console_read - reads firmware messages
 *
 * @devinfo: pointer to the device data structure
 * @error: specifies if error has occurred (prints messages unconditionally)
 */
static void inff_pcie_bus_console_read(struct inff_pciedev_info *devinfo,
				       bool error)
{
	struct pci_dev *pdev = devinfo->pdev;
	struct inff_bus *bus = dev_get_drvdata(&pdev->dev);
	struct inff_pcie_console *console;
	u32 addr;
	u8 ch;
	u32 newidx;

	if (!error && !INFF_FWCON_ON())
		return;

	console = &devinfo->shared.console;
	if (!console->base_addr)
		return;
	addr = console->base_addr + INFF_CONSOLE_WRITEIDX_OFFSET;
	newidx = inff_pcie_read_tcm32(devinfo, addr);
	/* drain the circular console buffer up to the firmware write index,
	 * accumulating characters into log_str and emitting per line
	 */
	while (newidx != console->read_idx) {
		addr = console->buf_addr + console->read_idx;
		ch = inff_pcie_read_tcm8(devinfo, addr);
		console->read_idx++;
		if (console->read_idx == console->bufsize)
			console->read_idx = 0;
		if (ch == '\r')
			continue;
		console->log_str[console->log_idx] = ch;
		console->log_idx++;
		/* force a line break before log_str overflows */
		if ((ch != '\n') &&
		    (console->log_idx == (sizeof(console->log_str) - 2))) {
			ch = '\n';
			console->log_str[console->log_idx] = ch;
			console->log_idx++;
		}
		if (ch == '\n') {
			console->log_str[console->log_idx] = 0;
			if (error)
				__inff_err(bus, __func__, "CONSOLE: %s",
					   console->log_str);
			else
				pr_debug("CONSOLE: %s", console->log_str);
			console->log_idx = 0;
		}
	}
}

/* Mask all device interrupts. */
static void inff_pcie_intr_disable(struct inff_pciedev_info *devinfo)
{
	inff_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxmask, 0);
}

/* Bus-layer wrapper around inff_pcie_intr_disable(). */
static void inff_pcie_interrupt_disable(struct device *dev)
{
	struct inff_bus *bus_if = dev_get_drvdata(dev);
	struct inff_pciedev *pcie_bus_dev = bus_if->bus_priv.pcie;
	struct inff_pciedev_info *devinfo = pcie_bus_dev->devinfo;

	inff_pcie_intr_disable(devinfo);
}

/* Unmask D2H doorbell and function-0 mailbox interrupts. */
static void inff_pcie_intr_enable(struct inff_pciedev_info *devinfo)
{
	inff_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxmask,
			      devinfo->reginfo->int_d2h_db |
			      devinfo->reginfo->int_fn0);
}

/* Bus-layer wrapper around inff_pcie_intr_enable(). */
static void inff_pcie_interrupt_enable(struct device *dev)
{
	struct inff_bus *bus_if = dev_get_drvdata(dev);
	struct inff_pciedev *pcie_bus_dev = bus_if->bus_priv.pcie;
	struct inff_pciedev_info *devinfo = pcie_bus_dev->devinfo;

	inff_pcie_intr_enable(devinfo);
}

/* Signal host-ready via doorbell 1 when the firmware asked for it. */
static void inff_pcie_hostready(struct inff_pciedev_info *devinfo)
{
	if (devinfo->shared.flags & INFF_PCIE_SHARED_HOSTRDY_DB1)
		inff_pcie_write_reg32(devinfo,
				      devinfo->reginfo->h2d_mailbox_1, 1);
}

/* Hard-IRQ handler: if this device raised a mailbox interrupt, mask
 * further interrupts and defer to the threaded handler.
 */
static irqreturn_t inff_pcie_quick_check_isr(int irq, void *arg)
{
	struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)arg;

	if (inff_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint)) {
		inff_pcie_intr_disable(devinfo);
		inff_dbg(PCIE, "Enter\n");
		return IRQ_WAKE_THREAD;
	}
	return IRQ_NONE;
}

/* Threaded IRQ handler: ack the interrupt, process mailbox data and/or
 * rx doorbells, drain the console and re-enable interrupts if still up.
 */
static irqreturn_t inff_pcie_isr_thread(int irq, void *arg)
{
	struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)arg;
	u32 status;
	u32 d2h_mbdata;
	struct pci_dev *pdev = devinfo->pdev;
	struct inff_bus *bus = dev_get_drvdata(&pdev->dev);

	devinfo->in_irq = true;
	status = inff_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint);
	inff_dbg(PCIE, "Enter %x\n", status);
	if (status) {
		inff_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint,
				      status);
		if (status & devinfo->reginfo->int_fn0) {
			d2h_mbdata = inff_pcie_read_mb_data(devinfo);
			inff_pcie_handle_mb_data(bus, d2h_mbdata);
		}
		if (status & devinfo->reginfo->int_d2h_db) {
			if (devinfo->state == INFFMAC_PCIE_STATE_UP)
				inff_proto_msgbuf_rx_trigger(&devinfo->pdev->dev);
		}
	}
	inff_pcie_bus_console_read(devinfo, false);
	if (devinfo->state == INFFMAC_PCIE_STATE_UP)
		inff_pcie_intr_enable(devinfo);
	devinfo->in_irq = false;
	return IRQ_HANDLED;
}

/* Enable MSI and install the two-stage threaded interrupt handler. */
static int inff_pcie_request_irq(struct inff_pciedev_info *devinfo)
{
	struct pci_dev *pdev = devinfo->pdev;
	struct inff_bus *bus = dev_get_drvdata(&pdev->dev);

	inff_pcie_intr_disable(devinfo);

	inff_dbg(PCIE, "Enter\n");

	pci_enable_msi(pdev);
	if (request_threaded_irq(pdev->irq, inff_pcie_quick_check_isr,
				 inff_pcie_isr_thread, IRQF_SHARED,
				 "inff_pcie_intr", devinfo)) {
		pci_disable_msi(pdev);
		inff_err(bus, "Failed to request IRQ %d\n", pdev->irq);
		return -EIO;
	}
	devinfo->irq_allocated = true;
	return 0;
}

/* Tear down the interrupt: mask, free, wait out any in-flight threaded
 * handler (bounded poll on in_irq) and clear residual status.
 */
static void inff_pcie_release_irq(struct inff_pciedev_info *devinfo)
{
	struct pci_dev *pdev = devinfo->pdev;
	struct inff_bus *bus = dev_get_drvdata(&pdev->dev);
	u32 status;
	u32 count;

	if (!devinfo->irq_allocated)
		return;

	inff_pcie_intr_disable(devinfo);
	free_irq(pdev->irq, devinfo);
	pci_disable_msi(pdev);

	msleep(50);
	count = 0;
	while ((devinfo->in_irq) && (count < 20)) {
		msleep(50);
		count++;
	}
	if (devinfo->in_irq)
		inff_err(bus, "Still in IRQ (processing) !!!\n");

	status = inff_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint);
	inff_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint, status);

	devinfo->irq_allocated = false;
}

/* commonring callback: publish the ring's read pointer to the device. */
static int inff_pcie_ring_mb_write_rptr(void *ctx)
{
	struct inff_pcie_ringbuf *ring = (struct inff_pcie_ringbuf *)ctx;
	struct inff_pciedev_info *devinfo = ring->devinfo;
	struct inff_commonring *commonring = &ring->commonring;

	if (devinfo->state != INFFMAC_PCIE_STATE_UP)
		return -EIO;

	inff_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
		 commonring->w_ptr, ring->id);

	devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);

	return 0;
}

/* commonring callback: publish the ring's write pointer to the device. */
static int inff_pcie_ring_mb_write_wptr(void *ctx)
{
	struct inff_pcie_ringbuf *ring =
(struct inff_pcie_ringbuf *)ctx; + struct inff_pciedev_info *devinfo = ring->devinfo; + struct inff_commonring *commonring = &ring->commonring; + + if (devinfo->state != INFFMAC_PCIE_STATE_UP) + return -EIO; + + inff_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr, + commonring->r_ptr, ring->id); + + devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr); + + return 0; +} + +static int inff_pcie_ring_mb_ring_bell(void *ctx) +{ + struct inff_pcie_ringbuf *ring = (struct inff_pcie_ringbuf *)ctx; + struct inff_pciedev_info *devinfo = ring->devinfo; + + if (devinfo->state != INFFMAC_PCIE_STATE_UP) + return -EIO; + + inff_dbg(PCIE, "RING !\n"); + /* Any arbitrary value will do, lets use 1 */ + inff_pcie_write_reg32(devinfo, devinfo->reginfo->h2d_mailbox_0, 1); + + return 0; +} + +static int inff_pcie_ring_mb_update_rptr(void *ctx) +{ + struct inff_pcie_ringbuf *ring = (struct inff_pcie_ringbuf *)ctx; + struct inff_pciedev_info *devinfo = ring->devinfo; + struct inff_commonring *commonring = &ring->commonring; + + if (devinfo->state != INFFMAC_PCIE_STATE_UP) + return -EIO; + + commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr); + + inff_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr, + commonring->w_ptr, ring->id); + + return 0; +} + +static int inff_pcie_ring_mb_update_wptr(void *ctx) +{ + struct inff_pcie_ringbuf *ring = (struct inff_pcie_ringbuf *)ctx; + struct inff_pciedev_info *devinfo = ring->devinfo; + struct inff_commonring *commonring = &ring->commonring; + + if (devinfo->state != INFFMAC_PCIE_STATE_UP) + return -EIO; + + commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr); + + inff_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr, + commonring->r_ptr, ring->id); + + return 0; +} + +static void * +inff_pcie_init_dmabuffer_for_device(struct inff_pciedev_info *devinfo, + u32 size, u32 tcm_dma_phys_addr, + dma_addr_t *dma_handle) +{ + void *ring; + u64 address; + + ring = 
dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle, + GFP_KERNEL); + if (!ring) + return NULL; + + address = (u64)*dma_handle; + inff_pcie_write_tcm32(devinfo, tcm_dma_phys_addr, + address & 0xffffffff); + inff_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32); + + return ring; +} + +static struct inff_pcie_ringbuf * +inff_pcie_alloc_dma_and_ring(struct inff_pciedev_info *devinfo, u32 ring_id, + u32 tcm_ring_phys_addr) +{ + struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); + void *dma_buf; + dma_addr_t dma_handle; + struct inff_pcie_ringbuf *ring; + u32 size; + u32 addr; + u32 ring_max_item, ring_item_size; + u32 commonring_depth = bus->drvr->settings->commonring_depth[ring_id]; + + if (devinfo->shared.version < INFF_PCIE_SHARED_VERSION_7) + ring_item_size = inff_ring_itemsize_pre_v7[ring_id]; + else + ring_item_size = inff_ring_itemsize[ring_id]; + + if (commonring_depth >= INFF_RING_MAX_ITEM_LOWER_LIMIT && + commonring_depth <= INFF_RING_MAX_ITEM_UPPER_LIMIT) + ring_max_item = commonring_depth; + else + ring_max_item = inff_ring_max_item[ring_id]; + + size = ring_max_item * ring_item_size; + dma_buf = inff_pcie_init_dmabuffer_for_device(devinfo, size, + tcm_ring_phys_addr + + INFF_RING_MEM_BASE_ADDR_OFFSET, + &dma_handle); + if (!dma_buf) + return NULL; + + addr = tcm_ring_phys_addr + INFF_RING_MAX_ITEM_OFFSET; + inff_pcie_write_tcm16(devinfo, addr, ring_max_item); + addr = tcm_ring_phys_addr + INFF_RING_LEN_ITEMS_OFFSET; + inff_pcie_write_tcm16(devinfo, addr, ring_item_size); + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) { + dma_free_coherent(&devinfo->pdev->dev, size, dma_buf, + dma_handle); + return NULL; + } + inff_commonring_config(&ring->commonring, ring_max_item, + ring_item_size, dma_buf); + ring->dma_handle = dma_handle; + ring->devinfo = devinfo; + inff_commonring_register_cb(&ring->commonring, + inff_pcie_ring_mb_ring_bell, + inff_pcie_ring_mb_update_rptr, + inff_pcie_ring_mb_update_wptr, + 
inff_pcie_ring_mb_write_rptr, + inff_pcie_ring_mb_write_wptr, ring); + + return ring; +} + +static void inff_pcie_release_ringbuffer(struct device *dev, + struct inff_pcie_ringbuf *ring) +{ + void *dma_buf; + u32 size; + + if (!ring) + return; + + dma_buf = ring->commonring.buf_addr; + if (dma_buf) { + size = ring->commonring.depth * ring->commonring.item_len; + dma_free_coherent(dev, size, dma_buf, ring->dma_handle); + } + kfree(ring); +} + +static void inff_pcie_release_ringbuffers(struct inff_pciedev_info *devinfo) +{ + u32 i; + + for (i = 0; i < INFF_NROF_COMMON_MSGRINGS; i++) { + inff_pcie_release_ringbuffer(&devinfo->pdev->dev, + devinfo->shared.commonrings[i]); + devinfo->shared.commonrings[i] = NULL; + } + kfree(devinfo->shared.flowrings); + devinfo->shared.flowrings = NULL; + if (devinfo->idxbuf) { + dma_free_coherent(&devinfo->pdev->dev, + devinfo->idxbuf_sz, + devinfo->idxbuf, + devinfo->idxbuf_dmahandle); + devinfo->idxbuf = NULL; + } +} + +static int inff_pcie_init_ringbuffers(struct inff_pciedev_info *devinfo) +{ + struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); + struct inff_pcie_ringbuf *ring; + struct inff_pcie_ringbuf *rings; + u32 d2h_w_idx_ptr; + u32 d2h_r_idx_ptr; + u32 h2d_w_idx_ptr; + u32 h2d_r_idx_ptr; + u32 ring_mem_ptr; + u32 i; + u64 address; + u32 bufsz; + u8 idx_offset; + struct inff_pcie_dhi_ringinfo ringinfo; + u16 max_flowrings; + u16 max_submissionrings; + u16 max_completionrings; +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + inff_pcie_copy_dev_tomem(devinfo, devinfo->shared.ring_info_addr, + &ringinfo, sizeof(ringinfo)); +#else + memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr, + sizeof(ringinfo)); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + + if (devinfo->shared.version >= 6) { + max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings); + max_flowrings = le16_to_cpu(ringinfo.max_flowrings); + max_completionrings = le16_to_cpu(ringinfo.max_completionrings); + } else { + max_submissionrings = 
le16_to_cpu(ringinfo.max_flowrings); + max_flowrings = max_submissionrings - + INFF_NROF_H2D_COMMON_MSGRINGS; + max_completionrings = INFF_NROF_D2H_COMMON_MSGRINGS; + } + if (max_flowrings > 256) { + inff_err(bus, "invalid max_flowrings(%d)\n", max_flowrings); + return -EIO; + } + + if (devinfo->dma_idx_sz != 0) { + bufsz = (max_submissionrings + max_completionrings) * + devinfo->dma_idx_sz * 2; + devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz, + &devinfo->idxbuf_dmahandle, + GFP_KERNEL); + if (!devinfo->idxbuf) + devinfo->dma_idx_sz = 0; + } + + if (devinfo->dma_idx_sz == 0) { + d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr); + d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr); + h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr); + h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr); + idx_offset = sizeof(u32); + devinfo->write_ptr = inff_pcie_write_tcm16; + devinfo->read_ptr = inff_pcie_read_tcm16; + inff_dbg(PCIE, "Using TCM indices\n"); + } else { + memset(devinfo->idxbuf, 0, bufsz); + devinfo->idxbuf_sz = bufsz; + idx_offset = devinfo->dma_idx_sz; + devinfo->write_ptr = inff_pcie_write_idx; + devinfo->read_ptr = inff_pcie_read_idx; + + h2d_w_idx_ptr = 0; + address = (u64)devinfo->idxbuf_dmahandle; + ringinfo.h2d_w_idx_hostaddr.low_addr = + cpu_to_le32(address & 0xffffffff); + ringinfo.h2d_w_idx_hostaddr.high_addr = + cpu_to_le32(address >> 32); + + h2d_r_idx_ptr = h2d_w_idx_ptr + + max_submissionrings * idx_offset; + address += max_submissionrings * idx_offset; + ringinfo.h2d_r_idx_hostaddr.low_addr = + cpu_to_le32(address & 0xffffffff); + ringinfo.h2d_r_idx_hostaddr.high_addr = + cpu_to_le32(address >> 32); + + d2h_w_idx_ptr = h2d_r_idx_ptr + + max_submissionrings * idx_offset; + address += max_submissionrings * idx_offset; + ringinfo.d2h_w_idx_hostaddr.low_addr = + cpu_to_le32(address & 0xffffffff); + ringinfo.d2h_w_idx_hostaddr.high_addr = + cpu_to_le32(address >> 32); + + d2h_r_idx_ptr = d2h_w_idx_ptr + + max_completionrings * 
idx_offset; + address += max_completionrings * idx_offset; + ringinfo.d2h_r_idx_hostaddr.low_addr = + cpu_to_le32(address & 0xffffffff); + ringinfo.d2h_r_idx_hostaddr.high_addr = + cpu_to_le32(address >> 32); + +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + inff_pcie_copy_mem_todev(devinfo, + devinfo->shared.ring_info_addr, + &ringinfo, sizeof(ringinfo)); +#else + memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr, + &ringinfo, sizeof(ringinfo)); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + inff_dbg(PCIE, "Using host memory indices\n"); + } + + ring_mem_ptr = le32_to_cpu(ringinfo.ringmem); + + for (i = 0; i < INFF_NROF_H2D_COMMON_MSGRINGS; i++) { + ring = inff_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr); + if (!ring) + goto fail; + ring->w_idx_addr = h2d_w_idx_ptr; + ring->r_idx_addr = h2d_r_idx_ptr; + ring->id = i; + devinfo->shared.commonrings[i] = ring; + + h2d_w_idx_ptr += idx_offset; + h2d_r_idx_ptr += idx_offset; + ring_mem_ptr += INFF_RING_MEM_SZ; + } + + for (i = INFF_NROF_H2D_COMMON_MSGRINGS; + i < INFF_NROF_COMMON_MSGRINGS; i++) { + ring = inff_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr); + if (!ring) + goto fail; + ring->w_idx_addr = d2h_w_idx_ptr; + ring->r_idx_addr = d2h_r_idx_ptr; + ring->id = i; + devinfo->shared.commonrings[i] = ring; + + d2h_w_idx_ptr += idx_offset; + d2h_r_idx_ptr += idx_offset; + ring_mem_ptr += INFF_RING_MEM_SZ; + } + + devinfo->shared.max_flowrings = max_flowrings; + devinfo->shared.max_submissionrings = max_submissionrings; + devinfo->shared.max_completionrings = max_completionrings; + rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL); + if (!rings) + goto fail; + + inff_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings); + + for (i = 0; i < max_flowrings; i++) { + ring = &rings[i]; + ring->devinfo = devinfo; + ring->id = i + INFF_H2D_MSGRING_FLOWRING_IDSTART; + inff_commonring_register_cb(&ring->commonring, + inff_pcie_ring_mb_ring_bell, + inff_pcie_ring_mb_update_rptr, + inff_pcie_ring_mb_update_wptr, + 
inff_pcie_ring_mb_write_rptr, + inff_pcie_ring_mb_write_wptr, + ring); + ring->w_idx_addr = h2d_w_idx_ptr; + ring->r_idx_addr = h2d_r_idx_ptr; + h2d_w_idx_ptr += idx_offset; + h2d_r_idx_ptr += idx_offset; + } + devinfo->shared.flowrings = rings; + + return 0; + +fail: + inff_err(bus, "Allocating ring buffers failed\n"); + inff_pcie_release_ringbuffers(devinfo); + return -ENOMEM; +} + +static void +inff_pcie_release_scratchbuffers(struct inff_pciedev_info *devinfo) +{ + if (devinfo->shared.scratch) + dma_free_coherent(&devinfo->pdev->dev, + INFF_DMA_D2H_SCRATCH_BUF_LEN, + devinfo->shared.scratch, + devinfo->shared.scratch_dmahandle); + if (devinfo->shared.ringupd) + dma_free_coherent(&devinfo->pdev->dev, + INFF_DMA_D2H_RINGUPD_BUF_LEN, + devinfo->shared.ringupd, + devinfo->shared.ringupd_dmahandle); +} + +static int inff_pcie_init_scratchbuffers(struct inff_pciedev_info *devinfo) +{ + struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); + u64 address; + u32 addr; + + devinfo->shared.scratch = + dma_alloc_coherent(&devinfo->pdev->dev, + INFF_DMA_D2H_SCRATCH_BUF_LEN, + &devinfo->shared.scratch_dmahandle, + GFP_KERNEL); + if (!devinfo->shared.scratch) + goto fail; + + addr = devinfo->shared.tcm_base_address + + INFF_SHARED_DMA_SCRATCH_ADDR_OFFSET; + address = (u64)devinfo->shared.scratch_dmahandle; + inff_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); + inff_pcie_write_tcm32(devinfo, addr + 4, address >> 32); + addr = devinfo->shared.tcm_base_address + + INFF_SHARED_DMA_SCRATCH_LEN_OFFSET; + inff_pcie_write_tcm32(devinfo, addr, INFF_DMA_D2H_SCRATCH_BUF_LEN); + + devinfo->shared.ringupd = + dma_alloc_coherent(&devinfo->pdev->dev, + INFF_DMA_D2H_RINGUPD_BUF_LEN, + &devinfo->shared.ringupd_dmahandle, + GFP_KERNEL); + if (!devinfo->shared.ringupd) + goto fail; + + addr = devinfo->shared.tcm_base_address + + INFF_SHARED_DMA_RINGUPD_ADDR_OFFSET; + address = (u64)devinfo->shared.ringupd_dmahandle; + inff_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); + 
inff_pcie_write_tcm32(devinfo, addr + 4, address >> 32); + addr = devinfo->shared.tcm_base_address + + INFF_SHARED_DMA_RINGUPD_LEN_OFFSET; + inff_pcie_write_tcm32(devinfo, addr, INFF_DMA_D2H_RINGUPD_BUF_LEN); + return 0; + +fail: + inff_err(bus, "Allocating scratch buffers failed\n"); + inff_pcie_release_scratchbuffers(devinfo); + return -ENOMEM; +} + +static void inff_pcie_down(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *pcie_bus_dev = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = pcie_bus_dev->devinfo; + + inff_pcie_fwcon_timer(devinfo, false); +} + +static int inff_pcie_preinit(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *buspub = bus_if->bus_priv.pcie; + + inff_dbg(PCIE, "Enter\n"); + + inff_pcie_intr_enable(buspub->devinfo); + inff_pcie_hostready(buspub->devinfo); + + return 0; +} + +static int inff_pcie_tx(struct device *dev, struct sk_buff *skb) +{ + return 0; +} + +static int inff_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg, + uint len) +{ + return 0; +} + +static int inff_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg, + uint len) +{ + return 0; +} + +static void inff_pcie_wowl_config(struct device *dev, bool enabled) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *buspub = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = buspub->devinfo; + + inff_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled); + devinfo->wowl_enabled = enabled; +} + +static size_t inff_pcie_get_ramsize(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *buspub = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = buspub->devinfo; + + return devinfo->ci->ramsize - devinfo->ci->srsize; +} + +static int inff_pcie_get_memdump(struct device *dev, void *data, size_t len) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *buspub = 
bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = buspub->devinfo; + + inff_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len); + inff_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len); + return 0; +} + +static int inff_pcie_get_blob(struct device *dev, const struct firmware **fw, + enum inff_blob_type type) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *buspub = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = buspub->devinfo; + struct inff_chip_specific *chip_spec = &devinfo->ci->chip_spec; + + switch (type) { + case INFF_BLOB_CLM: + *fw = chip_spec->clm_fw; + chip_spec->clm_fw = NULL; + break; + default: + return -ENOENT; + } + + if (!*fw) + return -ENOENT; + + return 0; +} + +static int inff_pcie_reset(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *buspub = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = buspub->devinfo; + struct inff_fw_request *fwreq; + int err; + + inff_pcie_intr_disable(devinfo); + + inff_pcie_bus_console_read(devinfo, true); + + inff_detach(dev); + + inff_pcie_release_irq(devinfo); + inff_pcie_release_scratchbuffers(devinfo); + inff_pcie_release_ringbuffers(devinfo); + inff_pcie_reset_device(devinfo); + + fwreq = inff_prepare_fw_request(devinfo->settings->firmware_path, + devinfo->ci, inff_pcie_fwnames, + ARRAY_SIZE(inff_pcie_fwnames), + devinfo->settings->board_type); + if (!fwreq) { + dev_err(dev, "Failed to prepare FW request\n"); + return -ENOMEM; + } + + /* NVRAM reserves PCI domain 0 for SDK faked bus */ + fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1; + fwreq->bus_nr = devinfo->pdev->bus->number; + + err = inff_fw_get_firmwares(dev, fwreq, inff_pcie_setup); + if (err) { + dev_err(dev, "Failed to prepare FW request\n"); + kfree(fwreq); + } + + return err; +} + +static const struct inff_bus_ops inff_pcie_bus_ops = { + .preinit = inff_pcie_preinit, + .txdata = inff_pcie_tx, + .stop = 
inff_pcie_down, + .txctl = inff_pcie_tx_ctlpkt, + .rxctl = inff_pcie_rx_ctlpkt, + .wowl_config = inff_pcie_wowl_config, + .get_ramsize = inff_pcie_get_ramsize, + .get_memdump = inff_pcie_get_memdump, + .get_blob = inff_pcie_get_blob, + .reset = inff_pcie_reset, + .debugfs_create = inff_pcie_debugfs_create, + .interrupt_enable = inff_pcie_interrupt_enable, + .interrupt_disable = inff_pcie_interrupt_disable, +}; + +static void +inff_pcie_adjust_ramsize(struct inff_pciedev_info *devinfo, u8 *data, + u32 data_len) +{ + __le32 *field; + u32 newsize; + + if (data_len < INFF_RAMSIZE_OFFSET + 8) + return; + + field = (__le32 *)&data[INFF_RAMSIZE_OFFSET]; + if (le32_to_cpup(field) != INFF_RAMSIZE_MAGIC) + return; + field++; + newsize = le32_to_cpup(field); + + inff_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n", + newsize); + devinfo->ci->ramsize = newsize; +} + +static void +inff_pcie_write_rand(struct inff_pciedev_info *devinfo, u32 nvram_csm) +{ + struct inff_rand_metadata rand_data; + u8 rand_buf[INFF_ENTROPY_HOST_LEN]; + u32 count = INFF_ENTROPY_HOST_LEN; + u32 address; + + address = devinfo->ci->rambase + + (devinfo->ci->ramsize - INFF_NVRAM_OFFSET_TCM) - + ((nvram_csm & 0xffff) * INFF_NVRAM_COMPRS_FACTOR) - + sizeof(rand_data); + memset(rand_buf, 0, INFF_ENTROPY_HOST_LEN); + rand_data.signature = cpu_to_le32(INFF_NVRAM_RNG_SIGNATURE); + rand_data.count = cpu_to_le32(count); + inff_pcie_copy_mem_todev(devinfo, address, &rand_data, + sizeof(rand_data)); + address -= count; + get_random_bytes(rand_buf, count); + inff_pcie_copy_mem_todev(devinfo, address, rand_buf, count); +} + +static int +inff_pcie_init_share_ram_info(struct inff_pciedev_info *devinfo, + u32 sharedram_addr) +{ + struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); + struct inff_pcie_shared_info *shared; + u32 addr; + u32 host_cap; + + shared = &devinfo->shared; + shared->tcm_base_address = sharedram_addr; + + shared->flags = inff_pcie_read_tcm32(devinfo, sharedram_addr); + 
shared->version = (u8)(shared->flags & INFF_PCIE_SHARED_VERSION_MASK); + inff_dbg(PCIE, "PCIe protocol version %d\n", shared->version); + if (shared->version > INFF_PCIE_MAX_SHARED_VERSION || + shared->version < INFF_PCIE_MIN_SHARED_VERSION) { + inff_err(bus, "Unsupported PCIE version %d\n", + shared->version); + return -EINVAL; + } + + /* check firmware support dma indicies */ + if (shared->flags & INFF_PCIE_SHARED_DMA_INDEX) { + if (shared->flags & INFF_PCIE_SHARED_DMA_2B_IDX) + devinfo->dma_idx_sz = sizeof(u16); + else + devinfo->dma_idx_sz = sizeof(u32); + } + + addr = sharedram_addr + INFF_SHARED_MAX_RXBUFPOST_OFFSET; + shared->max_rxbufpost = inff_pcie_read_tcm16(devinfo, addr); + if (shared->max_rxbufpost == 0) + shared->max_rxbufpost = INFF_DEF_MAX_RXBUFPOST; + + addr = sharedram_addr + INFF_SHARED_RX_DATAOFFSET_OFFSET; + shared->rx_dataoffset = inff_pcie_read_tcm32(devinfo, addr); + + addr = sharedram_addr + INFF_SHARED_HTOD_MB_DATA_ADDR_OFFSET; + shared->htod_mb_data_addr = inff_pcie_read_tcm32(devinfo, addr); + + addr = sharedram_addr + INFF_SHARED_DTOH_MB_DATA_ADDR_OFFSET; + shared->dtoh_mb_data_addr = inff_pcie_read_tcm32(devinfo, addr); + + addr = sharedram_addr + INFF_SHARED_RING_INFO_ADDR_OFFSET; + shared->ring_info_addr = inff_pcie_read_tcm32(devinfo, addr); + + if (shared->version >= INFF_PCIE_SHARED_VERSION_6) { + host_cap = shared->version; + + /* Disable OOB Device Wake based DeepSleep State Machine */ + host_cap |= INFF_HOSTCAP_DS_NO_OOB_DW; + + devinfo->hostready = + ((shared->flags & INFF_PCIE_SHARED_HOSTRDY_DB1) + == INFF_PCIE_SHARED_HOSTRDY_DB1); + if (devinfo->hostready) { + inff_dbg(PCIE, "HostReady supported by dongle.\n"); + host_cap |= INFF_HOSTCAP_H2D_ENABLE_HOSTRDY; + } + devinfo->use_mailbox = + ((shared->flags & INFF_PCIE_SHARED_USE_MAILBOX) + == INFF_PCIE_SHARED_USE_MAILBOX); + devinfo->use_d0_inform = false; + addr = sharedram_addr + INFF_SHARED_HOST_CAP_OFFSET; + + inff_pcie_write_tcm32(devinfo, addr, host_cap); + } else { + 
devinfo->use_d0_inform = true; + } + + inff_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n", + shared->max_rxbufpost, shared->rx_dataoffset); + + inff_pcie_bus_console_init(devinfo); + inff_pcie_bus_console_read(devinfo, false); + + return 0; +} + +static int inff_pcie_download_fw_nvram(struct inff_pciedev_info *devinfo, + const struct firmware *fw, void *nvram, + u32 nvram_len) +{ + struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); + struct trx_header_le *trx = (struct trx_header_le *)fw->data; + u32 fw_size; + u32 sharedram_addr; + u32 sharedram_addr_written; + u32 loop_counter; + int err; + u32 address; + u32 resetintr; + u32 nvram_lenw; + u32 nvram_csm; + struct inff_chip_specific *chip_spec = &devinfo->ci->chip_spec; + struct inff_fw_dataset *fw_data = &chip_spec->fwdata[0]; + + inff_dbg(PCIE, "Halt ARM.\n"); + err = inff_pcie_enter_download_state(devinfo); + if (err) + return err; + + inff_dbg(PCIE, "Download FW %s\n", fw_data[INFF_FW_CODE].fwnames.path); + address = devinfo->ci->rambase; + fw_size = fw->size; + if (trx->magic == cpu_to_le32(TRX_MAGIC)) { + address -= sizeof(struct trx_header_le); + fw_size = le32_to_cpu(trx->len); + } + inff_pcie_copy_mem_todev(devinfo, address, (void *)fw->data, fw_size); + + resetintr = get_unaligned_le32(fw->data); + release_firmware(fw); + + inff_pcie_bus_console_read(devinfo, false); + err = devinfo->ci->blhs->post_fwdl(devinfo->ci); + if (err) { + inff_err(bus, "FW download failed, err=%d\n", err); + return err; + } + + err = devinfo->ci->blhs->chk_validation(devinfo->ci); + if (err) { + inff_err(bus, "FW valication failed, err=%d\n", err); + return err; + } + + if (nvram) { + inff_dbg(PCIE, "Download NVRAM %s\n", fw_data[INFF_FW_NVRAM].fwnames.path); + address = devinfo->ci->rambase + devinfo->ci->ramsize - + nvram_len; + + address -= 4; + inff_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len); + + /* Convert nvram_len to words to determine the length token */ + nvram_lenw = nvram_len / 4; + 
nvram_csm = (~nvram_lenw << 16) | (nvram_lenw & 0x0000FFFF); + inff_fw_nvram_free(nvram); + } else { + nvram_csm = 0; + inff_dbg(PCIE, "No matching NVRAM file found %s\n", + fw_data[INFF_FW_NVRAM].fwnames.path); + } + + if (devinfo->ci->chip == INF_CC_5557X_CHIP_ID) { + /* Write the length token to the last word of RAM address */ + inff_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, + cpu_to_le32(nvram_csm)); + + /* Write random numbers to TCM for randomizing heap address */ + inff_pcie_write_rand(devinfo, nvram_csm); + } + + sharedram_addr_written = inff_pcie_read_ram32(devinfo, + devinfo->ci->ramsize - + 4); + inff_dbg(PCIE, "Bring ARM in running state\n"); + err = inff_pcie_exit_download_state(devinfo, resetintr); + if (err) + return err; + + if (!inff_pcie_bus_readshared(devinfo, nvram_csm)) + inff_pcie_bus_console_read(devinfo, false); + + inff_dbg(PCIE, "Wait for FW init\n"); + sharedram_addr = sharedram_addr_written; + loop_counter = INFF_PCIE_FW_UP_TIMEOUT / 50; + while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) { + msleep(50); + sharedram_addr = inff_pcie_read_ram32(devinfo, + devinfo->ci->ramsize - + 4); + loop_counter--; + } + if (sharedram_addr == sharedram_addr_written) { + inff_err(bus, "FW failed to initialize\n"); + return -ENODEV; + } + if (sharedram_addr < devinfo->ci->rambase || + sharedram_addr >= devinfo->ci->rambase + devinfo->ci->ramsize) { + inff_err(bus, "Invalid shared RAM address 0x%08x\n", + sharedram_addr); + return -ENODEV; + } + inff_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr); + + return inff_pcie_init_share_ram_info(devinfo, sharedram_addr); +} + +static int inff_pcie_get_resource(struct inff_pciedev_info *devinfo) +{ + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + int err; + phys_addr_t bar0_addr, bar1_addr; + ulong bar1_size; + + err = pci_enable_device(pdev); + if (err) { + inff_err(bus, "pci_enable_device failed err=%d\n", err); + return err; + } 
+ + pci_set_master(pdev); + + /* Bar-0 mapped address */ + bar0_addr = pci_resource_start(pdev, 0); + /* Bar-1 mapped address */ + bar1_addr = pci_resource_start(pdev, 2); + /* read Bar-1 mapped memory range */ + bar1_size = pci_resource_len(pdev, 2); + if (bar1_size == 0 || bar1_addr == 0) { + inff_err(bus, "BAR1 Not enabled, device size=%ld, addr=%#016llx\n", + bar1_size, (unsigned long long)bar1_addr); + return -EINVAL; + } + + devinfo->regs = ioremap(bar0_addr, INFF_PCIE_REG_MAP_SIZE); + devinfo->tcm = ioremap(bar1_addr, bar1_size); + devinfo->bar1_size = bar1_size; + + if (!devinfo->regs || !devinfo->tcm) { + inff_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs, + devinfo->tcm); + return -EINVAL; + } + inff_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n", + devinfo->regs, (unsigned long long)bar0_addr); + inff_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n", + devinfo->tcm, (unsigned long long)bar1_addr, + (unsigned int)bar1_size); + + return 0; +} + +static void inff_pcie_release_resource(struct inff_pciedev_info *devinfo) +{ + if (devinfo->tcm) + iounmap(devinfo->tcm); + if (devinfo->regs) + iounmap(devinfo->regs); + + pci_disable_device(devinfo->pdev); +} + +static u32 inff_pcie_buscore_blhs_read(void *ctx, u32 reg_offset) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)ctx; + + inff_pcie_select_core(devinfo, INF_CORE_PCIE2); + return inff_pcie_read_reg32(devinfo, reg_offset); +} + +static void inff_pcie_buscore_blhs_write(void *ctx, u32 reg_offset, u32 value) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)ctx; + + inff_pcie_select_core(devinfo, INF_CORE_PCIE2); + inff_pcie_write_reg32(devinfo, reg_offset, value); +} + +static u32 inff_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr) +{ + u32 ret_addr; + + ret_addr = addr & (INFF_PCIE_BAR0_REG_SIZE - 1); + addr &= ~(INFF_PCIE_BAR0_REG_SIZE - 1); + pci_write_config_dword(pdev, INFF_PCIE_BAR0_WINDOW, addr); + + 
return ret_addr; +} + +static u32 inff_pcie_buscore_read32(void *ctx, u32 addr) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)ctx; + + addr = inff_pcie_buscore_prep_addr(devinfo->pdev, addr); + return inff_pcie_read_reg32(devinfo, addr); +} + +static void inff_pcie_buscore_write32(void *ctx, u32 addr, u32 value) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)ctx; + + addr = inff_pcie_buscore_prep_addr(devinfo->pdev, addr); + inff_pcie_write_reg32(devinfo, addr, value); +} + +static int inff_pcie_buscoreprep(void *ctx) +{ + return inff_pcie_get_resource(ctx); +} + +static int inff_pcie_buscore_reset(void *ctx, struct inff_chip *chip) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)ctx; + struct inff_core *core; + u32 val, reg; + + devinfo->ci = chip; + inff_pcie_reset_device(devinfo); + + /* reginfo is not ready yet */ + core = inff_chip_get_core(chip, INF_CORE_PCIE2); + if (core->rev >= 64) + reg = INFF_PCIE_64_PCIE2REG_MAILBOXINT; + else + reg = INFF_PCIE_PCIE2REG_MAILBOXINT; + + val = inff_pcie_read_reg32(devinfo, reg); + if (val != 0xffffffff) + inff_pcie_write_reg32(devinfo, reg, val); + + return 0; +} + +static void inff_pcie_buscore_activate(void *ctx, struct inff_chip *chip, + u32 rstvec) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)ctx; + + inff_pcie_write_tcm32(devinfo, 0, rstvec); +} + +static int +inff_pcie_buscore_sec_attach(void *ctx, struct inff_blhs **blhs, struct inff_ccsec **ccsec, + u32 flag, uint timeout, uint interval) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)ctx; + struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); + struct inff_blhs *blhsh; + u32 regdata; + u32 pcie_enum; + u32 addr; + + if (devinfo->pdev->vendor != INF_PCIE_VENDOR_ID_CYPRESS) + return 0; + + pci_read_config_dword(devinfo->pdev, INFF_PCIE_CFGREG_REVID, ®data); + if (regdata & INFF_PCIE_CFGREG_REVID_SECURE_MODE) { + blhsh = kzalloc(sizeof(*blhsh), 
GFP_KERNEL); + if (!blhsh) + return -ENOMEM; + + blhsh->d2h = INFF_PCIE_PCIE2REG_DAR_D2H_MSG_0; + blhsh->h2d = INFF_PCIE_PCIE2REG_DAR_H2D_MSG_0; + blhsh->read = inff_pcie_buscore_blhs_read; + blhsh->write = inff_pcie_buscore_blhs_write; + + /* Host indication for bootloarder to start the init */ + if (devinfo->pdev->device == INF_PCIE_5557X_DEVICE_ID) + pcie_enum = INFF_CYW55572_PCIE_BAR0_PCIE_ENUM_OFFSET; + else + pcie_enum = INFF_PCIE_BAR0_PCIE_ENUM_OFFSET; + + pci_read_config_dword(devinfo->pdev, PCI_BASE_ADDRESS_0, + ®data); + addr = regdata + pcie_enum + blhsh->h2d; + inff_pcie_buscore_write32(ctx, addr, 0); + + addr = regdata + pcie_enum + blhsh->d2h; + SPINWAIT_MS((inff_pcie_buscore_read32(ctx, addr) & flag) == 0, + timeout, interval); + regdata = inff_pcie_buscore_read32(ctx, addr); + if (!(regdata & flag)) { + inff_err(bus, "Timeout waiting for bootloader ready\n"); + kfree(blhsh); + return -EPERM; + } + *blhs = blhsh; + } + + return 0; +} + +static const struct inff_buscore_ops inff_pcie_buscore_ops = { + .prepare = inff_pcie_buscoreprep, + .reset = inff_pcie_buscore_reset, + .activate = inff_pcie_buscore_activate, + .read32 = inff_pcie_buscore_read32, + .write32 = inff_pcie_buscore_write32, + .sec_attach = inff_pcie_buscore_sec_attach, +}; + +#define INFF_PCIE_FW_CODE 0 +#define INFF_PCIE_FW_NVRAM 1 +#define INFF_PCIE_FW_CLM 2 + +static void inff_pcie_setup(struct device *dev, int ret, + struct inff_fw_request *fwreq) +{ + const struct firmware *fw; + void *nvram; + struct inff_bus *bus; + struct inff_pciedev *pcie_bus_dev; + struct inff_pciedev_info *devinfo; + struct inff_commonring **flowrings; + struct inff_chip_specific *chip_spec; + u32 i, nvram_len; + + bus = dev_get_drvdata(dev); + pcie_bus_dev = bus->bus_priv.pcie; + devinfo = pcie_bus_dev->devinfo; + chip_spec = &devinfo->ci->chip_spec; + + /* check firmware loading result */ + if (ret) + goto fail; + + inff_pcie_attach(devinfo); + + fw = fwreq->items[INFF_PCIE_FW_CODE].binary; + nvram = 
fwreq->items[INFF_PCIE_FW_NVRAM].nv_data.data; + nvram_len = fwreq->items[INFF_PCIE_FW_NVRAM].nv_data.len; + chip_spec->clm_fw = fwreq->items[INFF_PCIE_FW_CLM].binary; + kfree(fwreq); + + ret = inff_chip_get_raminfo(devinfo->ci); + if (ret) { + inff_err(bus, "Failed to get RAM info\n"); + release_firmware(fw); + inff_fw_nvram_free(nvram); + goto fail; + } + + /* Some of the firmwares have the size of the memory of the device + * defined inside the firmware. This is because part of the memory in + * the device is shared and the division is determined by FW. Parse + * the firmware and adjust the chip memory size now. + */ + inff_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size); + + ret = inff_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len); + if (ret) { + if (!inff_pcie_bus_readshared(devinfo, 0)) + inff_pcie_bus_console_read(devinfo, true); + goto fail; + } + + devinfo->state = INFFMAC_PCIE_STATE_UP; + + ret = inff_pcie_init_ringbuffers(devinfo); + if (ret) + goto fail; + + ret = inff_pcie_init_scratchbuffers(devinfo); + if (ret) + goto fail; + + inff_pcie_select_core(devinfo, INF_CORE_PCIE2); + ret = inff_pcie_request_irq(devinfo); + if (ret) + goto fail; + + /* hook the commonrings in the bus structure. 
*/ + for (i = 0; i < INFF_NROF_COMMON_MSGRINGS; i++) + bus->msgbuf->commonrings[i] = + &devinfo->shared.commonrings[i]->commonring; + + flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings), + GFP_KERNEL); + if (!flowrings) + goto fail; + + for (i = 0; i < devinfo->shared.max_flowrings; i++) + flowrings[i] = &devinfo->shared.flowrings[i].commonring; + bus->msgbuf->flowrings = flowrings; + + bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset; + bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost; + bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings; + + init_waitqueue_head(&devinfo->mbdata_resp_wait); + + ret = inff_attach(&devinfo->pdev->dev, true); + if (ret) + goto fail; + + inff_pcie_bus_console_read(devinfo, false); + + inff_pcie_fwcon_timer(devinfo, true); + + return; + +fail: + inff_err(bus, "Dongle setup failed\n"); + inff_pcie_bus_console_read(devinfo, true); + inff_fw_crashed(dev); + device_release_driver(dev); +} + +#ifdef DEBUG +static void +inff_pcie_fwcon_timer(struct inff_pciedev_info *devinfo, bool active) +{ + if (!active) { + if (devinfo->console_active) { + timer_delete_sync(&devinfo->timer); + devinfo->console_active = false; + } + return; + } + + /* don't start the timer */ + if (devinfo->state != INFFMAC_PCIE_STATE_UP || + !devinfo->console_interval || !INFF_FWCON_ON()) + return; + + if (!devinfo->console_active) { + devinfo->timer.expires = jiffies + devinfo->console_interval; + add_timer(&devinfo->timer); + devinfo->console_active = true; + } else { + /* Reschedule the timer */ + mod_timer(&devinfo->timer, jiffies + devinfo->console_interval); + } +} + +static void +inff_pcie_fwcon(struct timer_list *t) +{ + struct inff_pciedev_info *devinfo = timer_container_of(devinfo, t, + timer); + + if (!devinfo->console_active) + return; + + inff_pcie_bus_console_read(devinfo, false); + + /* Reschedule the timer if console interval is not zero */ + mod_timer(&devinfo->timer, jiffies + devinfo->console_interval); +} 
+ +static int inff_pcie_console_interval_get(void *data, u64 *val) +{ + struct inff_pciedev_info *devinfo = data; + + *val = devinfo->console_interval; + + return 0; +} + +static int inff_pcie_console_interval_set(void *data, u64 val) +{ + struct inff_pciedev_info *devinfo = data; + + if (val > MAX_CONSOLE_INTERVAL) + return -EINVAL; + + devinfo->console_interval = val; + + if (!val && devinfo->console_active) + inff_pcie_fwcon_timer(devinfo, false); + else if (val) + inff_pcie_fwcon_timer(devinfo, true); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(inff_pcie_console_interval_fops, + inff_pcie_console_interval_get, + inff_pcie_console_interval_set, + "%llu\n"); + +static void inff_pcie_debugfs_create(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pub *drvr = bus_if->drvr; + struct inff_pciedev *pcie_bus_dev = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = pcie_bus_dev->devinfo; + struct dentry *dentry = inff_debugfs_get_devdir(drvr); + + if (IS_ERR_OR_NULL(dentry)) + return; + + devinfo->console_interval = INFF_CONSOLE; + + debugfs_create_file("console_interval", 0644, dentry, devinfo, + &inff_pcie_console_interval_fops); +} + +#else +void inff_pcie_fwcon_timer(struct inff_pciedev_info *devinfo, bool active) +{ +} + +static void inff_pcie_debugfs_create(struct device *dev) +{ +} +#endif + +/* Forward declaration for pci_match_id() call */ +static const struct pci_device_id inff_pcie_devid_table[]; + +static int +inff_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int ret; + struct inff_fw_request *fwreq; + struct inff_pciedev_info *devinfo; + struct inff_pciedev *pcie_bus_dev; + struct inff_core *core; + struct inff_bus *bus; + + if (!id) { + id = pci_match_id(inff_pcie_devid_table, pdev); + if (!id) { + pci_err(pdev, "Error could not find pci_device_id for %x:%x\n", + pdev->vendor, pdev->device); + return -ENODEV; + } + } + + inff_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device); + + 
ret = -ENOMEM; + devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL); + if (!devinfo) + return ret; + + devinfo->pdev = pdev; + pcie_bus_dev = NULL; + devinfo->ci = inff_chip_attach(devinfo, pdev->device, + &inff_pcie_buscore_ops); + if (IS_ERR(devinfo->ci)) { + ret = PTR_ERR(devinfo->ci); + devinfo->ci = NULL; + goto fail; + } + + core = inff_chip_get_core(devinfo->ci, INF_CORE_PCIE2); + if (core->rev >= 64) + devinfo->reginfo = &inff_reginfo_64; + else + devinfo->reginfo = &inff_reginfo_default; + + pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL); + if (!pcie_bus_dev) { + ret = -ENOMEM; + goto fail; + } + + devinfo->settings = inff_get_module_param(&devinfo->pdev->dev, + INFF_BUSTYPE_PCIE, + devinfo->ci->chip, + devinfo->ci->chiprev); + if (!devinfo->settings) { + ret = -ENOMEM; + goto fail; + } + ret = PTR_ERR_OR_ZERO(devinfo->settings); + if (ret < 0) + goto fail; + + bus = kzalloc(sizeof(*bus), GFP_KERNEL); + if (!bus) { + ret = -ENOMEM; + goto fail; + } + bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), GFP_KERNEL); + if (!bus->msgbuf) { + ret = -ENOMEM; + kfree(bus); + goto fail; + } + + /* hook it all together. 
*/ + pcie_bus_dev->devinfo = devinfo; + pcie_bus_dev->bus = bus; + bus->dev = &pdev->dev; + bus->bus_priv.pcie = pcie_bus_dev; + bus->ops = &inff_pcie_bus_ops; + bus->proto_type = INFF_PROTO_MSGBUF; + bus->chip = devinfo->coreid; + bus->chip_pub = devinfo->ci; + bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot); + dev_set_drvdata(&pdev->dev, bus); + + ret = inff_alloc(&devinfo->pdev->dev, devinfo->settings); + if (ret) + goto fail_bus; + +#ifdef DEBUG + /* Set up the fwcon timer */ + timer_setup(&devinfo->timer, inff_pcie_fwcon, 0); +#endif + + fwreq = inff_prepare_fw_request(devinfo->settings->firmware_path, + devinfo->ci, inff_pcie_fwnames, + ARRAY_SIZE(inff_pcie_fwnames), + devinfo->settings->board_type); + if (!fwreq) { + ret = -ENOMEM; + goto fail_inff; + } + + /* NVRAM reserves PCI domain 0 for SDK faked bus */ + fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1; + fwreq->bus_nr = devinfo->pdev->bus->number; + + ret = inff_fw_get_firmwares(bus->dev, fwreq, inff_pcie_setup); + if (ret < 0) { + kfree(fwreq); + goto fail_inff; + } + return 0; + +fail_inff: + inff_free(&devinfo->pdev->dev); +fail_bus: + kfree(bus->msgbuf); + kfree(bus); +fail: + inff_err(NULL, "failed %x:%x\n", pdev->vendor, pdev->device); + inff_pcie_release_resource(devinfo); + if (devinfo->ci) + inff_chip_detach(devinfo->ci); + if (devinfo->settings) + inff_release_module_param(devinfo->settings); + kfree(pcie_bus_dev); + kfree(devinfo); + return ret; +} + +static void +inff_pcie_remove(struct pci_dev *pdev) +{ + struct inff_pciedev_info *devinfo; + struct inff_chip_specific *chip_spec; + struct inff_bus *bus; + + inff_dbg(PCIE, "Enter\n"); + + bus = dev_get_drvdata(&pdev->dev); + if (!bus) + return; + + devinfo = bus->bus_priv.pcie->devinfo; + chip_spec = &devinfo->ci->chip_spec; + + inff_pcie_bus_console_read(devinfo, false); + inff_pcie_fwcon_timer(devinfo, false); + + devinfo->state = INFFMAC_PCIE_STATE_DOWN; + if (devinfo->ci) + inff_pcie_intr_disable(devinfo); + + 
inff_detach(&pdev->dev); + inff_free(&pdev->dev); + + kfree(bus->bus_priv.pcie); + kfree(bus->msgbuf->flowrings); + kfree(bus->msgbuf); + kfree(bus); + + inff_pcie_release_irq(devinfo); + inff_pcie_release_scratchbuffers(devinfo); + inff_pcie_release_ringbuffers(devinfo); + inff_pcie_reset_device(devinfo); + inff_pcie_release_resource(devinfo); + release_firmware(chip_spec->clm_fw); + + if (devinfo->ci) + inff_chip_detach(devinfo->ci); + if (devinfo->settings) + inff_release_module_param(devinfo->settings); + + kfree(devinfo); + dev_set_drvdata(&pdev->dev, NULL); +} + +#ifdef CONFIG_PM + +static int inff_pcie_pm_enter_D3(struct device *dev) +{ + struct inff_pciedev_info *devinfo; + struct inff_bus *bus; + struct inff_cfg80211_info *config; + int retry = INFF_PM_WAIT_MAXRETRY; + + inff_dbg(PCIE, "Enter\n"); + + bus = dev_get_drvdata(dev); + devinfo = bus->bus_priv.pcie->devinfo; + config = bus->drvr->config; + + while (retry && + config->pm_state == INFF_CFG80211_PM_STATE_SUSPENDING) { + usleep_range(10000, 20000); + retry--; + } + if (!retry && config->pm_state == INFF_CFG80211_PM_STATE_SUSPENDING) + inff_err(bus, "timed out wait for cfg80211 suspended\n"); + + inff_pcie_fwcon_timer(devinfo, false); + inff_bus_change_state(bus, INFF_BUS_DOWN); + + devinfo->mbdata_completed = false; + inff_pcie_send_mb_data(devinfo, INFF_H2D_HOST_D3_INFORM); + + wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed, + INFF_PCIE_MBDATA_TIMEOUT); + if (!devinfo->mbdata_completed) { + inff_err(bus, "Timeout on response for entering D3 substate\n"); + inff_bus_change_state(bus, INFF_BUS_UP); + return -EIO; + } + + devinfo->state = INFFMAC_PCIE_STATE_DOWN; + + return 0; +} + +static int inff_pcie_pm_leave_D3(struct device *dev) +{ + struct inff_pciedev_info *devinfo; + struct inff_bus *bus; + struct pci_dev *pdev; + int err; + + inff_dbg(PCIE, "Enter\n"); + + bus = dev_get_drvdata(dev); + devinfo = bus->bus_priv.pcie->devinfo; + inff_dbg(PCIE, "Enter, dev=%p, bus=%p\n", 
dev, bus); + + /* Check if device is still up and running, if so we are ready */ + if (inff_pcie_read_reg32(devinfo, devinfo->reginfo->intmask) != 0) { + inff_dbg(PCIE, "Try to wakeup device....\n"); + if (devinfo->use_d0_inform) { + if (inff_pcie_send_mb_data(devinfo, + INFF_H2D_HOST_D0_INFORM)) + goto cleanup; + } else { + inff_pcie_hostready(devinfo); + } + + inff_dbg(PCIE, "Hot resume, continue....\n"); + devinfo->state = INFFMAC_PCIE_STATE_UP; + inff_pcie_select_core(devinfo, INF_CORE_PCIE2); + inff_bus_change_state(bus, INFF_BUS_UP); + inff_pcie_intr_enable(devinfo); + if (devinfo->use_d0_inform) { + inff_dbg(TRACE, "sending inff_pcie_hostready since use_d0_inform=%d\n", + devinfo->use_d0_inform); + inff_pcie_hostready(devinfo); + } + + inff_pcie_fwcon_timer(devinfo, true); + return 0; + } + +cleanup: + inff_chip_detach(devinfo->ci); + devinfo->ci = NULL; + pdev = devinfo->pdev; + inff_pcie_remove(pdev); + + err = inff_pcie_probe(pdev, NULL); + if (err) + __inff_err(NULL, __func__, "probe after resume failed, err=%d\n", err); + + return err; +} + +static const struct dev_pm_ops inff_pciedrvr_pm = { + .suspend = inff_pcie_pm_enter_D3, + .resume = inff_pcie_pm_leave_D3, + .freeze = inff_pcie_pm_enter_D3, + .restore = inff_pcie_pm_leave_D3, +}; + +#endif /* CONFIG_PM */ + +#define INFF_PCIE_DEVICE(dev_id) \ + { \ + INF_PCIE_VENDOR_ID_CYPRESS, dev_id, \ + PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, \ + 0 \ + } \ + +static const struct pci_device_id inff_pcie_devid_table[] = { + INFF_PCIE_DEVICE(INF_PCIE_5557X_DEVICE_ID), + { /* end: all zeroes */ } +}; + +MODULE_DEVICE_TABLE(pci, inff_pcie_devid_table); + +static struct pci_driver inff_pciedrvr = { + .name = KBUILD_MODNAME, + .id_table = inff_pcie_devid_table, + .probe = inff_pcie_probe, + .remove = inff_pcie_remove, +#ifdef CONFIG_PM + .driver.pm = &inff_pciedrvr_pm, +#endif + .driver.coredump = inff_dev_coredump, +}; + +int inff_pcie_register(void) +{ + inff_dbg(PCIE, "Enter\n"); + 
return pci_register_driver(&inff_pciedrvr); +} + +void inff_pcie_exit(void) +{ + inff_dbg(PCIE, "Enter\n"); + pci_unregister_driver(&inff_pciedrvr); +} diff --git a/drivers/net/wireless/infineon/inffmac/pcie.h b/drivers/net/wireless/infineon/inffmac/pcie.h new file mode 100644 index 000000000000..f319d9741e04 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/pcie.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_PCIE_H +#define INFF_PCIE_H + +struct inff_pciedev { + struct inff_bus *bus; + struct inff_pciedev_info *devinfo; +}; + +void inff_pcie_handle_mb_data(struct inff_bus *bus_if, u32 d2h_mb_data); + +#endif /* INFF_PCIE_H */ -- 2.25.1 Driver implementation of the MSGBUF protocol used for the Control and Data communication with Infineon's WLAN Device over the PCIe BUS. Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/msgbuf.c | 2205 +++++++++++++++++ .../net/wireless/infineon/inffmac/msgbuf.h | 109 + 2 files changed, 2314 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/msgbuf.c create mode 100644 drivers/net/wireless/infineon/inffmac/msgbuf.h diff --git a/drivers/net/wireless/infineon/inffmac/msgbuf.c b/drivers/net/wireless/infineon/inffmac/msgbuf.c new file mode 100644 index 000000000000..6d5430406906 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/msgbuf.c @@ -0,0 +1,2205 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +/******************************************************************************* + * Communicates with the dongle by using dcmd codes. + * For certain dcmd codes, the dongle interprets string data from the host. 
+ ******************************************************************************/ + +#include +#include +#include +#include +#include + +#include "utils.h" +#include "core.h" +#include "debug.h" +#include "proto.h" +#include "msgbuf.h" +#include "commonring.h" +#include "flowring.h" +#include "bus.h" +#include "tracepoint.h" +#include "pcie.h" +#include "common.h" +#include "xdp.h" + +#define MSGBUF_IOCTL_RESP_TIMEOUT msecs_to_jiffies(2000) + +#define MSGBUF_TYPE_GEN_STATUS 0x1 +#define MSGBUF_TYPE_RING_STATUS 0x2 +#define MSGBUF_TYPE_FLOW_RING_CREATE 0x3 +#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT 0x4 +#define MSGBUF_TYPE_FLOW_RING_DELETE 0x5 +#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT 0x6 +#define MSGBUF_TYPE_FLOW_RING_FLUSH 0x7 +#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT 0x8 +#define MSGBUF_TYPE_IOCTLPTR_REQ 0x9 +#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK 0xA +#define MSGBUF_TYPE_IOCTLRESP_BUF_POST 0xB +#define MSGBUF_TYPE_IOCTL_CMPLT 0xC +#define MSGBUF_TYPE_EVENT_BUF_POST 0xD +#define MSGBUF_TYPE_WL_EVENT 0xE +#define MSGBUF_TYPE_TX_POST 0xF +#define MSGBUF_TYPE_TX_STATUS 0x10 +#define MSGBUF_TYPE_RXBUF_POST 0x11 +#define MSGBUF_TYPE_RX_CMPLT 0x12 +#define MSGBUF_TYPE_LPBK_DMAXFER 0x13 +#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT 0x14 +#define MSGBUF_TYPE_H2D_MAILBOX_DATA 0x23 +#define MSGBUF_TYPE_D2H_MAILBOX_DATA 0x24 + +#define NR_TX_PKTIDS 2048 +#define NR_RX_PKTIDS 2048 + +#define INFF_IOCTL_REQ_PKTID 0xFFFE + +#define INFF_MSGBUF_MAX_PKT_SIZE 2048 +#define INFF_MSGBUF_MAX_CTL_PKT_SIZE 8192 +#define INFF_MSGBUF_RXBUFPOST_THRESHOLD 32 +#define INFF_MSGBUF_MAX_IOCTLRESPBUF_POST 8 +#define INFF_MSGBUF_MAX_EVENTBUF_POST 8 + +#define INFF_MSGBUF_PKT_FLAGS_FRAME_802_3 0x01 +#define INFF_MSGBUF_PKT_FLAGS_FRAME_802_11 0x02 +#define INFF_MSGBUF_PKT_FLAGS_FRAME_MASK 0x07 +#define INFF_MSGBUF_PKT_FLAGS_PRIO_SHIFT 5 + +#define INFF_MSGBUF_TX_FLUSH_CNT1 32 +#define INFF_MSGBUF_TX_FLUSH_CNT2 96 + +#define INFF_MSGBUF_DELAY_TXWORKER_THRS 96 +#define INFF_MSGBUF_TRICKLE_TXWORKER_THRS 
32 +#define INFF_MSGBUF_UPDATE_RX_PTR_THRS 48 + +#define INFF_MAX_TXSTATUS_WAIT_RETRIES 10 + +struct msgbuf_common_hdr { + u8 msgtype; + u8 ifidx; + u8 flags; + u8 rsvd0; + __le32 request_id; +}; + +struct msgbuf_ioctl_req_hdr { + struct msgbuf_common_hdr msg; + __le32 cmd; + __le16 trans_id; + __le16 input_buf_len; + __le16 output_buf_len; + __le16 rsvd0[3]; + struct msgbuf_buf_addr req_buf_addr; + __le32 rsvd1[2]; +}; + +struct msgbuf_tx_msghdr { + struct msgbuf_common_hdr msg; + u8 txhdr[ETH_HLEN]; + u8 flags; + u8 seg_cnt; + struct msgbuf_buf_addr metadata_buf_addr; + struct msgbuf_buf_addr data_buf_addr; + __le16 metadata_buf_len; + __le16 data_len; + __le32 rsvd0; +}; + +struct msgbuf_h2d_mbdata { + struct msgbuf_common_hdr msg; + __le32 mbdata; + __le16 rsvd0[7]; +}; + +struct msgbuf_rx_bufpost { + struct msgbuf_common_hdr msg; + __le16 metadata_buf_len; + __le16 data_buf_len; + __le32 rsvd0; + struct msgbuf_buf_addr metadata_buf_addr; + struct msgbuf_buf_addr data_buf_addr; +}; + +struct msgbuf_rx_ioctl_resp_or_event { + struct msgbuf_common_hdr msg; + __le16 host_buf_len; + __le16 rsvd0[3]; + struct msgbuf_buf_addr host_buf_addr; + __le32 rsvd1[4]; +}; + +struct msgbuf_completion_hdr { + __le16 status; + __le16 flow_ring_id; +}; + +/* Data struct for the MSGBUF_TYPE_GEN_STATUS */ +struct msgbuf_gen_status { + struct msgbuf_common_hdr msg; + struct msgbuf_completion_hdr compl_hdr; + __le16 write_idx; + __le32 rsvd0[3]; +}; + +/* Data struct for the MSGBUF_TYPE_RING_STATUS */ +struct msgbuf_ring_status { + struct msgbuf_common_hdr msg; + struct msgbuf_completion_hdr compl_hdr; + __le16 write_idx; + __le16 rsvd0[5]; +}; + +struct msgbuf_rx_event { + struct msgbuf_common_hdr msg; + struct msgbuf_completion_hdr compl_hdr; + __le16 event_data_len; + __le16 seqnum; + __le16 rsvd0[4]; +}; + +struct msgbuf_ioctl_resp_hdr { + struct msgbuf_common_hdr msg; + struct msgbuf_completion_hdr compl_hdr; + __le16 resp_len; + __le16 trans_id; + __le32 cmd; + __le32 rsvd0; 
+};
+
+struct msgbuf_tx_status {
+	struct msgbuf_common_hdr msg;
+	struct msgbuf_completion_hdr compl_hdr;
+	__le16 metadata_len;
+	__le16 tx_status;
+};
+
+struct msgbuf_rx_complete {
+	struct msgbuf_common_hdr msg;
+	struct msgbuf_completion_hdr compl_hdr;
+	__le16 metadata_len;
+	__le16 data_len;
+	__le16 data_offset;
+	__le16 flags;
+	__le32 rx_status_0;
+	__le32 rx_status_1;
+	__le32 rsvd0;
+};
+
+struct msgbuf_tx_flowring_create_req {
+	struct msgbuf_common_hdr msg;
+	u8 da[ETH_ALEN];
+	u8 sa[ETH_ALEN];
+	u8 tid;
+	u8 if_flags;
+	__le16 flow_ring_id;
+	u8 tc;
+	u8 priority;
+	__le16 int_vector;
+	__le16 max_items;
+	__le16 len_item;
+	struct msgbuf_buf_addr flow_ring_addr;
+};
+
+struct msgbuf_tx_flowring_delete_req {
+	struct msgbuf_common_hdr msg;
+	__le16 flow_ring_id;
+	__le16 reason;
+	__le32 rsvd0[7];
+};
+
+struct msgbuf_flowring_create_resp {
+	struct msgbuf_common_hdr msg;
+	struct msgbuf_completion_hdr compl_hdr;
+	__le32 rsvd0[3];
+};
+
+struct msgbuf_flowring_delete_resp {
+	struct msgbuf_common_hdr msg;
+	struct msgbuf_completion_hdr compl_hdr;
+	__le32 rsvd0[3];
+};
+
+struct msgbuf_flowring_flush_resp {
+	struct msgbuf_common_hdr msg;
+	struct msgbuf_completion_hdr compl_hdr;
+	__le32 rsvd0[3];
+};
+
+/* The trailing identifier "d2h_mailbox_data_t" previously declared an
+ * unused file-scope variable of this struct type (a leftover from a
+ * "typedef struct {...} name_t;" form); only the struct tag is used
+ * anywhere, so the stray variable declaration was removed.
+ */
+struct msgbuf_d2h_mailbox_data {
+	struct msgbuf_common_hdr msg;
+	struct msgbuf_completion_hdr compl_hdr;
+	__le32 mbdata;
+	__le32 rsvd0[2];
+};
+
+struct inff_msgbuf_work_item {
+	struct list_head queue;
+	u32 flowid;
+	int ifidx;
+	u8 sa[ETH_ALEN];
+	u8 da[ETH_ALEN];
+};
+
+struct inff_msgbuf_tx_status_entry {
+	struct list_head queue;
+	struct msgbuf_tx_status status;
+};
+
+struct inff_msgbuf {
+	struct inff_pub *drvr;
+
+	struct inff_commonring **commonrings;
+	struct inff_commonring **flowrings;
+	dma_addr_t *flowring_dma_handle;
+
+	u16 max_flowrings;
+	u16 max_submissionrings;
+	u16 max_completionrings;
+
+	u16 rx_dataoffset;
+	u32 max_rxbufpost;
+	u16 rx_metadata_offset;
+	u32 rxbufpost;
+
+	u32 max_ioctlrespbuf;
+
u32 cur_ioctlrespbuf; + u32 max_eventbuf; + u32 cur_eventbuf; + + void *ioctbuf; + dma_addr_t ioctbuf_handle; + u32 ioctbuf_phys_hi; + u32 ioctbuf_phys_lo; + int ioctl_resp_status; + u32 ioctl_resp_ret_len; + u32 ioctl_resp_pktid; + + u16 data_seq_no; + u16 ioctl_seq_no; + u32 reqid; + wait_queue_head_t ioctl_resp_wait; + bool ctl_completed; + + struct inff_msgbuf_pktids *tx_pktids; + struct inff_msgbuf_pktids *rx_pktids; + struct inff_flowring *flow; + + struct workqueue_struct *txflow_wq; + struct work_struct txflow_work; + unsigned long *flow_map; + unsigned long *txstatus_done_map; + + struct work_struct flowring_work; + /* lock for flow ring resource protection */ + spinlock_t flowring_work_lock; + struct list_head work_queue; + struct workqueue_struct *rx_wq; + struct work_struct rx_work; + struct sk_buff_head rx_data_q; + struct work_struct tx_compl_work; + struct list_head tx_compl_list; + /* protect the data of tx complete */ + spinlock_t tx_compl_work_lock; +}; + +struct inff_msgbuf_pktid { + atomic_t allocated; + u16 data_offset; + struct sk_buff *skb; + struct page *page; + dma_addr_t physaddr; +}; + +struct inff_msgbuf_pktids { + u32 array_size; + u32 last_allocated_idx; + enum dma_data_direction direction; + struct inff_msgbuf_pktid *array; + + /* page pool */ + struct page_pool *page_pool; + struct xdp_rxq_info xdp_rxq; +}; + +static void inff_msgbuf_rxbuf_ioctlresp_post(struct inff_msgbuf *msgbuf); +static void inff_msgbuf_process_d2h_mbdata(struct inff_msgbuf *msgbuf, + void *buf); +static void inff_msgbuf_process_rx(struct inff_msgbuf *msgbuf, + struct inff_commonring *commonring); +static void inff_msgbuf_process_msgtype(struct inff_msgbuf *msgbuf, + void *buf, unsigned int *work_done); + +static struct inff_msgbuf_pktids * +inff_msgbuf_init_tx_pktids(struct inff_msgbuf *msgbuf, u32 nr_array_entries, + enum dma_data_direction direction) +{ + struct inff_msgbuf_pktid *array; + struct inff_msgbuf_pktids *pktids; + + array = 
kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL); + if (!array) + return NULL; + + pktids = kzalloc(sizeof(*pktids), GFP_KERNEL); + if (!pktids) { + kfree(array); + return NULL; + } + pktids->array = array; + pktids->array_size = nr_array_entries; + + return pktids; +} + +static struct inff_msgbuf_pktids * +inff_msgbuf_init_rx_pktids(struct inff_msgbuf *msgbuf, u32 nr_array_entries, + enum dma_data_direction direction) +{ + struct inff_msgbuf_pktid *array; + struct inff_msgbuf_pktids *pktids = NULL; + struct device *dev = msgbuf->drvr->bus_if->dev; + + struct page_pool_params pp_params = { + .order = 0, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .pool_size = nr_array_entries, + .offset = INFF_SKB_HEADROOM, + .nid = NUMA_NO_NODE, + .dev = dev->parent, + .dma_dir = direction, + .max_len = INFF_MAX_RX_BUF_SIZE, + }; + + array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL); + if (!array) + return NULL; + + pktids = kzalloc(sizeof(*pktids), GFP_KERNEL); + if (!pktids) { + kfree(array); + return NULL; + } + pktids->array = array; + pktids->array_size = nr_array_entries; + + pktids->page_pool = page_pool_create(&pp_params); + if (IS_ERR(pktids->page_pool)) { + pktids->page_pool = NULL; + goto fail; + } + + return pktids; +fail: + kfree(pktids->array); + kfree(pktids); + + return NULL; +} + +static int +inff_msgbuf_alloc_pktid(struct device *dev, + struct inff_msgbuf_pktids *pktids, + struct sk_buff *skb, struct page *page, + u16 data_offset, dma_addr_t *physaddr, u32 *idx) +{ + struct inff_msgbuf_pktid *array; + u32 count; + + array = pktids->array; + + if (page) { + *physaddr = page_pool_get_dma_addr(page) + INFF_SKB_HEADROOM; + } else if (skb) { + *physaddr = dma_map_single(dev, skb->data + data_offset, + skb->len - data_offset, + pktids->direction); + if (dma_mapping_error(dev, *physaddr)) { + inff_err("dma_map_single failed !!\n"); + return -ENOMEM; + } + } + + *idx = pktids->last_allocated_idx; + + count = 0; + do { + (*idx)++; + if (*idx == 
pktids->array_size) + *idx = 0; + if (array[*idx].allocated.counter == 0) + if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0) + break; + count++; + } while (count < pktids->array_size); + + if (count == pktids->array_size) { + if (page) + page_pool_put_full_page(pktids->page_pool, page, true); + else if (skb) + dma_unmap_single(dev, *physaddr, skb->len - data_offset, + pktids->direction); + return -ENOMEM; + } + + array[*idx].data_offset = data_offset; + array[*idx].physaddr = *physaddr; + + if (page) { + array[*idx].page = page; + array[*idx].skb = NULL; + } else if (skb) { + array[*idx].skb = skb; + array[*idx].page = NULL; + } + + pktids->last_allocated_idx = *idx; + + return 0; +} + +static void* +inff_msgbuf_get_pktid(struct device *dev, struct inff_msgbuf_pktids *pktids, + u32 idx) +{ + struct inff_msgbuf_pktid *pktid; + void *ret; + + if (idx >= pktids->array_size) { + inff_err("Invalid packet id %d (max %d)\n", idx, + pktids->array_size); + return NULL; + } + if (pktids->array[idx].allocated.counter) { + pktid = &pktids->array[idx]; + + if (pktid->page) { + enum dma_data_direction dma_dir = + page_pool_get_dma_dir(pktids->page_pool); + dma_sync_single_for_cpu(dev->parent, pktid->physaddr, + INFF_MAX_RX_BUF_SIZE, dma_dir); + ret = (void *)pktid->page; + } else if (pktid->skb) { + dma_unmap_single(dev, pktid->physaddr, + pktid->skb->len - pktid->data_offset, + pktids->direction); + ret = (void *)pktid->skb; + } + + pktid->allocated.counter = 0; + pktid->physaddr = 0; + } else { + inff_err("Invalid packet id %d (not in use)\n", idx); + } + + return ret; +} + +static void +inff_msgbuf_release_array(struct device *dev, + struct inff_msgbuf_pktids *pktids) +{ + struct inff_msgbuf_pktid *array; + struct inff_msgbuf_pktid *pktid; + u32 count; + + array = pktids->array; + count = 0; + do { + if (array[count].allocated.counter) { + pktid = &array[count]; + + if (pktid->page) { + page_pool_put_full_page(pktids->page_pool, + pktid->page, false); + } else if 
(pktid->skb) { + dma_unmap_single(dev, pktid->physaddr, + pktid->skb->len - + pktid->data_offset, + pktids->direction); + inff_pkt_buf_free_skb(pktid->skb); + } + } + count++; + } while (count < pktids->array_size); + + if (pktids->page_pool) + page_pool_destroy(pktids->page_pool); + + kfree(array); + kfree(pktids); +} + +static void inff_msgbuf_release_pktids(struct inff_msgbuf *msgbuf) +{ + if (msgbuf->rx_pktids) + inff_msgbuf_release_array(msgbuf->drvr->bus_if->dev, + msgbuf->rx_pktids); + if (msgbuf->tx_pktids) + inff_msgbuf_release_array(msgbuf->drvr->bus_if->dev, + msgbuf->tx_pktids); +} + +int inff_msgbuf_tx_mbdata(struct inff_pub *drvr, u32 mbdata) +{ + struct inff_msgbuf *msgbuf = (struct inff_msgbuf *)drvr->proto->pd; + struct inff_commonring *commonring; + struct msgbuf_h2d_mbdata *h2d_mbdata; + void *ret_ptr; + int err; + + commonring = msgbuf->commonrings[INFF_H2D_MSGRING_CONTROL_SUBMIT]; + inff_commonring_lock(commonring); + ret_ptr = inff_commonring_reserve_for_write(commonring); + if (!ret_ptr) { + inff_err("Failed to reserve space in commonring\n"); + inff_commonring_unlock(commonring); + return -ENOMEM; + } + h2d_mbdata = (struct msgbuf_h2d_mbdata *)ret_ptr; + memset(h2d_mbdata, 0, sizeof(*h2d_mbdata)); + + h2d_mbdata->msg.msgtype = MSGBUF_TYPE_H2D_MAILBOX_DATA; + h2d_mbdata->mbdata = cpu_to_le32(mbdata); + + err = inff_commonring_write_complete(commonring); + inff_commonring_unlock(commonring); + + return err; +} + +static int inff_msgbuf_tx_ioctl(struct inff_pub *drvr, int ifidx, + uint cmd, void *buf, uint len) +{ + struct inff_msgbuf *msgbuf = (struct inff_msgbuf *)drvr->proto->pd; + struct inff_commonring *commonring; + struct msgbuf_ioctl_req_hdr *request; + u16 buf_len; + void *ret_ptr; + int err; + + commonring = msgbuf->commonrings[INFF_H2D_MSGRING_CONTROL_SUBMIT]; + inff_commonring_lock(commonring); + ret_ptr = inff_commonring_reserve_for_write(commonring); + if (!ret_ptr) { + iphy_err(drvr, "Failed to reserve space in commonring\n"); + 
inff_commonring_unlock(commonring); + return -ENOMEM; + } + + msgbuf->reqid++; + + request = (struct msgbuf_ioctl_req_hdr *)ret_ptr; + request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ; + request->msg.ifidx = (u8)ifidx; + request->msg.flags = 0; + request->msg.request_id = cpu_to_le32(INFF_IOCTL_REQ_PKTID); + request->cmd = cpu_to_le32(cmd); + request->output_buf_len = cpu_to_le16(len); + request->trans_id = cpu_to_le16(msgbuf->reqid); + + buf_len = min_t(u16, len, INFF_TX_IOCTL_MAX_MSG_SIZE); + request->input_buf_len = cpu_to_le16(buf_len); + request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi); + request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo); + if (buf) + memcpy(msgbuf->ioctbuf, buf, buf_len); + else + memset(msgbuf->ioctbuf, 0, buf_len); + + err = inff_commonring_write_complete(commonring); + inff_commonring_unlock(commonring); + + return err; +} + +static int inff_msgbuf_ioctl_resp_wait(struct inff_msgbuf *msgbuf) +{ + return wait_event_timeout(msgbuf->ioctl_resp_wait, + msgbuf->ctl_completed, + MSGBUF_IOCTL_RESP_TIMEOUT); +} + +static void inff_msgbuf_ioctl_resp_wake(struct inff_msgbuf *msgbuf) +{ + msgbuf->ctl_completed = true; + wake_up(&msgbuf->ioctl_resp_wait); +} + +static int inff_msgbuf_query_dcmd(struct inff_pub *drvr, int ifidx, + uint cmd, void *buf, uint len, int *fwerr) +{ + struct inff_msgbuf *msgbuf = (struct inff_msgbuf *)drvr->proto->pd; + struct sk_buff *skb = NULL; + int timeout; + int err; + + inff_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len); + *fwerr = 0; + msgbuf->ctl_completed = false; + err = inff_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len); + if (err) + return err; + + timeout = inff_msgbuf_ioctl_resp_wait(msgbuf); + if (!timeout) { + iphy_err(drvr, "Timeout on response for query command\n"); + return -EIO; + } + + skb = (struct sk_buff *)inff_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, + msgbuf->rx_pktids, + msgbuf->ioctl_resp_pktid); + if (msgbuf->ioctl_resp_ret_len != 0) { + if 
(!skb) + return -EBADF; + + memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? + len : msgbuf->ioctl_resp_ret_len); + } + inff_pkt_buf_free_skb(skb); + + *fwerr = msgbuf->ioctl_resp_status; + return 0; +} + +static int inff_msgbuf_set_dcmd(struct inff_pub *drvr, int ifidx, + uint cmd, void *buf, uint len, int *fwerr) +{ + return inff_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len, fwerr); +} + +static int inff_msgbuf_hdrpull(struct inff_pub *drvr, bool do_fws, + struct sk_buff *skb, struct inff_if **ifp) +{ + return -ENODEV; +} + +static void inff_msgbuf_rxreorder(struct inff_if *ifp, struct sk_buff *skb, + bool inirq) +{ +} + +static void inff_msgbuf_poll_ring(struct inff_msgbuf *msgbuf, struct inff_commonring *ring, + unsigned int *work_done, int budget) +{ + void *buf; + u16 count; + u16 processed = 0; + + buf = inff_commonring_get_read_ptr(ring, &count); + while (buf && count) { + if (*work_done >= budget) + break; + inff_msgbuf_process_msgtype(msgbuf, + buf + msgbuf->rx_dataoffset, + work_done); + buf += inff_commonring_len_item(ring); + processed++; + if (processed == INFF_MSGBUF_UPDATE_RX_PTR_THRS) { + inff_commonring_read_complete(ring, processed); + processed = 0; + } + count--; + } + if (processed) + inff_commonring_read_complete(ring, processed); +} + +static int inff_msgbuf_napi_poll(struct napi_struct *napi, int budget) +{ + struct inff_if *ifp; + struct inff_msgbuf *msgbuf; + unsigned int work_done = 0; + + inff_dbg(TRACE, "NAPI poll started, work_done %d, budget %d\n", + work_done, budget); + + ifp = container_of(napi, struct inff_if, napi); + if (!ifp) + return budget; + + msgbuf = ifp->drvr->proto->pd; + inff_msgbuf_poll_ring(msgbuf, + msgbuf->commonrings[INFF_D2H_MSGRING_RX_COMPLETE], + &work_done, budget); + if (work_done < budget) + inff_msgbuf_poll_ring(msgbuf, + msgbuf->commonrings[INFF_D2H_MSGRING_TX_COMPLETE], + &work_done, budget); + + /* If budget fully consumed, continue polling */ + if (work_done == budget) + return budget; + + if 
(work_done < budget) { + napi_complete(napi); + inff_dbg(TRACE, "NAPI poll completed, work_done %d, budget %d\n", + work_done, budget); + } + + return work_done; +} + +static void inff_msgbuf_napi_add(struct inff_if *ifp) +{ + struct net_device *ndev; + + inff_dbg(TRACE, "Enter\n"); + ndev = ifp->ndev; + if (!ndev || !ifp->drvr->settings->napi_enable) + return; + + netif_napi_add(ndev, &ifp->napi, inff_msgbuf_napi_poll); + napi_enable(&ifp->napi); + ifp->napi_gro = true; + inff_dbg(TRACE, "Exit\n"); +} + +static void inff_msgbuf_napi_del(struct inff_if *ifp) +{ + struct net_device *ndev; + + inff_dbg(TRACE, "Enter\n"); + ndev = ifp->ndev; + if (!ndev || !ifp->drvr->settings->napi_enable) + return; + + napi_synchronize(&ifp->napi); + napi_disable(&ifp->napi); + netif_napi_del(&ifp->napi); + inff_dbg(TRACE, "Exit\n"); +} + +static void inff_msgbuf_napi_schedule(void *info) +{ + struct inff_if *ifp = (struct inff_if *)info; + + napi_schedule(&ifp->napi); +} + +static int inff_msgbuf_xdp_init(struct inff_pub *drvr, struct inff_if *ifp) +{ + struct inff_msgbuf *msgbuf = (struct inff_msgbuf *)drvr->proto->pd; + struct inff_msgbuf_pktids *pktids = msgbuf->rx_pktids; + + return inff_xdp_init(ifp->ndev, &pktids->xdp_rxq, pktids->page_pool); +} + +static void inff_msgbuf_xdp_deinit(struct inff_pub *drvr) +{ + struct inff_msgbuf *msgbuf = (struct inff_msgbuf *)drvr->proto->pd; + struct inff_msgbuf_pktids *pktids = msgbuf->rx_pktids; + + inff_xdp_deinit(&pktids->xdp_rxq); +} + +static void +inff_msgbuf_remove_flowring(struct inff_msgbuf *msgbuf, u16 flowid) +{ + u32 dma_sz; + void *dma_buf; + struct inff_commonring *commonring = msgbuf->flowrings[flowid]; + + inff_dbg(MSGBUF, "Removing flowring %d\n", flowid); + /* The flowring addr and dpeth be copied by the inff_commonring_config(), + * while flowring create. To keep the same logic as the commonring buf, so + * the commonring ptr is used in here. 
+ */ + dma_buf = commonring->buf_addr; + if (dma_buf) { + dma_sz = commonring->depth * commonring->item_len; + dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf, + msgbuf->flowring_dma_handle[flowid]); + } + inff_flowring_delete(msgbuf->flow, flowid); +} + +static struct inff_msgbuf_work_item * +inff_msgbuf_dequeue_work(struct inff_msgbuf *msgbuf) +{ + struct inff_msgbuf_work_item *work = NULL; + ulong flags; + + spin_lock_irqsave(&msgbuf->flowring_work_lock, flags); + if (!list_empty(&msgbuf->work_queue)) { + work = list_first_entry(&msgbuf->work_queue, + struct inff_msgbuf_work_item, queue); + list_del(&work->queue); + } + spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags); + + return work; +} + +static u32 +inff_msgbuf_flowring_create_worker(struct inff_msgbuf *msgbuf, + struct inff_msgbuf_work_item *work) +{ + struct inff_pub *drvr = msgbuf->drvr; + struct msgbuf_tx_flowring_create_req *create; + struct inff_commonring *commonring; + void *ret_ptr; + u32 flowid; + void *dma_buf; + u32 dma_sz, ring_max_item; + u64 address; + int err; + u32 flowring_depth = msgbuf->drvr->settings->flowring_depth; + + if (flowring_depth >= INFF_RING_MAX_ITEM_LOWER_LIMIT && + flowring_depth <= INFF_RING_MAX_ITEM_UPPER_LIMIT) + ring_max_item = flowring_depth; + else + ring_max_item = INFF_H2D_TXFLOWRING_MAX_ITEM; + flowid = work->flowid; + dma_sz = ring_max_item * INFF_H2D_TXFLOWRING_ITEMSIZE; + dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz, + &msgbuf->flowring_dma_handle[flowid], + GFP_KERNEL); + if (!dma_buf) { + iphy_err(drvr, "dma_alloc_coherent failed\n"); + inff_flowring_delete(msgbuf->flow, flowid); + return INFF_FLOWRING_INVALID_ID; + } + + inff_commonring_config(msgbuf->flowrings[flowid], + ring_max_item, + INFF_H2D_TXFLOWRING_ITEMSIZE, dma_buf); + + commonring = msgbuf->commonrings[INFF_H2D_MSGRING_CONTROL_SUBMIT]; + inff_commonring_lock(commonring); + ret_ptr = inff_commonring_reserve_for_write(commonring); + if (!ret_ptr) { + 
iphy_err(drvr, "Failed to reserve space in commonring\n"); + inff_commonring_unlock(commonring); + inff_msgbuf_remove_flowring(msgbuf, flowid); + return INFF_FLOWRING_INVALID_ID; + } + + create = (struct msgbuf_tx_flowring_create_req *)ret_ptr; + create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE; + create->msg.ifidx = work->ifidx; + create->msg.request_id = 0; + create->tid = inff_flowring_tid(msgbuf->flow, flowid); + create->flow_ring_id = cpu_to_le16(flowid + + INFF_H2D_MSGRING_FLOWRING_IDSTART); + memcpy(create->sa, work->sa, ETH_ALEN); + memcpy(create->da, work->da, ETH_ALEN); + address = (u64)msgbuf->flowring_dma_handle[flowid]; + create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32); + create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff); + create->max_items = cpu_to_le16(ring_max_item); + create->len_item = cpu_to_le16(INFF_H2D_TXFLOWRING_ITEMSIZE); + + inff_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n", + flowid, work->da, create->tid, work->ifidx); + + err = inff_commonring_write_complete(commonring); + inff_commonring_unlock(commonring); + if (err) { + iphy_err(drvr, "Failed to write commonring\n"); + inff_msgbuf_remove_flowring(msgbuf, flowid); + return INFF_FLOWRING_INVALID_ID; + } + + return flowid; +} + +static void inff_msgbuf_flowring_worker(struct work_struct *work) +{ + struct inff_msgbuf *msgbuf; + struct inff_msgbuf_work_item *create; + + msgbuf = container_of(work, struct inff_msgbuf, flowring_work); + + while ((create = inff_msgbuf_dequeue_work(msgbuf))) { + inff_msgbuf_flowring_create_worker(msgbuf, create); + kfree(create); + } +} + +static u32 inff_msgbuf_flowring_create(struct inff_msgbuf *msgbuf, int ifidx, + struct sk_buff *skb) +{ + struct inff_msgbuf_work_item *create; + struct ethhdr *eh = (struct ethhdr *)(skb->data); + u32 flowid; + ulong flags; + + create = kzalloc(sizeof(*create), GFP_ATOMIC); + if (!create) + return INFF_FLOWRING_INVALID_ID; + + flowid = 
inff_flowring_create(msgbuf->flow, eh->h_dest, + skb->priority, ifidx); + if (flowid == INFF_FLOWRING_INVALID_ID) { + kfree(create); + return flowid; + } + + create->flowid = flowid; + create->ifidx = ifidx; + memcpy(create->sa, eh->h_source, ETH_ALEN); + memcpy(create->da, eh->h_dest, ETH_ALEN); + + spin_lock_irqsave(&msgbuf->flowring_work_lock, flags); + list_add_tail(&create->queue, &msgbuf->work_queue); + spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags); + + schedule_work_on(inff_work_sched_cpu(msgbuf->drvr->settings->tx_cpu), + &msgbuf->flowring_work); + + return flowid; +} + +static void inff_msgbuf_txflow(struct inff_msgbuf *msgbuf, u16 flowid) +{ + struct inff_flowring *flow = msgbuf->flow; + struct inff_pub *drvr = msgbuf->drvr; + struct device *dev = drvr->bus_if->dev; + struct inff_commonring *commonring; + void *ret_ptr; + u32 count; + struct sk_buff *skb; + dma_addr_t physaddr; + struct inff_msgbuf_pktids *pktids = msgbuf->tx_pktids; + u32 pktid; + struct msgbuf_tx_msghdr *tx_msghdr; + u64 address; + + commonring = msgbuf->flowrings[flowid]; + if (!inff_commonring_write_available(commonring)) { + if (commonring->was_full) + iphy_err(drvr, "%d, TXPOST: commonring full !!\n", __LINE__); + return; + } + + inff_commonring_lock(commonring); + + count = INFF_MSGBUF_TX_FLUSH_CNT2 - INFF_MSGBUF_TX_FLUSH_CNT1; + while (inff_flowring_qlen(flow, flowid)) { + skb = inff_flowring_dequeue(flow, flowid); + if (!skb) { + iphy_err(drvr, "No SKB, but qlen %d\n", + inff_flowring_qlen(flow, flowid)); + break; + } + skb_tx_timestamp(skb); + skb_orphan(skb); + if (inff_msgbuf_alloc_pktid(dev, pktids, skb, NULL, + ETH_HLEN, &physaddr, &pktid)) { + inff_flowring_reinsert(flow, flowid, skb); + iphy_err(drvr, "TXPOST: No PKTID available (last alloc %d) !!\n", + pktids->last_allocated_idx); + break; + } + ret_ptr = inff_commonring_reserve_for_write(commonring); + if (!ret_ptr) { + if (commonring->was_full) + iphy_err(drvr, "%d, TXPOST: commonring full !!\n", __LINE__); 
+ + skb = (struct sk_buff *)inff_msgbuf_get_pktid(dev, + pktids, + pktid); + if (skb) + inff_flowring_reinsert(flow, flowid, skb); + break; + } + count++; + + tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr; + + tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST; + tx_msghdr->msg.request_id = cpu_to_le32(pktid + 1); + tx_msghdr->msg.ifidx = inff_flowring_ifidx_get(flow, flowid); + tx_msghdr->flags = INFF_MSGBUF_PKT_FLAGS_FRAME_802_3; + tx_msghdr->flags |= (skb->priority & 0x07) << + INFF_MSGBUF_PKT_FLAGS_PRIO_SHIFT; + tx_msghdr->seg_cnt = 1; + memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN); + tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN); + address = (u64)physaddr; + tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32); + tx_msghdr->data_buf_addr.low_addr = + cpu_to_le32(address & 0xffffffff); + tx_msghdr->metadata_buf_len = 0; + tx_msghdr->metadata_buf_addr.high_addr = 0; + tx_msghdr->metadata_buf_addr.low_addr = 0; + atomic_inc(&commonring->outstanding_tx); + if (count >= INFF_MSGBUF_TX_FLUSH_CNT2) { + inff_commonring_write_complete(commonring); + count = 0; + } + } + if (count) + inff_commonring_write_complete(commonring); + inff_commonring_unlock(commonring); +} + +static void inff_msgbuf_rx(struct inff_msgbuf *msgbuf) +{ + struct sk_buff *skb; + struct inff_if *ifp; + + while ((skb = skb_dequeue(&msgbuf->rx_data_q))) { + ifp = netdev_priv(skb->dev); + if (ifp) { + inff_netif_rx(ifp, skb, false); + } else { + iphy_err(msgbuf->drvr, "NULL ifp, unexpected pkt\n"); + inff_pkt_buf_free_skb(skb); + break; + } + } +} + +static void inff_msgbuf_txflow_worker(struct work_struct *worker) +{ + struct inff_msgbuf *msgbuf; + u32 flowid; + + msgbuf = container_of(worker, struct inff_msgbuf, txflow_work); + for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->max_flowrings) { + clear_bit(flowid, msgbuf->flow_map); + inff_msgbuf_txflow(msgbuf, flowid); + } +} + +static void inff_msgbuf_rx_worker(struct work_struct *worker) +{ + struct inff_msgbuf *msgbuf; + + msgbuf 
= container_of(worker, struct inff_msgbuf, rx_work); + inff_msgbuf_rx(msgbuf); +} + +static int inff_msgbuf_schedule_txdata(struct inff_msgbuf *msgbuf, u32 flowid, + bool force) +{ + struct inff_commonring *commonring; + int qlen; + + set_bit(flowid, msgbuf->flow_map); + commonring = msgbuf->flowrings[flowid]; + + qlen = inff_flowring_qlen(msgbuf->flow, flowid); + inff_dbg(MSGBUF, "force: %u, txflowring[%u] qlen: %u, outstanding_tx: %u", + force, flowid, qlen, atomic_read(&commonring->outstanding_tx)); + + if ((force) || (atomic_read(&commonring->outstanding_tx) < + INFF_MSGBUF_DELAY_TXWORKER_THRS)) + queue_work_on(inff_work_sched_cpu(msgbuf->drvr->settings->tx_cpu), + msgbuf->txflow_wq, &msgbuf->txflow_work); + + return 0; +} + +static int inff_msgbuf_schedule_rxdata(struct inff_msgbuf *msgbuf, bool force) +{ + if (force) + queue_work(msgbuf->rx_wq, &msgbuf->rx_work); + + return 0; +} + +static int inff_msgbuf_tx_queue_data(struct inff_pub *drvr, int ifidx, + struct sk_buff *skb) +{ + struct inff_msgbuf *msgbuf = (struct inff_msgbuf *)drvr->proto->pd; + struct inff_flowring *flow = msgbuf->flow; + struct ethhdr *eh = (struct ethhdr *)(skb->data); + u32 flowid; + u32 queue_count; + bool force; + + flowid = inff_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx); + if (flowid == INFF_FLOWRING_INVALID_ID) { + flowid = inff_msgbuf_flowring_create(msgbuf, ifidx, skb); + if (flowid == INFF_FLOWRING_INVALID_ID) + return -ENOMEM; + + inff_flowring_enqueue(flow, flowid, skb); + return 0; + } + queue_count = inff_flowring_enqueue(flow, flowid, skb); + force = ((queue_count % INFF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0); + inff_msgbuf_schedule_txdata(msgbuf, flowid, force); + + return 0; +} + +static void +inff_msgbuf_configure_addr_mode(struct inff_pub *drvr, int ifidx, + enum proto_addr_mode addr_mode) +{ + struct inff_msgbuf *msgbuf = (struct inff_msgbuf *)drvr->proto->pd; + + inff_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode); +} + +static void 
+inff_msgbuf_delete_peer(struct inff_pub *drvr, int ifidx, u8 peer[ETH_ALEN]) +{ + struct inff_msgbuf *msgbuf = (struct inff_msgbuf *)drvr->proto->pd; + + inff_flowring_delete_peer(msgbuf->flow, ifidx, peer); +} + +static void +inff_msgbuf_add_tdls_peer(struct inff_pub *drvr, int ifidx, u8 peer[ETH_ALEN]) +{ + struct inff_msgbuf *msgbuf = (struct inff_msgbuf *)drvr->proto->pd; + + inff_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer); +} + +static void +inff_msgbuf_process_ioctl_complete(struct inff_msgbuf *msgbuf, void *buf) +{ + struct msgbuf_ioctl_resp_hdr *ioctl_resp; + + ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf; + + msgbuf->ioctl_resp_status = + (s16)le16_to_cpu(ioctl_resp->compl_hdr.status); + msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len); + msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id); + + inff_msgbuf_ioctl_resp_wake(msgbuf); + + if (msgbuf->cur_ioctlrespbuf) + msgbuf->cur_ioctlrespbuf--; + inff_msgbuf_rxbuf_ioctlresp_post(msgbuf); +} + +static void +inff_msgbuf_process_txstatus(struct inff_msgbuf *msgbuf, void *buf, unsigned int *work_done) +{ + struct inff_commonring *commonring; + struct msgbuf_tx_status *tx_status; + u32 idx; + struct sk_buff *skb; + u16 flowid; + + tx_status = (struct msgbuf_tx_status *)buf; + idx = le32_to_cpu(tx_status->msg.request_id) - 1; + flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id); + flowid -= INFF_H2D_MSGRING_FLOWRING_IDSTART; + skb = (struct sk_buff *)inff_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, + msgbuf->tx_pktids, idx); + if (!skb) + return; + + set_bit(flowid, msgbuf->txstatus_done_map); + commonring = msgbuf->flowrings[flowid]; + atomic_dec(&commonring->outstanding_tx); + + inff_txfinalize(inff_get_ifp(msgbuf->drvr, tx_status->msg.ifidx), + skb, true); + if (work_done) + (*work_done)++; +} + +static struct inff_msgbuf_tx_status_entry * +inff_msgbuf_dequeue_tx_status(struct inff_msgbuf *msgbuf) +{ + struct inff_msgbuf_tx_status_entry *status = NULL; + ulong 
flags; + + spin_lock_irqsave(&msgbuf->tx_compl_work_lock, flags); + if (!list_empty(&msgbuf->tx_compl_list)) { + status = list_first_entry(&msgbuf->tx_compl_list, + struct inff_msgbuf_tx_status_entry, queue); + list_del(&status->queue); + } + spin_unlock_irqrestore(&msgbuf->tx_compl_work_lock, flags); + + return status; +} + +static void inff_msgbuf_tx_compl_worker(struct work_struct *work) +{ + struct inff_msgbuf *msgbuf; + struct inff_msgbuf_tx_status_entry *entry = NULL; + + msgbuf = container_of(work, struct inff_msgbuf, tx_compl_work); + while ((entry = inff_msgbuf_dequeue_tx_status(msgbuf))) { + inff_msgbuf_process_txstatus(msgbuf, &entry->status, NULL); + kfree(entry); + } +} + +static u32 inff_msgbuf_rxbuf_data_post(struct inff_msgbuf *msgbuf, u32 count) +{ + struct inff_pub *drvr = msgbuf->drvr; + struct inff_commonring *commonring; + void *ret_ptr; + struct page *page; + u16 alloced; + u32 pktlen; + dma_addr_t physaddr; + struct msgbuf_rx_bufpost *rx_bufpost; + u64 address; + u32 pktid; + u32 i; + + commonring = msgbuf->commonrings[INFF_H2D_MSGRING_RXPOST_SUBMIT]; + ret_ptr = inff_commonring_reserve_for_write_multiple(commonring, + count, + &alloced); + if (!ret_ptr) { + inff_dbg(MSGBUF, "Failed to reserve space in commonring\n"); + return 0; + } + + for (i = 0; i < alloced; i++) { + rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr; + memset(rx_bufpost, 0, sizeof(*rx_bufpost)); + + page = page_pool_dev_alloc_pages(msgbuf->rx_pktids->page_pool); + if (!page) { + iphy_err(drvr, "Failed to alloc PAGE\n"); + inff_commonring_write_cancel(commonring, alloced - i); + break; + } + + pktlen = INFF_MAX_RX_BUF_SIZE; + if (inff_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev, + msgbuf->rx_pktids, NULL, page, 0, + &physaddr, &pktid)) { + page_pool_put_full_page(msgbuf->rx_pktids->page_pool, + page, true); + iphy_err(drvr, "RXPOST: No PKTID available (last alloc %d) !!\n", + msgbuf->rx_pktids->last_allocated_idx); + inff_commonring_write_cancel(commonring, alloced - i); + 
break; + } + + if (msgbuf->rx_metadata_offset) { + address = (u64)physaddr; + rx_bufpost->metadata_buf_len = + cpu_to_le16(msgbuf->rx_metadata_offset); + rx_bufpost->metadata_buf_addr.high_addr = + cpu_to_le32(address >> 32); + rx_bufpost->metadata_buf_addr.low_addr = + cpu_to_le32(address & 0xffffffff); + + pktlen -= msgbuf->rx_metadata_offset; + physaddr += msgbuf->rx_metadata_offset; + } + rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST; + rx_bufpost->msg.request_id = cpu_to_le32(pktid); + + address = (u64)physaddr; + rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen); + rx_bufpost->data_buf_addr.high_addr = + cpu_to_le32(address >> 32); + rx_bufpost->data_buf_addr.low_addr = + cpu_to_le32(address & 0xffffffff); + + ret_ptr += inff_commonring_len_item(commonring); + } + + if (i) + inff_commonring_write_complete(commonring); + + return i; +} + +static void +inff_msgbuf_rxbuf_data_fill(struct inff_msgbuf *msgbuf) +{ + u32 fillbufs; + u32 retcount; + + fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost; + + while (fillbufs) { + retcount = inff_msgbuf_rxbuf_data_post(msgbuf, fillbufs); + if (!retcount) + break; + msgbuf->rxbufpost += retcount; + fillbufs -= retcount; + } +} + +static void +inff_msgbuf_update_rxbufpost_count(struct inff_msgbuf *msgbuf, u16 rxcnt) +{ + msgbuf->rxbufpost -= rxcnt; + if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost - + INFF_MSGBUF_RXBUFPOST_THRESHOLD)) + inff_msgbuf_rxbuf_data_fill(msgbuf); +} + +static u32 +inff_msgbuf_rxbuf_ctrl_post(struct inff_msgbuf *msgbuf, bool event_buf, + u32 count) +{ + struct inff_pub *drvr = msgbuf->drvr; + struct inff_commonring *commonring; + void *ret_ptr; + struct sk_buff *skb; + u16 alloced; + u32 pktlen; + dma_addr_t physaddr; + struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost; + u64 address; + u32 pktid; + u32 i; + + commonring = msgbuf->commonrings[INFF_H2D_MSGRING_CONTROL_SUBMIT]; + inff_commonring_lock(commonring); + ret_ptr = inff_commonring_reserve_for_write_multiple(commonring, + count, + 
&alloced); + if (!ret_ptr) { + iphy_err(drvr, "Failed to reserve space in commonring\n"); + inff_commonring_unlock(commonring); + return 0; + } + + for (i = 0; i < alloced; i++) { + rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr; + memset(rx_bufpost, 0, sizeof(*rx_bufpost)); + + skb = __inff_pkt_buf_get_skb(INFF_MSGBUF_MAX_CTL_PKT_SIZE, GFP_KERNEL); + if (!skb) { + iphy_err(drvr, "Failed to alloc SKB\n"); + inff_commonring_write_cancel(commonring, alloced - i); + break; + } + + pktlen = skb->len; + if (inff_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev, + msgbuf->rx_pktids, skb, NULL, 0, + &physaddr, &pktid)) { + dev_kfree_skb_any(skb); + iphy_err(drvr, "CTRLPOST: No PKTID available (last alloc %d) !!\n", + msgbuf->rx_pktids->last_allocated_idx); + inff_commonring_write_cancel(commonring, alloced - i); + break; + } + if (event_buf) + rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST; + else + rx_bufpost->msg.msgtype = + MSGBUF_TYPE_IOCTLRESP_BUF_POST; + rx_bufpost->msg.request_id = cpu_to_le32(pktid); + + address = (u64)physaddr; + rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen); + rx_bufpost->host_buf_addr.high_addr = + cpu_to_le32(address >> 32); + rx_bufpost->host_buf_addr.low_addr = + cpu_to_le32(address & 0xffffffff); + + ret_ptr += inff_commonring_len_item(commonring); + } + + if (i) + inff_commonring_write_complete(commonring); + + inff_commonring_unlock(commonring); + + return i; +} + +static void inff_msgbuf_rxbuf_ioctlresp_post(struct inff_msgbuf *msgbuf) +{ + u32 count; + + count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf; + count = inff_msgbuf_rxbuf_ctrl_post(msgbuf, false, count); + msgbuf->cur_ioctlrespbuf += count; +} + +static void inff_msgbuf_rxbuf_event_post(struct inff_msgbuf *msgbuf) +{ + u32 count; + + count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf; + count = inff_msgbuf_rxbuf_ctrl_post(msgbuf, true, count); + msgbuf->cur_eventbuf += count; +} + +static void inff_msgbuf_process_event(struct inff_msgbuf 
*msgbuf, void *buf) +{ + struct inff_pub *drvr = msgbuf->drvr; + struct msgbuf_rx_event *event; + u32 idx; + u16 buflen; + struct sk_buff *skb; + struct inff_if *ifp; + + event = (struct msgbuf_rx_event *)buf; + idx = le32_to_cpu(event->msg.request_id); + buflen = le16_to_cpu(event->event_data_len); + + if (msgbuf->cur_eventbuf) + msgbuf->cur_eventbuf--; + inff_msgbuf_rxbuf_event_post(msgbuf); + + skb = (struct sk_buff *)inff_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, + msgbuf->rx_pktids, idx); + if (!skb) + return; + + if (msgbuf->rx_dataoffset) + skb_pull(skb, msgbuf->rx_dataoffset); + + skb_trim(skb, buflen); + + ifp = inff_get_ifp(msgbuf->drvr, event->msg.ifidx); + if (!ifp || !ifp->ndev) { + iphy_err(drvr, "Received pkt for invalid ifidx %d\n", + event->msg.ifidx); + goto exit; + } + + skb->protocol = eth_type_trans(skb, ifp->ndev); + + inff_fweh_process_skb(ifp->drvr, skb, 0, GFP_KERNEL); + +exit: + inff_pkt_buf_free_skb(skb); +} + +static void +inff_msgbuf_process_rx_complete(struct inff_msgbuf *msgbuf, void *buf, + unsigned int *work_done) +{ + struct inff_pub *drvr = msgbuf->drvr; + struct msgbuf_rx_complete *rx_complete; + struct sk_buff *skb, *cpskb = NULL; + struct ethhdr *eh; + u16 data_offset; + u16 buflen; + u16 flags; + u32 idx; + struct inff_if *ifp; + struct inff_msgbuf_pktids *pktids = msgbuf->rx_pktids; + struct page *page; + unsigned char *data; + struct xdp_buff xdp_buff; + + inff_msgbuf_update_rxbufpost_count(msgbuf, 1); + + rx_complete = (struct msgbuf_rx_complete *)buf; + data_offset = le16_to_cpu(rx_complete->data_offset); + buflen = le16_to_cpu(rx_complete->data_len); + idx = le32_to_cpu(rx_complete->msg.request_id); + flags = le16_to_cpu(rx_complete->flags); + + ifp = inff_get_ifp(drvr, rx_complete->msg.ifidx); + if (!ifp || !ifp->ndev) { + iphy_err(drvr, "Received pkt for invalid ifidx %d\n", + rx_complete->msg.ifidx); + return; + } + + page = (struct page *)inff_msgbuf_get_pktid(drvr->bus_if->dev, + pktids, idx); + if (!page) + 
return; + + data = page_address(page); + prefetch(data); + + if (data_offset) + data += data_offset; + else if (msgbuf->rx_dataoffset) + data += msgbuf->rx_dataoffset; + + /* Prepare XDP BUFF from DMA mapped Page having the RX packet */ + inff_xdp_prepare_buff(&pktids->xdp_rxq, &xdp_buff, data, buflen); + + /* Run the attached XDP Program */ + if (inff_xdp_run_prog(ifp, &xdp_buff)) { + page_pool_put_full_page(pktids->page_pool, page, true); + return; + } + + /* Create SKB from XDP BUFF */ + skb = inff_xdp_prepare_skb(&xdp_buff); + if (!skb) { + page_pool_put_full_page(pktids->page_pool, page, true); + return; + } + + if (work_done) + (*work_done)++; + + if ((flags & INFF_MSGBUF_PKT_FLAGS_FRAME_MASK) == + INFF_MSGBUF_PKT_FLAGS_FRAME_802_11) { + ifp = msgbuf->drvr->mon_if; + + if (!ifp) { + iphy_err(drvr, "Received unexpected monitor pkt\n"); + inff_pkt_buf_free_skb(skb); + return; + } + + inff_netif_mon_rx(ifp, skb); + /* NAPI without gro will use skb queue to handle data */ + if (!drvr->settings->napi_enable) + skb_queue_tail(&msgbuf->rx_data_q, skb); + + return; + } + + if (ifp->isap && ifp->fmac_pkt_fwd_en) { + eh = (struct ethhdr *)(skb->data); + skb_set_network_header(skb, sizeof(struct ethhdr)); + skb->protocol = eh->h_proto; + skb->priority = cfg80211_classify8021d(skb, NULL); + if (is_unicast_ether_addr(eh->h_dest)) { + if (inff_find_sta(ifp, eh->h_dest)) { + /* determine the priority */ + if (skb->priority == 0 || skb->priority > 7) { + skb->priority = + cfg80211_classify8021d(skb, + NULL); + } + inff_proto_tx_queue_data(ifp->drvr, + ifp->ifidx, skb); + return; + } + } else { + cpskb = pskb_copy(skb, GFP_ATOMIC); + if (cpskb) { + inff_proto_tx_queue_data(ifp->drvr, + ifp->ifidx, + cpskb); + } else { + inff_err("Unable to do skb copy\n"); + } + } + } + skb->dev = ifp->ndev; + skb->protocol = eth_type_trans(skb, ifp->ndev); + /* NAPI disable will use skb queue to handle data */ + if (drvr->settings->napi_enable) + inff_netif_rx(ifp, skb, false); + else + 
skb_queue_tail(&msgbuf->rx_data_q, skb); +} + +static void inff_msgbuf_process_gen_status(struct inff_msgbuf *msgbuf, + void *buf) +{ + struct msgbuf_gen_status *gen_status = buf; + struct inff_pub *drvr = msgbuf->drvr; + int err; + + err = le16_to_cpu(gen_status->compl_hdr.status); + if (err) + iphy_err(drvr, "Firmware reported general error: %d\n", err); +} + +static void inff_msgbuf_process_ring_status(struct inff_msgbuf *msgbuf, + void *buf) +{ + struct msgbuf_ring_status *ring_status = buf; + struct inff_pub *drvr = msgbuf->drvr; + int err; + + err = le16_to_cpu(ring_status->compl_hdr.status); + if (err) { + int ring = le16_to_cpu(ring_status->compl_hdr.flow_ring_id); + + iphy_err(drvr, "Firmware reported ring %d error: %d\n", ring, + err); + } +} + +static void +inff_msgbuf_process_flow_ring_create_response(struct inff_msgbuf *msgbuf, + void *buf) +{ + struct inff_pub *drvr = msgbuf->drvr; + struct msgbuf_flowring_create_resp *flowring_create_resp; + u16 status; + u16 flowid; + + flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf; + + flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id); + flowid -= INFF_H2D_MSGRING_FLOWRING_IDSTART; + status = le16_to_cpu(flowring_create_resp->compl_hdr.status); + + if (status) { + iphy_err(drvr, "Flowring creation failed, code %d\n", status); + inff_msgbuf_remove_flowring(msgbuf, flowid); + return; + } + inff_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid, + status); + + inff_flowring_open(msgbuf->flow, flowid); + + inff_msgbuf_schedule_txdata(msgbuf, flowid, true); +} + +static void +inff_msgbuf_process_flow_ring_delete_response(struct inff_msgbuf *msgbuf, + void *buf) +{ + struct inff_pub *drvr = msgbuf->drvr; + struct msgbuf_flowring_delete_resp *flowring_delete_resp; + u16 status; + u16 flowid; + + flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf; + + flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id); + flowid -= 
INFF_H2D_MSGRING_FLOWRING_IDSTART; + status = le16_to_cpu(flowring_delete_resp->compl_hdr.status); + + if (status) { + iphy_err(drvr, "Flowring deletion failed, code %d\n", status); + inff_flowring_delete(msgbuf->flow, flowid); + return; + } + inff_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid, + status); + + inff_msgbuf_remove_flowring(msgbuf, flowid); +} + +static void +inff_msgbuf_process_d2h_mbdata(struct inff_msgbuf *msgbuf, + void *buf) +{ + struct msgbuf_d2h_mailbox_data *d2h_mbdata; + + d2h_mbdata = (struct msgbuf_d2h_mailbox_data *)buf; + + if (!d2h_mbdata) { + inff_err("d2h_mbdata is null\n"); + return; + } + + inff_pcie_handle_mb_data(msgbuf->drvr->bus_if, d2h_mbdata->mbdata); +} + +static void inff_msgbuf_process_msgtype(struct inff_msgbuf *msgbuf, void *buf, + unsigned int *work_done) +{ + struct inff_pub *drvr = msgbuf->drvr; + struct msgbuf_common_hdr *msg; + struct inff_msgbuf_tx_status_entry *entry; + ulong flags; + + msg = (struct msgbuf_common_hdr *)buf; + switch (msg->msgtype) { + case MSGBUF_TYPE_GEN_STATUS: + inff_dbg(MSGBUF, "MSGBUF_TYPE_GEN_STATUS\n"); + inff_msgbuf_process_gen_status(msgbuf, buf); + break; + case MSGBUF_TYPE_RING_STATUS: + inff_dbg(MSGBUF, "MSGBUF_TYPE_RING_STATUS\n"); + inff_msgbuf_process_ring_status(msgbuf, buf); + break; + case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT: + inff_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n"); + inff_msgbuf_process_flow_ring_create_response(msgbuf, buf); + break; + case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT: + inff_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n"); + inff_msgbuf_process_flow_ring_delete_response(msgbuf, buf); + break; + case MSGBUF_TYPE_IOCTLPTR_REQ_ACK: + inff_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n"); + break; + case MSGBUF_TYPE_IOCTL_CMPLT: + inff_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n"); + inff_msgbuf_process_ioctl_complete(msgbuf, buf); + break; + case MSGBUF_TYPE_WL_EVENT: + inff_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n"); + 
inff_msgbuf_process_event(msgbuf, buf); + break; + case MSGBUF_TYPE_TX_STATUS: + inff_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n"); + if (drvr->settings->napi_enable) { + inff_msgbuf_process_txstatus(msgbuf, buf, work_done); + } else { + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return; + + memcpy(&entry->status, buf, sizeof(entry->status)); + spin_lock_irqsave(&msgbuf->tx_compl_work_lock, flags); + list_add_tail(&entry->queue, &msgbuf->tx_compl_list); + spin_unlock_irqrestore(&msgbuf->tx_compl_work_lock, flags); + schedule_work_on(inff_work_sched_cpu(drvr->settings->tx_cpu), + &msgbuf->tx_compl_work); + } + break; + case MSGBUF_TYPE_RX_CMPLT: + inff_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n"); + inff_msgbuf_process_rx_complete(msgbuf, buf, work_done); + break; + case MSGBUF_TYPE_D2H_MAILBOX_DATA: + inff_dbg(MSGBUF, "MSGBUF_TYPE_D2H_MAILBOX_DATA\n"); + inff_msgbuf_process_d2h_mbdata(msgbuf, buf); + break; + + default: + iphy_err(drvr, "Unsupported msgtype %d\n", msg->msgtype); + break; + } +} + +static void inff_msgbuf_process_rx(struct inff_msgbuf *msgbuf, + struct inff_commonring *commonring) +{ + void *buf; + u16 count; + u16 processed; + struct inff_if *ifp; + struct msgbuf_common_hdr *msg; + int napi_cpu; + +again: + buf = inff_commonring_get_read_ptr(commonring, &count); + if (!buf) + return; + + msg = (struct msgbuf_common_hdr *)(buf + msgbuf->rx_dataoffset); + ifp = inff_get_ifp(msgbuf->drvr, msg->ifidx); + if (ifp && ifp->napi.poll && + (count && (msg->msgtype == MSGBUF_TYPE_RX_CMPLT || + msg->msgtype == MSGBUF_TYPE_TX_STATUS))) { + napi_cpu = inff_work_sched_cpu(msgbuf->drvr->settings->napi_cpu); + if (napi_cpu == WORK_CPU_UNBOUND) + inff_msgbuf_napi_schedule(ifp); + else + smp_call_function_single(napi_cpu, + inff_msgbuf_napi_schedule, + ifp, 0); + return; + } + + processed = 0; + while (count) { + inff_msgbuf_process_msgtype(msgbuf, + buf + msgbuf->rx_dataoffset, NULL); + buf += inff_commonring_len_item(commonring); + processed++; + if (processed 
== INFF_MSGBUF_UPDATE_RX_PTR_THRS) { + inff_commonring_read_complete(commonring, processed); + processed = 0; + } + count--; + } + if (processed) + inff_commonring_read_complete(commonring, processed); + + if (commonring->r_ptr == 0) + goto again; +} + +int inff_proto_msgbuf_rx_trigger(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pub *drvr = bus_if->drvr; + struct inff_msgbuf *msgbuf = (struct inff_msgbuf *)drvr->proto->pd; + struct inff_commonring *commonring; + void *buf; + u32 flowid; + int qlen; + + buf = msgbuf->commonrings[INFF_D2H_MSGRING_RX_COMPLETE]; + inff_msgbuf_process_rx(msgbuf, buf); + /* Schedule workqueue only when NAPI is disabled */ + if (!drvr->settings->napi_enable) { + /* Put rxdata into the workqueue only for RX t-put enhancement. */ + inff_msgbuf_schedule_rxdata(msgbuf, true); + } + buf = msgbuf->commonrings[INFF_D2H_MSGRING_TX_COMPLETE]; + inff_msgbuf_process_rx(msgbuf, buf); + buf = msgbuf->commonrings[INFF_D2H_MSGRING_CONTROL_COMPLETE]; + inff_msgbuf_process_rx(msgbuf, buf); + + for_each_set_bit(flowid, msgbuf->txstatus_done_map, + msgbuf->max_flowrings) { + clear_bit(flowid, msgbuf->txstatus_done_map); + commonring = msgbuf->flowrings[flowid]; + qlen = inff_flowring_qlen(msgbuf->flow, flowid); + if (qlen > INFF_MSGBUF_TRICKLE_TXWORKER_THRS || + ((qlen) && (atomic_read(&commonring->outstanding_tx) < + INFF_MSGBUF_TRICKLE_TXWORKER_THRS))) + inff_msgbuf_schedule_txdata(msgbuf, flowid, true); + } + + return 0; +} + +void inff_msgbuf_delete_flowring(struct inff_pub *drvr, u16 flowid) +{ + struct inff_msgbuf *msgbuf = (struct inff_msgbuf *)drvr->proto->pd; + struct msgbuf_tx_flowring_delete_req *delete; + struct inff_commonring *commonring; + struct inff_commonring *commonring_del = msgbuf->flowrings[flowid]; + struct inff_flowring *flow = msgbuf->flow; + void *ret_ptr; + u8 ifidx; + int err; + int retry = INFF_MAX_TXSTATUS_WAIT_RETRIES; + + /* make sure it is not in txflow */ + 
inff_commonring_lock(commonring_del); + flow->rings[flowid]->status = RING_CLOSING; + inff_commonring_unlock(commonring_del); + + /* wait for commonring txflow finished */ + while (retry && atomic_read(&commonring_del->outstanding_tx)) { + usleep_range(5000, 10000); + retry--; + } + if (!retry) { + inff_err("timed out waiting for txstatus\n"); + atomic_set(&commonring_del->outstanding_tx, 0); + } + + /* no need to submit if firmware can not be reached */ + if (drvr->bus_if->state != INFF_BUS_UP) { + inff_dbg(MSGBUF, "bus down, flowring will be removed\n"); + inff_msgbuf_remove_flowring(msgbuf, flowid); + return; + } + + commonring = msgbuf->commonrings[INFF_H2D_MSGRING_CONTROL_SUBMIT]; + inff_commonring_lock(commonring); + ret_ptr = inff_commonring_reserve_for_write(commonring); + if (!ret_ptr) { + iphy_err(drvr, "FW unaware, flowring will be removed !!\n"); + inff_commonring_unlock(commonring); + inff_msgbuf_remove_flowring(msgbuf, flowid); + return; + } + + delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr; + + ifidx = inff_flowring_ifidx_get(msgbuf->flow, flowid); + + delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE; + delete->msg.ifidx = ifidx; + delete->msg.request_id = 0; + + delete->flow_ring_id = cpu_to_le16(flowid + + INFF_H2D_MSGRING_FLOWRING_IDSTART); + delete->reason = 0; + + inff_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n", + flowid, ifidx); + + err = inff_commonring_write_complete(commonring); + inff_commonring_unlock(commonring); + if (err) { + iphy_err(drvr, "Failed to submit RING_DELETE, flowring will be removed\n"); + inff_msgbuf_remove_flowring(msgbuf, flowid); + } +} + +#ifdef DEBUG +static int inff_msgbuf_stats_read(struct seq_file *seq, void *data) +{ + struct inff_bus *bus_if = dev_get_drvdata(seq->private); + struct inff_pub *drvr = bus_if->drvr; + struct inff_msgbuf *msgbuf = (struct inff_msgbuf *)drvr->proto->pd; + struct inff_commonring *commonring; + u16 i; + struct inff_flowring_ring *ring; + struct 
inff_flowring_hash *hash; + + for (i = 0; i <= INFF_NROF_H2D_COMMON_MSGRINGS; i++) { + commonring = msgbuf->commonrings[i]; + if (commonring->cr_update_rptr) + commonring->cr_update_rptr(commonring->cr_ctx); + } + + for (i = 0; i <= INFF_NROF_D2H_COMMON_MSGRINGS; i++) { + commonring = msgbuf->commonrings[i]; + if (commonring->cr_update_wptr) + commonring->cr_update_wptr(commonring->cr_ctx); + } + + commonring = msgbuf->commonrings[INFF_H2D_MSGRING_CONTROL_SUBMIT]; + seq_puts(seq, "\nh2d Submission commonrings:\n"); + seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u, was_full %u\n", + commonring->r_ptr, commonring->w_ptr, commonring->depth, + commonring->was_full); + commonring = msgbuf->commonrings[INFF_H2D_MSGRING_RXPOST_SUBMIT]; + seq_printf(seq, "h2d_rx_submit: rp %4u, wp %4u, depth %4u, was_full %u\n", + commonring->r_ptr, commonring->w_ptr, commonring->depth, + commonring->was_full); + + seq_puts(seq, "\nd2h Completion commonrings:\n"); + commonring = msgbuf->commonrings[INFF_D2H_MSGRING_CONTROL_COMPLETE]; + seq_printf(seq, "d2h_ctl_cmplt: rp %4u, wp %4u, depth %4u, was_full %u\n", + commonring->r_ptr, commonring->w_ptr, commonring->depth, + commonring->was_full); + commonring = msgbuf->commonrings[INFF_D2H_MSGRING_TX_COMPLETE]; + seq_printf(seq, "d2h_tx_cmplt: rp %4u, wp %4u, depth %4u, was_full %u\n", + commonring->r_ptr, commonring->w_ptr, commonring->depth, + commonring->was_full); + commonring = msgbuf->commonrings[INFF_D2H_MSGRING_RX_COMPLETE]; + seq_printf(seq, "d2h_rx_cmplt: rp %4u, wp %4u, depth %4u, was_full %u\n", + commonring->r_ptr, commonring->w_ptr, commonring->depth, + commonring->was_full); + + seq_puts(seq, "\n h2d Active flowrings:\n"); + for (i = 0; i < msgbuf->flow->nrofrings; i++) { + if (!msgbuf->flow->rings[i]) + continue; + ring = msgbuf->flow->rings[i]; + if (ring->status != RING_OPEN) + continue; + commonring = msgbuf->flowrings[i]; + + if (commonring->cr_update_rptr) + commonring->cr_update_rptr(commonring->cr_ctx); + + 
hash = &msgbuf->flow->hash[ring->hash_id]; + seq_printf(seq, "flowid %3u: rp %4u, wp %4u, depth %4u, was_full %u, qlen %4u, blocked %u\n" + " ifidx %u, fifo %u, da %pM\n", + i, commonring->r_ptr, commonring->w_ptr, + commonring->depth, commonring->was_full, + skb_queue_len(&ring->skblist), ring->blocked, + hash->ifidx, hash->fifo, hash->mac); + } + + return 0; +} +#else +static int inff_msgbuf_stats_read(struct seq_file *seq, void *data) +{ + return 0; +} +#endif + +static void inff_msgbuf_debugfs_create(struct inff_pub *drvr) +{ + inff_debugfs_add_entry(drvr, "msgbuf_stats", inff_msgbuf_stats_read); +} + +int inff_proto_msgbuf_attach(struct inff_pub *drvr) +{ + struct inff_bus_msgbuf *if_msgbuf; + struct inff_msgbuf *msgbuf; + u64 address; + u32 count; + unsigned int wq_flags; + + if_msgbuf = drvr->bus_if->msgbuf; + + if (if_msgbuf->max_flowrings >= INFF_FLOWRING_HASHSIZE) { + iphy_err(drvr, "driver not configured for this many flowrings %d\n", + if_msgbuf->max_flowrings); + if_msgbuf->max_flowrings = INFF_FLOWRING_HASHSIZE - 1; + } + + msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL); + if (!msgbuf) + goto fail; + + wq_flags = WQ_HIGHPRI | WQ_MEM_RECLAIM; + + if (inff_work_sched_cpu(drvr->settings->tx_cpu) == WORK_CPU_UNBOUND) + wq_flags |= WQ_UNBOUND; + else + wq_flags |= WQ_CPU_INTENSIVE; + + msgbuf->txflow_wq = alloc_workqueue("msgbuf_txflow", wq_flags, 1); + if (!msgbuf->txflow_wq) { + iphy_err(drvr, "workqueue creation failed\n"); + goto fail; + } + INIT_WORK(&msgbuf->txflow_work, inff_msgbuf_txflow_worker); + if (!drvr->settings->napi_enable) { + msgbuf->rx_wq = alloc_workqueue("msgbuf_rx", WQ_HIGHPRI | + WQ_MEM_RECLAIM | WQ_UNBOUND, 1); + if (!msgbuf->rx_wq) { + iphy_err(drvr, "RX workqueue creation failed\n"); + goto fail; + } + INIT_WORK(&msgbuf->rx_work, inff_msgbuf_rx_worker); + } + count = BITS_TO_LONGS(if_msgbuf->max_flowrings); + count = count * sizeof(unsigned long); + msgbuf->flow_map = kzalloc(count, GFP_KERNEL); + if (!msgbuf->flow_map) + goto 
fail; + + msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL); + if (!msgbuf->txstatus_done_map) + goto fail; + + msgbuf->drvr = drvr; + msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev, + INFF_TX_IOCTL_MAX_MSG_SIZE, + &msgbuf->ioctbuf_handle, + GFP_KERNEL); + if (!msgbuf->ioctbuf) + goto fail; + address = (u64)msgbuf->ioctbuf_handle; + msgbuf->ioctbuf_phys_hi = address >> 32; + msgbuf->ioctbuf_phys_lo = address & 0xffffffff; + + drvr->proto->hdrpull = inff_msgbuf_hdrpull; + drvr->proto->query_dcmd = inff_msgbuf_query_dcmd; + drvr->proto->set_dcmd = inff_msgbuf_set_dcmd; + drvr->proto->tx_queue_data = inff_msgbuf_tx_queue_data; + drvr->proto->configure_addr_mode = inff_msgbuf_configure_addr_mode; + drvr->proto->delete_peer = inff_msgbuf_delete_peer; + drvr->proto->add_tdls_peer = inff_msgbuf_add_tdls_peer; + drvr->proto->rxreorder = inff_msgbuf_rxreorder; + drvr->proto->debugfs_create = inff_msgbuf_debugfs_create; + drvr->proto->pd = msgbuf; + drvr->proto->add_if = inff_msgbuf_napi_add; + drvr->proto->del_if = inff_msgbuf_napi_del; + drvr->proto->xdp_init = inff_msgbuf_xdp_init; + drvr->proto->xdp_deinit = inff_msgbuf_xdp_deinit; + + init_waitqueue_head(&msgbuf->ioctl_resp_wait); + + msgbuf->commonrings = + (struct inff_commonring **)if_msgbuf->commonrings; + msgbuf->flowrings = (struct inff_commonring **)if_msgbuf->flowrings; + msgbuf->max_flowrings = if_msgbuf->max_flowrings; + msgbuf->flowring_dma_handle = + kcalloc(msgbuf->max_flowrings, + sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL); + if (!msgbuf->flowring_dma_handle) + goto fail; + + msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset; + msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost; + + msgbuf->max_ioctlrespbuf = INFF_MSGBUF_MAX_IOCTLRESPBUF_POST; + msgbuf->max_eventbuf = INFF_MSGBUF_MAX_EVENTBUF_POST; + + msgbuf->tx_pktids = inff_msgbuf_init_tx_pktids(msgbuf, + NR_TX_PKTIDS, + DMA_TO_DEVICE); + if (!msgbuf->tx_pktids) + goto fail; + msgbuf->rx_pktids = inff_msgbuf_init_rx_pktids(msgbuf, + 
NR_RX_PKTIDS, + DMA_FROM_DEVICE); + if (!msgbuf->rx_pktids) + goto fail; + msgbuf->flow = inff_flowring_attach(drvr->bus_if->dev, + if_msgbuf->max_flowrings); + if (!msgbuf->flow) + goto fail; + + /* In NAPI disabled scenario, this skb queue is used to + * handle rx data in rx_flow worker + */ + if (!drvr->settings->napi_enable) + skb_queue_head_init(&msgbuf->rx_data_q); + + inff_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n", + msgbuf->max_rxbufpost, msgbuf->max_eventbuf, + msgbuf->max_ioctlrespbuf); + count = 0; + do { + inff_msgbuf_rxbuf_data_fill(msgbuf); + if (msgbuf->max_rxbufpost != msgbuf->rxbufpost) + usleep_range(10000, 10001); + else + break; + count++; + } while (count < 10); + inff_msgbuf_rxbuf_event_post(msgbuf); + inff_msgbuf_rxbuf_ioctlresp_post(msgbuf); + INIT_WORK(&msgbuf->flowring_work, inff_msgbuf_flowring_worker); + spin_lock_init(&msgbuf->flowring_work_lock); + INIT_LIST_HEAD(&msgbuf->work_queue); + INIT_WORK(&msgbuf->tx_compl_work, inff_msgbuf_tx_compl_worker); + INIT_LIST_HEAD(&msgbuf->tx_compl_list); + spin_lock_init(&msgbuf->tx_compl_work_lock); + return 0; + +fail: + if (msgbuf) { + kfree(msgbuf->flow_map); + kfree(msgbuf->txstatus_done_map); + inff_msgbuf_release_pktids(msgbuf); + kfree(msgbuf->flowring_dma_handle); + if (msgbuf->ioctbuf) + dma_free_coherent(drvr->bus_if->dev, + INFF_TX_IOCTL_MAX_MSG_SIZE, + msgbuf->ioctbuf, + msgbuf->ioctbuf_handle); + if (msgbuf->txflow_wq) + destroy_workqueue(msgbuf->txflow_wq); + if (msgbuf->rx_wq) + destroy_workqueue(msgbuf->rx_wq); + kfree(msgbuf); + } + return -ENOMEM; +} + +void inff_proto_msgbuf_detach(struct inff_pub *drvr) +{ + struct inff_msgbuf *msgbuf; + struct inff_msgbuf_work_item *work; + struct inff_msgbuf_tx_status_entry *status; + + inff_dbg(TRACE, "Enter\n"); + if (drvr->proto->pd) { + msgbuf = (struct inff_msgbuf *)drvr->proto->pd; + cancel_work_sync(&msgbuf->flowring_work); + cancel_work_sync(&msgbuf->tx_compl_work); + if (!drvr->settings->napi_enable) + 
cancel_work_sync(&msgbuf->rx_work); + while (!list_empty(&msgbuf->work_queue)) { + work = list_first_entry(&msgbuf->work_queue, + struct inff_msgbuf_work_item, + queue); + list_del(&work->queue); + kfree(work); + } + while (!list_empty(&msgbuf->tx_compl_list)) { + status = list_first_entry(&msgbuf->tx_compl_list, + struct inff_msgbuf_tx_status_entry, + queue); + list_del(&status->queue); + kfree(status); + } + kfree(msgbuf->flow_map); + kfree(msgbuf->txstatus_done_map); + if (msgbuf->txflow_wq) + destroy_workqueue(msgbuf->txflow_wq); + + if (msgbuf->rx_wq) { + flush_workqueue(msgbuf->rx_wq); + destroy_workqueue(msgbuf->rx_wq); + } + /* In NAPI disabled scenario, this skb queue is used to + * handle rx data in rx_flow worker + */ + if (!drvr->settings->napi_enable) + skb_queue_purge(&msgbuf->rx_data_q); + + inff_flowring_detach(msgbuf->flow); + dma_free_coherent(drvr->bus_if->dev, + INFF_TX_IOCTL_MAX_MSG_SIZE, + msgbuf->ioctbuf, msgbuf->ioctbuf_handle); + inff_msgbuf_release_pktids(msgbuf); + kfree(msgbuf->flowring_dma_handle); + kfree(msgbuf); + drvr->proto->pd = NULL; + } +} diff --git a/drivers/net/wireless/infineon/inffmac/msgbuf.h b/drivers/net/wireless/infineon/inffmac/msgbuf.h new file mode 100644 index 000000000000..fd12973a9f68 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/msgbuf.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_MSGBUF_H +#define INFF_MSGBUF_H + +#ifdef CONFIG_INFFMAC_PROTO_MSGBUF + +/* IDs of the 6 default common rings of msgbuf protocol */ +#define INFF_H2D_MSGRING_CONTROL_SUBMIT 0 +#define INFF_H2D_MSGRING_RXPOST_SUBMIT 1 +#define INFF_H2D_MSGRING_FLOWRING_IDSTART 2 +#define INFF_D2H_MSGRING_CONTROL_COMPLETE 2 +#define INFF_D2H_MSGRING_TX_COMPLETE 3 +#define INFF_D2H_MSGRING_RX_COMPLETE 4 + +#define INFF_NROF_H2D_COMMON_MSGRINGS 2 +#define INFF_NROF_D2H_COMMON_MSGRINGS 3 +#define INFF_NROF_COMMON_MSGRINGS (INFF_NROF_H2D_COMMON_MSGRINGS + \ + INFF_NROF_D2H_COMMON_MSGRINGS) + +#define INFF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 64 +#define INFF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 1024 +#define INFF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM 64 +#define INFF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM 1024 +#define INFF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 1024 +#define INFF_H2D_TXFLOWRING_MAX_ITEM 512 +#define INFF_RING_MAX_ITEM_LOWER_LIMIT 64 +#define INFF_RING_MAX_ITEM_UPPER_LIMIT 2048 + +#define INFF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE 40 +#define INFF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE 32 +#define INFF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE 24 +#define INFF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7 16 +#define INFF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE 24 +#define INFF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7 32 +#define INFF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE 40 +#define INFF_H2D_TXFLOWRING_ITEMSIZE 48 + +static const u32 inff_ring_max_item[INFF_NROF_COMMON_MSGRINGS] = { + INFF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM, + INFF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM, + INFF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM, + INFF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM, + INFF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM +}; + +static const u32 inff_ring_itemsize_pre_v7[INFF_NROF_COMMON_MSGRINGS] = { + INFF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE, + INFF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE, + INFF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE, + INFF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE_PRE_V7, + 
INFF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE_PRE_V7 +}; + +static const u32 inff_ring_itemsize[INFF_NROF_COMMON_MSGRINGS] = { + INFF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE, + INFF_H2D_MSGRING_RXPOST_SUBMIT_ITEMSIZE, + INFF_D2H_MSGRING_CONTROL_COMPLETE_ITEMSIZE, + INFF_D2H_MSGRING_TX_COMPLETE_ITEMSIZE, + INFF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE +}; + +struct msgbuf_buf_addr { + __le32 low_addr; + __le32 high_addr; +}; + +/** + * struct inff_bus_msgbuf - bus ring buffer interface used in case of msgbuf. + * + * @commonrings: commonrings which are always there. + * @flowrings: commonrings which are dynamically created and destroyed for data. + * @rx_dataoffset: if set then all rx data has this offset. + * @max_rxbufpost: maximum number of buffers to post for rx. + * @max_flowrings: maximum number of tx flow rings supported. + * @max_submissionrings: maximum number of submission rings (h2d) supported. + * @max_completionrings: maximum number of completion rings (d2h) supported. + */ +struct inff_bus_msgbuf { + struct inff_commonring *commonrings[INFF_NROF_COMMON_MSGRINGS]; + struct inff_commonring **flowrings; + u32 rx_dataoffset; + u32 max_rxbufpost; + u16 max_flowrings; + u16 max_submissionrings; + u16 max_completionrings; +}; + +int inff_proto_msgbuf_rx_trigger(struct device *dev); +void inff_msgbuf_delete_flowring(struct inff_pub *drvr, u16 flowid); +int inff_proto_msgbuf_attach(struct inff_pub *drvr); +void inff_proto_msgbuf_detach(struct inff_pub *drvr); +#else +static inline int inff_proto_msgbuf_attach(struct inff_pub *drvr) +{ + return 0; +} + +static inline void inff_proto_msgbuf_detach(struct inff_pub *drvr) {} +#endif +int inff_msgbuf_tx_mbdata(struct inff_pub *drvr, u32 mbdata); + +#endif /* INFF_MSGBUF_H */ -- 2.25.1 Implements the specific bus logic for Infineon devices connected to the Linux host machine via a PCIe interface.
Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/pcie.c | 2909 ++++++++++++++++++ drivers/net/wireless/infineon/inffmac/pcie.h | 19 + 2 files changed, 2928 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/pcie.c create mode 100644 drivers/net/wireless/infineon/inffmac/pcie.h diff --git a/drivers/net/wireless/infineon/inffmac/pcie.c b/drivers/net/wireless/infineon/inffmac/pcie.c new file mode 100644 index 000000000000..5ec1bd7e3c52 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/pcie.c @@ -0,0 +1,2909 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "chipcommon.h" +#include "utils.h" +#include "hw_ids.h" + +/* Custom inff_err() that takes bus arg and passes it further */ +#define inff_err(bus, fmt, ...) 
\ + do { \ + if (IS_ENABLED(CONFIG_INF_DEBUG) || \ + IS_ENABLED(CONFIG_INF_TRACING) || \ + net_ratelimit()) \ + __inff_err(bus, __func__, fmt, ##__VA_ARGS__); \ + } while (0) + +#include "debug.h" +#include "bus.h" +#include "commonring.h" +#include "msgbuf.h" +#include "pcie.h" +#include "firmware.h" +#include "chip.h" +#include "core.h" +#include "common.h" +#include "cfg80211.h" +#include "trxhdr.h" + +enum inff_pcie_state { + INFFMAC_PCIE_STATE_DOWN, + INFFMAC_PCIE_STATE_UP +}; + +/* per-board firmware binaries */ +#define INFF_55572_FIRMWARE_BASENAME INFF_FW_DEFAULT_PATH "inffmac55572-pcie" + +MODULE_FIRMWARE(INFF_55572_FIRMWARE_BASENAME ".trxse"); + +static const struct inff_firmware_mapping inff_pcie_fwnames[] = { + INFF_FW_ENTRY(INF_CC_5557X_CHIP_ID, 0xFFFFFFFF, 55572), +}; + +#define INFF_PCIE_READ_SHARED_TIMEOUT 5000 /* msec */ +#define INFF_PCIE_FW_UP_TIMEOUT 5000 /* msec */ + +#define INFF_PCIE_REG_MAP_SIZE (32 * 1024) + +/* backplane address space accessed by BAR0 */ +#define INFF_PCIE_BAR0_WINDOW 0x80 +#define INFF_PCIE_BAR0_REG_SIZE 0x1000 +#define INFF_PCIE_BAR0_WRAPPERBASE 0x70 + +#define INFF_PCIE_BAR0_WRAPBASE_DMP_OFFSET 0x1000 +#define INFF_PCIE_BAR0_PCIE_ENUM_OFFSET 0x2000 +#define INFF_CYW55572_PCIE_BAR0_PCIE_ENUM_OFFSET 0x3000 + +#define INFF_PCIE_BAR1_WINDOW 0x84 + +#define INFF_PCIE_ARMCR4REG_BANKIDX 0x40 +#define INFF_PCIE_ARMCR4REG_BANKPDA 0x4C + +#define INFF_PCIE_REG_INTSTATUS 0x90 +#define INFF_PCIE_REG_INTMASK 0x94 +#define INFF_PCIE_REG_SBMBX 0x98 + +#define INFF_PCIE_REG_LINK_STATUS_CTRL 0xBC + +#define INFF_PCIE_PCIE2REG_INTMASK 0x24 +#define INFF_PCIE_PCIE2REG_MAILBOXINT 0x48 +#define INFF_PCIE_PCIE2REG_MAILBOXMASK 0x4C +#define INFF_PCIE_PCIE2REG_CONFIGADDR 0x120 +#define INFF_PCIE_PCIE2REG_CONFIGDATA 0x124 +#define INFF_PCIE_PCIE2REG_H2D_MAILBOX_0 0x140 +#define INFF_PCIE_PCIE2REG_H2D_MAILBOX_1 0x144 +#define INFF_PCIE_PCIE2REG_DAR_D2H_MSG_0 0xA80 +#define INFF_PCIE_PCIE2REG_DAR_H2D_MSG_0 0xA90 + +#define 
INFF_PCIE_64_PCIE2REG_INTMASK 0xC14 +#define INFF_PCIE_64_PCIE2REG_MAILBOXINT 0xC30 +#define INFF_PCIE_64_PCIE2REG_MAILBOXMASK 0xC34 +#define INFF_PCIE_64_PCIE2REG_H2D_MAILBOX_0 0xA20 +#define INFF_PCIE_64_PCIE2REG_H2D_MAILBOX_1 0xA24 + +#define INFF_PCIE2_INTA 0x01 +#define INFF_PCIE2_INTB 0x02 + +#define INFF_PCIE_INT_0 0x01 +#define INFF_PCIE_INT_1 0x02 +#define INFF_PCIE_INT_DEF (INFF_PCIE_INT_0 | \ + INFF_PCIE_INT_1) + +#define INFF_PCIE_MB_INT_FN0_0 0x0100 +#define INFF_PCIE_MB_INT_FN0_1 0x0200 +#define INFF_PCIE_MB_INT_D2H0_DB0 0x10000 +#define INFF_PCIE_MB_INT_D2H0_DB1 0x20000 +#define INFF_PCIE_MB_INT_D2H1_DB0 0x40000 +#define INFF_PCIE_MB_INT_D2H1_DB1 0x80000 +#define INFF_PCIE_MB_INT_D2H2_DB0 0x100000 +#define INFF_PCIE_MB_INT_D2H2_DB1 0x200000 +#define INFF_PCIE_MB_INT_D2H3_DB0 0x400000 +#define INFF_PCIE_MB_INT_D2H3_DB1 0x800000 + +#define INFF_PCIE_MB_INT_FN0 (INFF_PCIE_MB_INT_FN0_0 | \ + INFF_PCIE_MB_INT_FN0_1) +#define INFF_PCIE_MB_INT_D2H_DB (INFF_PCIE_MB_INT_D2H0_DB0 | \ + INFF_PCIE_MB_INT_D2H0_DB1 | \ + INFF_PCIE_MB_INT_D2H1_DB0 | \ + INFF_PCIE_MB_INT_D2H1_DB1 | \ + INFF_PCIE_MB_INT_D2H2_DB0 | \ + INFF_PCIE_MB_INT_D2H2_DB1 | \ + INFF_PCIE_MB_INT_D2H3_DB0 | \ + INFF_PCIE_MB_INT_D2H3_DB1) + +#define INFF_PCIE_64_MB_INT_D2H0_DB0 0x1 +#define INFF_PCIE_64_MB_INT_D2H0_DB1 0x2 +#define INFF_PCIE_64_MB_INT_D2H1_DB0 0x4 +#define INFF_PCIE_64_MB_INT_D2H1_DB1 0x8 +#define INFF_PCIE_64_MB_INT_D2H2_DB0 0x10 +#define INFF_PCIE_64_MB_INT_D2H2_DB1 0x20 +#define INFF_PCIE_64_MB_INT_D2H3_DB0 0x40 +#define INFF_PCIE_64_MB_INT_D2H3_DB1 0x80 +#define INFF_PCIE_64_MB_INT_D2H4_DB0 0x100 +#define INFF_PCIE_64_MB_INT_D2H4_DB1 0x200 +#define INFF_PCIE_64_MB_INT_D2H5_DB0 0x400 +#define INFF_PCIE_64_MB_INT_D2H5_DB1 0x800 +#define INFF_PCIE_64_MB_INT_D2H6_DB0 0x1000 +#define INFF_PCIE_64_MB_INT_D2H6_DB1 0x2000 +#define INFF_PCIE_64_MB_INT_D2H7_DB0 0x4000 +#define INFF_PCIE_64_MB_INT_D2H7_DB1 0x8000 + +#define INFF_PCIE_64_MB_INT_D2H_DB (INFF_PCIE_64_MB_INT_D2H0_DB0 | \ + 
INFF_PCIE_64_MB_INT_D2H0_DB1 | \ + INFF_PCIE_64_MB_INT_D2H1_DB0 | \ + INFF_PCIE_64_MB_INT_D2H1_DB1 | \ + INFF_PCIE_64_MB_INT_D2H2_DB0 | \ + INFF_PCIE_64_MB_INT_D2H2_DB1 | \ + INFF_PCIE_64_MB_INT_D2H3_DB0 | \ + INFF_PCIE_64_MB_INT_D2H3_DB1 | \ + INFF_PCIE_64_MB_INT_D2H4_DB0 | \ + INFF_PCIE_64_MB_INT_D2H4_DB1 | \ + INFF_PCIE_64_MB_INT_D2H5_DB0 | \ + INFF_PCIE_64_MB_INT_D2H5_DB1 | \ + INFF_PCIE_64_MB_INT_D2H6_DB0 | \ + INFF_PCIE_64_MB_INT_D2H6_DB1 | \ + INFF_PCIE_64_MB_INT_D2H7_DB0 | \ + INFF_PCIE_64_MB_INT_D2H7_DB1) + +#define INFF_PCIE_SHARED_VERSION_6 6 +#define INFF_PCIE_SHARED_VERSION_7 7 +#define INFF_PCIE_MIN_SHARED_VERSION 5 +#define INFF_PCIE_MAX_SHARED_VERSION INFF_PCIE_SHARED_VERSION_7 +#define INFF_PCIE_SHARED_VERSION_MASK 0x00FF +#define INFF_PCIE_SHARED_DMA_INDEX 0x10000 +#define INFF_PCIE_SHARED_DMA_2B_IDX 0x100000 +#define INFF_PCIE_SHARED_USE_MAILBOX 0x2000000 +#define INFF_PCIE_SHARED_HOSTRDY_DB1 0x10000000 + +#define INFF_PCIE_FLAGS_HTOD_SPLIT 0x4000 +#define INFF_PCIE_FLAGS_DTOH_SPLIT 0x8000 + +#define INFF_SHARED_MAX_RXBUFPOST_OFFSET 34 +#define INFF_SHARED_RING_BASE_OFFSET 52 +#define INFF_SHARED_RX_DATAOFFSET_OFFSET 36 +#define INFF_SHARED_CONSOLE_ADDR_OFFSET 20 +#define INFF_SHARED_HTOD_MB_DATA_ADDR_OFFSET 40 +#define INFF_SHARED_DTOH_MB_DATA_ADDR_OFFSET 44 +#define INFF_SHARED_RING_INFO_ADDR_OFFSET 48 +#define INFF_SHARED_DMA_SCRATCH_LEN_OFFSET 52 +#define INFF_SHARED_DMA_SCRATCH_ADDR_OFFSET 56 +#define INFF_SHARED_DMA_RINGUPD_LEN_OFFSET 64 +#define INFF_SHARED_DMA_RINGUPD_ADDR_OFFSET 68 +#define INFF_SHARED_HOST_CAP_OFFSET 84 + +#define INFF_RING_H2D_RING_COUNT_OFFSET 0 +#define INFF_RING_D2H_RING_COUNT_OFFSET 1 +#define INFF_RING_H2D_RING_MEM_OFFSET 4 +#define INFF_RING_H2D_RING_STATE_OFFSET 8 + +#define INFF_RING_MEM_BASE_ADDR_OFFSET 8 +#define INFF_RING_MAX_ITEM_OFFSET 4 +#define INFF_RING_LEN_ITEMS_OFFSET 6 +#define INFF_RING_MEM_SZ 16 +#define INFF_RING_STATE_SZ 8 + +#define INFF_DEF_MAX_RXBUFPOST 255 + +#define 
INFF_HOSTCAP_H2D_ENABLE_HOSTRDY 0x400 +#define INFF_HOSTCAP_DS_NO_OOB_DW 0x1000 + +#define INFF_CONSOLE_BUFADDR_OFFSET 8 +#define INFF_CONSOLE_BUFSIZE_OFFSET 12 +#define INFF_CONSOLE_WRITEIDX_OFFSET 16 + +#define INFF_DMA_D2H_SCRATCH_BUF_LEN 8 +#define INFF_DMA_D2H_RINGUPD_BUF_LEN 1024 + +#define INFF_D2H_DEV_D3_ACK 0x00000001 +#define INFF_D2H_DEV_DS_ENTER_REQ 0x00000002 +#define INFF_D2H_DEV_DS_EXIT_NOTE 0x00000004 +#define INFF_D2H_DEV_FWHALT 0x10000000 + +#define INFF_H2D_HOST_D3_INFORM 0x00000001 +#define INFF_H2D_HOST_DS_ACK 0x00000002 +#define INFF_H2D_HOST_D0_INFORM_IN_USE 0x00000008 +#define INFF_H2D_HOST_D0_INFORM 0x00000010 + +#define INFF_PCIE_MBDATA_TIMEOUT msecs_to_jiffies(2000) + +#define INFF_PCIE_CFGREG_STATUS_CMD 0x4 +#define INFF_PCIE_CFGREG_PM_CSR 0x4C +#define INFF_PCIE_CFGREG_MSI_CAP 0x58 +#define INFF_PCIE_CFGREG_MSI_ADDR_L 0x5C +#define INFF_PCIE_CFGREG_MSI_ADDR_H 0x60 +#define INFF_PCIE_CFGREG_MSI_DATA 0x64 +#define INFF_PCIE_CFGREG_REVID 0x6C +#define INFF_PCIE_CFGREG_LINK_STATUS_CTRL 0xBC +#define INFF_PCIE_CFGREG_LINK_STATUS_CTRL2 0xDC +#define INFF_PCIE_CFGREG_RBAR_CTRL 0x228 +#define INFF_PCIE_CFGREG_PML1_SUB_CTRL1 0x248 +#define INFF_PCIE_CFGREG_REG_BAR2_CONFIG 0x4E0 +#define INFF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4 +#define INFF_PCIE_CFGREG_REVID_SECURE_MODE BIT(31) +#define INFF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3 + +/* Magic number at a magic location to find RAM size */ +#define INFF_RAMSIZE_MAGIC 0x534d4152 /* SMAR */ +#define INFF_RAMSIZE_OFFSET 0x6c + +#define INFF_ENTROPY_SEED_LEN 64u +#define INFF_ENTROPY_NONCE_LEN 16u +#define INFF_ENTROPY_HOST_LEN (INFF_ENTROPY_SEED_LEN + \ + INFF_ENTROPY_NONCE_LEN) +#define INFF_NVRAM_OFFSET_TCM 4u +#define INFF_NVRAM_COMPRS_FACTOR 4u +#define INFF_NVRAM_RNG_SIGNATURE 0xFEEDC0DEu + +struct inff_rand_metadata { + u32 signature; + u32 count; +}; + +struct inff_pcie_console { + u32 base_addr; + u32 buf_addr; + u32 bufsize; + u32 read_idx; + u8 log_str[256]; + u8 log_idx; +}; + +struct 
inff_pcie_shared_info { + u32 tcm_base_address; + u32 flags; + struct inff_pcie_ringbuf *commonrings[INFF_NROF_COMMON_MSGRINGS]; + struct inff_pcie_ringbuf *flowrings; + u16 max_rxbufpost; + u16 max_flowrings; + u16 max_submissionrings; + u16 max_completionrings; + u32 rx_dataoffset; + u32 htod_mb_data_addr; + u32 dtoh_mb_data_addr; + u32 ring_info_addr; + struct inff_pcie_console console; + void *scratch; + dma_addr_t scratch_dmahandle; + void *ringupd; + dma_addr_t ringupd_dmahandle; + u8 version; +}; + +struct inff_pcie_core_info { + u32 base; + u32 wrapbase; +}; + +struct inff_pciedev_info { + enum inff_pcie_state state; + bool in_irq; + struct pci_dev *pdev; + const struct inff_pcie_reginfo *reginfo; + void __iomem *regs; + void __iomem *tcm; + u32 ram_base; + u32 ram_size; + struct inff_chip *ci; + u32 coreid; + struct inff_pcie_shared_info shared; + u8 hostready; + bool use_mailbox; + bool use_d0_inform; + wait_queue_head_t mbdata_resp_wait; + bool mbdata_completed; + bool irq_allocated; + bool wowl_enabled; + u8 dma_idx_sz; + void *idxbuf; + u32 idxbuf_sz; + dma_addr_t idxbuf_dmahandle; + u16 (*read_ptr)(struct inff_pciedev_info *devinfo, u32 mem_offset); + void (*write_ptr)(struct inff_pciedev_info *devinfo, u32 mem_offset, + u16 value); + struct inff_mp_device *settings; + ulong bar1_size; +#ifdef DEBUG + u32 console_interval; + bool console_active; + struct timer_list timer; +#endif +}; + +struct inff_pcie_ringbuf { + struct inff_commonring commonring; + dma_addr_t dma_handle; + u32 w_idx_addr; + u32 r_idx_addr; + struct inff_pciedev_info *devinfo; + u8 id; +}; + +/** + * struct inff_pcie_dhi_ringinfo - dongle/host interface shared ring info + * + * @ringmem: dongle memory pointer to ring memory location + * @h2d_w_idx_ptr: h2d ring write indices dongle memory pointers + * @h2d_r_idx_ptr: h2d ring read indices dongle memory pointers + * @d2h_w_idx_ptr: d2h ring write indices dongle memory pointers + * @d2h_r_idx_ptr: d2h ring read indices dongle memory 
pointers + * @h2d_w_idx_hostaddr: h2d ring write indices host memory pointers + * @h2d_r_idx_hostaddr: h2d ring read indices host memory pointers + * @d2h_w_idx_hostaddr: d2h ring write indices host memory pointers + * @d2h_r_idx_hostaddr: d2h ring read indices host memory pointers + * @max_flowrings: maximum number of tx flow rings supported. + * @max_submissionrings: maximum number of submission rings (h2d) supported. + * @max_completionrings: maximum number of completion rings (d2h) supported. + */ +struct inff_pcie_dhi_ringinfo { + __le32 ringmem; + __le32 h2d_w_idx_ptr; + __le32 h2d_r_idx_ptr; + __le32 d2h_w_idx_ptr; + __le32 d2h_r_idx_ptr; + struct msgbuf_buf_addr h2d_w_idx_hostaddr; + struct msgbuf_buf_addr h2d_r_idx_hostaddr; + struct msgbuf_buf_addr d2h_w_idx_hostaddr; + struct msgbuf_buf_addr d2h_r_idx_hostaddr; + __le16 max_flowrings; + __le16 max_submissionrings; + __le16 max_completionrings; +}; + +struct inff_pcie_reginfo { + u32 intmask; + u32 mailboxint; + u32 mailboxmask; + u32 h2d_mailbox_0; + u32 h2d_mailbox_1; + u32 int_d2h_db; + u32 int_fn0; +}; + +static const struct inff_pcie_reginfo inff_reginfo_default = { + .intmask = INFF_PCIE_PCIE2REG_INTMASK, + .mailboxint = INFF_PCIE_PCIE2REG_MAILBOXINT, + .mailboxmask = INFF_PCIE_PCIE2REG_MAILBOXMASK, + .h2d_mailbox_0 = INFF_PCIE_PCIE2REG_H2D_MAILBOX_0, + .h2d_mailbox_1 = INFF_PCIE_PCIE2REG_H2D_MAILBOX_1, + .int_d2h_db = INFF_PCIE_MB_INT_D2H_DB, + .int_fn0 = INFF_PCIE_MB_INT_FN0, +}; + +static const struct inff_pcie_reginfo inff_reginfo_64 = { + .intmask = INFF_PCIE_64_PCIE2REG_INTMASK, + .mailboxint = INFF_PCIE_64_PCIE2REG_MAILBOXINT, + .mailboxmask = INFF_PCIE_64_PCIE2REG_MAILBOXMASK, + .h2d_mailbox_0 = INFF_PCIE_PCIE2REG_H2D_MAILBOX_0, + .h2d_mailbox_1 = INFF_PCIE_PCIE2REG_H2D_MAILBOX_1, + .int_d2h_db = INFF_PCIE_64_MB_INT_D2H_DB, + .int_fn0 = INFF_PCIE_MB_INT_FN0, +}; + +static void inff_pcie_setup(struct device *dev, int ret, + struct inff_fw_request *fwreq); +static void
+inff_pcie_fwcon_timer(struct inff_pciedev_info *devinfo, bool active); +static void inff_pcie_debugfs_create(struct device *dev); +static void inff_pcie_bus_console_init(struct inff_pciedev_info *devinfo); +static void inff_pcie_bus_console_read(struct inff_pciedev_info *devinfo, + bool error); +static void +inff_pcie_fwcon_timer(struct inff_pciedev_info *devinfo, bool active); +static void inff_pcie_debugfs_create(struct device *dev); + +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ +DEFINE_RAW_SPINLOCK(pcie_lock); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + +static u32 +inff_pcie_read_reg32(struct inff_pciedev_info *devinfo, u32 reg_offset) +{ + void __iomem *address = devinfo->regs + reg_offset; + + return ioread32(address); +} + +static void +inff_pcie_write_reg32(struct inff_pciedev_info *devinfo, u32 reg_offset, + u32 value) +{ + void __iomem *address = devinfo->regs + reg_offset; + + iowrite32(value, address); +} + +static u8 +inff_pcie_read_tcm8(struct inff_pciedev_info *devinfo, u32 mem_offset) +{ + void __iomem *address = devinfo->tcm + mem_offset; +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + unsigned long flags; + u8 value; + + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - devinfo->bar1_size; + } + value = ioread8(address); + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, 0x0); + raw_spin_unlock_irqrestore(&pcie_lock, flags); + + return value; +#else + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + inff_err(bus, + "mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return -EINVAL; + } + + return (ioread8(address)); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ +} + +static u16 +inff_pcie_read_tcm16(struct inff_pciedev_info *devinfo, u32 mem_offset) +{ + void __iomem 
*address = devinfo->tcm + mem_offset; +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + u16 value; + unsigned long flags; + + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - devinfo->bar1_size; + } + value = ioread16(address); + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, 0x0); + raw_spin_unlock_irqrestore(&pcie_lock, flags); + + return value; +#else + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + inff_err(bus, "mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return -EINVAL; + } + + return (ioread16(address)); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ +} + +static void +inff_pcie_write_tcm16(struct inff_pciedev_info *devinfo, u32 mem_offset, + u16 value) +{ + void __iomem *address = devinfo->tcm + mem_offset; +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + unsigned long flags; + + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - devinfo->bar1_size; + } + + iowrite16(value, address); + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, 0x0); + raw_spin_unlock_irqrestore(&pcie_lock, flags); +#else + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + inff_err(bus, "mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return; + } + + iowrite16(value, address); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ +} + +static u16 +inff_pcie_read_idx(struct inff_pciedev_info *devinfo, u32 mem_offset) +{ + u16 *address = devinfo->idxbuf + mem_offset; + + return (*(address)); +} + +static void 
+inff_pcie_write_idx(struct inff_pciedev_info *devinfo, u32 mem_offset, + u16 value) +{ + u16 *address = devinfo->idxbuf + mem_offset; + + *(address) = value; +} + +static u32 +inff_pcie_read_tcm32(struct inff_pciedev_info *devinfo, u32 mem_offset) +{ + void __iomem *address = devinfo->tcm + mem_offset; +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + u32 value; + unsigned long flags; + + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - devinfo->bar1_size; + } + value = ioread32(address); + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, 0x0); + raw_spin_unlock_irqrestore(&pcie_lock, flags); + + return value; +#else + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + inff_err(bus, "mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return -EINVAL; + } + + return (ioread32(address)); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ +} + +static void +inff_pcie_write_tcm32(struct inff_pciedev_info *devinfo, u32 mem_offset, + u32 value) +{ + void __iomem *address = devinfo->tcm + mem_offset; +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + unsigned long flags; + + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - devinfo->bar1_size; + } + iowrite32(value, address); + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, 0x0); + raw_spin_unlock_irqrestore(&pcie_lock, flags); +#else + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + inff_err(bus, "mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return; + } 
+ + iowrite32(value, address); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ +} + +static u32 +inff_pcie_read_ram32(struct inff_pciedev_info *devinfo, u32 mem_offset) +{ + void __iomem *address = devinfo->tcm + devinfo->ci->rambase + + mem_offset; +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + u32 value; + unsigned long flags; + + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - devinfo->bar1_size; + } + value = ioread32(address); + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, 0x0); + raw_spin_unlock_irqrestore(&pcie_lock, flags); + + return value; +#else + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + inff_err(bus, "mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return -EINVAL; + } + + return (ioread32(address)); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ +} + +static void +inff_pcie_write_ram32(struct inff_pciedev_info *devinfo, u32 mem_offset, + u32 value) +{ + void __iomem *address = devinfo->tcm + devinfo->ci->rambase + + mem_offset; +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + unsigned long flags; + + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - devinfo->bar1_size; + } + iowrite32(value, address); + pci_write_config_dword(devinfo->pdev, INFF_PCIE_BAR1_WINDOW, 0x0); + raw_spin_unlock_irqrestore(&pcie_lock, flags); +#else + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + + if ((address - devinfo->tcm) >= devinfo->bar1_size) { + inff_err(bus, "mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return; + } + + iowrite32(value, address); 
+#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ +} + +static void +inff_pcie_copy_mem_todev(struct inff_pciedev_info *devinfo, u32 mem_offset, + void *srcaddr, u32 len) +{ + struct pci_dev *pdev = devinfo->pdev; +#ifndef CONFIG_INFFMAC_PCIE_BARWIN_SZ + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); +#endif + void __iomem *address = devinfo->tcm + mem_offset; + __le32 *src32; + __le16 *src16; + u8 *src8; +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + unsigned long flags; +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + + if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) { + if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) { + src8 = (u8 *)srcaddr; + while (len) { +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= + devinfo->bar1_size) { + pci_write_config_dword + (pdev, + INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - + devinfo->bar1_size; + } +#else + if ((address - devinfo->tcm) >= + devinfo->bar1_size) { + inff_err(bus, + "mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return; + } +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + iowrite8(*src8, address); +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + raw_spin_unlock_irqrestore(&pcie_lock, flags); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + address++; + src8++; + len--; + } + } else { + len = len / 2; + src16 = (__le16 *)srcaddr; + while (len) { +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= + devinfo->bar1_size) { + pci_write_config_dword + (pdev, + INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - + devinfo->bar1_size; + } +#else + if ((address - devinfo->tcm) >= + devinfo->bar1_size) { + inff_err(bus, + "mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return; + } +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + iowrite16(le16_to_cpu(*src16), address); +#ifdef 
CONFIG_INFFMAC_PCIE_BARWIN_SZ + raw_spin_unlock_irqrestore(&pcie_lock, flags); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + address += 2; + src16++; + len--; + } + } + } else { + len = len / 4; + src32 = (__le32 *)srcaddr; + while (len) { +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= + devinfo->bar1_size) { + pci_write_config_dword + (pdev, + INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - devinfo->bar1_size; + } +#else + if ((address - devinfo->tcm) >= + devinfo->bar1_size) { + inff_err(bus, + "mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return; + } +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + iowrite32(le32_to_cpu(*src32), address); +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + raw_spin_unlock_irqrestore(&pcie_lock, flags); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + address += 4; + src32++; + len--; + } + } +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + pci_write_config_dword(pdev, INFF_PCIE_BAR1_WINDOW, 0x0); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ +} + +static void +inff_pcie_copy_dev_tomem(struct inff_pciedev_info *devinfo, u32 mem_offset, + void *dstaddr, u32 len) +{ + struct pci_dev *pdev = devinfo->pdev; +#ifndef CONFIG_INFFMAC_PCIE_BARWIN_SZ + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); +#endif + void __iomem *address = devinfo->tcm + mem_offset; + __le32 *dst32; + __le16 *dst16; + u8 *dst8; +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + unsigned long flags; +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + + if (((ulong)address & 4) || ((ulong)dstaddr & 4) || (len & 4)) { + if (((ulong)address & 2) || ((ulong)dstaddr & 2) || (len & 2)) { + dst8 = (u8 *)dstaddr; + while (len) { +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= + devinfo->bar1_size) { + pci_write_config_dword + (pdev, + INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - + devinfo->bar1_size; + 
} +#else + if ((address - devinfo->tcm) >= + devinfo->bar1_size) { + inff_err(bus, + "mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return; + } +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + *dst8 = ioread8(address); +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + raw_spin_unlock_irqrestore(&pcie_lock, flags); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + address++; + dst8++; + len--; + } + } else { + len = len / 2; + dst16 = (__le16 *)dstaddr; + while (len) { +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= + devinfo->bar1_size) { + pci_write_config_dword + (pdev, + INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - + devinfo->bar1_size; + } +#else + if ((address - devinfo->tcm) >= + devinfo->bar1_size) { + inff_err(bus, + "mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return; + } +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + *dst16 = cpu_to_le16(ioread16(address)); +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + raw_spin_unlock_irqrestore(&pcie_lock, flags); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + address += 2; + dst16++; + len--; + } + } + } else { + len = len / 4; + dst32 = (__le32 *)dstaddr; + while (len) { +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + raw_spin_lock_irqsave(&pcie_lock, flags); + if ((address - devinfo->tcm) >= + devinfo->bar1_size) { + pci_write_config_dword + (pdev, + INFF_PCIE_BAR1_WINDOW, + devinfo->bar1_size); + address = address - devinfo->bar1_size; + } +#else + if ((address - devinfo->tcm) >= + devinfo->bar1_size) { + inff_err(bus, + "mem_offset:%d exceeds device size=%ld\n", + mem_offset, devinfo->bar1_size); + return; + } +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + *dst32 = cpu_to_le32(ioread32(address)); +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + raw_spin_unlock_irqrestore(&pcie_lock, flags); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + address += 4; + dst32++; + len--; + } + } +#ifdef 
CONFIG_INFFMAC_PCIE_BARWIN_SZ + pci_write_config_dword(pdev, INFF_PCIE_BAR1_WINDOW, 0x0); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ +} + +#define WRITECC32(devinfo, reg, value) inff_pcie_write_reg32(devinfo, \ + CHIPCREGOFFS(reg), value) + +static void +inff_pcie_select_core(struct inff_pciedev_info *devinfo, u16 coreid) +{ + const struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + struct inff_core *core; + u32 bar0_win; + + core = inff_chip_get_core(devinfo->ci, coreid); + if (core) { + bar0_win = core->base; + pci_write_config_dword(pdev, INFF_PCIE_BAR0_WINDOW, bar0_win); + if (pci_read_config_dword(pdev, INFF_PCIE_BAR0_WINDOW, + &bar0_win) == 0) { + if (bar0_win != core->base) { + bar0_win = core->base; + pci_write_config_dword(pdev, + INFF_PCIE_BAR0_WINDOW, + bar0_win); + } + } + } else { + inff_err(bus, "Unsupported core selected %x\n", coreid); + } +} + +static void inff_pcie_reset_device(struct inff_pciedev_info *devinfo) +{ + struct inff_core *core; + static const u16 cfg_offset[] = { + INFF_PCIE_CFGREG_STATUS_CMD, + INFF_PCIE_CFGREG_PM_CSR, + INFF_PCIE_CFGREG_MSI_CAP, + INFF_PCIE_CFGREG_MSI_ADDR_L, + INFF_PCIE_CFGREG_MSI_ADDR_H, + INFF_PCIE_CFGREG_MSI_DATA, +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + INFF_PCIE_BAR1_WINDOW, +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + INFF_PCIE_CFGREG_LINK_STATUS_CTRL2, + INFF_PCIE_CFGREG_RBAR_CTRL, + INFF_PCIE_CFGREG_PML1_SUB_CTRL1, + INFF_PCIE_CFGREG_REG_BAR2_CONFIG, + INFF_PCIE_CFGREG_REG_BAR3_CONFIG + }; + u32 i; + u32 val; + u32 lsc; + + if (!devinfo->ci) + return; + + /* Disable ASPM */ + inff_pcie_select_core(devinfo, INF_CORE_PCIE2); + pci_read_config_dword(devinfo->pdev, INFF_PCIE_REG_LINK_STATUS_CTRL, + &lsc); + val = lsc & (~INFF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB); + pci_write_config_dword(devinfo->pdev, INFF_PCIE_REG_LINK_STATUS_CTRL, + val); + + /* Watchdog reset */ + devinfo->ci->blhs->init(devinfo->ci); + inff_pcie_select_core(devinfo, INF_CORE_CHIPCOMMON); + 
WRITECC32(devinfo, watchdog, 4);
+	msleep(100);
+	if (devinfo->ci->blhs->post_wdreset(devinfo->ci))
+		return;
+
+	/* Restore ASPM */
+	inff_pcie_select_core(devinfo, INF_CORE_PCIE2);
+	pci_write_config_dword(devinfo->pdev, INFF_PCIE_REG_LINK_STATUS_CTRL,
+			       lsc);
+
+	/* inff_chip_get_core() can return NULL (see inff_pcie_select_core);
+	 * guard before dereferencing core->rev.
+	 */
+	core = inff_chip_get_core(devinfo->ci, INF_CORE_PCIE2);
+	if (core && core->rev <= 13) {
+		for (i = 0; i < ARRAY_SIZE(cfg_offset); i++) {
+			inff_pcie_write_reg32(devinfo,
+					      INFF_PCIE_PCIE2REG_CONFIGADDR,
+					      cfg_offset[i]);
+			val = inff_pcie_read_reg32(devinfo,
+						   INFF_PCIE_PCIE2REG_CONFIGDATA);
+			inff_dbg(PCIE, "config offset 0x%04x, value 0x%04x\n",
+				 cfg_offset[i], val);
+			inff_pcie_write_reg32(devinfo,
+					      INFF_PCIE_PCIE2REG_CONFIGDATA,
+					      val);
+		}
+	}
+}
+
+static void inff_pcie_attach(struct inff_pciedev_info *devinfo)
+{
+	u32 config;
+
+	/* BAR1 window may not be sized properly */
+	inff_pcie_select_core(devinfo, INF_CORE_PCIE2);
+	inff_pcie_write_reg32(devinfo, INFF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
+	config = inff_pcie_read_reg32(devinfo, INFF_PCIE_PCIE2REG_CONFIGDATA);
+	inff_pcie_write_reg32(devinfo, INFF_PCIE_PCIE2REG_CONFIGDATA, config);
+
+	device_wakeup_enable(&devinfo->pdev->dev);
+}
+
+static int inff_pcie_bus_readshared(struct inff_pciedev_info *devinfo,
+				    u32 nvram_csm)
+{
+	struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
+	u32 loop_counter;
+	u32 addr_le;
+	u32 addr = 0;
+
+	loop_counter = INFF_PCIE_READ_SHARED_TIMEOUT / 50;
+	while ((addr == 0 || addr == nvram_csm) && (loop_counter)) {
+		msleep(50);
+		addr_le = inff_pcie_read_ram32(devinfo,
+					       devinfo->ci->ramsize - 4);
+		addr = le32_to_cpu(addr_le);
+		loop_counter--;
+	}
+	if (addr == 0 || addr == nvram_csm || addr < devinfo->ci->rambase ||
+	    addr >= devinfo->ci->rambase + devinfo->ci->ramsize) {
+		inff_err(bus, "Invalid shared RAM address 0x%08x\n", addr);
+		return -ENODEV;
+	}
+	devinfo->shared.tcm_base_address = addr;
+	inff_dbg(PCIE, "Shared RAM addr: 0x%08x\n", addr);
+
+	inff_pcie_bus_console_init(devinfo);
+	return 0;
+}
+
+static int inff_pcie_enter_download_state(struct inff_pciedev_info *devinfo)
+{
+	struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev);
+	int err = 0;
+
+	err = devinfo->ci->blhs->prep_fwdl(devinfo->ci);
+	if (err) {
+		inff_err(bus, "FW download preparation failed");
+		return err;
+	}
+
+	if (!inff_pcie_bus_readshared(devinfo, 0))
+		inff_pcie_bus_console_read(devinfo, false);
+
+	return err;
+}
+
+static int inff_pcie_exit_download_state(struct inff_pciedev_info *devinfo,
+					 u32 resetintr)
+{
+	inff_pcie_bus_console_read(devinfo, false);
+	devinfo->ci->blhs->post_nvramdl(devinfo->ci);
+
+	return 0;
+}
+
+static int
+inff_pcie_send_mb_data(struct inff_pciedev_info *devinfo, u32 htod_mb_data)
+{
+	struct inff_pcie_shared_info *shared;
+	struct inff_bus *bus;
+	int err;
+	struct inff_core *core;
+	u32 addr;
+	u32 cur_htod_mb_data;
+	u32 i;
+
+	shared = &devinfo->shared;
+	bus = dev_get_drvdata(&devinfo->pdev->dev);
+	if (shared->version >= INFF_PCIE_SHARED_VERSION_6 &&
+	    !devinfo->use_mailbox) {
+		err = inff_msgbuf_tx_mbdata(bus->drvr, htod_mb_data);
+		if (err) {
+			inff_err(bus, "sending mbdata failed err=%d\n", err);
+			return err;
+		}
+	} else {
+		addr = shared->htod_mb_data_addr;
+		cur_htod_mb_data = inff_pcie_read_tcm32(devinfo, addr);
+
+		if (cur_htod_mb_data != 0)
+			inff_dbg(PCIE, "MB transaction is already pending 0x%04x\n",
+				 cur_htod_mb_data);
+
+		i = 0;
+		while (cur_htod_mb_data != 0) {
+			usleep_range(10000, 10001);
+			i++;
+			if (i > 100)
+				return -EIO;
+			cur_htod_mb_data = inff_pcie_read_tcm32(devinfo, addr);
+		}
+
+		inff_pcie_write_tcm32(devinfo, addr, htod_mb_data);
+		pci_write_config_dword(devinfo->pdev, INFF_PCIE_REG_SBMBX, 1);
+
+		/* Send mailbox interrupt twice as a hardware workaround;
+		 * inff_chip_get_core() can return NULL, so guard the deref.
+		 */
+		core = inff_chip_get_core(devinfo->ci, INF_CORE_PCIE2);
+		if (core && core->rev <= 13)
+			pci_write_config_dword(devinfo->pdev,
+					       INFF_PCIE_REG_SBMBX, 1);
+	}
+	return 0;
+}
+
+static u32 inff_pcie_read_mb_data(struct inff_pciedev_info *devinfo)
+{
+	struct
inff_pcie_shared_info *shared; + u32 addr; + u32 dtoh_mb_data; + + shared = &devinfo->shared; + addr = shared->dtoh_mb_data_addr; + dtoh_mb_data = inff_pcie_read_tcm32(devinfo, addr); + inff_pcie_write_tcm32(devinfo, addr, 0); + return dtoh_mb_data; +} + +void inff_pcie_handle_mb_data(struct inff_bus *bus_if, u32 d2h_mb_data) +{ + struct inff_pciedev *buspub = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = buspub->devinfo; + + inff_dbg(INFO, "D2H_MB_DATA: 0x%04x\n", d2h_mb_data); + + if (d2h_mb_data & INFF_D2H_DEV_DS_ENTER_REQ) { + inff_dbg(INFO, "D2H_MB_DATA: DEEP SLEEP REQ\n"); + inff_pcie_send_mb_data(devinfo, INFF_H2D_HOST_DS_ACK); + inff_dbg(INFO, "D2H_MB_DATA: sent DEEP SLEEP ACK\n"); + } + + if (d2h_mb_data & INFF_D2H_DEV_DS_EXIT_NOTE) + inff_dbg(INFO, "D2H_MB_DATA: DEEP SLEEP EXIT\n"); + if (d2h_mb_data & INFF_D2H_DEV_D3_ACK) { + inff_dbg(INFO, "D2H_MB_DATA: D3 ACK\n"); + devinfo->mbdata_completed = true; + wake_up(&devinfo->mbdata_resp_wait); + } + + if (d2h_mb_data & INFF_D2H_DEV_FWHALT) { + inff_dbg(INFO, "D2H_MB_DATA: FW HALT\n"); + inff_fw_crashed(&devinfo->pdev->dev); + } +} + +static void inff_pcie_bus_console_init(struct inff_pciedev_info *devinfo) +{ + struct inff_pcie_shared_info *shared; + struct inff_pcie_console *console; + u32 buf_addr; + u32 addr; + + shared = &devinfo->shared; + console = &shared->console; + addr = shared->tcm_base_address + INFF_SHARED_CONSOLE_ADDR_OFFSET; + console->base_addr = inff_pcie_read_tcm32(devinfo, addr); + + addr = console->base_addr + INFF_CONSOLE_BUFADDR_OFFSET; + buf_addr = inff_pcie_read_tcm32(devinfo, addr); + /* reset console index when buffer address is updated */ + if (console->buf_addr != buf_addr) { + console->buf_addr = buf_addr; + console->read_idx = 0; + } + addr = console->base_addr + INFF_CONSOLE_BUFSIZE_OFFSET; + console->bufsize = inff_pcie_read_tcm32(devinfo, addr); + + inff_dbg(FWCON, "Console: base %x, buf %x, size %d\n", + console->base_addr, console->buf_addr, 
console->bufsize); +} + +/** + * inff_pcie_bus_console_read - reads firmware messages + * + * @devinfo: pointer to the device data structure + * @error: specifies if error has occurred (prints messages unconditionally) + */ +static void inff_pcie_bus_console_read(struct inff_pciedev_info *devinfo, + bool error) +{ + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + struct inff_pcie_console *console; + u32 addr; + u8 ch; + u32 newidx; + + if (!error && !INFF_FWCON_ON()) + return; + + console = &devinfo->shared.console; + if (!console->base_addr) + return; + addr = console->base_addr + INFF_CONSOLE_WRITEIDX_OFFSET; + newidx = inff_pcie_read_tcm32(devinfo, addr); + while (newidx != console->read_idx) { + addr = console->buf_addr + console->read_idx; + ch = inff_pcie_read_tcm8(devinfo, addr); + console->read_idx++; + if (console->read_idx == console->bufsize) + console->read_idx = 0; + if (ch == '\r') + continue; + console->log_str[console->log_idx] = ch; + console->log_idx++; + if ((ch != '\n') && + (console->log_idx == (sizeof(console->log_str) - 2))) { + ch = '\n'; + console->log_str[console->log_idx] = ch; + console->log_idx++; + } + if (ch == '\n') { + console->log_str[console->log_idx] = 0; + if (error) + __inff_err(bus, __func__, "CONSOLE: %s", + console->log_str); + else + pr_debug("CONSOLE: %s", console->log_str); + console->log_idx = 0; + } + } +} + +static void inff_pcie_intr_disable(struct inff_pciedev_info *devinfo) +{ + inff_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxmask, 0); +} + +static void inff_pcie_interrupt_disable(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *pcie_bus_dev = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = pcie_bus_dev->devinfo; + + inff_pcie_intr_disable(devinfo); +} + +static void inff_pcie_intr_enable(struct inff_pciedev_info *devinfo) +{ + inff_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxmask, + 
devinfo->reginfo->int_d2h_db | + devinfo->reginfo->int_fn0); +} + +static void inff_pcie_interrupt_enable(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *pcie_bus_dev = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = pcie_bus_dev->devinfo; + + inff_pcie_intr_enable(devinfo); +} + +static void inff_pcie_hostready(struct inff_pciedev_info *devinfo) +{ + if (devinfo->shared.flags & INFF_PCIE_SHARED_HOSTRDY_DB1) + inff_pcie_write_reg32(devinfo, + devinfo->reginfo->h2d_mailbox_1, 1); +} + +static irqreturn_t inff_pcie_quick_check_isr(int irq, void *arg) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)arg; + + if (inff_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint)) { + inff_pcie_intr_disable(devinfo); + inff_dbg(PCIE, "Enter\n"); + return IRQ_WAKE_THREAD; + } + return IRQ_NONE; +} + +static irqreturn_t inff_pcie_isr_thread(int irq, void *arg) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)arg; + u32 status; + u32 d2h_mbdata; + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + + devinfo->in_irq = true; + status = inff_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint); + inff_dbg(PCIE, "Enter %x\n", status); + if (status) { + inff_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint, + status); + if (status & devinfo->reginfo->int_fn0) { + d2h_mbdata = inff_pcie_read_mb_data(devinfo); + inff_pcie_handle_mb_data(bus, d2h_mbdata); + } + if (status & devinfo->reginfo->int_d2h_db) { + if (devinfo->state == INFFMAC_PCIE_STATE_UP) + inff_proto_msgbuf_rx_trigger(&devinfo->pdev->dev); + } + } + inff_pcie_bus_console_read(devinfo, false); + if (devinfo->state == INFFMAC_PCIE_STATE_UP) + inff_pcie_intr_enable(devinfo); + devinfo->in_irq = false; + return IRQ_HANDLED; +} + +static int inff_pcie_request_irq(struct inff_pciedev_info *devinfo) +{ + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = 
dev_get_drvdata(&pdev->dev); + + inff_pcie_intr_disable(devinfo); + + inff_dbg(PCIE, "Enter\n"); + + pci_enable_msi(pdev); + if (request_threaded_irq(pdev->irq, inff_pcie_quick_check_isr, + inff_pcie_isr_thread, IRQF_SHARED, + "inff_pcie_intr", devinfo)) { + pci_disable_msi(pdev); + inff_err(bus, "Failed to request IRQ %d\n", pdev->irq); + return -EIO; + } + devinfo->irq_allocated = true; + return 0; +} + +static void inff_pcie_release_irq(struct inff_pciedev_info *devinfo) +{ + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + u32 status; + u32 count; + + if (!devinfo->irq_allocated) + return; + + inff_pcie_intr_disable(devinfo); + free_irq(pdev->irq, devinfo); + pci_disable_msi(pdev); + + msleep(50); + count = 0; + while ((devinfo->in_irq) && (count < 20)) { + msleep(50); + count++; + } + if (devinfo->in_irq) + inff_err(bus, "Still in IRQ (processing) !!!\n"); + + status = inff_pcie_read_reg32(devinfo, devinfo->reginfo->mailboxint); + inff_pcie_write_reg32(devinfo, devinfo->reginfo->mailboxint, status); + + devinfo->irq_allocated = false; +} + +static int inff_pcie_ring_mb_write_rptr(void *ctx) +{ + struct inff_pcie_ringbuf *ring = (struct inff_pcie_ringbuf *)ctx; + struct inff_pciedev_info *devinfo = ring->devinfo; + struct inff_commonring *commonring = &ring->commonring; + + if (devinfo->state != INFFMAC_PCIE_STATE_UP) + return -EIO; + + inff_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr, + commonring->w_ptr, ring->id); + + devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr); + + return 0; +} + +static int inff_pcie_ring_mb_write_wptr(void *ctx) +{ + struct inff_pcie_ringbuf *ring = (struct inff_pcie_ringbuf *)ctx; + struct inff_pciedev_info *devinfo = ring->devinfo; + struct inff_commonring *commonring = &ring->commonring; + + if (devinfo->state != INFFMAC_PCIE_STATE_UP) + return -EIO; + + inff_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr, + commonring->r_ptr, ring->id); + + 
devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr); + + return 0; +} + +static int inff_pcie_ring_mb_ring_bell(void *ctx) +{ + struct inff_pcie_ringbuf *ring = (struct inff_pcie_ringbuf *)ctx; + struct inff_pciedev_info *devinfo = ring->devinfo; + + if (devinfo->state != INFFMAC_PCIE_STATE_UP) + return -EIO; + + inff_dbg(PCIE, "RING !\n"); + /* Any arbitrary value will do, lets use 1 */ + inff_pcie_write_reg32(devinfo, devinfo->reginfo->h2d_mailbox_0, 1); + + return 0; +} + +static int inff_pcie_ring_mb_update_rptr(void *ctx) +{ + struct inff_pcie_ringbuf *ring = (struct inff_pcie_ringbuf *)ctx; + struct inff_pciedev_info *devinfo = ring->devinfo; + struct inff_commonring *commonring = &ring->commonring; + + if (devinfo->state != INFFMAC_PCIE_STATE_UP) + return -EIO; + + commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr); + + inff_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr, + commonring->w_ptr, ring->id); + + return 0; +} + +static int inff_pcie_ring_mb_update_wptr(void *ctx) +{ + struct inff_pcie_ringbuf *ring = (struct inff_pcie_ringbuf *)ctx; + struct inff_pciedev_info *devinfo = ring->devinfo; + struct inff_commonring *commonring = &ring->commonring; + + if (devinfo->state != INFFMAC_PCIE_STATE_UP) + return -EIO; + + commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr); + + inff_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr, + commonring->r_ptr, ring->id); + + return 0; +} + +static void * +inff_pcie_init_dmabuffer_for_device(struct inff_pciedev_info *devinfo, + u32 size, u32 tcm_dma_phys_addr, + dma_addr_t *dma_handle) +{ + void *ring; + u64 address; + + ring = dma_alloc_coherent(&devinfo->pdev->dev, size, dma_handle, + GFP_KERNEL); + if (!ring) + return NULL; + + address = (u64)*dma_handle; + inff_pcie_write_tcm32(devinfo, tcm_dma_phys_addr, + address & 0xffffffff); + inff_pcie_write_tcm32(devinfo, tcm_dma_phys_addr + 4, address >> 32); + + return ring; +} + +static struct inff_pcie_ringbuf * 
+inff_pcie_alloc_dma_and_ring(struct inff_pciedev_info *devinfo, u32 ring_id, + u32 tcm_ring_phys_addr) +{ + struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); + void *dma_buf; + dma_addr_t dma_handle; + struct inff_pcie_ringbuf *ring; + u32 size; + u32 addr; + u32 ring_max_item, ring_item_size; + u32 commonring_depth = bus->drvr->settings->commonring_depth[ring_id]; + + if (devinfo->shared.version < INFF_PCIE_SHARED_VERSION_7) + ring_item_size = inff_ring_itemsize_pre_v7[ring_id]; + else + ring_item_size = inff_ring_itemsize[ring_id]; + + if (commonring_depth >= INFF_RING_MAX_ITEM_LOWER_LIMIT && + commonring_depth <= INFF_RING_MAX_ITEM_UPPER_LIMIT) + ring_max_item = commonring_depth; + else + ring_max_item = inff_ring_max_item[ring_id]; + + size = ring_max_item * ring_item_size; + dma_buf = inff_pcie_init_dmabuffer_for_device(devinfo, size, + tcm_ring_phys_addr + + INFF_RING_MEM_BASE_ADDR_OFFSET, + &dma_handle); + if (!dma_buf) + return NULL; + + addr = tcm_ring_phys_addr + INFF_RING_MAX_ITEM_OFFSET; + inff_pcie_write_tcm16(devinfo, addr, ring_max_item); + addr = tcm_ring_phys_addr + INFF_RING_LEN_ITEMS_OFFSET; + inff_pcie_write_tcm16(devinfo, addr, ring_item_size); + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) { + dma_free_coherent(&devinfo->pdev->dev, size, dma_buf, + dma_handle); + return NULL; + } + inff_commonring_config(&ring->commonring, ring_max_item, + ring_item_size, dma_buf); + ring->dma_handle = dma_handle; + ring->devinfo = devinfo; + inff_commonring_register_cb(&ring->commonring, + inff_pcie_ring_mb_ring_bell, + inff_pcie_ring_mb_update_rptr, + inff_pcie_ring_mb_update_wptr, + inff_pcie_ring_mb_write_rptr, + inff_pcie_ring_mb_write_wptr, ring); + + return ring; +} + +static void inff_pcie_release_ringbuffer(struct device *dev, + struct inff_pcie_ringbuf *ring) +{ + void *dma_buf; + u32 size; + + if (!ring) + return; + + dma_buf = ring->commonring.buf_addr; + if (dma_buf) { + size = ring->commonring.depth * 
ring->commonring.item_len; + dma_free_coherent(dev, size, dma_buf, ring->dma_handle); + } + kfree(ring); +} + +static void inff_pcie_release_ringbuffers(struct inff_pciedev_info *devinfo) +{ + u32 i; + + for (i = 0; i < INFF_NROF_COMMON_MSGRINGS; i++) { + inff_pcie_release_ringbuffer(&devinfo->pdev->dev, + devinfo->shared.commonrings[i]); + devinfo->shared.commonrings[i] = NULL; + } + kfree(devinfo->shared.flowrings); + devinfo->shared.flowrings = NULL; + if (devinfo->idxbuf) { + dma_free_coherent(&devinfo->pdev->dev, + devinfo->idxbuf_sz, + devinfo->idxbuf, + devinfo->idxbuf_dmahandle); + devinfo->idxbuf = NULL; + } +} + +static int inff_pcie_init_ringbuffers(struct inff_pciedev_info *devinfo) +{ + struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); + struct inff_pcie_ringbuf *ring; + struct inff_pcie_ringbuf *rings; + u32 d2h_w_idx_ptr; + u32 d2h_r_idx_ptr; + u32 h2d_w_idx_ptr; + u32 h2d_r_idx_ptr; + u32 ring_mem_ptr; + u32 i; + u64 address; + u32 bufsz; + u8 idx_offset; + struct inff_pcie_dhi_ringinfo ringinfo; + u16 max_flowrings; + u16 max_submissionrings; + u16 max_completionrings; +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + inff_pcie_copy_dev_tomem(devinfo, devinfo->shared.ring_info_addr, + &ringinfo, sizeof(ringinfo)); +#else + memcpy_fromio(&ringinfo, devinfo->tcm + devinfo->shared.ring_info_addr, + sizeof(ringinfo)); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + + if (devinfo->shared.version >= 6) { + max_submissionrings = le16_to_cpu(ringinfo.max_submissionrings); + max_flowrings = le16_to_cpu(ringinfo.max_flowrings); + max_completionrings = le16_to_cpu(ringinfo.max_completionrings); + } else { + max_submissionrings = le16_to_cpu(ringinfo.max_flowrings); + max_flowrings = max_submissionrings - + INFF_NROF_H2D_COMMON_MSGRINGS; + max_completionrings = INFF_NROF_D2H_COMMON_MSGRINGS; + } + if (max_flowrings > 256) { + inff_err(bus, "invalid max_flowrings(%d)\n", max_flowrings); + return -EIO; + } + + if (devinfo->dma_idx_sz != 0) { + bufsz = 
(max_submissionrings + max_completionrings) * + devinfo->dma_idx_sz * 2; + devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz, + &devinfo->idxbuf_dmahandle, + GFP_KERNEL); + if (!devinfo->idxbuf) + devinfo->dma_idx_sz = 0; + } + + if (devinfo->dma_idx_sz == 0) { + d2h_w_idx_ptr = le32_to_cpu(ringinfo.d2h_w_idx_ptr); + d2h_r_idx_ptr = le32_to_cpu(ringinfo.d2h_r_idx_ptr); + h2d_w_idx_ptr = le32_to_cpu(ringinfo.h2d_w_idx_ptr); + h2d_r_idx_ptr = le32_to_cpu(ringinfo.h2d_r_idx_ptr); + idx_offset = sizeof(u32); + devinfo->write_ptr = inff_pcie_write_tcm16; + devinfo->read_ptr = inff_pcie_read_tcm16; + inff_dbg(PCIE, "Using TCM indices\n"); + } else { + memset(devinfo->idxbuf, 0, bufsz); + devinfo->idxbuf_sz = bufsz; + idx_offset = devinfo->dma_idx_sz; + devinfo->write_ptr = inff_pcie_write_idx; + devinfo->read_ptr = inff_pcie_read_idx; + + h2d_w_idx_ptr = 0; + address = (u64)devinfo->idxbuf_dmahandle; + ringinfo.h2d_w_idx_hostaddr.low_addr = + cpu_to_le32(address & 0xffffffff); + ringinfo.h2d_w_idx_hostaddr.high_addr = + cpu_to_le32(address >> 32); + + h2d_r_idx_ptr = h2d_w_idx_ptr + + max_submissionrings * idx_offset; + address += max_submissionrings * idx_offset; + ringinfo.h2d_r_idx_hostaddr.low_addr = + cpu_to_le32(address & 0xffffffff); + ringinfo.h2d_r_idx_hostaddr.high_addr = + cpu_to_le32(address >> 32); + + d2h_w_idx_ptr = h2d_r_idx_ptr + + max_submissionrings * idx_offset; + address += max_submissionrings * idx_offset; + ringinfo.d2h_w_idx_hostaddr.low_addr = + cpu_to_le32(address & 0xffffffff); + ringinfo.d2h_w_idx_hostaddr.high_addr = + cpu_to_le32(address >> 32); + + d2h_r_idx_ptr = d2h_w_idx_ptr + + max_completionrings * idx_offset; + address += max_completionrings * idx_offset; + ringinfo.d2h_r_idx_hostaddr.low_addr = + cpu_to_le32(address & 0xffffffff); + ringinfo.d2h_r_idx_hostaddr.high_addr = + cpu_to_le32(address >> 32); + +#ifdef CONFIG_INFFMAC_PCIE_BARWIN_SZ + inff_pcie_copy_mem_todev(devinfo, + devinfo->shared.ring_info_addr, + 
&ringinfo, sizeof(ringinfo)); +#else + memcpy_toio(devinfo->tcm + devinfo->shared.ring_info_addr, + &ringinfo, sizeof(ringinfo)); +#endif /* CONFIG_INFFMAC_PCIE_BARWIN_SZ */ + inff_dbg(PCIE, "Using host memory indices\n"); + } + + ring_mem_ptr = le32_to_cpu(ringinfo.ringmem); + + for (i = 0; i < INFF_NROF_H2D_COMMON_MSGRINGS; i++) { + ring = inff_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr); + if (!ring) + goto fail; + ring->w_idx_addr = h2d_w_idx_ptr; + ring->r_idx_addr = h2d_r_idx_ptr; + ring->id = i; + devinfo->shared.commonrings[i] = ring; + + h2d_w_idx_ptr += idx_offset; + h2d_r_idx_ptr += idx_offset; + ring_mem_ptr += INFF_RING_MEM_SZ; + } + + for (i = INFF_NROF_H2D_COMMON_MSGRINGS; + i < INFF_NROF_COMMON_MSGRINGS; i++) { + ring = inff_pcie_alloc_dma_and_ring(devinfo, i, ring_mem_ptr); + if (!ring) + goto fail; + ring->w_idx_addr = d2h_w_idx_ptr; + ring->r_idx_addr = d2h_r_idx_ptr; + ring->id = i; + devinfo->shared.commonrings[i] = ring; + + d2h_w_idx_ptr += idx_offset; + d2h_r_idx_ptr += idx_offset; + ring_mem_ptr += INFF_RING_MEM_SZ; + } + + devinfo->shared.max_flowrings = max_flowrings; + devinfo->shared.max_submissionrings = max_submissionrings; + devinfo->shared.max_completionrings = max_completionrings; + rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL); + if (!rings) + goto fail; + + inff_dbg(PCIE, "Nr of flowrings is %d\n", max_flowrings); + + for (i = 0; i < max_flowrings; i++) { + ring = &rings[i]; + ring->devinfo = devinfo; + ring->id = i + INFF_H2D_MSGRING_FLOWRING_IDSTART; + inff_commonring_register_cb(&ring->commonring, + inff_pcie_ring_mb_ring_bell, + inff_pcie_ring_mb_update_rptr, + inff_pcie_ring_mb_update_wptr, + inff_pcie_ring_mb_write_rptr, + inff_pcie_ring_mb_write_wptr, + ring); + ring->w_idx_addr = h2d_w_idx_ptr; + ring->r_idx_addr = h2d_r_idx_ptr; + h2d_w_idx_ptr += idx_offset; + h2d_r_idx_ptr += idx_offset; + } + devinfo->shared.flowrings = rings; + + return 0; + +fail: + inff_err(bus, "Allocating ring buffers 
failed\n"); + inff_pcie_release_ringbuffers(devinfo); + return -ENOMEM; +} + +static void +inff_pcie_release_scratchbuffers(struct inff_pciedev_info *devinfo) +{ + if (devinfo->shared.scratch) + dma_free_coherent(&devinfo->pdev->dev, + INFF_DMA_D2H_SCRATCH_BUF_LEN, + devinfo->shared.scratch, + devinfo->shared.scratch_dmahandle); + if (devinfo->shared.ringupd) + dma_free_coherent(&devinfo->pdev->dev, + INFF_DMA_D2H_RINGUPD_BUF_LEN, + devinfo->shared.ringupd, + devinfo->shared.ringupd_dmahandle); +} + +static int inff_pcie_init_scratchbuffers(struct inff_pciedev_info *devinfo) +{ + struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); + u64 address; + u32 addr; + + devinfo->shared.scratch = + dma_alloc_coherent(&devinfo->pdev->dev, + INFF_DMA_D2H_SCRATCH_BUF_LEN, + &devinfo->shared.scratch_dmahandle, + GFP_KERNEL); + if (!devinfo->shared.scratch) + goto fail; + + addr = devinfo->shared.tcm_base_address + + INFF_SHARED_DMA_SCRATCH_ADDR_OFFSET; + address = (u64)devinfo->shared.scratch_dmahandle; + inff_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); + inff_pcie_write_tcm32(devinfo, addr + 4, address >> 32); + addr = devinfo->shared.tcm_base_address + + INFF_SHARED_DMA_SCRATCH_LEN_OFFSET; + inff_pcie_write_tcm32(devinfo, addr, INFF_DMA_D2H_SCRATCH_BUF_LEN); + + devinfo->shared.ringupd = + dma_alloc_coherent(&devinfo->pdev->dev, + INFF_DMA_D2H_RINGUPD_BUF_LEN, + &devinfo->shared.ringupd_dmahandle, + GFP_KERNEL); + if (!devinfo->shared.ringupd) + goto fail; + + addr = devinfo->shared.tcm_base_address + + INFF_SHARED_DMA_RINGUPD_ADDR_OFFSET; + address = (u64)devinfo->shared.ringupd_dmahandle; + inff_pcie_write_tcm32(devinfo, addr, address & 0xffffffff); + inff_pcie_write_tcm32(devinfo, addr + 4, address >> 32); + addr = devinfo->shared.tcm_base_address + + INFF_SHARED_DMA_RINGUPD_LEN_OFFSET; + inff_pcie_write_tcm32(devinfo, addr, INFF_DMA_D2H_RINGUPD_BUF_LEN); + return 0; + +fail: + inff_err(bus, "Allocating scratch buffers failed\n"); + 
inff_pcie_release_scratchbuffers(devinfo); + return -ENOMEM; +} + +static void inff_pcie_down(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *pcie_bus_dev = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = pcie_bus_dev->devinfo; + + inff_pcie_fwcon_timer(devinfo, false); +} + +static int inff_pcie_preinit(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *buspub = bus_if->bus_priv.pcie; + + inff_dbg(PCIE, "Enter\n"); + + inff_pcie_intr_enable(buspub->devinfo); + inff_pcie_hostready(buspub->devinfo); + + return 0; +} + +static int inff_pcie_tx(struct device *dev, struct sk_buff *skb) +{ + return 0; +} + +static int inff_pcie_tx_ctlpkt(struct device *dev, unsigned char *msg, + uint len) +{ + return 0; +} + +static int inff_pcie_rx_ctlpkt(struct device *dev, unsigned char *msg, + uint len) +{ + return 0; +} + +static void inff_pcie_wowl_config(struct device *dev, bool enabled) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *buspub = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = buspub->devinfo; + + inff_dbg(PCIE, "Configuring WOWL, enabled=%d\n", enabled); + devinfo->wowl_enabled = enabled; +} + +static size_t inff_pcie_get_ramsize(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *buspub = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = buspub->devinfo; + + return devinfo->ci->ramsize - devinfo->ci->srsize; +} + +static int inff_pcie_get_memdump(struct device *dev, void *data, size_t len) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *buspub = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = buspub->devinfo; + + inff_dbg(PCIE, "dump at 0x%08X: len=%zu\n", devinfo->ci->rambase, len); + inff_pcie_copy_dev_tomem(devinfo, devinfo->ci->rambase, data, len); + return 0; +} + +static int inff_pcie_get_blob(struct device *dev, const 
struct firmware **fw, + enum inff_blob_type type) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *buspub = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = buspub->devinfo; + struct inff_chip_specific *chip_spec = &devinfo->ci->chip_spec; + + switch (type) { + case INFF_BLOB_CLM: + *fw = chip_spec->clm_fw; + chip_spec->clm_fw = NULL; + break; + default: + return -ENOENT; + } + + if (!*fw) + return -ENOENT; + + return 0; +} + +static int inff_pcie_reset(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pciedev *buspub = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = buspub->devinfo; + struct inff_fw_request *fwreq; + int err; + + inff_pcie_intr_disable(devinfo); + + inff_pcie_bus_console_read(devinfo, true); + + inff_detach(dev); + + inff_pcie_release_irq(devinfo); + inff_pcie_release_scratchbuffers(devinfo); + inff_pcie_release_ringbuffers(devinfo); + inff_pcie_reset_device(devinfo); + + fwreq = inff_prepare_fw_request(devinfo->settings->firmware_path, + devinfo->ci, inff_pcie_fwnames, + ARRAY_SIZE(inff_pcie_fwnames), + devinfo->settings->board_type); + if (!fwreq) { + dev_err(dev, "Failed to prepare FW request\n"); + return -ENOMEM; + } + + /* NVRAM reserves PCI domain 0 for SDK faked bus */ + fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1; + fwreq->bus_nr = devinfo->pdev->bus->number; + + err = inff_fw_get_firmwares(dev, fwreq, inff_pcie_setup); + if (err) { + dev_err(dev, "Failed to prepare FW request\n"); + kfree(fwreq); + } + + return err; +} + +static const struct inff_bus_ops inff_pcie_bus_ops = { + .preinit = inff_pcie_preinit, + .txdata = inff_pcie_tx, + .stop = inff_pcie_down, + .txctl = inff_pcie_tx_ctlpkt, + .rxctl = inff_pcie_rx_ctlpkt, + .wowl_config = inff_pcie_wowl_config, + .get_ramsize = inff_pcie_get_ramsize, + .get_memdump = inff_pcie_get_memdump, + .get_blob = inff_pcie_get_blob, + .reset = inff_pcie_reset, + .debugfs_create = 
inff_pcie_debugfs_create, + .interrupt_enable = inff_pcie_interrupt_enable, + .interrupt_disable = inff_pcie_interrupt_disable, +}; + +static void +inff_pcie_adjust_ramsize(struct inff_pciedev_info *devinfo, u8 *data, + u32 data_len) +{ + __le32 *field; + u32 newsize; + + if (data_len < INFF_RAMSIZE_OFFSET + 8) + return; + + field = (__le32 *)&data[INFF_RAMSIZE_OFFSET]; + if (le32_to_cpup(field) != INFF_RAMSIZE_MAGIC) + return; + field++; + newsize = le32_to_cpup(field); + + inff_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n", + newsize); + devinfo->ci->ramsize = newsize; +} + +static void +inff_pcie_write_rand(struct inff_pciedev_info *devinfo, u32 nvram_csm) +{ + struct inff_rand_metadata rand_data; + u8 rand_buf[INFF_ENTROPY_HOST_LEN]; + u32 count = INFF_ENTROPY_HOST_LEN; + u32 address; + + address = devinfo->ci->rambase + + (devinfo->ci->ramsize - INFF_NVRAM_OFFSET_TCM) - + ((nvram_csm & 0xffff) * INFF_NVRAM_COMPRS_FACTOR) - + sizeof(rand_data); + memset(rand_buf, 0, INFF_ENTROPY_HOST_LEN); + rand_data.signature = cpu_to_le32(INFF_NVRAM_RNG_SIGNATURE); + rand_data.count = cpu_to_le32(count); + inff_pcie_copy_mem_todev(devinfo, address, &rand_data, + sizeof(rand_data)); + address -= count; + get_random_bytes(rand_buf, count); + inff_pcie_copy_mem_todev(devinfo, address, rand_buf, count); +} + +static int +inff_pcie_init_share_ram_info(struct inff_pciedev_info *devinfo, + u32 sharedram_addr) +{ + struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); + struct inff_pcie_shared_info *shared; + u32 addr; + u32 host_cap; + + shared = &devinfo->shared; + shared->tcm_base_address = sharedram_addr; + + shared->flags = inff_pcie_read_tcm32(devinfo, sharedram_addr); + shared->version = (u8)(shared->flags & INFF_PCIE_SHARED_VERSION_MASK); + inff_dbg(PCIE, "PCIe protocol version %d\n", shared->version); + if (shared->version > INFF_PCIE_MAX_SHARED_VERSION || + shared->version < INFF_PCIE_MIN_SHARED_VERSION) { + inff_err(bus, "Unsupported PCIE version %d\n", 
+ shared->version); + return -EINVAL; + } + + /* check if firmware supports dma indices */ + if (shared->flags & INFF_PCIE_SHARED_DMA_INDEX) { + if (shared->flags & INFF_PCIE_SHARED_DMA_2B_IDX) + devinfo->dma_idx_sz = sizeof(u16); + else + devinfo->dma_idx_sz = sizeof(u32); + } + + addr = sharedram_addr + INFF_SHARED_MAX_RXBUFPOST_OFFSET; + shared->max_rxbufpost = inff_pcie_read_tcm16(devinfo, addr); + if (shared->max_rxbufpost == 0) + shared->max_rxbufpost = INFF_DEF_MAX_RXBUFPOST; + + addr = sharedram_addr + INFF_SHARED_RX_DATAOFFSET_OFFSET; + shared->rx_dataoffset = inff_pcie_read_tcm32(devinfo, addr); + + addr = sharedram_addr + INFF_SHARED_HTOD_MB_DATA_ADDR_OFFSET; + shared->htod_mb_data_addr = inff_pcie_read_tcm32(devinfo, addr); + + addr = sharedram_addr + INFF_SHARED_DTOH_MB_DATA_ADDR_OFFSET; + shared->dtoh_mb_data_addr = inff_pcie_read_tcm32(devinfo, addr); + + addr = sharedram_addr + INFF_SHARED_RING_INFO_ADDR_OFFSET; + shared->ring_info_addr = inff_pcie_read_tcm32(devinfo, addr); + + if (shared->version >= INFF_PCIE_SHARED_VERSION_6) { + host_cap = shared->version; + + /* Disable OOB Device Wake based DeepSleep State Machine */ + host_cap |= INFF_HOSTCAP_DS_NO_OOB_DW; + + devinfo->hostready = + ((shared->flags & INFF_PCIE_SHARED_HOSTRDY_DB1) + == INFF_PCIE_SHARED_HOSTRDY_DB1); + if (devinfo->hostready) { + inff_dbg(PCIE, "HostReady supported by dongle.\n"); + host_cap |= INFF_HOSTCAP_H2D_ENABLE_HOSTRDY; + } + devinfo->use_mailbox = + ((shared->flags & INFF_PCIE_SHARED_USE_MAILBOX) + == INFF_PCIE_SHARED_USE_MAILBOX); + devinfo->use_d0_inform = false; + addr = sharedram_addr + INFF_SHARED_HOST_CAP_OFFSET; + + inff_pcie_write_tcm32(devinfo, addr, host_cap); + } else { + devinfo->use_d0_inform = true; + } + + inff_dbg(PCIE, "max rx buf post %d, rx dataoffset %d\n", + shared->max_rxbufpost, shared->rx_dataoffset); + + inff_pcie_bus_console_init(devinfo); + inff_pcie_bus_console_read(devinfo, false); + + return 0; +} + +static int 
inff_pcie_download_fw_nvram(struct inff_pciedev_info *devinfo, + const struct firmware *fw, void *nvram, + u32 nvram_len) +{ + struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); + struct trx_header_le *trx = (struct trx_header_le *)fw->data; + u32 fw_size; + u32 sharedram_addr; + u32 sharedram_addr_written; + u32 loop_counter; + int err; + u32 address; + u32 resetintr; + u32 nvram_lenw; + u32 nvram_csm; + struct inff_chip_specific *chip_spec = &devinfo->ci->chip_spec; + struct inff_fw_dataset *fw_data = &chip_spec->fwdata[0]; + + inff_dbg(PCIE, "Halt ARM.\n"); + err = inff_pcie_enter_download_state(devinfo); + if (err) + return err; + + inff_dbg(PCIE, "Download FW %s\n", fw_data[INFF_FW_CODE].fwnames.path); + address = devinfo->ci->rambase; + fw_size = fw->size; + if (trx->magic == cpu_to_le32(TRX_MAGIC)) { + address -= sizeof(struct trx_header_le); + fw_size = le32_to_cpu(trx->len); + } + inff_pcie_copy_mem_todev(devinfo, address, (void *)fw->data, fw_size); + + resetintr = get_unaligned_le32(fw->data); + release_firmware(fw); + + inff_pcie_bus_console_read(devinfo, false); + err = devinfo->ci->blhs->post_fwdl(devinfo->ci); + if (err) { + inff_err(bus, "FW download failed, err=%d\n", err); + return err; + } + + err = devinfo->ci->blhs->chk_validation(devinfo->ci); + if (err) { + inff_err(bus, "FW validation failed, err=%d\n", err); + return err; + } + + if (nvram) { + inff_dbg(PCIE, "Download NVRAM %s\n", fw_data[INFF_FW_NVRAM].fwnames.path); + address = devinfo->ci->rambase + devinfo->ci->ramsize - + nvram_len; + + address -= 4; + inff_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len); + + /* Convert nvram_len to words to determine the length token */ + nvram_lenw = nvram_len / 4; + nvram_csm = (~nvram_lenw << 16) | (nvram_lenw & 0x0000FFFF); + inff_fw_nvram_free(nvram); + } else { + nvram_csm = 0; + inff_dbg(PCIE, "No matching NVRAM file found %s\n", + fw_data[INFF_FW_NVRAM].fwnames.path); + } + + if (devinfo->ci->chip == INF_CC_5557X_CHIP_ID) { 
+ /* Write the length token to the last word of RAM address */ + inff_pcie_write_ram32(devinfo, devinfo->ci->ramsize - 4, + cpu_to_le32(nvram_csm)); + + /* Write random numbers to TCM for randomizing heap address */ + inff_pcie_write_rand(devinfo, nvram_csm); + } + + sharedram_addr_written = inff_pcie_read_ram32(devinfo, + devinfo->ci->ramsize - + 4); + inff_dbg(PCIE, "Bring ARM in running state\n"); + err = inff_pcie_exit_download_state(devinfo, resetintr); + if (err) + return err; + + if (!inff_pcie_bus_readshared(devinfo, nvram_csm)) + inff_pcie_bus_console_read(devinfo, false); + + inff_dbg(PCIE, "Wait for FW init\n"); + sharedram_addr = sharedram_addr_written; + loop_counter = INFF_PCIE_FW_UP_TIMEOUT / 50; + while ((sharedram_addr == sharedram_addr_written) && (loop_counter)) { + msleep(50); + sharedram_addr = inff_pcie_read_ram32(devinfo, + devinfo->ci->ramsize - + 4); + loop_counter--; + } + if (sharedram_addr == sharedram_addr_written) { + inff_err(bus, "FW failed to initialize\n"); + return -ENODEV; + } + if (sharedram_addr < devinfo->ci->rambase || + sharedram_addr >= devinfo->ci->rambase + devinfo->ci->ramsize) { + inff_err(bus, "Invalid shared RAM address 0x%08x\n", + sharedram_addr); + return -ENODEV; + } + inff_dbg(PCIE, "Shared RAM addr: 0x%08x\n", sharedram_addr); + + return inff_pcie_init_share_ram_info(devinfo, sharedram_addr); +} + +static int inff_pcie_get_resource(struct inff_pciedev_info *devinfo) +{ + struct pci_dev *pdev = devinfo->pdev; + struct inff_bus *bus = dev_get_drvdata(&pdev->dev); + int err; + phys_addr_t bar0_addr, bar1_addr; + ulong bar1_size; + + err = pci_enable_device(pdev); + if (err) { + inff_err(bus, "pci_enable_device failed err=%d\n", err); + return err; + } + + pci_set_master(pdev); + + /* Bar-0 mapped address */ + bar0_addr = pci_resource_start(pdev, 0); + /* Bar-1 mapped address */ + bar1_addr = pci_resource_start(pdev, 2); + /* read Bar-1 mapped memory range */ + bar1_size = pci_resource_len(pdev, 2); + if (bar1_size 
== 0 || bar1_addr == 0) { + inff_err(bus, "BAR1 Not enabled, device size=%ld, addr=%#016llx\n", + bar1_size, (unsigned long long)bar1_addr); + return -EINVAL; + } + + devinfo->regs = ioremap(bar0_addr, INFF_PCIE_REG_MAP_SIZE); + devinfo->tcm = ioremap(bar1_addr, bar1_size); + devinfo->bar1_size = bar1_size; + + if (!devinfo->regs || !devinfo->tcm) { + inff_err(bus, "ioremap() failed (%p,%p)\n", devinfo->regs, + devinfo->tcm); + return -EINVAL; + } + inff_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n", + devinfo->regs, (unsigned long long)bar0_addr); + inff_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n", + devinfo->tcm, (unsigned long long)bar1_addr, + (unsigned int)bar1_size); + + return 0; +} + +static void inff_pcie_release_resource(struct inff_pciedev_info *devinfo) +{ + if (devinfo->tcm) + iounmap(devinfo->tcm); + if (devinfo->regs) + iounmap(devinfo->regs); + + pci_disable_device(devinfo->pdev); +} + +static u32 inff_pcie_buscore_blhs_read(void *ctx, u32 reg_offset) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)ctx; + + inff_pcie_select_core(devinfo, INF_CORE_PCIE2); + return inff_pcie_read_reg32(devinfo, reg_offset); +} + +static void inff_pcie_buscore_blhs_write(void *ctx, u32 reg_offset, u32 value) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)ctx; + + inff_pcie_select_core(devinfo, INF_CORE_PCIE2); + inff_pcie_write_reg32(devinfo, reg_offset, value); +} + +static u32 inff_pcie_buscore_prep_addr(const struct pci_dev *pdev, u32 addr) +{ + u32 ret_addr; + + ret_addr = addr & (INFF_PCIE_BAR0_REG_SIZE - 1); + addr &= ~(INFF_PCIE_BAR0_REG_SIZE - 1); + pci_write_config_dword(pdev, INFF_PCIE_BAR0_WINDOW, addr); + + return ret_addr; +} + +static u32 inff_pcie_buscore_read32(void *ctx, u32 addr) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)ctx; + + addr = inff_pcie_buscore_prep_addr(devinfo->pdev, addr); + return inff_pcie_read_reg32(devinfo, addr); +} + +static 
void inff_pcie_buscore_write32(void *ctx, u32 addr, u32 value) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)ctx; + + addr = inff_pcie_buscore_prep_addr(devinfo->pdev, addr); + inff_pcie_write_reg32(devinfo, addr, value); +} + +static int inff_pcie_buscoreprep(void *ctx) +{ + return inff_pcie_get_resource(ctx); +} + +static int inff_pcie_buscore_reset(void *ctx, struct inff_chip *chip) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)ctx; + struct inff_core *core; + u32 val, reg; + + devinfo->ci = chip; + inff_pcie_reset_device(devinfo); + + /* reginfo is not ready yet */ + core = inff_chip_get_core(chip, INF_CORE_PCIE2); + if (core->rev >= 64) + reg = INFF_PCIE_64_PCIE2REG_MAILBOXINT; + else + reg = INFF_PCIE_PCIE2REG_MAILBOXINT; + + val = inff_pcie_read_reg32(devinfo, reg); + if (val != 0xffffffff) + inff_pcie_write_reg32(devinfo, reg, val); + + return 0; +} + +static void inff_pcie_buscore_activate(void *ctx, struct inff_chip *chip, + u32 rstvec) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)ctx; + + inff_pcie_write_tcm32(devinfo, 0, rstvec); +} + +static int +inff_pcie_buscore_sec_attach(void *ctx, struct inff_blhs **blhs, struct inff_ccsec **ccsec, + u32 flag, uint timeout, uint interval) +{ + struct inff_pciedev_info *devinfo = (struct inff_pciedev_info *)ctx; + struct inff_bus *bus = dev_get_drvdata(&devinfo->pdev->dev); + struct inff_blhs *blhsh; + u32 regdata; + u32 pcie_enum; + u32 addr; + + if (devinfo->pdev->vendor != INF_PCIE_VENDOR_ID_CYPRESS) + return 0; + + pci_read_config_dword(devinfo->pdev, INFF_PCIE_CFGREG_REVID, &regdata); + if (regdata & INFF_PCIE_CFGREG_REVID_SECURE_MODE) { + blhsh = kzalloc(sizeof(*blhsh), GFP_KERNEL); + if (!blhsh) + return -ENOMEM; + + blhsh->d2h = INFF_PCIE_PCIE2REG_DAR_D2H_MSG_0; + blhsh->h2d = INFF_PCIE_PCIE2REG_DAR_H2D_MSG_0; + blhsh->read = inff_pcie_buscore_blhs_read; + blhsh->write = inff_pcie_buscore_blhs_write; + + /* Host indication for bootloader to 
start the init */ + if (devinfo->pdev->device == INF_PCIE_5557X_DEVICE_ID) + pcie_enum = INFF_CYW55572_PCIE_BAR0_PCIE_ENUM_OFFSET; + else + pcie_enum = INFF_PCIE_BAR0_PCIE_ENUM_OFFSET; + + pci_read_config_dword(devinfo->pdev, PCI_BASE_ADDRESS_0, + &regdata); + addr = regdata + pcie_enum + blhsh->h2d; + inff_pcie_buscore_write32(ctx, addr, 0); + + addr = regdata + pcie_enum + blhsh->d2h; + SPINWAIT_MS((inff_pcie_buscore_read32(ctx, addr) & flag) == 0, + timeout, interval); + regdata = inff_pcie_buscore_read32(ctx, addr); + if (!(regdata & flag)) { + inff_err(bus, "Timeout waiting for bootloader ready\n"); + kfree(blhsh); + return -EPERM; + } + *blhs = blhsh; + } + + return 0; +} + +static const struct inff_buscore_ops inff_pcie_buscore_ops = { + .prepare = inff_pcie_buscoreprep, + .reset = inff_pcie_buscore_reset, + .activate = inff_pcie_buscore_activate, + .read32 = inff_pcie_buscore_read32, + .write32 = inff_pcie_buscore_write32, + .sec_attach = inff_pcie_buscore_sec_attach, +}; + +#define INFF_PCIE_FW_CODE 0 +#define INFF_PCIE_FW_NVRAM 1 +#define INFF_PCIE_FW_CLM 2 + +static void inff_pcie_setup(struct device *dev, int ret, + struct inff_fw_request *fwreq) +{ + const struct firmware *fw; + void *nvram; + struct inff_bus *bus; + struct inff_pciedev *pcie_bus_dev; + struct inff_pciedev_info *devinfo; + struct inff_commonring **flowrings; + struct inff_chip_specific *chip_spec; + u32 i, nvram_len; + + bus = dev_get_drvdata(dev); + pcie_bus_dev = bus->bus_priv.pcie; + devinfo = pcie_bus_dev->devinfo; + chip_spec = &devinfo->ci->chip_spec; + + /* check firmware loading result */ + if (ret) + goto fail; + + inff_pcie_attach(devinfo); + + fw = fwreq->items[INFF_PCIE_FW_CODE].binary; + nvram = fwreq->items[INFF_PCIE_FW_NVRAM].nv_data.data; + nvram_len = fwreq->items[INFF_PCIE_FW_NVRAM].nv_data.len; + chip_spec->clm_fw = fwreq->items[INFF_PCIE_FW_CLM].binary; + kfree(fwreq); + + ret = inff_chip_get_raminfo(devinfo->ci); + if (ret) { + inff_err(bus, "Failed to get RAM 
info\n"); + release_firmware(fw); + inff_fw_nvram_free(nvram); + goto fail; + } + + /* Some of the firmwares have the size of the memory of the device + * defined inside the firmware. This is because part of the memory in + * the device is shared and the division is determined by FW. Parse + * the firmware and adjust the chip memory size now. + */ + inff_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size); + + ret = inff_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len); + if (ret) { + if (!inff_pcie_bus_readshared(devinfo, 0)) + inff_pcie_bus_console_read(devinfo, true); + goto fail; + } + + devinfo->state = INFFMAC_PCIE_STATE_UP; + + ret = inff_pcie_init_ringbuffers(devinfo); + if (ret) + goto fail; + + ret = inff_pcie_init_scratchbuffers(devinfo); + if (ret) + goto fail; + + inff_pcie_select_core(devinfo, INF_CORE_PCIE2); + ret = inff_pcie_request_irq(devinfo); + if (ret) + goto fail; + + /* hook the commonrings in the bus structure. */ + for (i = 0; i < INFF_NROF_COMMON_MSGRINGS; i++) + bus->msgbuf->commonrings[i] = + &devinfo->shared.commonrings[i]->commonring; + + flowrings = kcalloc(devinfo->shared.max_flowrings, sizeof(*flowrings), + GFP_KERNEL); + if (!flowrings) + goto fail; + + for (i = 0; i < devinfo->shared.max_flowrings; i++) + flowrings[i] = &devinfo->shared.flowrings[i].commonring; + bus->msgbuf->flowrings = flowrings; + + bus->msgbuf->rx_dataoffset = devinfo->shared.rx_dataoffset; + bus->msgbuf->max_rxbufpost = devinfo->shared.max_rxbufpost; + bus->msgbuf->max_flowrings = devinfo->shared.max_flowrings; + + init_waitqueue_head(&devinfo->mbdata_resp_wait); + + ret = inff_attach(&devinfo->pdev->dev, true); + if (ret) + goto fail; + + inff_pcie_bus_console_read(devinfo, false); + + inff_pcie_fwcon_timer(devinfo, true); + + return; + +fail: + inff_err(bus, "Dongle setup failed\n"); + inff_pcie_bus_console_read(devinfo, true); + inff_fw_crashed(dev); + device_release_driver(dev); +} + +#ifdef DEBUG +static void +inff_pcie_fwcon_timer(struct 
inff_pciedev_info *devinfo, bool active) +{ + if (!active) { + if (devinfo->console_active) { + timer_delete_sync(&devinfo->timer); + devinfo->console_active = false; + } + return; + } + + /* don't start the timer */ + if (devinfo->state != INFFMAC_PCIE_STATE_UP || + !devinfo->console_interval || !INFF_FWCON_ON()) + return; + + if (!devinfo->console_active) { + devinfo->timer.expires = jiffies + devinfo->console_interval; + add_timer(&devinfo->timer); + devinfo->console_active = true; + } else { + /* Reschedule the timer */ + mod_timer(&devinfo->timer, jiffies + devinfo->console_interval); + } +} + +static void +inff_pcie_fwcon(struct timer_list *t) +{ + struct inff_pciedev_info *devinfo = timer_container_of(devinfo, t, + timer); + + if (!devinfo->console_active) + return; + + inff_pcie_bus_console_read(devinfo, false); + + /* Reschedule the timer if console interval is not zero */ + mod_timer(&devinfo->timer, jiffies + devinfo->console_interval); +} + +static int inff_pcie_console_interval_get(void *data, u64 *val) +{ + struct inff_pciedev_info *devinfo = data; + + *val = devinfo->console_interval; + + return 0; +} + +static int inff_pcie_console_interval_set(void *data, u64 val) +{ + struct inff_pciedev_info *devinfo = data; + + if (val > MAX_CONSOLE_INTERVAL) + return -EINVAL; + + devinfo->console_interval = val; + + if (!val && devinfo->console_active) + inff_pcie_fwcon_timer(devinfo, false); + else if (val) + inff_pcie_fwcon_timer(devinfo, true); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(inff_pcie_console_interval_fops, + inff_pcie_console_interval_get, + inff_pcie_console_interval_set, + "%llu\n"); + +static void inff_pcie_debugfs_create(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pub *drvr = bus_if->drvr; + struct inff_pciedev *pcie_bus_dev = bus_if->bus_priv.pcie; + struct inff_pciedev_info *devinfo = pcie_bus_dev->devinfo; + struct dentry *dentry = inff_debugfs_get_devdir(drvr); + + if (IS_ERR_OR_NULL(dentry)) + 
return; + + devinfo->console_interval = INFF_CONSOLE; + + debugfs_create_file("console_interval", 0644, dentry, devinfo, + &inff_pcie_console_interval_fops); +} + +#else +void inff_pcie_fwcon_timer(struct inff_pciedev_info *devinfo, bool active) +{ +} + +static void inff_pcie_debugfs_create(struct device *dev) +{ +} +#endif + +/* Forward declaration for pci_match_id() call */ +static const struct pci_device_id inff_pcie_devid_table[]; + +static int +inff_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int ret; + struct inff_fw_request *fwreq; + struct inff_pciedev_info *devinfo; + struct inff_pciedev *pcie_bus_dev; + struct inff_core *core; + struct inff_bus *bus; + + if (!id) { + id = pci_match_id(inff_pcie_devid_table, pdev); + if (!id) { + pci_err(pdev, "Error could not find pci_device_id for %x:%x\n", + pdev->vendor, pdev->device); + return -ENODEV; + } + } + + inff_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device); + + ret = -ENOMEM; + devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL); + if (!devinfo) + return ret; + + devinfo->pdev = pdev; + pcie_bus_dev = NULL; + devinfo->ci = inff_chip_attach(devinfo, pdev->device, + &inff_pcie_buscore_ops); + if (IS_ERR(devinfo->ci)) { + ret = PTR_ERR(devinfo->ci); + devinfo->ci = NULL; + goto fail; + } + + core = inff_chip_get_core(devinfo->ci, INF_CORE_PCIE2); + if (core->rev >= 64) + devinfo->reginfo = &inff_reginfo_64; + else + devinfo->reginfo = &inff_reginfo_default; + + pcie_bus_dev = kzalloc(sizeof(*pcie_bus_dev), GFP_KERNEL); + if (!pcie_bus_dev) { + ret = -ENOMEM; + goto fail; + } + + devinfo->settings = inff_get_module_param(&devinfo->pdev->dev, + INFF_BUSTYPE_PCIE, + devinfo->ci->chip, + devinfo->ci->chiprev); + if (!devinfo->settings) { + ret = -ENOMEM; + goto fail; + } + ret = PTR_ERR_OR_ZERO(devinfo->settings); + if (ret < 0) + goto fail; + + bus = kzalloc(sizeof(*bus), GFP_KERNEL); + if (!bus) { + ret = -ENOMEM; + goto fail; + } + bus->msgbuf = kzalloc(sizeof(*bus->msgbuf), 
GFP_KERNEL); + if (!bus->msgbuf) { + ret = -ENOMEM; + kfree(bus); + goto fail; + } + + /* hook it all together. */ + pcie_bus_dev->devinfo = devinfo; + pcie_bus_dev->bus = bus; + bus->dev = &pdev->dev; + bus->bus_priv.pcie = pcie_bus_dev; + bus->ops = &inff_pcie_bus_ops; + bus->proto_type = INFF_PROTO_MSGBUF; + bus->chip = devinfo->coreid; + bus->chip_pub = devinfo->ci; + bus->wowl_supported = pci_pme_capable(pdev, PCI_D3hot); + dev_set_drvdata(&pdev->dev, bus); + + ret = inff_alloc(&devinfo->pdev->dev, devinfo->settings); + if (ret) + goto fail_bus; + +#ifdef DEBUG + /* Set up the fwcon timer */ + timer_setup(&devinfo->timer, inff_pcie_fwcon, 0); +#endif + + fwreq = inff_prepare_fw_request(devinfo->settings->firmware_path, + devinfo->ci, inff_pcie_fwnames, + ARRAY_SIZE(inff_pcie_fwnames), + devinfo->settings->board_type); + if (!fwreq) { + ret = -ENOMEM; + goto fail_inff; + } + + /* NVRAM reserves PCI domain 0 for SDK faked bus */ + fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1; + fwreq->bus_nr = devinfo->pdev->bus->number; + + ret = inff_fw_get_firmwares(bus->dev, fwreq, inff_pcie_setup); + if (ret < 0) { + kfree(fwreq); + goto fail_inff; + } + return 0; + +fail_inff: + inff_free(&devinfo->pdev->dev); +fail_bus: + kfree(bus->msgbuf); + kfree(bus); +fail: + inff_err(NULL, "failed %x:%x\n", pdev->vendor, pdev->device); + inff_pcie_release_resource(devinfo); + if (devinfo->ci) + inff_chip_detach(devinfo->ci); + if (devinfo->settings) + inff_release_module_param(devinfo->settings); + kfree(pcie_bus_dev); + kfree(devinfo); + return ret; +} + +static void +inff_pcie_remove(struct pci_dev *pdev) +{ + struct inff_pciedev_info *devinfo; + struct inff_chip_specific *chip_spec; + struct inff_bus *bus; + + inff_dbg(PCIE, "Enter\n"); + + bus = dev_get_drvdata(&pdev->dev); + if (!bus) + return; + + devinfo = bus->bus_priv.pcie->devinfo; + chip_spec = &devinfo->ci->chip_spec; + + inff_pcie_bus_console_read(devinfo, false); + inff_pcie_fwcon_timer(devinfo, false); + + 
devinfo->state = INFFMAC_PCIE_STATE_DOWN; + if (devinfo->ci) + inff_pcie_intr_disable(devinfo); + + inff_detach(&pdev->dev); + inff_free(&pdev->dev); + + kfree(bus->bus_priv.pcie); + kfree(bus->msgbuf->flowrings); + kfree(bus->msgbuf); + kfree(bus); + + inff_pcie_release_irq(devinfo); + inff_pcie_release_scratchbuffers(devinfo); + inff_pcie_release_ringbuffers(devinfo); + inff_pcie_reset_device(devinfo); + inff_pcie_release_resource(devinfo); + release_firmware(chip_spec->clm_fw); + + if (devinfo->ci) + inff_chip_detach(devinfo->ci); + if (devinfo->settings) + inff_release_module_param(devinfo->settings); + + kfree(devinfo); + dev_set_drvdata(&pdev->dev, NULL); +} + +#ifdef CONFIG_PM + +static int inff_pcie_pm_enter_D3(struct device *dev) +{ + struct inff_pciedev_info *devinfo; + struct inff_bus *bus; + struct inff_cfg80211_info *config; + int retry = INFF_PM_WAIT_MAXRETRY; + + inff_dbg(PCIE, "Enter\n"); + + bus = dev_get_drvdata(dev); + devinfo = bus->bus_priv.pcie->devinfo; + config = bus->drvr->config; + + while (retry && + config->pm_state == INFF_CFG80211_PM_STATE_SUSPENDING) { + usleep_range(10000, 20000); + retry--; + } + if (!retry && config->pm_state == INFF_CFG80211_PM_STATE_SUSPENDING) + inff_err(bus, "timed out wait for cfg80211 suspended\n"); + + inff_pcie_fwcon_timer(devinfo, false); + inff_bus_change_state(bus, INFF_BUS_DOWN); + + devinfo->mbdata_completed = false; + inff_pcie_send_mb_data(devinfo, INFF_H2D_HOST_D3_INFORM); + + wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed, + INFF_PCIE_MBDATA_TIMEOUT); + if (!devinfo->mbdata_completed) { + inff_err(bus, "Timeout on response for entering D3 substate\n"); + inff_bus_change_state(bus, INFF_BUS_UP); + return -EIO; + } + + devinfo->state = INFFMAC_PCIE_STATE_DOWN; + + return 0; +} + +static int inff_pcie_pm_leave_D3(struct device *dev) +{ + struct inff_pciedev_info *devinfo; + struct inff_bus *bus; + struct pci_dev *pdev; + int err; + + inff_dbg(PCIE, "Enter\n"); + + bus = 
dev_get_drvdata(dev); + devinfo = bus->bus_priv.pcie->devinfo; + inff_dbg(PCIE, "Enter, dev=%p, bus=%p\n", dev, bus); + + /* Check if device is still up and running, if so we are ready */ + if (inff_pcie_read_reg32(devinfo, devinfo->reginfo->intmask) != 0) { + inff_dbg(PCIE, "Try to wakeup device....\n"); + if (devinfo->use_d0_inform) { + if (inff_pcie_send_mb_data(devinfo, + INFF_H2D_HOST_D0_INFORM)) + goto cleanup; + } else { + inff_pcie_hostready(devinfo); + } + + inff_dbg(PCIE, "Hot resume, continue....\n"); + devinfo->state = INFFMAC_PCIE_STATE_UP; + inff_pcie_select_core(devinfo, INF_CORE_PCIE2); + inff_bus_change_state(bus, INFF_BUS_UP); + inff_pcie_intr_enable(devinfo); + if (devinfo->use_d0_inform) { + inff_dbg(TRACE, "sending inff_pcie_hostready since use_d0_inform=%d\n", + devinfo->use_d0_inform); + inff_pcie_hostready(devinfo); + } + + inff_pcie_fwcon_timer(devinfo, true); + return 0; + } + +cleanup: + inff_chip_detach(devinfo->ci); + devinfo->ci = NULL; + pdev = devinfo->pdev; + inff_pcie_remove(pdev); + + err = inff_pcie_probe(pdev, NULL); + if (err) + __inff_err(NULL, __func__, "probe after resume failed, err=%d\n", err); + + return err; +} + +static const struct dev_pm_ops inff_pciedrvr_pm = { + .suspend = inff_pcie_pm_enter_D3, + .resume = inff_pcie_pm_leave_D3, + .freeze = inff_pcie_pm_enter_D3, + .restore = inff_pcie_pm_leave_D3, +}; + +#endif /* CONFIG_PM */ + +#define INFF_PCIE_DEVICE(dev_id) \ + { \ + INF_PCIE_VENDOR_ID_CYPRESS, dev_id, \ + PCI_ANY_ID, PCI_ANY_ID, \ + PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, \ + 0 \ + } \ + +static const struct pci_device_id inff_pcie_devid_table[] = { + INFF_PCIE_DEVICE(INF_PCIE_5557X_DEVICE_ID), + { /* end: all zeroes */ } +}; + +MODULE_DEVICE_TABLE(pci, inff_pcie_devid_table); + +static struct pci_driver inff_pciedrvr = { + .name = KBUILD_MODNAME, + .id_table = inff_pcie_devid_table, + .probe = inff_pcie_probe, + .remove = inff_pcie_remove, +#ifdef CONFIG_PM + .driver.pm = &inff_pciedrvr_pm, +#endif + 
.driver.coredump = inff_dev_coredump, +}; + +int inff_pcie_register(void) +{ + inff_dbg(PCIE, "Enter\n"); + return pci_register_driver(&inff_pciedrvr); +} + +void inff_pcie_exit(void) +{ + inff_dbg(PCIE, "Enter\n"); + pci_unregister_driver(&inff_pciedrvr); +} diff --git a/drivers/net/wireless/infineon/inffmac/pcie.h b/drivers/net/wireless/infineon/inffmac/pcie.h new file mode 100644 index 000000000000..f319d9741e04 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/pcie.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_PCIE_H +#define INFF_PCIE_H + +struct inff_pciedev { + struct inff_bus *bus; + struct inff_pciedev_info *devinfo; +}; + +void inff_pcie_handle_mb_data(struct inff_bus *bus_if, u32 d2h_mb_data); + +#endif /* INFF_PCIE_H */ -- 2.25.1 Driver implementation for exposing an Infineon OUI-based vendor nl80211 interface for allowing Infineon's vendor-specific WLAN operations from the user space. 
Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/vendor.c | 338 +++++ .../net/wireless/infineon/inffmac/vendor.h | 54 + .../wireless/infineon/inffmac/vendor_inf.c | 1344 +++++++++++++++++ .../wireless/infineon/inffmac/vendor_inf.h | 767 ++++++++++ 4 files changed, 2503 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/vendor.c create mode 100644 drivers/net/wireless/infineon/inffmac/vendor.h create mode 100644 drivers/net/wireless/infineon/inffmac/vendor_inf.c create mode 100644 drivers/net/wireless/infineon/inffmac/vendor_inf.h diff --git a/drivers/net/wireless/infineon/inffmac/vendor.c b/drivers/net/wireless/infineon/inffmac/vendor.c new file mode 100644 index 000000000000..285a48504732 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/vendor.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include +#include +#include + +#include "fwil_types.h" +#include "core.h" +#include "p2p.h" +#include "debug.h" +#include "cfg80211.h" +#include "vendor.h" +#include "fwil.h" +#include "common.h" +#include "vendor_inf.h" + +static int inff_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + const struct inff_vndr_dcmd_hdr *cmdhdr = data; + struct sk_buff *reply; + unsigned int payload, ret_len; + void *dcmd_buf = NULL, *wr_pointer; + u16 msglen, maxmsglen = PAGE_SIZE - 0x100; + int ret; + + if (len < sizeof(*cmdhdr)) { + inff_err("vendor command too short: %d\n", len); + return -EINVAL; + } + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + inff_dbg(TRACE, "ifidx=%d, cmd=%d\n", ifp->ifidx, cmdhdr->cmd); + + if (cmdhdr->offset > len) { + inff_err("bad buffer offset %d > %d\n", cmdhdr->offset, len); + return -EINVAL; + } + + len -= cmdhdr->offset; + ret_len = cmdhdr->len; + if (ret_len > 0 || len > 0) { + if (len > INFF_DCMD_MAXLEN) { + inff_err("oversize input buffer %d\n", len); + len = INFF_DCMD_MAXLEN; + } + if (ret_len > INFF_DCMD_MAXLEN) { + inff_err("oversize return buffer %d\n", ret_len); + ret_len = INFF_DCMD_MAXLEN; + } + payload = max_t(unsigned int, ret_len, len) + 1; + dcmd_buf = vzalloc(payload); + if (!dcmd_buf) + return -ENOMEM; + + memcpy(dcmd_buf, (void *)cmdhdr + cmdhdr->offset, len); + *(char *)(dcmd_buf + len) = '\0'; + } + + if (cmdhdr->cmd == INFF_C_SET_AP) { + if (*(int *)(dcmd_buf) == 1) { + ifp->vif->wdev.iftype = NL80211_IFTYPE_AP; + inff_net_setcarrier(ifp, true); + } else { + ifp->vif->wdev.iftype = NL80211_IFTYPE_STATION; + } + inff_cfg80211_update_proto_addr_mode(&vif->wdev); + } + + if (cmdhdr->set) + ret = inff_fil_cmd_data_set(ifp, cmdhdr->cmd, dcmd_buf, + ret_len); + else + ret = inff_fil_cmd_data_get(ifp, cmdhdr->cmd, dcmd_buf, + ret_len); + if (ret != 0) + goto exit; + + 
wr_pointer = dcmd_buf; + while (ret_len > 0) { + msglen = ret_len > maxmsglen ? maxmsglen : ret_len; + ret_len -= msglen; + payload = msglen + sizeof(msglen); + reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload); + if (!reply) { + ret = -ENOMEM; + break; + } + + if (nla_put(reply, INFF_NLATTR_DATA, msglen, wr_pointer) || + nla_put_u16(reply, INFF_NLATTR_LEN, msglen)) { + kfree_skb(reply); + ret = -ENOBUFS; + break; + } + + ret = cfg80211_vendor_cmd_reply(reply); + if (ret) + break; + + wr_pointer += msglen; + } + +exit: + vfree(dcmd_buf); + + return ret; +} + +static int inff_cfg80211_vndr_cmds_int_get(struct inff_if *ifp, + u32 cmd, struct wiphy *wiphy) +{ + struct sk_buff *reply; + int get_value = 0; + int ret; + + ret = inff_fil_cmd_int_get(ifp, cmd, &get_value); + if (ret) + inff_err("Command %u get failure. Error : %d\n", cmd, ret); + + reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, sizeof(int)); + nla_put_nohdr(reply, sizeof(int), &get_value); + ret = cfg80211_vendor_cmd_reply(reply); + if (ret) + inff_err("Command %u failure. Error : %d\n", cmd, ret); + return ret; +} + +static int inff_cfg80211_vndr_cmds_int_set(struct inff_if *ifp, int val, u32 cmd) +{ + int ret; + + ret = inff_fil_cmd_int_set(ifp, cmd, val); + if (ret < 0) + inff_err("Command %u set failure. 
Error : %d\n", cmd, ret); + return ret; +} + +static int inff_cfg80211_vndr_cmds_frameburst(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + int ret; + int val = *(int *)data; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + if (val == 0x0 || val == 0x1) { + ret = inff_cfg80211_vndr_cmds_int_set(ifp, val, + INFF_C_SET_FAKEFRAG); + } else if (val == 0xff) { + ret = inff_cfg80211_vndr_cmds_int_get(ifp, + INFF_C_GET_FAKEFRAG, + wiphy); + } else { + inff_err("Invalid Input\n"); + ret = -EINVAL; + } + + return ret; +} + +const struct wiphy_vendor_command inff_vendor_cmds[] = { + { + INFF_SUBCMD(DCMD, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_dcmd_handler) + }, + { + INFF_SUBCMD(FRAMEBURST, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_frameburst) + }, + { + INFF_SUBCMD(MUEDCA_OPT, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_muedca_opt) + }, + { + INFF_SUBCMD(LDPC, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_ldpc_cap) + }, + { + INFF_SUBCMD(AMSDU, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_amsdu) + }, + { + INFF_SUBCMD(TWT, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + inff_vendor_attr_twt_policy, + inff_cfg80211_vndr_cmds_twt), + .maxattr = INFF_VENDOR_ATTR_TWT_MAX + }, + { + INFF_SUBCMD(OCE, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_oce_enable) + }, + { + INFF_SUBCMD(BSSCOLOR, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_bss_color) + }, + { + 
INFF_SUBCMD(RAND_MAC, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_randmac) + }, + { + INFF_SUBCMD(MBO, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + inff_vendor_attr_mbo_policy, + inff_cfg80211_vndr_cmds_mbo), + .maxattr = INFF_VENDOR_ATTR_MBO_MAX + }, + { + INFF_SUBCMD(MPC, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_mpc) + }, + { + INFF_SUBCMD(GIANTRX, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_giantrx) + }, + { + INFF_SUBCMD(WNM, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + inff_vendor_attr_wnm_policy, + inff_cfg80211_vndr_cmds_wnm_max_idle), + .maxattr = INFF_VENDOR_ATTR_WNM_MAX + }, + { + INFF_SUBCMD(HWCAPS, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_hwcaps), + }, + { + INFF_SUBCMD(WNM_WL_CAP, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_wnm_wl_cap) + }, + { + INFF_SUBCMD(CMDSTR, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_str) + }, + { + INFF_SUBCMD(PFN_CONFIG, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_config_pfn) + }, + { + INFF_SUBCMD(PFN_STATUS, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + inff_cfg80211_vndr_cmds_get_pfn_status) + }, + { + INFF_SUBCMD(SSID_PROT, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + inff_vendor_attr_ssid_prot_policy, + inff_cfg80211_vndr_cmds_ssid_prot), + .maxattr = INFF_VENDOR_ATTR_SSID_PROT_MAX + }, + { + INFF_SUBCMD(MCHAN_CONFIG, + (WIPHY_VENDOR_CMD_NEED_WDEV | + WIPHY_VENDOR_CMD_NEED_NETDEV), + VENDOR_CMD_RAW_DATA, + 
inff_cfg80211_vndr_cmds_mchan_config) + }, +}; + +const struct nl80211_vendor_cmd_info inff_vendor_events[] = { + INFF_SUBEVT(ICMP_ECHO_REQ) +}; + +int get_inff_num_vndr_cmds(void) +{ + int num = ARRAY_SIZE(inff_vendor_cmds); + + return num; +} + +int get_inff_num_vndr_evts(void) +{ + return ARRAY_SIZE(inff_vendor_events); +} diff --git a/drivers/net/wireless/infineon/inffmac/vendor.h b/drivers/net/wireless/infineon/inffmac/vendor.h new file mode 100644 index 000000000000..6e7d584d18a4 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/vendor.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_VENDOR_H +#define INFF_VENDOR_H + +/** + * enum inff_nlattrs - nl80211 message attributes + * + * @INFF_NLATTR_LEN: message body length + * @INFF_NLATTR_DATA: message body + */ +enum inff_nlattrs { + INFF_NLATTR_UNSPEC, + + INFF_NLATTR_LEN, + INFF_NLATTR_DATA, + + __INFF_NLATTR_AFTER_LAST, + INFF_NLATTR_MAX = __INFF_NLATTR_AFTER_LAST - 1 +}; + +/** + * struct inff_vndr_dcmd_hdr - message header for cfg80211 vendor command dcmd + * support + * + * @cmd: common dongle cmd definition + * @len: length of expecting return buffer + * @offset: offset of data buffer + * @set: get or set request(optional) + * @magic: magic number for verification + */ +struct inff_vndr_dcmd_hdr { + uint cmd; + int len; + uint offset; + uint set; + uint magic; +}; + +extern const struct wiphy_vendor_command inff_vendor_cmds[]; +extern const struct nl80211_vendor_cmd_info inff_vendor_events[]; +s32 inff_wiphy_icmp_echo_req_event_handler(struct inff_if *ifp, + const struct inff_event_msg *e, + void *data); +int get_inff_num_vndr_cmds(void); +int get_inff_num_vndr_evts(void); + +#endif /* INFF_VENDOR_H */ diff --git a/drivers/net/wireless/infineon/inffmac/vendor_inf.c 
b/drivers/net/wireless/infineon/inffmac/vendor_inf.c new file mode 100644 index 000000000000..4f9b896d4768 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/vendor_inf.c @@ -0,0 +1,1344 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2022-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include +#include "hw_ids.h" +#include "core.h" +#include "cfg80211.h" +#include "debug.h" +#include "fwil.h" +#include "vendor_inf.h" +#include "xtlv.h" +#include "he.h" +#include "twt.h" +#include "pno.h" +#include "bus.h" +#include +#include +#include +#include +#include +#include "common.h" +#include "chip.h" +#include "offload.h" + +static const struct inff_vendor_cmdstr inff_vndr_cmdstr[] = { + { "offload_config", inff_vndr_cmdstr_offload_config}, + { "mkeep_alive", inff_vndr_cmdstr_mkeep_alive}, + { "tko", inff_vndr_cmdstr_tko}, + { "icmp_echo_req", inff_vndr_cmdstr_icmp_echo_req}, + { NULL, NULL } +}; + +DEFINE_HASHTABLE(vndr_cmd_hashtbl, VNDR_CMD_HASH_BITS); + +static int inff_cfg80211_vndr_send_cmd_reply(struct wiphy *wiphy, + const void *data, int len) +{ + struct sk_buff *skb; + + /* Alloc the SKB for vendor_event */ + skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, len); + if (unlikely(!skb)) { + inff_err("skb alloc failed\n"); + return -ENOMEM; + } + + /* Push the data to the skb */ + nla_put_nohdr(skb, len, data); + return cfg80211_vendor_cmd_reply(skb); +} + +int inff_vndr_cmdstr_hashtbl_init(void) +{ + int i; + u32 jhash_key; + + inff_dbg(TRACE, "enter\n"); + + hash_init(vndr_cmd_hashtbl); + + /* Initializing the VENDOR CMD hashtable with all the string commmands + * and func_handler in inff_vndr_str_cmds + */ + for (i = 0; inff_vndr_cmdstr[i].name; i++) { + struct inff_vndr_cmdstr_hashtbl *vndr_hashtbl; + + vndr_hashtbl = kzalloc(sizeof(*vndr_hashtbl), GFP_KERNEL); + if (!vndr_hashtbl) + return -ENOMEM; + + vndr_hashtbl->vndr_cmd_addr = (struct inff_vendor_cmdstr 
*)&inff_vndr_cmdstr[i]; + jhash_key = jhash(inff_vndr_cmdstr[i].name, strlen(inff_vndr_cmdstr[i].name), 0); + hash_add(vndr_cmd_hashtbl, &vndr_hashtbl->node, jhash_key); + } + + return 0; +} + +void inff_vndr_cmdstr_hashtbl_deinit(void) +{ + struct inff_vndr_cmdstr_hashtbl *vndr_hashtbl; + struct hlist_node *tmp_node; + int i; + + hash_for_each_safe(vndr_cmd_hashtbl, i, tmp_node, vndr_hashtbl, node) { + hash_del(&vndr_hashtbl->node); + kfree(vndr_hashtbl); + } +} + +static void +inff_cfgvendor_twt_parse_params(const struct nlattr *attr_iter, + struct inff_twt_params *twt_params) +{ + int tmp, twt_param; + const struct nlattr *twt_param_iter; + + nla_for_each_nested(twt_param_iter, attr_iter, tmp) { + twt_param = nla_type(twt_param_iter); + switch (twt_param) { + case INFF_VENDOR_ATTR_TWT_PARAM_NEGO_TYPE: + twt_params->negotiation_type = nla_get_u8(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_SETUP_CMD_TYPE: + twt_params->setup_cmd = nla_get_u8(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_DIALOG_TOKEN: + twt_params->dialog_token = nla_get_u8(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_WAKE_TIME: + twt_params->twt = nla_get_u64(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_WAKE_TIME_OFFSET: + twt_params->twt_offset = nla_get_u64(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_MIN_WAKE_DURATION: + twt_params->min_twt = nla_get_u8(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_WAKE_INTVL_EXPONENT: + twt_params->exponent = nla_get_u8(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_WAKE_INTVL_MANTISSA: + twt_params->mantissa = nla_get_u16(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_REQUESTOR: + twt_params->requestor = nla_get_u8(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_TRIGGER: + twt_params->trigger = nla_get_u8(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_IMPLICIT: + twt_params->implicit = nla_get_u8(twt_param_iter); + break; 
+ case INFF_VENDOR_ATTR_TWT_PARAM_FLOW_TYPE: + twt_params->flow_type = nla_get_u8(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_FLOW_ID: + twt_params->flow_id = nla_get_u8(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_BCAST_TWT_ID: + twt_params->bcast_twt_id = nla_get_u8(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_PROTECTION: + twt_params->protection = nla_get_u8(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_CHANNEL: + twt_params->twt_channel = nla_get_u8(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_TWT_INFO_FRAME_DISABLED: + twt_params->twt_info_frame_disabled = nla_get_u8(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_MIN_WAKE_DURATION_UNIT: + twt_params->min_twt_unit = nla_get_u8(twt_param_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAM_TEARDOWN_ALL_TWT: + twt_params->teardown_all_twt = nla_get_u8(twt_param_iter); + break; + default: + inff_dbg(TRACE, "Unknown TWT param %d, skipping\n", + twt_param); + break; + } + } +} + +int inff_cfg80211_vndr_cmds_twt(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + int tmp, attr_type; + const struct nlattr *attr_iter; + + struct inff_twt_params twt_params = { + .twt_oper = 0, + .negotiation_type = INFF_TWT_PARAM_NEGO_TYPE_ITWT, + .setup_cmd = INFF_TWT_OPER_SETUP_CMD_TYPE_REQUEST, + .dialog_token = 1, + .twt = 0, + .twt_offset = 0, + .requestor = 1, + .trigger = 0, + .implicit = 1, + .flow_type = 0, + .flow_id = 0, + .bcast_twt_id = 0, + .protection = 0, + .twt_channel = 0, + .twt_info_frame_disabled = 0, + .min_twt_unit = 0, + .teardown_all_twt = 0 + }; + + nla_for_each_attr(attr_iter, data, len, tmp) { + attr_type = nla_type(attr_iter); + + switch (attr_type) { + case INFF_VENDOR_ATTR_TWT_OPER: + twt_params.twt_oper = nla_get_u8(attr_iter); + break; + case INFF_VENDOR_ATTR_TWT_PARAMS: + inff_cfgvendor_twt_parse_params(attr_iter, &twt_params); + break; + default: + inff_dbg(TRACE, "Unknown TWT attribute 
%d, skipping\n", + attr_type); + break; + } + } + + return (int)inff_twt_oper(wiphy, wdev, twt_params); +} + +int inff_cfg80211_vndr_cmds_bss_color(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + u8 val = *(u8 *)data; + u8 param[8] = {0}; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + if (val == 0xa) { + ret = inff_he_get_bss_color(ifp, param, sizeof(param)); + if (!ret) + inff_cfg80211_vndr_send_cmd_reply(wiphy, param, 1); + } else { + inff_dbg(INFO, "not support set bsscolor during runtime!\n"); + } + + return ret; +} + +int inff_cfg80211_vndr_cmds_muedca_opt(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + u8 val = *(u8 *)data; + u8 param[8] = {0}; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + if (val == 0xa) { + ret = inff_he_get_muedca_opt(ifp, param, sizeof(param)); + if (!ret) + inff_cfg80211_vndr_send_cmd_reply(wiphy, param, 1); + } else { + ret = inff_he_set_muedca_opt(ifp, val); + } + + return ret; +} + +int inff_cfg80211_vndr_cmds_amsdu(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + int val = *(s32 *)data; + s32 get_amsdu = 0; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + if (val == 0xa) { + ret = inff_fil_iovar_int_get(ifp, "amsdu", &get_amsdu); + if (ret) { + inff_err("get amsdu error:%d\n", ret); + + return ret; + } + + inff_dbg(INFO, "get amsdu: %d\n", get_amsdu); + inff_cfg80211_vndr_send_cmd_reply(wiphy, + &get_amsdu, sizeof(int)); + } else { + ret = inff_fil_iovar_int_set(ifp, "amsdu", val); + if (ret) + inff_err("set amsdu error:%d\n", ret); + } + + return ret; +} + +int inff_cfg80211_vndr_cmds_ldpc_cap(struct wiphy 
*wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + int val = *(s32 *)data; + s32 buf = 0; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + if (val == 0xa) { + ret = inff_fil_iovar_int_get(ifp, "ldpc_cap", &buf); + if (ret) { + inff_err("get ldpc_cap error:%d\n", ret); + + return ret; + } + + inff_dbg(INFO, "get ldpc_cap: %d\n", buf); + inff_cfg80211_vndr_send_cmd_reply(wiphy, &buf, sizeof(int)); + } else { + ret = inff_fil_iovar_int_set(ifp, "ldpc_cap", val); + if (ret) + inff_err("set ldpc_cap error:%d\n", ret); + } + + return ret; +} + +int inff_cfg80211_vndr_cmds_oce_enable(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + struct inff_iov_buf *oce_iov; + struct inff_xtlv *oce_xtlv; + u8 val = *(u8 *)data; + u8 param[16] = {0}; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + oce_iov = (struct inff_iov_buf *)param; + oce_iov->version = cpu_to_le16(INFF_OCE_IOV_VERSION); + oce_iov->id = cpu_to_le16(INFF_OCE_CMD_ENABLE); + oce_xtlv = (struct inff_xtlv *)oce_iov->data; + + if (val == 0xa) { + /* To get fw iovars of the form "wl oce enable" + * using iw, call the parent iovar "oce" with the subcmd + * filled and passed along + * ./iw dev wlan0 vendor recv 0x000319 0xf 0xa + */ + ret = inff_fil_iovar_data_get(ifp, "oce", + param, sizeof(param)); + if (ret) { + inff_err("get oce enable error:%d\n", ret); + } else { + inff_dbg(INFO, + "get oce enable: %d\n", oce_xtlv->data[0]); + inff_cfg80211_vndr_send_cmd_reply(wiphy, oce_xtlv->data, + sizeof(int)); + } + } else { + oce_iov->len = cpu_to_le16(8); + oce_xtlv->id = cpu_to_le16(INFF_OCE_XTLV_ENABLE); + oce_xtlv->len = cpu_to_le16(1); + oce_xtlv->data[0] = val; + ret = inff_fil_iovar_data_set(ifp, "oce", + param, sizeof(param)); + if (ret) + inff_err("set oce 
enable error:%d\n", ret); + } + + return ret; +} + +int inff_cfg80211_vndr_cmds_randmac(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len) +{ + int ret = 0; + struct inff_randmac iov_buf = {0}; + u8 val = *(u8 *)data; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + iov_buf.version = WL_RANDMAC_API_VERSION; + iov_buf.subcmd_id = WL_RANDMAC_SUBCMD_ENABLE; + iov_buf.len = offsetof(struct inff_randmac, data); + + if (val == 0x1) { + /* To set fw iovars of the form "wl randmac enable" using iw, call the + * parent iovar "randmac" with the subcmd filled and passed along + * ./iw dev wlan0 vendor send 0x000319 0x11 0x1 + */ + ret = inff_fil_bsscfg_data_set(ifp, "randmac", (void *)&iov_buf, iov_buf.len); + if (ret) + inff_err("Failed to set randmac enable: %d\n", ret); + } else if (val == 0x0) { + iov_buf.subcmd_id = WL_RANDMAC_SUBCMD_DISABLE; + /* To set fw iovars of the form "wl randmac disable" using iw, call the + * parent iovar "randmac" with the subcmd filled and passed along + * ./iw dev wlan0 vendor send 0x000319 0x11 0x0 + */ + ret = inff_fil_bsscfg_data_set(ifp, "randmac", (void *)&iov_buf, iov_buf.len); + if (ret) + inff_err("Failed to set randmac disable: %d\n", ret); + } else if (val == 0xa) { + int result_data = 0; + struct inff_randmac *iov_resp = NULL; + u8 buf[64] = {0}; + /* To get fw iovars of the form "wl randmac" using iw, call the + * parent iovar "randmac" with the subcmd filled and passed along + * ./iw dev wlan0 vendor recv 0x000319 0x11 0xa + */ + memcpy(buf, (void *)&iov_buf, iov_buf.len); + ret = inff_fil_iovar_data_get(ifp, "randmac", (void *)buf, sizeof(buf)); + if (ret) { + inff_err("Failed to get randmac enable or disable: %d\n", ret); + } else { + iov_resp = (struct inff_randmac *)buf; + if (iov_resp->subcmd_id == WL_RANDMAC_SUBCMD_ENABLE) + result_data = 1; + inff_cfg80211_vndr_send_cmd_reply(wiphy, &result_data, 
sizeof(int)); + } + } + return ret; +} + +int inff_cfg80211_vndr_cmds_mbo(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0; + int tmp, attr_type, mbo_param; + const struct nlattr *attr_iter, *mbo_param_iter; + + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + struct inff_iov_buf *mbo_iov; + struct inff_xtlv *mbo_xtlv; + u8 param[64] = {0}; + u16 buf_len = 0, buf_len_start = 0; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + mbo_iov = (struct inff_iov_buf *)param; + mbo_iov->version = cpu_to_le16(INFF_MBO_IOV_VERSION); + mbo_xtlv = (struct inff_xtlv *)mbo_iov->data; + buf_len_start = sizeof(param) - sizeof(struct inff_iov_buf); + buf_len = buf_len_start; + + nla_for_each_attr(attr_iter, data, len, tmp) { + attr_type = nla_type(attr_iter); + + switch (attr_type) { + case INFF_VENDOR_ATTR_MBO_CMD: + mbo_iov->id = cpu_to_le16(nla_get_u8(attr_iter)); + break; + case INFF_VENDOR_ATTR_MBO_PARAMS: + nla_for_each_nested(mbo_param_iter, attr_iter, tmp) { + mbo_param = nla_type(mbo_param_iter); + + switch (mbo_param) { + case INFF_VENDOR_ATTR_MBO_PARAM_OPCLASS: + { + u8 op_class; + + op_class = nla_get_u8(mbo_param_iter); + inff_pack_xtlv(INFF_VENDOR_ATTR_MBO_PARAM_OPCLASS, + &op_class, sizeof(op_class), + (char **)&mbo_xtlv, &buf_len); + } + break; + case INFF_VENDOR_ATTR_MBO_PARAM_CHAN: + { + u8 chan; + + chan = nla_get_u8(mbo_param_iter); + inff_pack_xtlv(INFF_VENDOR_ATTR_MBO_PARAM_CHAN, + &chan, sizeof(chan), + (char **)&mbo_xtlv, &buf_len); + } + break; + case INFF_VENDOR_ATTR_MBO_PARAM_PREFERENCE: + { + u8 pref; + + pref = nla_get_u8(mbo_param_iter); + inff_pack_xtlv(INFF_VENDOR_ATTR_MBO_PARAM_PREFERENCE, + &pref, sizeof(pref), + (char **)&mbo_xtlv, &buf_len); + } + break; + case INFF_VENDOR_ATTR_MBO_PARAM_REASON_CODE: + { + u8 reason; + + reason = nla_get_u8(mbo_param_iter); + inff_pack_xtlv(INFF_VENDOR_ATTR_MBO_PARAM_REASON_CODE, + &reason, sizeof(reason), + (char **)&mbo_xtlv, 
&buf_len); + } + break; + case INFF_VENDOR_ATTR_MBO_PARAM_CELL_DATA_CAP: + { + u8 cell_data_cap; + + cell_data_cap = nla_get_u8(mbo_param_iter); + inff_pack_xtlv(INFF_VENDOR_ATTR_MBO_PARAM_CELL_DATA_CAP, + &cell_data_cap, sizeof(cell_data_cap), + (char **)&mbo_xtlv, &buf_len); + } + break; + case INFF_VENDOR_ATTR_MBO_PARAM_COUNTERS: + break; + case INFF_VENDOR_ATTR_MBO_PARAM_ENABLE: + { + u8 enable; + + enable = nla_get_u8(mbo_param_iter); + inff_pack_xtlv(INFF_VENDOR_ATTR_MBO_PARAM_ENABLE, + &enable, sizeof(enable), + (char **)&mbo_xtlv, &buf_len); + } + break; + case INFF_VENDOR_ATTR_MBO_PARAM_SUB_ELEM_TYPE: + { + u8 type; + + type = nla_get_u8(mbo_param_iter); + inff_pack_xtlv(INFF_VENDOR_ATTR_MBO_PARAM_SUB_ELEM_TYPE, + &type, sizeof(type), + (char **)&mbo_xtlv, &buf_len); + } + break; + case INFF_VENDOR_ATTR_MBO_PARAM_BTQ_TRIG_START_OFFSET: + case INFF_VENDOR_ATTR_MBO_PARAM_BTQ_TRIG_RSSI_DELTA: + case INFF_VENDOR_ATTR_MBO_PARAM_ANQP_CELL_SUPP: + case INFF_VENDOR_ATTR_MBO_PARAM_BIT_MASK: + case INFF_VENDOR_ATTR_MBO_PARAM_ASSOC_DISALLOWED: + case INFF_VENDOR_ATTR_MBO_PARAM_CELLULAR_DATA_PREF: + return -EOPNOTSUPP; + default: + inff_err("unknown mbo param attr:%d\n", mbo_param); + return -EINVAL; + } + } + break; + default: + inff_err("Unknown MBO attribute %d, skipping\n", + attr_type); + return -EINVAL; + } + } + + buf_len = buf_len_start - buf_len; + mbo_xtlv->len = cpu_to_le16(buf_len); + mbo_iov->len = cpu_to_le16(buf_len); + buf_len += sizeof(struct inff_iov_buf); + ret = inff_fil_iovar_data_set(ifp, "mbo", param, buf_len); + + if (ret) + inff_err("set mbo enable error:%d\n", ret); + + return ret; +} + +int inff_cfg80211_vndr_cmds_mpc(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + int val = *(s32 *)data; + s32 buf = 0; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + if (val == 0xa) { + ret = 
inff_fil_iovar_int_get(ifp, "mpc", &buf); + if (ret) { + inff_err("get mpc error:%d\n", ret); + return ret; + } + + inff_dbg(INFO, "get mpc: %d\n", buf); + inff_cfg80211_vndr_send_cmd_reply(wiphy, &buf, sizeof(int)); + } else { + ret = inff_fil_iovar_int_set(ifp, "mpc", val); + if (ret) + inff_err("set mpc error:%d\n", ret); + } + + return ret; +} + +int inff_cfg80211_vndr_cmds_giantrx(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + int val = *(s32 *)data; + s32 buf = 0; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + if (val == 0xa) { + ret = inff_fil_iovar_int_get(ifp, "giantrx", &buf); + if (ret) { + inff_err("get giantrx error:%d\n", ret); + return ret; + } + + inff_dbg(INFO, "get giantrx: %d\n", buf); + inff_cfg80211_vndr_send_cmd_reply(wiphy, &buf, sizeof(int)); + } else { + inff_fil_cmd_int_set(ifp, INFF_C_DOWN, 1); + ret = inff_fil_iovar_int_set(ifp, "giantrx", val); + inff_fil_cmd_int_set(ifp, INFF_C_UP, 1); + if (ret) + inff_err("set giantrx error:%d\n", ret); + } + return ret; +} + +int inff_cfg80211_vndr_cmds_wnm_max_idle(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + int tmp, attr_type = 0, wnm_param = 0, ret = 0; + const struct nlattr *attr_iter, *wnm_param_iter; + + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + u8 param[64] = {0}, get_info = 0; + u16 buf_len = 0, wnm_id = 0; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + nla_for_each_attr(attr_iter, data, len, tmp) { + attr_type = nla_type(attr_iter); + + switch (attr_type) { + case INFF_VENDOR_ATTR_WNM_CMD: + wnm_id = cpu_to_le16(nla_get_u8(attr_iter)); + break; + case INFF_VENDOR_ATTR_WNM_PARAMS: + nla_for_each_nested(wnm_param_iter, attr_iter, tmp) { + wnm_param = nla_type(wnm_param_iter); + switch (wnm_param) { + case INFF_VENDOR_ATTR_WNM_PARAM_GET_INFO: + { + get_info = 
(int)nla_get_u8(wnm_param_iter); + } + break; + case INFF_VENDOR_ATTR_WNM_PARAM_IDLE_PERIOD: + { + int period; + + period = (int)nla_get_u8(wnm_param_iter); + memcpy(¶m[buf_len], &period, sizeof(period)); + buf_len += sizeof(period); + } + break; + case INFF_VENDOR_ATTR_WNM_PARAM_PROTECTION_OPT: + { + int option; + + option = (int)nla_get_u8(wnm_param_iter); + memcpy(¶m[buf_len], &option, sizeof(option)); + buf_len += sizeof(option); + } + break; + default: + inff_err("unknown wnm param attr:%d\n", wnm_param); + return -EINVAL; + } + } + break; + default: + inff_err("Unknown wnm attribute %d, skipping\n", + attr_type); + return -EINVAL; + } + } + + switch (wnm_id) { + case INFF_WNM_CMD_IOV_WNM_MAXIDLE: + { + if (get_info) { + int get_period = 0; + + ret = inff_fil_iovar_int_get(ifp, "wnm_maxidle", &get_period); + if (!ret) + ret = inff_cfg80211_vndr_send_cmd_reply(wiphy, + &get_period, + sizeof(get_period)); + } else { + ret = inff_fil_iovar_data_set(ifp, "wnm_maxidle", param, buf_len); + } + } + break; + + default: + inff_err("unsupport wnm cmd:%d\n", wnm_id); + return -EINVAL; + } + + if (ret) + inff_err("wnm %s error:%d\n", get_info ? 
"get" : "set", ret); + + return ret; +} + +int inff_cfg80211_vndr_cmds_hwcaps(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0, i; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + struct inff_bus *bus_if; + s32 buf[INFF_VENDOR_HW_CAPS_MAX] = {0}; + struct inff_chip *ci; + struct inff_chip_specific *chip_spec; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + bus_if = ifp->drvr->bus_if; + ci = bus_if->chip_pub; + chip_spec = &ci->chip_spec; + + if (chip_spec->hw_caps_replaycnts) { + buf[INFF_VENDOR_HW_CAPS_REPLAYCNTS] = chip_spec->hw_caps_replaycnts; + } else { + inff_err("chip:%d doesn't specify hw_caps_replaycnts\n", ci->chip); + ret = -EINVAL; + goto done; + } + + ret = inff_cfg80211_vndr_send_cmd_reply(wiphy, buf, sizeof(int)); + if (ret) { + inff_dbg(INFO, "get HW capability error %d\n", ret); + } else { + for (i = 0; i < INFF_VENDOR_HW_CAPS_MAX; i++) + inff_dbg(INFO, "get %s: %d\n", hw_caps_name[i], buf[i]); + } + +done: + return ret; +} + +int inff_cfg80211_vndr_cmds_wnm_wl_cap(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + int val = *(s32 *)data; + s32 buf = 0; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + if (val == 0xffff) { + ret = inff_fil_iovar_int_get(ifp, "wnm", &buf); + if (ret) { + inff_err("get wnm_wl_cap error:%d\n", ret); + return ret; + } + + inff_dbg(INFO, "get wnm_wl_cap: %d\n", buf); + inff_cfg80211_vndr_send_cmd_reply(wiphy, &buf, sizeof(int)); + } else { + ret = inff_fil_iovar_int_set(ifp, "wnm", val); + if (ret) + inff_err("set wnm_wl_cap error:%d\n", ret); + } + + return ret; +} + +int inff_vndr_cmdstr_offload_config(struct wiphy *wiphy, struct wireless_dev *wdev, + char cmd_str[VNDR_CMD_STR_NUM][VNDR_CMD_STR_MAX_LEN], + long cmd_val[VNDR_CMD_VAL_NUM]) +{ + struct inff_cfg80211_vif *vif; + struct inff_if 
*ifp; + int ret = 0; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + /* IW CMDSTR TEMPLATE. + * echo 'offload_config Enable 1 ' | \ + * iw dev wlan0 vendor send 0x000319 0x1C - + * + * echo 'offload_config Profile LowPwr 1 -s 0x3df ' | \ + * iw dev wlan0 vendor send 0x000319 0x1C - + * + */ + if (inff_cfg80211_get_iftype(ifp) == NL80211_IFTYPE_STATION && + inff_feat_is_enabled(ifp, INFF_FEAT_OFFLOADS)) { + if (cmd_str[1][0] != '\0' && (strlen(cmd_str[1]) == 6) && + (memcmp(cmd_str[1], "Enable", 6)) == 0 && + (cmd_val[0] == 0 || cmd_val[0] == 1)) { + inff_offload_enable(ifp, inff_offload_feat, cmd_val[0]); + } else if (cmd_str[1][0] != '\0' && (strlen(cmd_str[1]) == 7) && + (memcmp(cmd_str[1], "Profile", 7)) == 0) { + if (cmd_str[2][0] != '\0') { + unsigned int ol_prof; + + if ((strlen(cmd_str[2]) == 6) && + (memcmp(cmd_str[2], "LowPwr", 6)) == 0) { + ol_prof = INFF_OFFLOAD_PROF_TYPE_LOW_PWR; + } else if ((strlen(cmd_str[2]) == 6) && + (memcmp(cmd_str[2], "MidPwr", 6)) == 0) { + ol_prof = INFF_OFFLOAD_PROF_TYPE_MID_PWR; + } else if ((strlen(cmd_str[2]) == 7) && + (memcmp(cmd_str[2], "HighPwr", 7)) == 0) { + ol_prof = INFF_OFFLOAD_PROF_TYPE_HIGH_PWR; + } else { + inff_err("unknown offload_config Profile attr\n"); + return -EINVAL; + } + if (cmd_str[3][0] != '\0' && (strlen(cmd_str[3]) == 2) && + (memcmp(cmd_str[3], "-s", 2)) == 0) + inff_offload_config(ifp, ~cmd_val[1], ol_prof, cmd_val[0]); + else + inff_offload_config(ifp, inff_offload_feat, ol_prof, + cmd_val[0]); + } else { + inff_err("unknown offload_config Profile attr\n"); + return -EINVAL; + } + } else { + inff_err("unknown offload_config attr\n"); + return -EINVAL; + } + } else { + inff_err("Offload unsupported for iface %d\n", inff_cfg80211_get_iftype(ifp)); + return -EINVAL; + } + + return ret; +} + +int inff_vndr_cmdstr_mkeep_alive(struct wiphy *wiphy, struct wireless_dev *wdev, + char cmd_str[VNDR_CMD_STR_NUM][VNDR_CMD_STR_MAX_LEN], + long *cmd_val) +{ + struct 
inff_cfg80211_vif *vif; + struct inff_if *ifp; + int ret = 0; + bool immed_flag = 0; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + /* NULL Keep-Alive + * echo 'mkeep_alive 0 1000 ' | \ + * iw dev wlan0 vendor send 0x000319 0x1C - + * + * NAT Keep-Alive + * echo 'mkeep_alive 0 1000 0x080027b1050a00904c3104 + * 0008004500001e000040004011c52a0a8830700a88302513c + * 413c5000a00000a0d ' | \ + * iw dev wlan0 vendor send 0x000319 0x1C - + */ + if (cmd_val[0] < 0 || cmd_val[0] > 4 || cmd_val[1] < 0) { + inff_err("Invalid command value\n"); + ret = -EINVAL; + goto exit; + } + + if (cmd_str[1][0] != '\0' && (strlen(cmd_str[1]) == 9) && + (memcmp(cmd_str[1], "immediate", 9)) == 0) + immed_flag = 1; + + ret = inff_offload_configure_mkeep_alive(ifp, immed_flag, &cmd_val[0], + &cmd_val[1], &cmd_val[2], + VNDR_CMD_VAL_NUM); +exit: + return ret; +} + +int inff_vndr_cmdstr_tko(struct wiphy *wiphy, struct wireless_dev *wdev, + char cmd_str[VNDR_CMD_STR_NUM][VNDR_CMD_STR_MAX_LEN], + long *cmd_val) +{ + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + int ret = 0; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + if (cmd_str[1][0] != '\0' && (strlen(cmd_str[1]) == 5) && + (memcmp(cmd_str[1], "param", 5) == 0) && + (cmd_val[0] >= 0 && cmd_val[1] >= 0 && + cmd_val[2] >= 0 && cmd_val[3] >= 0)) { + /* echo 'tko param 10 4 10 0 ' | \ + * iw dev wlan0 vendor send 0x000319 0x1C - + */ + ret = inff_offload_configure_tko(ifp, WL_TKO_SUBCMD_PARAM, + &cmd_val[0], &cmd_val[1], + &cmd_val[2], &cmd_val[3]); + } else if (cmd_str[1][0] != '\0' && (strlen(cmd_str[1]) == 6) && + (memcmp(cmd_str[1], "enable", 6) == 0) && + (cmd_val[0] == 0 || cmd_val[0] == 1)) { + /* echo 'tko enable 1 ' | \ + * iw dev wlan0 vendor send 0x000319 0x1C - + */ + ret = inff_offload_configure_tko(ifp, WL_TKO_SUBCMD_ENABLE, + &cmd_val[0], NULL, NULL, NULL); + } else { + inff_err("Invalid tko command format\n"); + ret = -EINVAL; + } + + return ret; 
+}
+
+/* inff_vndr_cmdstr_parse_ip()
+ * Detect the IP version of the given string and parse it into a binary
+ * IP address (IPv4 is tried first, then IPv6).
+ * In Param
+ * @cmd_str - String to be parsed.
+ * @ip_addr - Parsed IP address storage.
+ * Out Param
+ * @ip_ver - Pointer to IP version.
+ * Return
+ * true - success.
+ * false - otherwise.
+ */
+static
+int inff_vndr_cmdstr_parse_ip(char *cmd_str, u8 *ip_addr, u8 *ip_ver)
+{
+	if (in4_pton(cmd_str, strlen(cmd_str), ip_addr, -1, NULL)) {
+		*ip_ver = INFF_OFFLOAD_ICMP_ECHO_REQ_IP_V4;
+		inff_dbg(INFO, "Peer IP Version: %d Peer IPv4 Address: %pI4\n",
+			 *ip_ver, ip_addr);
+		return true;
+	} else if (in6_pton(cmd_str, strlen(cmd_str), ip_addr, -1, NULL)) {
+		*ip_ver = INFF_OFFLOAD_ICMP_ECHO_REQ_IP_V6;
+		inff_dbg(INFO, "Peer IP Version: %d Peer IPv6 Address: %pI6\n",
+			 *ip_ver, ip_addr);
+		return true;
+	}
+
+	return false;
+}
+
+int inff_vndr_cmdstr_icmp_echo_req(struct wiphy *wiphy, struct wireless_dev *wdev,
+				   char cmd_str[VNDR_CMD_STR_NUM][VNDR_CMD_STR_MAX_LEN],
+				   long *cmd_val)
+{
+	struct inff_cfg80211_vif *vif;
+	struct inff_cfg80211_info *cfg;
+	struct inff_if *ifp;
+	bool is_ip = false;
+	/* Zero-initialize: not every subcommand branch fills these in, but
+	 * they are all passed to inff_offload_configure_icmp_echo_req() and
+	 * mac_addr is logged with %pM below.
+	 */
+	u8 cmd_type, enable = 0, ip_ver = 0;
+	u32 periodicity = 0, duration = 0;
+	u8 ip_addr[INFF_IPV6_ADDR_LEN] = {0};
+	u8 mac_addr[ETH_ALEN] = {0};
+	struct inff_icmp_echo_req_get_info *icmp_echo_req_get_info;
+	int ret = 0;
+
+	vif = container_of(wdev, struct inff_cfg80211_vif, wdev);
+	ifp = vif->ifp;
+
+	cfg = ifp->drvr->config;
+
+	if (cmd_str[1][0] != '\0' && (strlen(cmd_str[1]) == 6) &&
+	    (memcmp(cmd_str[1], "enable", 6) == 0) &&
+	    (cmd_val[0] == 0 || cmd_val[0] == 1)) {
+		/* echo 'icmp_echo_req enable 0/1 ' | \
+		 * iw dev wlan0 vendor send 0x000319 0x1C -
+		 */
+		cmd_type = INFF_OFFLOAD_ICMP_ECHO_REQ_ENAB;
+		enable = cmd_val[0];
+		inff_dbg(INFO, "Cmd Type: %d enable: %d\n", cmd_type, enable);
+	} else if (cmd_str[1][0] != '\0' && (strlen(cmd_str[1]) == 3) &&
+		   (memcmp(cmd_str[1], "add", 3)) == 0) {
+		/* echo 'icmp_echo_req add ' | \
+		 * iw dev wlan0 vendor send 0x000319
0x1C - + */ + cmd_type = INFF_OFFLOAD_ICMP_ECHO_REQ_ADD; + is_ip = true; + + if (cmd_str[3][0] != '\0') { + if (!mac_pton(cmd_str[3], mac_addr)) { + inff_err("Invalid icmp_echo_req peer MAC address\n"); + return -EINVAL; + } + } + + if (cmd_val[0] >= 0 && cmd_val[1] >= 0) { + periodicity = cmd_val[0]; + duration = cmd_val[1]; + } + inff_dbg(INFO, "Cmd Type: %d MAC Address: %pM Periodicity: %d Duration: %d\n", + cmd_type, mac_addr, periodicity, duration); + } else if (cmd_str[1][0] != '\0' && (strlen(cmd_str[1]) == 3) && + (memcmp(cmd_str[1], "del", 3)) == 0) { + /* echo 'icmp_echo_req del ' | \ + * iw dev wlan0 vendor send 0x000319 0x1C - + */ + cmd_type = INFF_OFFLOAD_ICMP_ECHO_REQ_DEL; + is_ip = true; + inff_dbg(INFO, "Cmd Type: %d\n", cmd_type); + } else if (cmd_str[1][0] != '\0' && (strlen(cmd_str[1]) == 5) && + (memcmp(cmd_str[1], "start", 5)) == 0) { + /* echo 'icmp_echo_req start ' | \ + * iw dev wlan0 vendor send 0x000319 0x1C - + */ + cmd_type = INFF_OFFLOAD_ICMP_ECHO_REQ_START; + is_ip = true; + inff_dbg(INFO, "Cmd Type: %d\n", cmd_type); + } else if (cmd_str[1][0] != '\0' && (strlen(cmd_str[1]) == 4) && + (memcmp(cmd_str[1], "stop", 4)) == 0) { + /* echo 'icmp_echo_req stop ' | \ + * iw dev wlan0 vendor send 0x000319 0x1C - + */ + cmd_type = INFF_OFFLOAD_ICMP_ECHO_REQ_STOP; + is_ip = true; + inff_dbg(INFO, "Cmd type: %d\n", cmd_type); + + } else if (cmd_str[1][0] != '\0' && (strlen(cmd_str[1]) == 4) && + (memcmp(cmd_str[1], "info", 4)) == 0) { + /* echo 'icmp_echo_req info ' | \ + * iw dev wlan0 vendor recv 0x000319 0x1C - + * + * echo 'icmp_echo_req info all ' | \ + * iw dev wlan0 vendor recv 0x000319 0x1C - + */ + cmd_type = INFF_OFFLOAD_ICMP_ECHO_REQ_INFO; + + if (cmd_str[2][0] != '\0' && (strlen(cmd_str[2]) == 3) && + (memcmp(cmd_str[2], "all", 3)) == 0) + ip_ver = INFF_OFFLOAD_ICMP_ECHO_REQ_IP_BOTH; + else + is_ip = true; + inff_dbg(INFO, "Cmd Type: %d\n", cmd_type); + } else { + inff_err("Invalid icmp_echo_req command format\n"); + return -EINVAL; 
+ } + + if (is_ip && (cmd_str[2][0] != '\0')) { + if (!inff_vndr_cmdstr_parse_ip(cmd_str[2], ip_addr, &ip_ver)) { + inff_err("Invalid peer IP address\n"); + return -EINVAL; + } + } + + ret = inff_offload_configure_icmp_echo_req(ifp, cmd_type, enable, ip_addr, + ip_ver, mac_addr, periodicity, + duration); + if (!ret && cmd_type == INFF_OFFLOAD_ICMP_ECHO_REQ_INFO) { + icmp_echo_req_get_info = (struct inff_icmp_echo_req_get_info *)cfg->extra_buf; + inff_cfg80211_vndr_send_cmd_reply(wiphy, (void *)icmp_echo_req_get_info, + icmp_echo_req_get_info->length); + } + + return ret; +} + +int inff_cfg80211_vndr_cmds_str(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + struct inff_vndr_cmdstr_hashtbl *hash_entry; + u32 jhash_key; + int ret = 0, idx_str = 0, idx_val = 0; + unsigned long val; + char cmd_str[VNDR_CMD_STR_NUM][VNDR_CMD_STR_MAX_LEN]; + long *cmd_val = NULL; + char *tok = NULL, *buf = NULL; + + cmd_val = kzalloc(sizeof(*cmd_val) * VNDR_CMD_VAL_NUM, GFP_KERNEL); + if (!cmd_val) + return -ENOMEM; + + buf = (char *)data; + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + memset(cmd_str, '\0', VNDR_CMD_STR_NUM * VNDR_CMD_STR_MAX_LEN * sizeof(char)); + memset(cmd_val, -1, VNDR_CMD_VAL_NUM * sizeof(*cmd_val)); + + while (idx_str < VNDR_CMD_STR_NUM && idx_val < VNDR_CMD_VAL_NUM && + ((tok = strsep(&buf, " ")) != NULL)) { + if (kstrtol(tok, 10, &val) == 0) { + cmd_val[idx_val] = val; + idx_val++; + } else if ((strncmp(tok, "0x", 2) == 0) || (strncmp(tok, "0X", 2) == 0)) { + if (kstrtol(tok, 16, &val) == 0) { + cmd_val[idx_val] = val; + idx_val++; + + } else if (strnlen(tok, VNDR_CMD_VAL_NUM) >= 20) { + /* For larger input hex, split the hex pattern into 2 bytes each + * and store it individually. + */ + tok = tok + 2;/* Skip past 0x */ + if (strlen(tok) % 2 != 0) { + inff_err("Data invalid format. 
Even length required\n"); + ret = -EINVAL; + goto exit; + } + while (*tok != '\0') { + char num[3]; + + if (idx_val >= VNDR_CMD_VAL_NUM) { + inff_err("pkt header hex length exceeded\n"); + ret = -EINVAL; + goto exit; + } + memcpy(num, tok, 2); + num[2] = '\0'; + if (kstrtol(num, 16, &val) == 0) { + cmd_val[idx_val] = val; + } else { + inff_err("Invalid hex pkt data\n"); + ret = -EINVAL; + goto exit; + } + tok += 2; + idx_val++; + } + cmd_val[idx_val] = ' '; + } else { + inff_err("Failed to parse hex token\n"); + ret = -EINVAL; + goto exit; + } + } else if (strnlen(tok, VNDR_CMD_STR_MAX_LEN) <= VNDR_CMD_STR_MAX_LEN) { + strscpy(cmd_str[idx_str], tok, strnlen(tok, VNDR_CMD_STR_MAX_LEN)); + idx_str++; + } else { + inff_err("Failed to parse token\n"); + ret = -EINVAL; + goto exit; + } + } + if (idx_str >= VNDR_CMD_STR_NUM || idx_val >= VNDR_CMD_VAL_NUM) { + inff_err("CMD parameter limit exceeded\n"); + ret = -EINVAL; + goto exit; + } + /* Run the user cmd string input via Jenkins hash to pass and search the entry in + * vendor cmd hashtable initialized at load time. + */ + jhash_key = jhash(cmd_str[0], strlen(cmd_str[0]), 0); + + /* Search the user entered vndr cmd entry in the hash table and call its corresponding + * function handler. 
+ */ + hash_for_each_possible(vndr_cmd_hashtbl, hash_entry, node, jhash_key) { + if (hash_entry->vndr_cmd_addr && + (strlen(cmd_str[0]) == strlen(hash_entry->vndr_cmd_addr->name)) && + memcmp(hash_entry->vndr_cmd_addr->name, cmd_str[0], + strlen(hash_entry->vndr_cmd_addr->name)) == 0) { + ret = hash_entry->vndr_cmd_addr->func(wiphy, wdev, + cmd_str, cmd_val); + break; + } + } + +exit: + kfree(cmd_val); + return ret; +} + +int inff_cfg80211_vndr_cmds_config_pfn(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + int buflen; + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct drv_config_pfn_params *pfn_data; + + inff_dbg(TRACE, "Enter pfn_enable %d Network_blob count %d\n", + cfg->pfn_enable, *((u8 *)data)); + + cfg->pfn_enable = 1; + pfn_data = (struct drv_config_pfn_params *)data; + cfg->pfn_data.pfn_config = pfn_data->pfn_config; + cfg->pfn_data.count = pfn_data->count; + + if (cfg->pfn_data.count > INFF_PNO_MAX_PFN_COUNT) { + inff_dbg(TRACE, "Not in range. 
Max 16 ssids allowed to add in pfn list"); + cfg->pfn_data.count = INFF_PNO_MAX_PFN_COUNT; + } + + buflen = cfg->pfn_data.count * sizeof(struct network_blob); + cfg->pfn_data.network_blob_data = kmalloc(buflen, GFP_KERNEL); + memset(cfg->pfn_data.network_blob_data, '\0', buflen); + memcpy(cfg->pfn_data.network_blob_data, (u8 *)data + PFN_CONFIG_AND_COUNT_SIZE, buflen); + pfn_send_network_blob_fw(wiphy, wdev); + inff_dbg(TRACE, "Exit\n"); + return 0; +} + +int inff_cfg80211_vndr_cmds_get_pfn_status(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + u8 *buf = NULL; + struct inff_bss_info_le *bi = NULL; + int err = 0, i = 0; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + struct network_blob *network_blob_data = NULL; + struct inff_chan ch; + struct pfn_conn_info curr_bssid; + + inff_dbg(TRACE, "Enter\n"); + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + if (cfg->pfn_enable != 1) + return 0; + buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + return err; + } + + *(u32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX); + err = inff_fil_cmd_data_get(ifp, INFF_C_GET_BSS_INFO, + buf, WL_BSS_INFO_MAX); + if (err) { + inff_err("pfn_status buf error:%d\n", err); + return err; + } + bi = (struct inff_bss_info_le *)(buf + 4); + memset(&curr_bssid, '\0', sizeof(struct pfn_conn_info)); + + if (bi->SSID_len > 0) { + memcpy(curr_bssid.SSID, bi->SSID, bi->SSID_len); + memcpy(curr_bssid.BSSID, bi->BSSID, ETH_ALEN); + curr_bssid.SSID_len = bi->SSID_len; + curr_bssid.RSSI = bi->RSSI; + curr_bssid.phy_noise = bi->phy_noise; + ch.chspec = le16_to_cpu(bi->chanspec); + cfg->d11inf.decchspec(&ch); + curr_bssid.channel = ch.control_ch_num; + curr_bssid.SNR = bi->SNR; + + network_blob_data = cfg->pfn_data.network_blob_data; + for (; i < cfg->pfn_data.count && network_blob_data; i++) { + if (!strncmp(network_blob_data->ssid, bi->SSID, bi->SSID_len)) { 
+ curr_bssid.proto = network_blob_data->proto; + curr_bssid.key_mgmt = network_blob_data->key_mgmt; + break; + } + network_blob_data++; + } + } + if (curr_bssid.SSID_len) + inff_cfg80211_vndr_send_cmd_reply(wiphy, (void *)&curr_bssid, + sizeof(struct pfn_conn_info)); + kfree(buf); + inff_dbg(TRACE, "Exit\n"); + return 0; +} + +int inff_cfg80211_vndr_cmds_mchan_config(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + u8 val = *(u8 *)data; + + inff_dbg(TRACE, "enter, config: %d\n", val); + + if (val <= INFF_MCHAN_CONF_AUDIO) + cfg->mchan_conf = val; + + return 0; +} + +int inff_cfg80211_vndr_cmds_ssid_prot(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len) +{ + int ret = 0; + int val = 0; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + const struct nlattr *attr_iter; + int tmp, attr_type = 0; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + nla_for_each_attr(attr_iter, data, len, tmp) { + attr_type = nla_type(attr_iter); + if (attr_type == INFF_VENDOR_ATTR_SSID_PROT_ENABLE) { + val = nla_get_u8(attr_iter); + ret = inff_fil_iovar_int_set(ifp, "ssid_protection", val); + if (ret < 0) + inff_err("Failed set ssid_protection, ret=%d\n", ret); + else + inff_dbg(INFO, "ssid_protection is %s\n", + val ? 
"enabled" : "disabled"); + } + } + return ret; +} + +int +inff_cfg80211_vndr_evt_icmp_echo_req(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len) +{ + struct sk_buff *skb; + + skb = cfg80211_vendor_event_alloc(wiphy, wdev, len, SEVT(ICMP_ECHO_REQ), GFP_KERNEL); + if (!skb) { + inff_dbg(EVENT, "NO MEM: can't allocate skb for vendor ICMP ECHO REQ EVENT\n"); + return -ENOMEM; + } + + if (nla_put(skb, NL80211_ATTR_VENDOR_DATA, len, data)) { + kfree_skb(skb); + inff_err("NO ROOM in skb for ICMP_ECHO_REQ_EVENT\n"); + return -EMSGSIZE; + } + + cfg80211_vendor_event(skb, GFP_KERNEL); + + return 0; +} diff --git a/drivers/net/wireless/infineon/inffmac/vendor_inf.h b/drivers/net/wireless/infineon/inffmac/vendor_inf.h new file mode 100644 index 000000000000..19cd22a80525 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/vendor_inf.h @@ -0,0 +1,767 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2022-2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_VENDOR_INF_H +#define INFF_VENDOR_INF_H + +#include +#include + +/* This file is a registry of identifier assignments from the Infineon + * OUI 00:03:19 for purposes other than MAC address assignment. New identifiers + * can be assigned through normal review process for changes to the upstream + * hostap.git repository. 
+ */ +#define OUI_INF 0x000319 + +#define SCMD(_CMD) INFF_VENDOR_SCMD_##_CMD +#define INFF_SUBCMD(_CMD, _FLAGS, _POLICY, _FN) \ + { \ + .vendor_id = OUI_INF, \ + .subcmd = SCMD(_CMD) \ + }, \ + .flags = (_FLAGS), \ + .policy = (_POLICY), \ + .doit = (_FN) + +#define SEVT(_EVT) INFF_VENDOR_SEVT_##_EVT +#define INFF_SUBEVT(_EVT) \ + { \ + .vendor_id = OUI_INF, \ + .subcmd = SEVT(_EVT) \ + }, + +struct inff_iov_buf { + u16 version; + u16 len; + u16 id; + u16 data[]; +}; + +/* + * enum inff_nl80211_vendor_subcmds - INF nl80211 vendor command identifiers + * + * @INFF_VENDOR_SCMD_UNSPEC: Reserved value 0 + * + * @INFF_VENDOR_SCMD_DCMD: Handle the Dongle commands triggered from the userspace utilities. + * These commands will be passed to the Dongle for processing. + * + * @INFF_VENDOR_SCMD_FRAMEBURST: Control the Frameburst feature. This feature allows more + * efficient use of the airtime between the transmitting and receiving WLAN devices. + * + * @INFF_VENDOR_SCMD_ACS: Configure the Automatic Channel Selection (ACS) feature. + * + * @INFF_VENDOR_SCMD_SET_MAC_P2P_DEV: Set MAC address for a P2P Discovery device. + * Uses Vendor attribute INFF_VENDOR_ATTR_MAC_ADDR to pass the MAC address. + * + * @INFF_VENDOR_SCMD_MUEDCA_OPT: Configure Multi User Enhanced Distrubuted Channel Access (MU-EDCA). + * + * @INFF_VENDOR_SCMD_LDPC: Enable support for handling Low Density Parity Check (LDPC) Coding + * in received payload. + * + * @INFF_VENDOR_SCMD_AMSDU: Control AMSDU aggregation for both TX & RX on all the TID queues. + * + * @INFF_VENDOR_SCMD_TWT: Configure Target Wake Time (TWT) Session with the needed parameters. + * Uses Vendor attributes defined in the enum inff_vendor_attr_twt. + * + * @INFF_VENDOR_SCMD_OCE: Configure the Optimized Connectivity Experience (OCE) functionality + * related parameters. + * + * @INFF_VENDOR_SCMD_BSSCOLOR: Set BSS Color (1-63) for AP Mode operation in HE. + * + * @INFF_VENDOR_SCMD_RAND_MAC: Configure the Random MAC module. 
+ * + * @INFF_VENDOR_SCMD_MBO: Configure Multi Band Operation (MBO) functionality related parameters. + * + * @INFF_VENDOR_SCMD_MPC: Control the Minimum Power Consumption (MPC) feature. + * This is a STA-only power saving feature and not related to 802.11 power save. + * + * @INFF_VENDOR_SCMD_GIANTRX: Allow handling RX MGMT Packts of size 1840 bytes. + * + * @INFF_VENDOR_SCMD_PFN_CONFIG: Send the Preferred Network (PFN) information to the Dongle + * + * @INFF_VENDOR_SCMD_PFN_STATUS: Fetch the Preferred Network (PFN) information from the Dongle + * through the driver. + * + * @INFF_VENDOR_SCMD_WNM: Configure the Wireless Network Management (WNM) 802.11v functionaltiy + * related parameters. + * + * @INFF_VENDOR_SCMD_HWCAPS: Get device's capability. + * + * @INFF_VENDOR_SCMD_CMDSTR: New vendor string infra subcmd to handle user supplied strings. + * String parsing and calling corresponding function handler for a specific command + * given by user. + * + * @INFF_VENDOR_SCMD_SSID_PROT: Vendor command to enable/disable SSID protection + * + * @INFF_VENDOR_SCMD_MAX: This acts as a the tail of cmds list. + * Make sure it located at the end of the list. 
+ */ +enum inff_nl80211_vendor_subcmds { + SCMD(UNSPEC) = 0, + SCMD(DCMD) = 1, + SCMD(RSV2) = 2, + SCMD(RSV3) = 3, + SCMD(RSV4) = 4, + SCMD(RSV5) = 5, + SCMD(FRAMEBURST) = 6, + SCMD(RSV7) = 7, + SCMD(RSV8) = 8, + SCMD(ACS) = 9, + SCMD(SET_MAC_P2P_DEV) = 10, + SCMD(MUEDCA_OPT) = 11, + SCMD(LDPC) = 12, + SCMD(AMSDU) = 13, + SCMD(TWT) = 14, + SCMD(OCE) = 15, + SCMD(BSSCOLOR) = 16, + SCMD(RAND_MAC) = 17, + SCMD(MBO) = 18, + SCMD(MPC) = 19, + SCMD(GIANTRX) = 20, + SCMD(PFN_CONFIG) = 21, + SCMD(PFN_STATUS) = 22, + SCMD(RSV22) = 23, + SCMD(RSV24) = 24, + SCMD(WNM) = 25, + SCMD(HWCAPS) = 26, + SCMD(WNM_WL_CAP) = 27, + SCMD(CMDSTR) = 28, + SCMD(SSID_PROT) = 30, + SCMD(MCHAN_CONFIG) = 31, + SCMD(MAX) = 32 +}; + +/** + * enum inff_nl80211_vendor_events - INFF NL80211 Event identifiers + * + * @INFF_VENDOR_SCMD_UNPEC: Reserved value 0 + * + * @INFF_VENDOR_SCMD_ICMP_ECHO_REQ + * ICMP Echo Request Event + * + * @INFF_VENDOR_SCMD_MAX: This acts as a the tail of cmds list. + * Make sure it located at the end of the list. + * + */ +enum inff_nl80211_vendor_events { + SEVT(UNSPEC) = 0, + SEVT(ICMP_ECHO_REQ) = 2, + SEVT(MAX) = 3 +}; + +/* + * enum inff_vendor_attr - INF nl80211 vendor attributes + * + * @INFF_VENDOR_ATTR_UNSPEC: Reserved value 0 + * + * @INFF_VENDOR_ATTR_LEN: Dongle Command Message Body Length. + * + * @INFF_VENDOR_ATTR_DATA: Dongle Commend Message Body. + * + * @INFF_VENDOR_ATTR_MAC_ADDR: Medium Access Control (MAC) address. + * + * @INFF_VENDOR_ATTR_MAX: This acts as a the tail of attrs list. + * Make sure it located at the end of the list. 
+ */ +enum inff_vendor_attr { + INFF_VENDOR_ATTR_UNSPEC = 0, + INFF_VENDOR_ATTR_LEN = 1, + INFF_VENDOR_ATTR_DATA = 2, + INFF_VENDOR_ATTR_MAC_ADDR = 3, + /* Reserved 4-10 */ + INFF_VENDOR_ATTR_MAX +}; + +#define INFF_MBO_IOV_MAJOR_VER 1 +#define INFF_MBO_IOV_MINOR_VER 1 +#define INFF_MBO_IOV_MAJOR_VER_SHIFT 8 +#define INFF_MBO_IOV_VERSION \ + ((INFF_MBO_IOV_MAJOR_VER << INFF_MBO_IOV_MAJOR_VER_SHIFT) | \ + INFF_MBO_IOV_MINOR_VER) + +enum inff_vendor_attr_mbo_param { + INFF_VENDOR_ATTR_MBO_PARAM_UNSPEC = 0, + INFF_VENDOR_ATTR_MBO_PARAM_OPCLASS = 1, + INFF_VENDOR_ATTR_MBO_PARAM_CHAN = 2, + INFF_VENDOR_ATTR_MBO_PARAM_PREFERENCE = 3, + INFF_VENDOR_ATTR_MBO_PARAM_REASON_CODE = 4, + INFF_VENDOR_ATTR_MBO_PARAM_CELL_DATA_CAP = 5, + INFF_VENDOR_ATTR_MBO_PARAM_COUNTERS = 6, + INFF_VENDOR_ATTR_MBO_PARAM_ENABLE = 7, + INFF_VENDOR_ATTR_MBO_PARAM_SUB_ELEM_TYPE = 8, + INFF_VENDOR_ATTR_MBO_PARAM_BTQ_TRIG_START_OFFSET = 9, + INFF_VENDOR_ATTR_MBO_PARAM_BTQ_TRIG_RSSI_DELTA = 10, + INFF_VENDOR_ATTR_MBO_PARAM_ANQP_CELL_SUPP = 11, + INFF_VENDOR_ATTR_MBO_PARAM_BIT_MASK = 12, + INFF_VENDOR_ATTR_MBO_PARAM_ASSOC_DISALLOWED = 13, + INFF_VENDOR_ATTR_MBO_PARAM_CELLULAR_DATA_PREF = 14, + INFF_VENDOR_ATTR_MBO_PARAM_MAX = 15 +}; + +static const struct nla_policy +inff_vendor_attr_mbo_param_policy[INFF_VENDOR_ATTR_MBO_PARAM_MAX + 1] = { + [INFF_VENDOR_ATTR_MBO_PARAM_UNSPEC] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_OPCLASS] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_CHAN] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_PREFERENCE] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_REASON_CODE] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_CELL_DATA_CAP] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_COUNTERS] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_ENABLE] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_SUB_ELEM_TYPE] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_BTQ_TRIG_START_OFFSET] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_BTQ_TRIG_RSSI_DELTA] = 
{.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_ANQP_CELL_SUPP] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_BIT_MASK] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_ASSOC_DISALLOWED] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_CELLULAR_DATA_PREF] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAM_MAX] = {.type = NLA_U8}, +}; + +enum inff_vendor_attr_mbo { + INFF_VENDOR_ATTR_MBO_UNSPEC, + INFF_VENDOR_ATTR_MBO_CMD, + INFF_VENDOR_ATTR_MBO_PARAMS, + INFF_VENDOR_ATTR_MBO_MAX +}; + +static const struct nla_policy inff_vendor_attr_mbo_policy[INFF_VENDOR_ATTR_MBO_MAX + 1] = { + [INFF_VENDOR_ATTR_MBO_UNSPEC] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_CMD] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_MBO_PARAMS] = + NLA_POLICY_NESTED(inff_vendor_attr_mbo_param_policy), + [INFF_VENDOR_ATTR_MBO_MAX] = {.type = NLA_U8}, +}; + +enum { + INFF_MBO_CMD_ADD_CHAN_PREF = 1, + INFF_MBO_CMD_DEL_CHAN_PREF = 2, + INFF_MBO_CMD_LIST_CHAN_PREF = 3, + INFF_MBO_CMD_CELLULAR_DATA_CAP = 4, + INFF_MBO_CMD_DUMP_COUNTERS = 5, + INFF_MBO_CMD_CLEAR_COUNTERS = 6, + INFF_MBO_CMD_FORCE_ASSOC = 7, + INFF_MBO_CMD_BSSTRANS_REJECT = 8, + INFF_MBO_CMD_SEND_NOTIF = 9, + INFF_MBO_CMD_LAST +}; + +enum { + INFF_MBO_XTLV_OPCLASS = 0x1, + INFF_MBO_XTLV_CHAN = 0x2, + INFF_MBO_XTLV_PREFERENCE = 0x3, + INFF_MBO_XTLV_REASON_CODE = 0x4, + INFF_MBO_XTLV_CELL_DATA_CAP = 0x5, + INFF_MBO_XTLV_COUNTERS = 0x6, + INFF_MBO_XTLV_ENABLE = 0x7, + INFF_MBO_XTLV_SUB_ELEM_TYPE = 0x8, + INFF_MBO_XTLV_BTQ_TRIG_START_OFFSET = 0x9, + INFF_MBO_XTLV_BTQ_TRIG_RSSI_DELTA = 0xa, + INFF_MBO_XTLV_ANQP_CELL_SUPP = 0xb, + INFF_MBO_XTLV_BIT_MASK = 0xc, + INFF_MBO_XTLV_ASSOC_DISALLOWED = 0xd, + INFF_MBO_XTLV_CELLULAR_DATA_PREF = 0xe +}; + +/* + * enum inff_vendor_attr_twt - Attributes for the TWT vendor command + * + * @INFF_VENDOR_ATTR_TWT_UNSPEC: Reserved value 0 + * + * @INFF_VENDOR_ATTR_TWT_OPER: To specify the type of TWT operation + * to be performed. Uses attributes defined in enum inff_twt_oper. 
+ * + * @INFF_VENDOR_ATTR_TWT_PARAMS: Nester attributes representing the + * parameters configured for TWT. These parameters are defined in + * the enum inff_vendor_attr_twt_param. + * + * @INFF_VENDOR_ATTR_TWT_MAX: This acts as a the tail of cmds list. + * Make sure it located at the end of the list. + */ +enum inff_vendor_attr_twt { + INFF_VENDOR_ATTR_TWT_UNSPEC, + INFF_VENDOR_ATTR_TWT_OPER, + INFF_VENDOR_ATTR_TWT_PARAMS, + INFF_VENDOR_ATTR_TWT_MAX +}; + +/* + * enum inff_twt_oper - TWT operation to be specified using the vendor + * attribute INFF_VENDOR_ATTR_TWT_OPER + * + * @INFF_TWT_OPER_UNSPEC: Reserved value 0 + * + * @INFF_TWT_OPER_SETUP: Setup a TWT session. Required parameters are + * obtained through the nested attrs under %INFF_VENDOR_ATTR_TWT_PARAMS. + * + * @INFF_TWT_OPER_TEARDOWN: Teardown the already negotiated TWT session. + * Required parameters are obtained through the nested attrs under + * INFF_VENDOR_ATTR_TWT_PARAMS. + * + * @INFF_TWT_OPER_MAX: This acts as a the tail of the list. + * Make sure it located at the end of the list. + */ +enum inff_twt_oper { + INFF_TWT_OPER_UNSPEC, + INFF_TWT_OPER_SETUP, + INFF_TWT_OPER_TEARDOWN, + INFF_TWT_OPER_MAX +}; + +/* + * enum inff_vendor_attr_twt_param - TWT parameters + * + * @INFF_VENDOR_ATTR_TWT_PARAM_UNSPEC: Reserved value 0 + * + * @INFF_VENDOR_ATTR_TWT_PARAM_NEGO_TYPE: Specifies the type of Negotiation to be + * done during Setup. The four possible types are + * 0 - Individual TWT Negotiation + * 1 - Wake TBTT Negotiation + * 2 - Broadcast TWT in Beacon + * 3 - Broadcast TWT Membership Negotiation + * + * The possible values are defined in the enum inff_twt_param_nego_type + * + * @INFF_VENDOR_ATTR_TWT_PARAM_SETUP_CMD_TYPE: Specifies the type of TWT Setup frame + * when sent by the TWT Requesting STA + * 0 - Request + * 1 - Suggest + * 2 - Demand + * + * when sent by the TWT Responding STA. 
+ * 3 - Grouping + * 4 - Accept + * 5 - Alternate + * 6 - Dictate + * 7 - Reject + * + * The possible values are defined in the enum inff_twt_oper_setup_cmd_type. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_DIALOG_TOKEN: Dialog Token used by the TWT Requesting STA to + * identify the TWT Setup request/response transaction. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_WAKE_TIME: Target Wake Time TSF at which the STA has to wake up. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_WAKE_TIME_OFFSET: Target Wake Time TSF Offset from current TSF + * in microseconds. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_MIN_WAKE_DURATION: Nominal Minimum TWT Wake Duration. + * Used along with %INFF_VENDOR_ATTR_TWT_PARAM_MIN_WAKE_DURATION_UNIT to derive Wake Duration. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_WAKE_INTVL_EXPONENT: TWT Wake Interval Exponent. + * Used along with %INFF_VENDOR_ATTR_TWT_PARAM_WAKE_INTVL_MANTISSA to derive Wake Interval. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_WAKE_INTVL_MANTISSA: TWT Wake Interval Mantissa. + * Used along with %INFF_VENDOR_ATTR_TWT_PARAM_WAKE_INTVL_EXPONENT to derive Wake Interval. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_REQUESTOR: Specify this is a TWT Requesting / Responding STA. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_TRIGGER: Specify Trigger based / Non-Trigger based TWT Session. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_IMPLICIT: Specify Implicit / Explicit TWT session. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_FLOW_TYPE: Specify Un-Announced / Announced TWT session. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_FLOW_ID: Flow ID is the unique identifier of an iTWT session. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_BCAST_TWT_ID: Broadcast TWT ID is the unique identifier of a + * bTWT session. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_PROTECTION: Specifies whether Tx within SP is protected. + * Set to 1 to indicate that TXOPs within the TWT SPs shall be initiated + * with a NAV protection mechanism, such as (MU) RTS/CTS or CTS-to-self frame; + * otherwise, it shall set it to 0. 
+ * + * @INFF_VENDOR_ATTR_TWT_PARAM_CHANNEL: TWT channel field which is set to 0, unless + * the HE STA sets up a subchannel selective transmission operation. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_TWT_INFO_FRAME_DISABLED: TWT Information frame RX handing + * disabled / enabled. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_MIN_WAKE_DURATION_UNIT: Nominal Minimum TWT Wake Duration + * Unit. 0 represents unit in "256 usecs" and 1 represents unit in "TUs". + * + * @INFF_VENDOR_ATTR_TWT_PARAM_TEARDOWN_ALL_TWT: Teardown all negotiated TWT sessions. + * + * @INFF_VENDOR_ATTR_TWT_PARAM_MAX: This acts as a the tail of the list. + * Make sure it located at the end of the list. + */ +enum inff_vendor_attr_twt_param { + INFF_VENDOR_ATTR_TWT_PARAM_UNSPEC, + INFF_VENDOR_ATTR_TWT_PARAM_NEGO_TYPE, + INFF_VENDOR_ATTR_TWT_PARAM_SETUP_CMD_TYPE, + INFF_VENDOR_ATTR_TWT_PARAM_DIALOG_TOKEN, + INFF_VENDOR_ATTR_TWT_PARAM_WAKE_TIME, + INFF_VENDOR_ATTR_TWT_PARAM_WAKE_TIME_OFFSET, + INFF_VENDOR_ATTR_TWT_PARAM_MIN_WAKE_DURATION, + INFF_VENDOR_ATTR_TWT_PARAM_WAKE_INTVL_EXPONENT, + INFF_VENDOR_ATTR_TWT_PARAM_WAKE_INTVL_MANTISSA, + INFF_VENDOR_ATTR_TWT_PARAM_REQUESTOR, + INFF_VENDOR_ATTR_TWT_PARAM_TRIGGER, + INFF_VENDOR_ATTR_TWT_PARAM_IMPLICIT, + INFF_VENDOR_ATTR_TWT_PARAM_FLOW_TYPE, + INFF_VENDOR_ATTR_TWT_PARAM_FLOW_ID, + INFF_VENDOR_ATTR_TWT_PARAM_BCAST_TWT_ID, + INFF_VENDOR_ATTR_TWT_PARAM_PROTECTION, + INFF_VENDOR_ATTR_TWT_PARAM_CHANNEL, + INFF_VENDOR_ATTR_TWT_PARAM_TWT_INFO_FRAME_DISABLED, + INFF_VENDOR_ATTR_TWT_PARAM_MIN_WAKE_DURATION_UNIT, + INFF_VENDOR_ATTR_TWT_PARAM_TEARDOWN_ALL_TWT, + INFF_VENDOR_ATTR_TWT_PARAM_MAX +}; + +/* + * enum inff_twt_param_nego_type - TWT Session Negotiation Type Parameters + * + * @INFF_TWT_PARAM_NEGO_TYPE_ITWT: Individual TWT negotiation between TWT requesting STA + * and TWT responding STA or individual TWT announcement by TWT Responder + * + * @INFF_TWT_PARAM_NEGO_TYPE_WAKE_TBTT: Wake TBTT and Wake interval negotiation between + * TWT scheduled STA and TWT scheduling 
AP. + * + * @INFF_TWT_PARAM_NEGO_TYPE_BTWT_IE_BCN: Provide Broadcast TWT schedules to TWT scheduled + * STAs by including the TWT element in broadcast Managemnet frames sent by TWT + * scheduling AP. + * + * @INFF_TWT_PARAM_NEGO_TYPE_BTWT: Broadcast TWT negotiation between TWT requesting STA + * and TWT responding STA. Manage Memberships in broadcast TWT schedules by including + * the TWT element in individually addressed Management frames sent by either a TWT + * scheduled STA or a TWT scheduling AP. + * + * @INFF_TWT_PARAM_NEGO_TYPE_MAX: This acts as a the tail of the list. + * Make sure it located at the end of the list. + */ +enum inff_twt_param_nego_type { + INFF_TWT_PARAM_NEGO_TYPE_INVALID = -1, + INFF_TWT_PARAM_NEGO_TYPE_ITWT = 0, + INFF_TWT_PARAM_NEGO_TYPE_WAKE_TBTT = 1, + INFF_TWT_PARAM_NEGO_TYPE_BTWT_IE_BCN = 2, + INFF_TWT_PARAM_NEGO_TYPE_BTWT = 3, + INFF_TWT_PARAM_NEGO_TYPE_MAX = 4 +}; + +/* + * enum inff_vendor_attr_twt_param - TWT Session setup command types + * + * @INFF_TWT_OPER_SETUP_CMD_TYPE_REQUEST: A TWT requesting or TWT scheduled STA + * requests to join a TWT without specifying a target wake time. This type needs to + * be used only by the TWT requesting STA. + * + * @INFF_TWT_OPER_SETUP_CMD_TYPE_SUGGEST: A TWT requesting or TWT scheduled STA requests to + * join a TWT without specifying a target wake time. This type needs to be used only + * by the TWT requesting STA. + * + * @INFF_TWT_OPER_SETUP_CMD_TYPE_DEMAND: A TWT requesting or TWT scheduled STA requests to + * join a TWT and specifies a demanded set of TWT parameters. If the demanded set of + * TWT parameters is not accommodated by the responding STA or TWT scheduling AP, then + * the TWT requesting STA or TWT scheduled STA will reject the TWT setup. This type + * needs to be used only by the TWT requesting STA. 
+ * + * @INFF_TWT_OPER_SETUP_CMD_TYPE_GROUPING: The TWT responding STA suggests TWT group + * parameters that are different from the suggested or demanded TWT parameters of the + * TWT requesting STA. This type needs to be used only by the S1G TWT Responding STA in + * case of ITWT Setup Negotiation. + * + * @INFF_TWT_OPER_SETUP_CMD_TYPE_ACCEPT: A TWT responding STA or TWT scheduling AP accepts + * the TWT request with the TWT parameters (see NOTE) indicated in the TWT element + * transmitted by the TWT requesting STA or TWT scheduled STA. This value is also used + * in unsolicited TWT responses. This needs type needs to be used only by the TWT + * responding STA. + * + * @INFF_TWT_OPER_SETUP_CMD_TYPE_ALTERNATE: A TWT responding STA or TWT scheduling AP suggests + * TWT parameters that are different from those suggested by the TWT requesting STA or + * TWT scheduled STA. This needs type needs to be used only by the TWT reponding STA. + * + * @INFF_TWT_OPER_SETUP_CMD_TYPE_DICTATE: A TWT responding STA or TWT scheduling AP indicates + * TWT parameters that are different from those suggested by the TWT requesting STA or + * TWT scheduled STA. This needs type needs to be used only by the TWT responding STA. + * + * @INFF_TWT_OPER_SETUP_CMD_TYPE_REJECT: A TWT responding STA or TWT scheduling AP rejects + * setup, or a TWT scheduling AP terminates an existing broadcast TWT, or a TWT + * scheduled STA terminates its membership in a broadcast TWT. + * + * @INFF_TWT_OPER_SETUP_CMD_TYPE_MAX: This acts as a the tail of the list. + * Make sure it located at the end of the list. 
+ */ +enum inff_twt_oper_setup_cmd_type { + INFF_TWT_OPER_SETUP_CMD_TYPE_INVALID = -1, + INFF_TWT_OPER_SETUP_CMD_TYPE_REQUEST = 0, + INFF_TWT_OPER_SETUP_CMD_TYPE_SUGGEST = 1, + INFF_TWT_OPER_SETUP_CMD_TYPE_DEMAND = 2, + INFF_TWT_OPER_SETUP_CMD_TYPE_GROUPING = 3, + INFF_TWT_OPER_SETUP_CMD_TYPE_ACCEPT = 4, + INFF_TWT_OPER_SETUP_CMD_TYPE_ALTERNATE = 5, + INFF_TWT_OPER_SETUP_CMD_TYPE_DICTATE = 6, + INFF_TWT_OPER_SETUP_CMD_TYPE_REJECT = 7, + INFF_TWT_OPER_SETUP_CMD_TYPE_MAX = 8 +}; + +#define INFF_OCE_IOV_MAJOR_VER 1 +#define INFF_OCE_IOV_MINOR_VER 1 +#define INFF_OCE_IOV_MAJOR_VER_SHIFT 8 +#define INFF_OCE_IOV_VERSION \ + ((INFF_OCE_IOV_MAJOR_VER << INFF_OCE_IOV_MAJOR_VER_SHIFT) | \ + INFF_OCE_IOV_MINOR_VER) + +enum { + INFF_OCE_CMD_ENABLE = 1, + INFF_OCE_CMD_PROBE_DEF_TIME = 2, + INFF_OCE_CMD_FD_TX_PERIOD = 3, + INFF_OCE_CMD_FD_TX_DURATION = 4, + INFF_OCE_CMD_RSSI_TH = 5, + INFF_OCE_CMD_RWAN_LINKS = 6, + INFF_OCE_CMD_CU_TRIGGER = 7, + INFF_OCE_CMD_LAST +}; + +enum { + INFF_OCE_XTLV_ENABLE = 0x1, + INFF_OCE_XTLV_PROBE_DEF_TIME = 0x2, + INFF_OCE_XTLV_FD_TX_PERIOD = 0x3, + INFF_OCE_XTLV_FD_TX_DURATION = 0x4, + INFF_OCE_XTLV_RSSI_TH = 0x5, + INFF_OCE_XTLV_RWAN_LINKS = 0x6, + INFF_OCE_XTLV_CU_TRIGGER = 0x7 +}; + +static const struct nla_policy +inff_vendor_attr_twt_param_policy[INFF_VENDOR_ATTR_TWT_PARAM_MAX + 1] = { + [INFF_VENDOR_ATTR_TWT_PARAM_UNSPEC] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_NEGO_TYPE] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_SETUP_CMD_TYPE] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_DIALOG_TOKEN] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_WAKE_TIME] = {.type = NLA_U64}, + [INFF_VENDOR_ATTR_TWT_PARAM_WAKE_TIME_OFFSET] = {.type = NLA_U64}, + [INFF_VENDOR_ATTR_TWT_PARAM_MIN_WAKE_DURATION] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_WAKE_INTVL_EXPONENT] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_WAKE_INTVL_MANTISSA] = {.type = NLA_U16}, + [INFF_VENDOR_ATTR_TWT_PARAM_REQUESTOR] = {.type = NLA_U8}, + 
[INFF_VENDOR_ATTR_TWT_PARAM_TRIGGER] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_IMPLICIT] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_FLOW_TYPE] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_FLOW_ID] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_BCAST_TWT_ID] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_PROTECTION] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_CHANNEL] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_TWT_INFO_FRAME_DISABLED] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_MIN_WAKE_DURATION_UNIT] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_TEARDOWN_ALL_TWT] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAM_MAX] = {.type = NLA_U8}, +}; + +static const struct nla_policy inff_vendor_attr_twt_policy[INFF_VENDOR_ATTR_TWT_MAX + 1] = { + [INFF_VENDOR_ATTR_TWT_UNSPEC] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_OPER] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_TWT_PARAMS] = + NLA_POLICY_NESTED(inff_vendor_attr_twt_param_policy), + [INFF_VENDOR_ATTR_TWT_MAX] = {.type = NLA_U8}, +}; + +/* randmac define/enum/struct + */ +#define WL_RANDMAC_API_VERSION 0x0100 /**< version 1.0 */ +#define WL_RANDMAC_API_MIN_VERSION 0x0100 /**< version 1.0 */ + +/** subcommands that can apply to randmac */ +enum { + WL_RANDMAC_SUBCMD_NONE = 0, + WL_RANDMAC_SUBCMD_GET_VERSION = 1, + WL_RANDMAC_SUBCMD_ENABLE = 2, + WL_RANDMAC_SUBCMD_DISABLE = 3, + WL_RANDMAC_SUBCMD_CONFIG = 4, + WL_RANDMAC_SUBCMD_STATS = 5, + WL_RANDMAC_SUBCMD_CLEAR_STATS = 6, + WL_RANDMAC_SUBCMD_MAX +}; + +struct inff_randmac { + u16 version; + u16 len; /* total length */ + u16 subcmd_id; /* subcommand id */ + u8 data[0]; /* subcommand data */ +}; + +enum inff_vendor_attr_wnm_param { + INFF_VENDOR_ATTR_WNM_PARAM_UNSPEC, + INFF_VENDOR_ATTR_WNM_PARAM_GET_INFO, + INFF_VENDOR_ATTR_WNM_PARAM_IDLE_PERIOD, + INFF_VENDOR_ATTR_WNM_PARAM_PROTECTION_OPT, + INFF_VENDOR_ATTR_WNM_PARAM_MAX +}; + +static const struct nla_policy +inff_vendor_attr_wnm_param_policy[INFF_VENDOR_ATTR_WNM_PARAM_MAX 
+ 1] = { + [INFF_VENDOR_ATTR_WNM_PARAM_UNSPEC] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_WNM_PARAM_GET_INFO] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_WNM_PARAM_IDLE_PERIOD] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_WNM_PARAM_PROTECTION_OPT] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_WNM_PARAM_MAX] = {.type = NLA_U8}, +}; + +enum inff_vendor_attr_wnm { + INFF_VENDOR_ATTR_WNM_UNSPEC, + INFF_VENDOR_ATTR_WNM_CMD, + INFF_VENDOR_ATTR_WNM_PARAMS, + INFF_VENDOR_ATTR_WNM_MAX +}; + +enum inff_vendor_hw_caps { + INFF_VENDOR_HW_CAPS_REPLAYCNTS, + INFF_VENDOR_HW_CAPS_MAX +}; + +static const char * const hw_caps_name[] = { + [INFF_VENDOR_HW_CAPS_REPLAYCNTS] = "replay counters" +}; + +static const struct nla_policy inff_vendor_attr_wnm_policy[INFF_VENDOR_ATTR_WNM_MAX + 1] = { + [INFF_VENDOR_ATTR_WNM_UNSPEC] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_WNM_CMD] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_WNM_PARAMS] = + NLA_POLICY_NESTED(inff_vendor_attr_wnm_param_policy), + [INFF_VENDOR_ATTR_WNM_MAX] = {.type = NLA_U8}, +}; + +enum { + INFF_WNM_CMD_IOV_WNM = 1, + INFF_WNM_CMD_IOV_WNM_MAXIDLE = 2, + INFF_WNM_CMD_IOV_WNM_TIMBC_OFFSET = 3, + INFF_WNM_CMD_IOV_WNM_BSSTRANS_URL = 4, + INFF_WNM_CMD_IOV_WNM_BSSTRANS_REQ = 5, + INFF_WNM_CMD_IOV_WNM_TFS_TCLASTYPE = 6, + INFF_WNM_CMD_IOV_WNM_PARP_DISCARD = 7, + INFF_WNM_CMD_IOV_WNM_PARP_ALLNODE = 8, + INFF_WNM_CMD_IOV_WNM_TIMBC_SET = 9, + INFF_WNM_CMD_IOV_WNM_TIMBC_STATUS = 10, + INFF_WNM_CMD_IOV_WNM_DMS_SET = 11, + INFF_WNM_CMD_IOV_WNM_DMS_TERM = 12, + INFF_WNM_CMD_IOV_WNM_SERVICE_TERM = 13, + INFF_WNM_CMD_IOV_WNM_SLEEP_INTV = 14, + INFF_WNM_CMD_IOV_WNM_SLEEP_MODE = 15, + INFF_WNM_CMD_IOV_WNM_BSSTRANS_QUERY = 16, + INFF_WNM_CMD_IOV_WNM_BSSTRANS_RESP = 17, + INFF_WNM_CMD_IOV_WNM_TCLAS_ADD = 18, + INFF_WNM_CMD_IOV_WNM_TCLAS_DEL = 19, + INFF_WNM_CMD_IOV_WNM_TCLAS_LIST = 20, + INFF_WNM_CMD_IOV_WNM_DMS_STATUS = 21, + INFF_WNM_CMD_IOV_WNM_KEEPALIVES_MAX_IDLE = 22, + INFF_WNM_CMD_IOV_WNM_PM_IGNORE_BCMC = 23, + INFF_WNM_CMD_IOV_WNM_DMS_DEPENDENCY = 24, + 
INFF_WNM_CMD_IOV_WNM_BSSTRANS_ROAMTHROTTLE = 25, + INFF_WNM_CMD_IOV_WNM_TFS_SET = 26, + INFF_WNM_CMD_IOV_WNM_TFS_TERM = 27, + INFF_WNM_CMD_IOV_WNM_TFS_STATUS = 28, + INFF_WNM_CMD_IOV_WNM_BTQ_NBR_ADD = 29, + INFF_WNM_CMD_IOV_WNM_BTQ_NBR_DEL = 30, + INFF_WNM_CMD_IOV_WNM_BTQ_NBR_LIST = 31, + INFF_WNM_CMD_IOV_WNM_BSSTRANS_RSSI_RATE_MAP = 32, + INFF_WNM_CMD_IOV_WNM_KEEPALIVE_PKT_TYPE = 33, + INFF_WNM_CONFIG_CMD_IOV_WNM_TYPE_MAX +}; + +struct inff_maxidle_wnm { + u8 get_info; + int period; + int protect; +}; + +enum inff_vendor_attr_ssid_prot { + INFF_VENDOR_ATTR_SSID_PROT_UNSPEC, + INFF_VENDOR_ATTR_SSID_PROT_ENABLE, + INFF_VENDOR_ATTR_SSID_PROT_MAX +}; + +static const struct nla_policy +inff_vendor_attr_ssid_prot_policy[INFF_VENDOR_ATTR_SSID_PROT_MAX + 1] = { + [INFF_VENDOR_ATTR_SSID_PROT_UNSPEC] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_SSID_PROT_ENABLE] = {.type = NLA_U8}, + [INFF_VENDOR_ATTR_SSID_PROT_MAX] = {.type = NLA_U8}, +}; + +/* String based vendor commands infra + */ +#define VNDR_CMD_STR_NUM 15 +#define VNDR_CMD_STR_MAX_LEN 50 +#define VNDR_CMD_VAL_NUM 50 +#define VNDR_CMD_HASH_BITS 4 + +struct inff_vendor_cmdstr { + const char *name; + int (*func)(struct wiphy *wiphy, struct wireless_dev *wdev, + char cmd_str[VNDR_CMD_STR_NUM][VNDR_CMD_STR_MAX_LEN], + long cmd_val[VNDR_CMD_VAL_NUM]); +}; + +struct inff_vndr_cmdstr_hashtbl { + struct inff_vendor_cmdstr *vndr_cmd_addr; + struct hlist_node node; +}; + +int inff_cfg80211_vndr_cmds_twt(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len); +int inff_cfg80211_vndr_cmds_bss_color(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len); +int inff_cfg80211_vndr_cmds_muedca_opt(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len); +int inff_cfg80211_vndr_cmds_amsdu(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len); +int inff_cfg80211_vndr_cmds_ldpc_cap(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void 
*data, int len); +int inff_cfg80211_vndr_cmds_oce_enable(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len); +int inff_cfg80211_vndr_cmds_randmac(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len); +int inff_cfg80211_vndr_cmds_mbo(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len); +int inff_cfg80211_vndr_cmds_mpc(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len); +int inff_cfg80211_vndr_cmds_giantrx(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len); +int inff_cfg80211_vndr_cmds_wnm_max_idle(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len); +int inff_cfg80211_vndr_cmds_hwcaps(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len); +int inff_cfg80211_vndr_cmds_wnm_wl_cap(struct wiphy *wiphy, + struct wireless_dev *wdev, + const void *data, int len); +int inff_vndr_cmdstr_offload_config(struct wiphy *wiphy, struct wireless_dev *wdev, + char cmd_str[VNDR_CMD_STR_NUM][VNDR_CMD_STR_MAX_LEN], + long cmd_val[VNDR_CMD_VAL_NUM]); +int inff_vndr_cmdstr_mkeep_alive(struct wiphy *wiphy, struct wireless_dev *wdev, + char cmd_str[VNDR_CMD_STR_NUM][VNDR_CMD_STR_MAX_LEN], + long *cmd_val); +int inff_vndr_cmdstr_tko(struct wiphy *wiphy, struct wireless_dev *wdev, + char cmd_str[VNDR_CMD_STR_NUM][VNDR_CMD_STR_MAX_LEN], + long *cmd_val); +int inff_vndr_cmdstr_icmp_echo_req(struct wiphy *wiphy, struct wireless_dev *wdev, + char cmd_str[VNDR_CMD_STR_NUM][VNDR_CMD_STR_MAX_LEN], + long *cmd_val); +int inff_cfg80211_vndr_cmds_str(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len); +int inff_cfg80211_vndr_cmds_config_pfn(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len); +int inff_cfg80211_vndr_cmds_get_pfn_status(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len); +int inff_cfg80211_vndr_cmds_mchan_config(struct wiphy 
*wiphy, + struct wireless_dev *wdev, const void *data, int len); +int inff_cfg80211_vndr_cmds_ssid_prot(struct wiphy *wiphy, + struct wireless_dev *wdev, const void *data, int len); +int inff_cfg80211_vndr_evt_icmp_echo_req(struct wiphy *wiphy, struct wireless_dev *wdev, + const void *data, int len); + +#endif /* INFF_VENDOR_INF_H */ -- 2.25.1 Driver implementation to collect the EHT capabilities of the Device and register it to the wiphy capabilities in the cfg80211 driver. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/eht.c | 86 +++++++++++++++++++++ drivers/net/wireless/infineon/inffmac/eht.h | 42 ++++++++++ 2 files changed, 128 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/eht.c create mode 100644 drivers/net/wireless/infineon/inffmac/eht.h diff --git a/drivers/net/wireless/infineon/inffmac/eht.c b/drivers/net/wireless/infineon/inffmac/eht.c new file mode 100644 index 000000000000..78434c10713b --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/eht.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include +#include "bus.h" +#include "chip.h" +#include "eht.h" +#include "feature.h" +#include "fwil.h" +#include "cfg80211.h" +#include "debug.h" +#include "xtlv.h" + +int +inff_eht_mlo_get_enable(struct inff_if *ifp, u8 *param, int param_len) +{ + s32 ret = 0; + + ret = inff_fil_xtlv_data_get(ifp, "mlo", INFF_EHT_MLO_CMD_ENAB, param, param_len); + if (unlikely(ret)) + iphy_err(ifp->drvr, "failed to check if EHT MLO is enabled"); + + return ret; +} + +void +inff_eht_update_wiphy_cap(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + struct wiphy *wiphy = drvr->wiphy; + struct ieee80211_supported_band *band; + struct ieee80211_sband_iftype_data *data; + struct inff_bus *bus_if; + struct inff_chip *ci; + struct inff_chip_mlo_priv *chip_mlo_priv; + u8 eht_mlo_mode; + u8 i; + s32 ret = 0; + + /* EHT MLO mode */ + if (!inff_feat_is_enabled(ifp, INFF_FEAT_MLO)) + return; + ret = inff_eht_mlo_get_enable(ifp, &eht_mlo_mode, sizeof(eht_mlo_mode)); + if (ret || !eht_mlo_mode) + return; + bus_if = drvr->bus_if; + ci = bus_if->chip_pub; + chip_mlo_priv = &ci->chip_mlo_priv; + + if (!chip_mlo_priv->get_eht_cap) + return; + + inff_dbg(INFO, "EHT MLO Enabled\n"); + + /* Update HE Capab for each Band */ + for (i = 0; i < ARRAY_SIZE(wiphy->bands); i++) { + band = wiphy->bands[i]; + if (!band) + continue; + + data = (struct ieee80211_sband_iftype_data *)band->iftype_data; + + switch (band->band) { + case NL80211_BAND_6GHZ: + if (!inff_feat_is_6ghz_enabled(ifp)) + break; + /* Band 6GHz supports EHT, so */ + fallthrough; + + case NL80211_BAND_5GHZ: + /* Band 5GHz supports EHT, so */ + fallthrough; + + case NL80211_BAND_2GHZ: + /* Band 2.4GHz supports EHT, so */ + chip_mlo_priv->get_eht_cap(band, data); + break; + + default: + break; + } + } +} diff --git a/drivers/net/wireless/infineon/inffmac/eht.h b/drivers/net/wireless/infineon/inffmac/eht.h new file mode 100644 index 000000000000..a2850f1a8bfc --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/eht.h @@ 
-0,0 +1,42 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_EHT_H +#define INFF_EHT_H + +#include "core.h" + +#define EHT_MAC_CAP_INFO_SIZE 2 +#define EHT_PHY_CAP_INFO_SIZE 9 + +/** + * enum inff_eht_mlo_cmd - EHT iovar subcmds handled by firmware EHT module + */ +/* EHT MLO sub command IDs */ +enum inff_eht_mlo_cmd { + INFF_EHT_MLO_CMD_ENAB = 0, /* enable/disable MLO feature as a whole */ + INFF_EHT_MLO_CMD_CONFIG, /* configure MLO feature - bsscfg specific */ + INFF_EHT_MLO_CMD_STATUS, /* status on MLO feature - interface specific */ + INFF_EHT_MLO_CMD_EMLSR_CTRL, /* emlsr control - interface specific */ + INFF_EHT_MLO_CMD_TID_MAP, /* configure TID-To-Link Mapping */ + INFF_EHT_MLO_CMD_CAP, /* capability of MLO feature as a whole */ + INFF_EHT_MLO_CMD_ACT_LINK_BMAP, /* Set active link for MLO TX and RX */ + INFF_EHT_MLO_CMD_MULTILINK_ACTIVE, /* Set use of multi links in MLO mode */ + INFF_EHT_MLO_CMD_LINK_PS_BMAP, /* Modify PS state of a particular link in MLO. */ + INFF_EHT_MLO_CMD_LINK_DORMANT_BMAP, /* Bitmap to configure dormant state for links */ + INFF_EHT_MLO_CMD_REC_LINK_BMAP, /* Bitmap to configure recommended links */ + INFF_EHT_MLO_CMD_CONFIG_PREF, /* Configure mlo mode and band preferences */ + INFF_EHT_MLO_CMD_MAX_MLO_LINKS, /* set/get max MLO links supported */ + INFF_EHT_MLO_CMD_FEATURE_EN, /* Enable/Disable a given feature */ + INFF_EHT_MLO_CMD_NPLINK_CONFIG, /* configure nplink op upon offchannel of plink */ + INFF_EHT_MLO_CMD_STATS, /* stats on MLO feature */ + /* Add new sub command IDs here... 
*/ +}; + +int inff_eht_mlo_get_enable(struct inff_if *ifp, u8 *param, int param_len); +void inff_eht_update_wiphy_cap(struct inff_if *ifp); + +#endif /* INFF_EHT_H */ -- 2.25.1 Implements the driver debugging infrastructure for logging important event or state information in a ring buffer, which is helpful in debugging intermittent issues that happened in the driver if kernel debug mechanisms were not enabled. Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/logger.c | 189 ++++++++++++++++++ .../net/wireless/infineon/inffmac/logger.h | 133 ++++++++++++ 2 files changed, 322 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/logger.c create mode 100644 drivers/net/wireless/infineon/inffmac/logger.h diff --git a/drivers/net/wireless/infineon/inffmac/logger.c b/drivers/net/wireless/infineon/inffmac/logger.c new file mode 100644 index 000000000000..7a1cf8c90dee --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/logger.c @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ +#include +#include "core.h" +#include "debug.h" +#include "logger.h" + +/** + * inff_logring_fill() - Dump all the data in the logring. + * + * @drvr: Driver Context. + * @ringid: Logring ID. + */ +void inff_logring_dump(struct inff_pub *drvr, u8 ringid) +{ + struct inff_logger *logger = drvr->logger; + struct inff_logring *logring = &logger->logring[ringid]; + + if (!logring->enabled) + return; + + do { + if (logring->dump_cb) + logring->dump_cb(&logring->buffer[logring->curr], + logring->item_size); + + logring->curr = (logring->curr + logring->item_size) % + (logger->logring_depth * logring->item_size); + } while (logring->curr != logring->head); +} + +/** + * inff_logring_fill() - Fill the logring with data + * + * @drvr: Driver Context. + * @ringid: Logring ID. + * @data: data to be logged. 
+ * @size: size of the data to be logged. + */ +void inff_logring_fill(struct inff_pub *drvr, u8 ringid, u8 *data, u32 size) +{ + struct inff_logger *logger = drvr->logger; + struct inff_logring *logring = &logger->logring[ringid]; + u8 *logring_buf; + + if (!logring->enabled) + return; + + logring_buf = &logring->buffer[logring->curr]; + + memset(logring_buf, 0, logring->item_size); + + /* Log timestamp */ + *(u64 *)logring_buf = (u64)ktime_to_ns(ktime_get_boottime()); + logring_buf += sizeof(u64); + + /* Log Data */ + memcpy(logring_buf, data, size); + + logring->curr = (logring->curr + logring->item_size) % + (logger->logring_depth * logring->item_size); + + /* If logring becomes full, dump the ringitem contents before overwrite */ + if (logring->curr == logring->head) + inff_logring_dump(drvr, ringid); +} + +/** + * inff_logring_init() - Logring Initialization + * + * @drvr: Driver Context. + * @ringid: Logring ID. + * @item_size: Size of individual items in the logring. + * @dump_cb: Callback function that dumps the ring item contents. + * + * Return success or failure. + */ +int inff_logring_init(struct inff_pub *drvr, u8 ringid, u32 item_size, + dump_callback_t dump_cb) +{ + struct inff_logger *logger = drvr->logger; + struct inff_logring *logring = &logger->logring[ringid]; + + if (!(logger->level & BIT(ringid))) + return 0; + + logring->buffer = kzalloc(item_size * logger->logring_depth, GFP_KERNEL); + if (!logring->buffer) + return -ENOMEM; + + logring->item_size = item_size; + logring->curr = 0; + logring->head = 0; + logring->ringid = ringid; + logring->dump_cb = dump_cb; + logring->enabled = true; + + return 0; +} + +/** + * inff_logring_deinit() - Logring De-initialization + * + * @drvr: Driver Context. + * @ringid: Logring ID. 
+ */ +void inff_logring_deinit(struct inff_pub *drvr, u8 ringid) +{ + struct inff_logger *logger = drvr->logger; + struct inff_logring *logring; + + if (!logger || + !(logger->level & BIT(ringid))) + return; + + logring = &logger->logring[ringid]; + + if (logring->enabled) { + logring->enabled = false; + kfree(logring->buffer); + logring->buffer = NULL; + } +} + +/** + * inff_logger_attach() - allocate a context for the logger. + * + * @drvr: driver instance. + * @logger_level: Debug logger log levels. + * @logring_depth: Debug logring depth + * + * Returns success or failure + */ +int inff_logger_attach(struct inff_pub *drvr, u32 logger_level, u32 logring_depth) +{ + struct inff_logger *logger; + s32 ret; + + logger = kzalloc(sizeof(*logger), GFP_KERNEL); + if (!logger) + return -ENOMEM; + + logger->level = logger_level; + logger->logring_depth = logring_depth; + + drvr->logger = logger; + + ret = inff_logring_init(drvr, INFF_LOGRING_FW_CMD_SET, + sizeof(struct inff_logring_fw_cmd_item), NULL); + if (ret) { + inff_err("Logger: FW_CMD_SET logring initialization failed ret=%d\n", ret); + goto detach; + } + + ret = inff_logring_init(drvr, INFF_LOGRING_FW_CMD_GET, + sizeof(struct inff_logring_fw_cmd_item), NULL); + if (ret) { + inff_err("Logger: FW_CMD_GET logring initialization failed ret=%d\n", ret); + goto detach; + } + + return 0; +detach: + inff_logger_detach(drvr); + return ret; +} + +/** + * inff_logger_detach() - Delloaction of the logger context + * + * @drvr: Driver instance. 
+ */ +void inff_logger_detach(struct inff_pub *drvr) +{ + struct inff_logger *logger = drvr->logger; + + if (!logger) + return; + + inff_logring_deinit(drvr, INFF_LOGRING_FW_CMD_SET); + + inff_logring_deinit(drvr, INFF_LOGRING_FW_CMD_GET); + + kfree(logger); + drvr->logger = NULL; +} diff --git a/drivers/net/wireless/infineon/inffmac/logger.h b/drivers/net/wireless/infineon/inffmac/logger.h new file mode 100644 index 000000000000..11b73df5fea7 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/logger.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_LOGGER_H +#define INFF_LOGGER_H + +#include +#include + +/** + * enum inff_logrings - Logring types + * + * @INFF_LOGRING_FW_EVENT - Logs all the events received from the firmware. + * + * @INFF_LOGRING_FW_CMD_SET - Logs all the SET CMD sent to the firmware. + * + * @INFF_LOGRING_FW_CMD_GET - Logs all the GET CMD sent to the firmware. + */ +enum inff_logrings { + INFF_LOGRING_FW_EVENT, + INFF_LOGRING_FW_CMD_SET, + INFF_LOGRING_FW_CMD_GET, + INFF_LOGRING_MAX, +}; + +#define INFF_LOGGER_LEVEL_FW_EVENT BIT(INFF_LOGRING_FW_EVENT) +#define INFF_LOGGER_LEVEL_FW_CMD_SET BIT(INFF_LOGRING_FW_CMD_SET) +#define INFF_LOGGER_LEVEL_FW_CMD_GET BIT(INFF_LOGRING_FW_CMD_GET) + +#define INFF_LOGGER_LEVEL_DEFAULT 0x1 +#define INFF_LOGRING_DEPTH_DEFAULT 32 + +/** + * struct inff_logring_fw_event_item - Firmware event logring item + * + * @timestamp: firmware event received time stamp. + * @emsg: firmware event message. + */ +struct inff_logring_fw_event_item { + u64 timestamp; + struct inff_event_msg emsg; +}; + +/** + * struct inff_logring_fw_cmd_item - Firmware cmd logring item + * + * @timestamp: firmware cmd sent time stamp. + * @cmdid: firmware cmd ID. + * @cmdstr: firmware cmd string. 
+ */ +struct inff_logring_fw_cmd_item { + u64 timestamp; + u32 cmdid; + u8 cmdstr[64]; +}; + +typedef void (*dump_callback_t) (u8 *data, u32 size); + +/** + * struct inff_logring - Debug Log ring structure. + * + * @enabled: Ring is enabled/disabled. + * @ringid: unique identifier of the ring. + * @curr: Current Index in the Ring. + * @head: Head Index in the Ring. + * @item_size: logring item size. + * @buffer: Buffer for holding data. + * @dump_cb: Dump callback function. + */ +struct inff_logring { + bool enabled; + u8 ringid; + u32 curr; + u32 head; + u32 item_size; + u8 *buffer; + dump_callback_t dump_cb; +}; + +/** + * struct inff_logger - Debug Logger structure. + * + * @level: Logging level. + * @logring_depth: Count of items in the logring. + * @logring: array of ring buffers for logging the debug info by type. + */ +struct inff_logger { + u32 level; + u32 logring_depth; + struct inff_logring logring[INFF_LOGRING_MAX]; +}; + +#ifdef DEBUG +void inff_logring_dump(struct inff_pub *drvr, u8 ringid); +void inff_logring_fill(struct inff_pub *drvr, u8 ringid, u8 *data, u32 size); +int inff_logring_init(struct inff_pub *drvr, u8 ringid, u32 ring_item_size, + dump_callback_t dump_cb); +void inff_logring_deinit(struct inff_pub *drvr, u8 ringid); +int inff_logger_attach(struct inff_pub *drvr, u32 logger_level, u32 logring_depth); +void inff_logger_detach(struct inff_pub *drvr); +#else +static inline void inff_logring_dump(struct inff_pub *drvr, u8 ringid) +{ +} + +static inline void inff_logring_fill(struct inff_pub *drvr, u8 ringid, u8 *data, u32 size) +{ +} + +static inline int inff_logring_init(struct inff_pub *drvr, u8 ringid, u32 ring_item_size, + dump_callback_t dump_cb) +{ + return 0; +} + +static inline void inff_logring_deinit(struct inff_pub *drvr, u8 ringid) +{ +} + +static inline int inff_logger_attach(struct inff_pub *drvr, u32 logger_level, u32 logring_depth) +{ + return 0; +} + +static inline void inff_logger_detach(struct inff_pub *drvr) +{ +} 
+#endif /* DEBUG */ + +#endif /* INFF_LOGGER_H */ -- 2.25.1 Driver implementation to collect the EHT capabilities of the Device and register it to the wiphy capabilities in the cfg80211 driver. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/eht.c | 86 +++++++++++++++++++++ drivers/net/wireless/infineon/inffmac/eht.h | 42 ++++++++++ 2 files changed, 128 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/eht.c create mode 100644 drivers/net/wireless/infineon/inffmac/eht.h diff --git a/drivers/net/wireless/infineon/inffmac/eht.c b/drivers/net/wireless/infineon/inffmac/eht.c new file mode 100644 index 000000000000..78434c10713b --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/eht.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include +#include "bus.h" +#include "chip.h" +#include "eht.h" +#include "feature.h" +#include "fwil.h" +#include "cfg80211.h" +#include "debug.h" +#include "xtlv.h" + +int +inff_eht_mlo_get_enable(struct inff_if *ifp, u8 *param, int param_len) +{ + s32 ret = 0; + + ret = inff_fil_xtlv_data_get(ifp, "mlo", INFF_EHT_MLO_CMD_ENAB, param, param_len); + if (unlikely(ret)) + iphy_err(ifp->drvr, "failed to check if EHT MLO is enabled"); + + return ret; +} + +void +inff_eht_update_wiphy_cap(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + struct wiphy *wiphy = drvr->wiphy; + struct ieee80211_supported_band *band; + struct ieee80211_sband_iftype_data *data; + struct inff_bus *bus_if; + struct inff_chip *ci; + struct inff_chip_mlo_priv *chip_mlo_priv; + u8 eht_mlo_mode; + u8 i; + s32 ret = 0; + + /* EHT MLO mode */ + if (!inff_feat_is_enabled(ifp, INFF_FEAT_MLO)) + return; + ret = inff_eht_mlo_get_enable(ifp, &eht_mlo_mode, sizeof(eht_mlo_mode)); + if (ret || !eht_mlo_mode) + return; + bus_if = drvr->bus_if; + ci = bus_if->chip_pub; + chip_mlo_priv = 
&ci->chip_mlo_priv; + + if (!chip_mlo_priv->get_eht_cap) + return; + + inff_dbg(INFO, "EHT MLO Enabled\n"); + + /* Update HE Capab for each Band */ + for (i = 0; i < ARRAY_SIZE(wiphy->bands); i++) { + band = wiphy->bands[i]; + if (!band) + continue; + + data = (struct ieee80211_sband_iftype_data *)band->iftype_data; + + switch (band->band) { + case NL80211_BAND_6GHZ: + if (!inff_feat_is_6ghz_enabled(ifp)) + break; + /* Band 6GHz supports EHT, so */ + fallthrough; + + case NL80211_BAND_5GHZ: + /* Band 5GHz supports EHT, so */ + fallthrough; + + case NL80211_BAND_2GHZ: + /* Band 2.4GHz supports EHT, so */ + chip_mlo_priv->get_eht_cap(band, data); + break; + + default: + break; + } + } +} diff --git a/drivers/net/wireless/infineon/inffmac/eht.h b/drivers/net/wireless/infineon/inffmac/eht.h new file mode 100644 index 000000000000..a2850f1a8bfc --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/eht.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_EHT_H +#define INFF_EHT_H + +#include "core.h" + +#define EHT_MAC_CAP_INFO_SIZE 2 +#define EHT_PHY_CAP_INFO_SIZE 9 + +/** + * enum inff_eht_mlo_cmd - EHT iovar subcmds handled by firmware EHT module + */ +/* EHT MLO sub command IDs */ +enum inff_eht_mlo_cmd { + INFF_EHT_MLO_CMD_ENAB = 0, /* enable/disable MLO feature as a whole */ + INFF_EHT_MLO_CMD_CONFIG, /* configure MLO feature - bsscfg specific */ + INFF_EHT_MLO_CMD_STATUS, /* status on MLO feature - interface specific */ + INFF_EHT_MLO_CMD_EMLSR_CTRL, /* emlsr control - interface specific */ + INFF_EHT_MLO_CMD_TID_MAP, /* configure TID-To-Link Mapping */ + INFF_EHT_MLO_CMD_CAP, /* capability of MLO feature as a whole */ + INFF_EHT_MLO_CMD_ACT_LINK_BMAP, /* Set active link for MLO TX and RX */ + INFF_EHT_MLO_CMD_MULTILINK_ACTIVE, /* Set use of multi links in MLO mode */ + INFF_EHT_MLO_CMD_LINK_PS_BMAP, /* Modify PS state of a particular link in MLO. */ + INFF_EHT_MLO_CMD_LINK_DORMANT_BMAP, /* Bitmap to configure dormant state for links */ + INFF_EHT_MLO_CMD_REC_LINK_BMAP, /* Bitmap to configure recommended links */ + INFF_EHT_MLO_CMD_CONFIG_PREF, /* Configure mlo mode and band preferences */ + INFF_EHT_MLO_CMD_MAX_MLO_LINKS, /* set/get max MLO links supported */ + INFF_EHT_MLO_CMD_FEATURE_EN, /* Enable/Disable a given feature */ + INFF_EHT_MLO_CMD_NPLINK_CONFIG, /* configure nplink op upon offchannel of plink */ + INFF_EHT_MLO_CMD_STATS, /* stats on MLO feature */ + /* Add new sub command IDs here... */ +}; + +int inff_eht_mlo_get_enable(struct inff_if *ifp, u8 *param, int param_len); +void inff_eht_update_wiphy_cap(struct inff_if *ifp); + +#endif /* INFF_EHT_H */ -- 2.25.1 Driver implementation to define tracepoints which can be traced using the Kernel's FTRACE support. Helpful while debugging the funcationalities. 
Signed-off-by: Gokul Sivakumar --- .../wireless/infineon/inffmac/tracepoint.c | 35 +++++ .../wireless/infineon/inffmac/tracepoint.h | 138 ++++++++++++++++++ 2 files changed, 173 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/tracepoint.c create mode 100644 drivers/net/wireless/infineon/inffmac/tracepoint.h diff --git a/drivers/net/wireless/infineon/inffmac/tracepoint.c b/drivers/net/wireless/infineon/inffmac/tracepoint.c new file mode 100644 index 000000000000..627481ce538b --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/tracepoint.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2012 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include +#include /* bug in tracepoint.h, it should include this */ + +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "bus.h" +#include "tracepoint.h" +#include "debug.h" + +void __inff_err(struct inff_bus *bus, const char *func, const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + if (bus) + dev_err(bus->dev, "%s: %pV", func, &vaf); + else + pr_err("%s: %pV", func, &vaf); + trace_inff_err(func, &vaf); + va_end(args); +} + +#endif diff --git a/drivers/net/wireless/infineon/inffmac/tracepoint.h b/drivers/net/wireless/infineon/inffmac/tracepoint.h new file mode 100644 index 000000000000..e6e1778574ea --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/tracepoint.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2013 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#if !defined(INFF_TRACEPOINT_H) || defined(TRACE_HEADER_MULTI_READ) +#define INFF_TRACEPOINT_H + +#include +#include + +#ifndef CONFIG_INF_TRACING + +#undef TRACE_EVENT +#define TRACE_EVENT(name, proto, ...) \ +static inline void trace_ ## name(proto) {} + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(...) + +#undef DEFINE_EVENT +#define DEFINE_EVENT(name, proto, ...) \ +static inline void trace_ ## name(proto) {} + +#endif /* CONFIG_INF_TRACING */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM inffmac + +#define MAX_MSG_LEN 100 + +#pragma GCC diagnostic push +#ifndef __clang__ +#pragma GCC diagnostic ignored "-Wsuggest-attribute=format" +#endif + +TRACE_EVENT(inff_err, + TP_PROTO(const char *func, struct va_format *vaf), + TP_ARGS(func, vaf), + TP_STRUCT__entry(__string(func, func) + __vstring(msg, vaf->fmt, vaf->va) + ), + TP_fast_assign(__assign_str(func); + __assign_vstr(msg, vaf->fmt, vaf->va); + ), + TP_printk("%s: %s", __get_str(func), __get_str(msg)) +); + +TRACE_EVENT(inff_dbg, + TP_PROTO(u32 level, const char *func, struct va_format *vaf), + TP_ARGS(level, func, vaf), + TP_STRUCT__entry(__field(u32, level) + __string(func, func) + __vstring(msg, vaf->fmt, vaf->va) + ), + TP_fast_assign(__entry->level = level; + __assign_str(func); + __assign_vstr(msg, vaf->fmt, vaf->va); + ), + TP_printk("%s: %s", __get_str(func), __get_str(msg)) +); + +TRACE_EVENT(inff_hexdump, + TP_PROTO(void *data, size_t len), + TP_ARGS(data, len), + TP_STRUCT__entry(__field(unsigned long, len) + __field(unsigned long, addr) + __dynamic_array(u8, hdata, len) + ), + TP_fast_assign(__entry->len = len; + __entry->addr = (unsigned long)data; + memcpy(__get_dynamic_array(hdata), data, len); + ), + TP_printk("hexdump [addr=%lx, length=%lu]", __entry->addr, __entry->len) +); + +TRACE_EVENT(inff_bcdchdr, + TP_PROTO(void *data), + TP_ARGS(data), + TP_STRUCT__entry(__field(u8, flags) + __field(u8, prio) + __field(u8, flags2) + __field(u32, siglen) + __dynamic_array(u8, signal, 
*((u8 *)data + 3) * 4) + ), + TP_fast_assign(__entry->flags = *(u8 *)data; + __entry->prio = *((u8 *)data + 1); + __entry->flags2 = *((u8 *)data + 2); + __entry->siglen = *((u8 *)data + 3) * 4; + memcpy(__get_dynamic_array(signal), + (u8 *)data + 4, __entry->siglen); + ), + TP_printk("bcdc: prio=%d siglen=%d", __entry->prio, __entry->siglen) +); + +#ifndef SDPCM_RX +#define SDPCM_RX 0 +#endif +#ifndef SDPCM_TX +#define SDPCM_TX 1 +#endif +#ifndef SDPCM_GLOM +#define SDPCM_GLOM 2 +#endif + +TRACE_EVENT(inff_sdpcm_hdr, + TP_PROTO(u8 dir, void *data), + TP_ARGS(dir, data), + TP_STRUCT__entry(__field(u8, dir) + __field(u16, len) + __dynamic_array(u8, hdr, dir == SDPCM_GLOM ? 20 : 12) + ), + TP_fast_assign(memcpy(__get_dynamic_array(hdr), data, dir == SDPCM_GLOM ? 20 : 12); + __entry->len = *(u8 *)data | (*((u8 *)data + 1) << 8); + __entry->dir = dir; + ), + TP_printk("sdpcm: %s len %u, seq %d", + __entry->dir == SDPCM_RX ? "RX" : "TX", + __entry->len, ((u8 *)__get_dynamic_array(hdr))[4]) +); + +#pragma GCC diagnostic pop + +#ifdef CONFIG_INF_TRACING + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE tracepoint + +#include + +#endif /* CONFIG_INF_TRACING */ + +#endif /* INFF_TRACEPOINT_H */ -- 2.25.1 Driver implementation for handling various types of Device firmware files like binaries, NVRAM, CLM BLOB. 
Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/firmware.c | 983 ++++++++++++++++++ .../net/wireless/infineon/inffmac/firmware.h | 108 ++ 2 files changed, 1091 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/firmware.c create mode 100644 drivers/net/wireless/infineon/inffmac/firmware.h diff --git a/drivers/net/wireless/infineon/inffmac/firmware.c b/drivers/net/wireless/infineon/inffmac/firmware.c new file mode 100644 index 000000000000..9ea05c1119d7 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/firmware.c @@ -0,0 +1,983 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2013 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "debug.h" +#include "firmware.h" +#include "core.h" +#include "common.h" +#include "chip.h" + +#define INFF_FW_MAX_NVRAM_SIZE 64000 +#define INFF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */ +#define INFF_FW_NVRAM_PCIEDEV_LEN 20 /* pcie/1/4/ + \0 */ +#define INFF_FW_DEFAULT_BOARDREV "boardrev=0xff" +#define INFF_FW_MACADDR_FMT "macaddr=%pM" +#define INFF_FW_MACADDR_LEN (7 + ETH_ALEN * 3) + +enum nvram_parser_state { + ST_IDLE, + ST_KEY, + ST_VALUE, + ST_COMMENT, + ST_END +}; + +/** + * struct nvram_parser - internal info for parser. + * + * @state: current parser state. + * @data: input buffer being parsed. + * @nvram: output buffer with parse result. + * @nvram_len: length of parse result. + * @line: current line. + * @column: current column in line. + * @pos: byte offset in input buffer. + * @entry: start position of key,value entry. + * @multi_dev_v1: detect pcie multi device v1 (compressed). + * @multi_dev_v2: detect pcie multi device v2. + * @boardrev_found: nvram contains boardrev information. + * @strip_mac: strip the MAC address. 
+ */ +struct nvram_parser { + enum nvram_parser_state state; + const u8 *data; + u8 *nvram; + u32 nvram_len; + u32 line; + u32 column; + u32 pos; + u32 entry; + bool multi_dev_v1; + bool multi_dev_v2; + bool boardrev_found; + bool strip_mac; +}; + +/* + * is_nvram_char() - check if char is a valid one for NVRAM entry + * + * It accepts all printable ASCII chars except for '#' which opens a comment. + * Please note that ' ' (space) while accepted is not a valid key name char. + */ +static bool is_nvram_char(char c) +{ + /* comment marker excluded */ + if (c == '#') + return false; + + /* key and value may have any other readable character */ + return (c >= 0x20 && c < 0x7f); +} + +static bool is_whitespace(char c) +{ + return (c == ' ' || c == '\r' || c == '\n' || c == '\t'); +} + +static enum nvram_parser_state inff_nvram_handle_idle(struct nvram_parser *nvp) +{ + char c; + + c = nvp->data[nvp->pos]; + if (c == '\n') + return ST_COMMENT; + if (is_whitespace(c) || c == '\0') + goto proceed; + if (c == '#') + return ST_COMMENT; + if (is_nvram_char(c)) { + nvp->entry = nvp->pos; + return ST_KEY; + } + inff_dbg(INFO, "warning: ln=%d:col=%d: ignoring invalid character\n", + nvp->line, nvp->column); +proceed: + nvp->column++; + nvp->pos++; + return ST_IDLE; +} + +static enum nvram_parser_state inff_nvram_handle_key(struct nvram_parser *nvp) +{ + enum nvram_parser_state st = nvp->state; + char c; + + c = nvp->data[nvp->pos]; + if (c == '=') { + /* ignore RAW1 by treating as comment */ + if (strncmp(&nvp->data[nvp->entry], "RAW1", 4) == 0) + st = ST_COMMENT; + else + st = ST_VALUE; + if (strncmp(&nvp->data[nvp->entry], "devpath", 7) == 0) + nvp->multi_dev_v1 = true; + if (strncmp(&nvp->data[nvp->entry], "pcie/", 5) == 0) + nvp->multi_dev_v2 = true; + if (strncmp(&nvp->data[nvp->entry], "boardrev", 8) == 0) + nvp->boardrev_found = true; + /* strip macaddr if platform MAC overrides */ + if (nvp->strip_mac && + strncmp(&nvp->data[nvp->entry], "macaddr", 7) == 0) + st = 
ST_COMMENT; + } else if (!is_nvram_char(c) || c == ' ') { + inff_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n", + nvp->line, nvp->column); + return ST_COMMENT; + } + + nvp->column++; + nvp->pos++; + return st; +} + +static enum nvram_parser_state +inff_nvram_handle_value(struct nvram_parser *nvp) +{ + char c; + char *skv; + char *ekv; + u32 cplen; + + c = nvp->data[nvp->pos]; + if (!is_nvram_char(c)) { + /* key,value pair complete */ + ekv = (u8 *)&nvp->data[nvp->pos]; + skv = (u8 *)&nvp->data[nvp->entry]; + cplen = ekv - skv; + if (nvp->nvram_len + cplen + 1 >= INFF_FW_MAX_NVRAM_SIZE) + return ST_END; + /* copy to output buffer */ + memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen); + nvp->nvram_len += cplen; + nvp->nvram[nvp->nvram_len] = '\0'; + nvp->nvram_len++; + return ST_IDLE; + } + nvp->pos++; + nvp->column++; + return ST_VALUE; +} + +static enum nvram_parser_state +inff_nvram_handle_comment(struct nvram_parser *nvp) +{ + char *eoc, *sol; + + sol = (char *)&nvp->data[nvp->pos]; + eoc = strchr(sol, '\n'); + if (!eoc) { + eoc = strchr(sol, '\0'); + if (!eoc) + return ST_END; + } + + /* eat all moving to next line */ + nvp->line++; + nvp->column = 1; + nvp->pos += (eoc - sol) + 1; + return ST_IDLE; +} + +static enum nvram_parser_state inff_nvram_handle_end(struct nvram_parser *nvp) +{ + /* final state */ + return ST_END; +} + +static enum nvram_parser_state +(*nv_parser_states[])(struct nvram_parser *nvp) = { + inff_nvram_handle_idle, + inff_nvram_handle_key, + inff_nvram_handle_value, + inff_nvram_handle_comment, + inff_nvram_handle_end +}; + +static int inff_init_nvram_parser(struct nvram_parser *nvp, + const u8 *data, size_t data_len) +{ + size_t size; + + memset(nvp, 0, sizeof(*nvp)); + nvp->data = data; + /* Limit size to MAX_NVRAM_SIZE, some files contain lot of comment */ + if (data_len > INFF_FW_MAX_NVRAM_SIZE) + size = INFF_FW_MAX_NVRAM_SIZE; + else + size = data_len; + /* Add space for properties we may add */ + size += 
strlen(INFF_FW_DEFAULT_BOARDREV) + 1; + size += INFF_FW_MACADDR_LEN + 1; + /* Alloc for extra 0 byte + roundup by 4 + length field */ + size += 1 + 3 + sizeof(u32); + nvp->nvram = kzalloc(size, GFP_KERNEL); + if (!nvp->nvram) + return -ENOMEM; + + nvp->line = 1; + nvp->column = 1; + return 0; +} + +/* inff_fw_strip_multi_v1 :Some nvram files contain settings for multiple + * devices. Strip it down for one device, use domain_nr/bus_nr to determine + * which data is to be returned. v1 is the version where nvram is stored + * compressed and "devpath" maps to index for valid entries. + */ +static void inff_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr, + u16 bus_nr) +{ + /* Device path with a leading '=' key-value separator */ + char pci_path[20]; + size_t pci_len; + char pcie_path[20]; + size_t pcie_len; + + u32 i, j; + bool found; + u8 *nvram; + u8 id; + + nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL); + if (!nvram) + goto fail; + + /* min length: devpath0=pcie/1/4/ + 0:x=y */ + if (nvp->nvram_len < INFF_FW_NVRAM_DEVPATH_LEN + 6) + goto fail; + + /* First search for the devpathX and see if it is the configuration + * for domain_nr/bus_nr. 
Search complete nvp + */ + snprintf(pci_path, sizeof(pci_path), "=pci/%d/%d", domain_nr, + bus_nr); + pci_len = strlen(pci_path); + snprintf(pcie_path, sizeof(pcie_path), "=pcie/%d/%d", domain_nr, + bus_nr); + pcie_len = strlen(pcie_path); + found = false; + i = 0; + while (i < nvp->nvram_len - INFF_FW_NVRAM_DEVPATH_LEN) { + /* Format: devpathX=pcie/Y/Z/ + * Y = domain_nr, Z = bus_nr, X = virtual ID + */ + if (strncmp(&nvp->nvram[i], "devpath", 7) == 0 && + (!strncmp(&nvp->nvram[i + 8], pci_path, pci_len) || + !strncmp(&nvp->nvram[i + 8], pcie_path, pcie_len))) { + id = nvp->nvram[i + 7] - '0'; + found = true; + break; + } + while (nvp->nvram[i] != 0) + i++; + i++; + } + if (!found) + goto fail; + + /* Now copy all valid entries, release old nvram and assign new one */ + i = 0; + j = 0; + while (i < nvp->nvram_len) { + if ((nvp->nvram[i] - '0' == id) && (nvp->nvram[i + 1] == ':')) { + i += 2; + if (strncmp(&nvp->nvram[i], "boardrev", 8) == 0) + nvp->boardrev_found = true; + while (nvp->nvram[i] != 0) { + nvram[j] = nvp->nvram[i]; + i++; + j++; + } + nvram[j] = 0; + j++; + } + while (nvp->nvram[i] != 0) + i++; + i++; + } + kfree(nvp->nvram); + nvp->nvram = nvram; + nvp->nvram_len = j; + return; + +fail: + kfree(nvram); + nvp->nvram_len = 0; +} + +/* inff_fw_strip_multi_v2 :Some nvram files contain settings for multiple + * devices. Strip it down for one device, use domain_nr/bus_nr to determine + * which data is to be returned. v2 is the version where nvram is stored + * uncompressed, all relevant valid entries are identified by + * pcie/domain_nr/bus_nr: + */ +static void inff_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr, + u16 bus_nr) +{ + char prefix[INFF_FW_NVRAM_PCIEDEV_LEN]; + size_t len; + u32 i, j; + u8 *nvram; + + nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL); + if (!nvram) { + nvp->nvram_len = 0; + return; + } + + /* Copy all valid entries, release old nvram and assign new one. 
+ * Valid entries are of type pcie/X/Y/ where X = domain_nr and + * Y = bus_nr. + */ + snprintf(prefix, sizeof(prefix), "pcie/%d/%d/", domain_nr, bus_nr); + len = strlen(prefix); + i = 0; + j = 0; + while (i < nvp->nvram_len - len) { + if (strncmp(&nvp->nvram[i], prefix, len) == 0) { + i += len; + if (strncmp(&nvp->nvram[i], "boardrev", 8) == 0) + nvp->boardrev_found = true; + while (nvp->nvram[i] != 0) { + nvram[j] = nvp->nvram[i]; + i++; + j++; + } + nvram[j] = 0; + j++; + } + while (nvp->nvram[i] != 0) + i++; + i++; + } + kfree(nvp->nvram); + nvp->nvram = nvram; + nvp->nvram_len = j; +} + +static void inff_fw_add_defaults(struct nvram_parser *nvp) +{ + if (nvp->boardrev_found) + return; + + memcpy(&nvp->nvram[nvp->nvram_len], &INFF_FW_DEFAULT_BOARDREV, + strlen(INFF_FW_DEFAULT_BOARDREV)); + nvp->nvram_len += strlen(INFF_FW_DEFAULT_BOARDREV); + nvp->nvram[nvp->nvram_len] = '\0'; + nvp->nvram_len++; +} + +static void inff_fw_add_macaddr(struct nvram_parser *nvp, u8 *mac) +{ + int len; + + len = scnprintf(&nvp->nvram[nvp->nvram_len], INFF_FW_MACADDR_LEN + 1, + INFF_FW_MACADDR_FMT, mac); + WARN_ON(len != INFF_FW_MACADDR_LEN); + nvp->nvram_len += len + 1; +} + +/* inff_fw_nvram_strip: Takes a buffer of "<key>=<value>\n" lines read from a file + * and ending in a NUL. Removes carriage returns, empty lines, comment lines, + * and converts newlines to NULs. Shortens buffer as needed and pads with NULs. + * End of buffer is completed with token identifying length of buffer. 
+ */ +static void *inff_fw_nvram_strip(const u8 *data, size_t data_len, + u32 *new_length, u16 domain_nr, u16 bus_nr, + struct device *dev) +{ + struct nvram_parser nvp; + u32 pad; + u32 token; + __le32 token_le; + u8 mac[ETH_ALEN]; + + if (inff_init_nvram_parser(&nvp, data, data_len) < 0) + return NULL; + + if (eth_platform_get_mac_address(dev, mac) == 0) + nvp.strip_mac = true; + + while (nvp.pos < data_len) { + nvp.state = nv_parser_states[nvp.state](&nvp); + if (nvp.state == ST_END) + break; + } + if (nvp.multi_dev_v1) { + nvp.boardrev_found = false; + inff_fw_strip_multi_v1(&nvp, domain_nr, bus_nr); + } else if (nvp.multi_dev_v2) { + nvp.boardrev_found = false; + inff_fw_strip_multi_v2(&nvp, domain_nr, bus_nr); + } + + if (nvp.nvram_len == 0) { + kfree(nvp.nvram); + return NULL; + } + + inff_fw_add_defaults(&nvp); + + if (nvp.strip_mac) + inff_fw_add_macaddr(&nvp, mac); + + pad = nvp.nvram_len; + *new_length = roundup(nvp.nvram_len + 1, 4); + while (pad != *new_length) { + nvp.nvram[pad] = 0; + pad++; + } + + token = *new_length / 4; + token = (~token << 16) | (token & 0x0000FFFF); + token_le = cpu_to_le32(token); + + memcpy(&nvp.nvram[*new_length], &token_le, sizeof(token_le)); + *new_length += sizeof(token_le); + + return nvp.nvram; +} + +void inff_fw_nvram_free(void *nvram) +{ + kfree(nvram); +} + +struct inff_fw { + struct device *dev; + struct inff_fw_request *req; + u32 curpos; + unsigned int board_index; + void (*done)(struct device *dev, int err, struct inff_fw_request *req); +}; + +#ifdef CONFIG_EFI +/* In some cases the EFI-var stored nvram contains "ccode=ALL" or "ccode=XV" + * to specify "worldwide" compatible settings, but these 2 ccode-s do not work + * properly. "ccode=ALL" causes channels 12 and 13 to not be available, + * "ccode=XV" causes all 5GHz channels to not be available. So we replace both + * with "ccode=X2" which allows channels 12+13 and 5Ghz channels in + * no-Initiate-Radiation mode. 
This means that we will never send on these + * channels without first having received valid wifi traffic on the channel. + */ +static void inff_fw_fix_efi_nvram_ccode(char *data, unsigned long data_len) +{ + char *ccode; + + ccode = strnstr((char *)data, "ccode=ALL", data_len); + if (!ccode) + ccode = strnstr((char *)data, "ccode=XV\r", data_len); + if (!ccode) + return; + + ccode[6] = 'X'; + ccode[7] = '2'; + ccode[8] = '\r'; +} + +static u8 *inff_fw_nvram_from_efi(size_t *data_len_ret) +{ + efi_guid_t guid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61, 0xb5, 0x1f, + 0x43, 0x26, 0x81, 0x23, 0xd1, 0x13); + unsigned long data_len = 0; + efi_status_t status; + u8 *data = NULL; + + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) + return NULL; + + status = efi.get_variable(L"nvram", &guid, NULL, &data_len, NULL); + if (status != EFI_BUFFER_TOO_SMALL) + goto fail; + + data = kmalloc(data_len, GFP_KERNEL); + if (!data) + goto fail; + + status = efi.get_variable(L"nvram", &guid, NULL, &data_len, data); + if (status != EFI_SUCCESS) + goto fail; + + inff_fw_fix_efi_nvram_ccode(data, data_len); + inff_info("Using nvram EFI variable\n"); + + *data_len_ret = data_len; + return data; +fail: + kfree(data); + return NULL; +} +#else +static inline u8 *inff_fw_nvram_from_efi(size_t *data_len) { return NULL; } +#endif + +static void inff_fw_free_request(struct inff_fw_request *req) +{ + struct inff_fw_item *item; + int i; + + for (i = 0, item = &req->items[0]; i < req->n_items; i++, item++) { + if (item->type == INFF_FW_TYPE_BINARY || + item->type == INFF_FW_TYPE_TRXS || + item->type == INFF_FW_TYPE_TRXSE) + release_firmware(item->binary); + else if (item->type == INFF_FW_TYPE_NVRAM) + inff_fw_nvram_free(item->nv_data.data); + } + kfree(req); +} + +static int inff_fw_request_nvram_done(const struct firmware *fw, void *ctx) +{ + struct inff_fw *fwctx = ctx; + struct inff_fw_item *cur; + bool free_bcm47xx_nvram = false; + bool kfree_nvram = false; + u32 nvram_length = 0; + 
void *nvram = NULL; + u8 *data = NULL; + size_t data_len; + + inff_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev)); + + cur = &fwctx->req->items[fwctx->curpos]; + + if (fw && fw->data) { + data = (u8 *)fw->data; + data_len = fw->size; + } else { + data = inff_fw_nvram_from_efi(&data_len); + if (data) + kfree_nvram = true; + else if (!(cur->flags & INFF_FW_REQF_OPTIONAL)) + goto fail; + } + + if (data) + nvram = inff_fw_nvram_strip(data, data_len, &nvram_length, + fwctx->req->domain_nr, + fwctx->req->bus_nr, + fwctx->dev); + + if (free_bcm47xx_nvram) + bcm47xx_nvram_release_contents(data); + if (kfree_nvram) + kfree(data); + + release_firmware(fw); + if (!nvram && !(cur->flags & INFF_FW_REQF_OPTIONAL)) + goto fail; + + inff_dbg(TRACE, "nvram %p len %d\n", nvram, nvram_length); + cur->nv_data.data = nvram; + cur->nv_data.len = nvram_length; + return 0; + +fail: + return -ENOENT; +} + +static int inff_fw_complete_request(const struct firmware *fw, + struct inff_fw *fwctx) +{ + struct inff_fw_item *cur = &fwctx->req->items[fwctx->curpos]; + int ret = 0; + + inff_dbg(TRACE, "firmware %s %sfound\n", cur->path, fw ? "" : "not "); + + switch (cur->type) { + case INFF_FW_TYPE_NVRAM: + ret = inff_fw_request_nvram_done(fw, fwctx); + break; + case INFF_FW_TYPE_BINARY: + case INFF_FW_TYPE_TRXSE: + case INFF_FW_TYPE_TRXS: + case INFF_FW_TYPE_CLM: + if (fw) + cur->binary = fw; + else + ret = -ENOENT; + break; + default: + /* something fishy here so bail out early */ + inff_err("unknown fw type: %d\n", cur->type); + release_firmware(fw); + ret = -EINVAL; + } + + return (cur->flags & INFF_FW_REQF_OPTIONAL) ? 
0 : ret;
+}
+
+/* Build "<base>.<board_type><ext>" from a canonical firmware path.
+ * Returns a kasprintf()-allocated string the caller must kfree(), or NULL.
+ */
+static char *inff_alt_fw_path(const char *path, const char *board_type)
+{
+	char base[INFF_FW_NAME_LEN];
+	const char *suffix;
+	char *ret;
+
+	if (!board_type)
+		return NULL;
+
+	suffix = strrchr(path, '.');
+	if (!suffix || suffix == path)
+		return NULL;
+
+	/* strip extension at the end */
+	strscpy(base, path, INFF_FW_NAME_LEN);
+	base[suffix - path] = 0;
+
+	ret = kasprintf(GFP_KERNEL, "%s.%s%s", base, board_type, suffix);
+	if (!ret)
+		inff_err("out of memory allocating firmware path for '%s'\n",
+			 path);
+
+	inff_dbg(TRACE, "FW alt path: %s\n", ret);
+
+	return ret;
+}
+
+/* Synchronously fetch the firmware for the current request item, trying any
+ * board-specific paths first and falling back to the canonical path.
+ */
+static int inff_fw_request_firmware(const struct firmware **fw,
+				    struct inff_fw *fwctx)
+{
+	struct inff_fw_item *cur = &fwctx->req->items[fwctx->curpos];
+	unsigned int i;
+	int ret;
+
+	/* Files can be board-specific, first try board-specific paths */
+	for (i = 0; i < ARRAY_SIZE(fwctx->req->board_types); i++) {
+		char *alt_path;
+
+		if (!fwctx->req->board_types[i])
+			goto fallback;
+		alt_path = inff_alt_fw_path(cur->path,
+					    fwctx->req->board_types[i]);
+		if (!alt_path)
+			goto fallback;
+
+		ret = request_firmware_direct(fw, alt_path, fwctx->dev);
+		kfree(alt_path);
+		if (ret)
+			inff_info("no board-specific nvram available (ret=%d), device will use %s\n",
+				  ret, cur->path);
+		else
+			return ret;
+	}
+
+fallback:
+	return request_firmware(fw, cur->path, fwctx->dev);
+}
+
+/* Completion callback for the first firmware request; walks the remaining
+ * request items synchronously and finally invokes the driver's done callback.
+ */
+static void inff_fw_request_done(const struct firmware *fw, void *ctx)
+{
+	struct inff_fw *fwctx = ctx;
+	struct inff_fw_item *cur = &fwctx->req->items[fwctx->curpos];
+	char alt_path[INFF_FW_NAME_LEN];
+	int ret;
+
+	if (!fw && cur->type == INFF_FW_TYPE_TRXS) {
+		strscpy(alt_path, cur->path, INFF_FW_NAME_LEN);
+		/* strip trailing 's' from .trxs to fall back to the .trx
+		 * image, mirroring the .trxse -> .trx fallback below
+		 */
+		alt_path[strlen(alt_path) - 1] = 0;
+		ret = request_firmware(&fw, alt_path, fwctx->dev);
+		if (!ret)
+			cur->path = alt_path;
+	}
+	if (!fw && cur->type == INFF_FW_TYPE_TRXSE) {
+		strscpy(alt_path, cur->path, INFF_FW_NAME_LEN);
+		/* strip 'se' from .trxse at the end */
+		alt_path[strlen(alt_path) - 2] = 0;
+		ret = request_firmware(&fw, alt_path, fwctx->dev);
+		if (!ret)
+			cur->path = alt_path;
+	}
+
+	/* NOTE(review): alt_path is on-stack storage, so cur->path must not
+	 * be dereferenced after this function returns - confirm no caller
+	 * reads item->path later, or kstrdup() the fallback name instead.
+	 */
+	ret = inff_fw_complete_request(fw, fwctx);
+
+	while (ret == 0 && ++fwctx->curpos < fwctx->req->n_items) {
+		inff_fw_request_firmware(&fw, fwctx);
+		ret = inff_fw_complete_request(fw, fwctx);
+	}
+
+	if (ret) {
+		inff_fw_free_request(fwctx->req);
+		fwctx->req = NULL;
+	}
+	fwctx->done(fwctx->dev, ret, fwctx->req);
+	kfree(fwctx);
+}
+
+/* Async completion callback that steps through board-specific firmware
+ * names before falling back to the canonical path.
+ */
+static void inff_fw_request_done_alt_path(const struct firmware *fw, void *ctx)
+{
+	struct inff_fw *fwctx = ctx;
+	struct inff_fw_item *first = &fwctx->req->items[0];
+	const char *board_type, *alt_path;
+	int ret = 0;
+
+	if (fw) {
+		inff_fw_request_done(fw, ctx);
+		return;
+	}
+
+	/* Try next board firmware */
+	if (fwctx->board_index < ARRAY_SIZE(fwctx->req->board_types)) {
+		board_type = fwctx->req->board_types[fwctx->board_index++];
+		if (!board_type)
+			goto fallback;
+		alt_path = inff_alt_fw_path(first->path, board_type);
+		if (!alt_path)
+			goto fallback;
+
+		/* fw is known to be NULL here, no extra check needed */
+		ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
+					      fwctx->dev, GFP_KERNEL, fwctx,
+					      inff_fw_request_done_alt_path);
+		kfree(alt_path);
+
+		if (ret < 0)
+			inff_fw_request_done(fw, ctx);
+		return;
+	}
+
+fallback:
+	/* Fall back to canonical path if board firmware not found */
+	ret = request_firmware_nowait(THIS_MODULE, true, first->path,
+				      fwctx->dev, GFP_KERNEL, fwctx,
+				      inff_fw_request_done);
+
+	if (ret < 0)
+		inff_fw_request_done(fw, ctx);
+}
+
+/* Sanity-check a request: at least one item and every item has a path. */
+static bool inff_fw_request_is_valid(struct inff_fw_request *req)
+{
+	struct inff_fw_item *item;
+	int i;
+
+	if (!req->n_items)
+		return false;
+
+	for (i = 0, item = &req->items[0]; i < req->n_items; i++, item++) {
+		if (!item->path)
+			return false;
+	}
+	return true;
+}
+
+int inff_fw_get_firmware_sync(struct device *dev, struct inff_fw_request *req,
+			      void (*fw_cb)(struct device *dev, int err,
+					    struct inff_fw_request *req))
+{
+	struct inff_fw_item
*first = &req->items[0]; + struct inff_fw *fwctx; + char *alt_path = NULL; + const struct firmware *fw; + int ret = -ENOENT; + + inff_dbg(TRACE, "enter: dev=%s\n", dev_name(dev)); + + if (!inff_fw_request_is_valid(req)) + return -EINVAL; + + if (req->n_items > 1) + return -EINVAL; + + fwctx = kzalloc(sizeof(*fwctx), GFP_KERNEL); + if (!fwctx) + return -ENOMEM; + + fwctx->dev = dev; + fwctx->req = req; + fwctx->done = fw_cb; + + /* First try alternative board-specific path if any */ + if (fwctx->req->board_types[0]) + alt_path = inff_alt_fw_path(first->path, + fwctx->req->board_types[0]); + if (alt_path) { + /* Do not fallback to user-mode helper if file does not exist */ + ret = request_firmware_direct(&fw, alt_path, fwctx->dev); + kfree(alt_path); + } + + if (ret == -ENOENT) + ret = request_firmware(&fw, first->path, fwctx->dev); + + inff_fw_request_done(fw, fwctx); + + return ret; +} + +int inff_fw_get_firmwares(struct device *dev, struct inff_fw_request *req, + void (*fw_cb)(struct device *dev, int err, + struct inff_fw_request *req)) +{ + struct inff_fw_item *first = &req->items[0]; + struct inff_fw *fwctx; + char *alt_path = NULL; + int ret; + + inff_dbg(TRACE, "enter: dev=%s\n", dev_name(dev)); + if (!fw_cb) + return -EINVAL; + + if (!inff_fw_request_is_valid(req)) + return -EINVAL; + + fwctx = kzalloc(sizeof(*fwctx), GFP_KERNEL); + if (!fwctx) + return -ENOMEM; + + fwctx->dev = dev; + fwctx->req = req; + fwctx->done = fw_cb; + + /* First try alternative board-specific path if any */ + if (fwctx->req->board_types[0]) + alt_path = inff_alt_fw_path(first->path, + fwctx->req->board_types[0]); + if (alt_path) { + fwctx->board_index++; + ret = request_firmware_nowait(THIS_MODULE, true, alt_path, + fwctx->dev, GFP_KERNEL, fwctx, + inff_fw_request_done_alt_path); + kfree(alt_path); + } else { + ret = request_firmware_nowait(THIS_MODULE, true, first->path, + fwctx->dev, GFP_KERNEL, fwctx, + inff_fw_request_done); + } + if (ret < 0) + inff_fw_request_done(NULL, 
fwctx); + + return 0; +} + +struct inff_fw_request * +inff_fw_alloc_request(char mp_path[], u32 chip, u32 chiprev, + const struct inff_firmware_mapping mapping_table[], + u32 table_size, struct inff_fw_name *fwnames, + u32 n_fwnames) +{ + struct inff_fw_request *fwreq; + char chipname[12]; + size_t mp_path_len; + u32 i, j; + char end = '\0'; + + if (chiprev >= BITS_PER_TYPE(u32)) { + inff_err("Invalid chip revision %u\n", chiprev); + return NULL; + } + + for (i = 0; i < table_size; i++) { + if (mapping_table[i].chipid == chip && + mapping_table[i].revmask & BIT(chiprev)) + break; + } + + inff_chip_name(chip, chiprev, chipname, sizeof(chipname)); + + if (i == table_size) { + inff_err("Unknown chip %s\n", chipname); + return NULL; + } + + fwreq = kzalloc(struct_size(fwreq, items, n_fwnames), GFP_KERNEL); + if (!fwreq) + return NULL; + + inff_info("using %s for chip %s\n", + mapping_table[i].fw_base, chipname); + + mp_path_len = strnlen(mp_path, INFF_FW_ALTPATH_LEN); + if (mp_path_len) + end = mp_path[mp_path_len - 1]; + + fwreq->n_items = n_fwnames; + + for (j = 0; j < n_fwnames; j++) { + fwreq->items[j].path = fwnames[j].path; + fwnames[j].path[0] = '\0'; + /* check if firmware path is provided by module parameter */ + if (mp_path[0] != '\0') { + strscpy(fwnames[j].path, mp_path, + INFF_FW_NAME_LEN); + + if (end != '/') { + strlcat(fwnames[j].path, "/", + INFF_FW_NAME_LEN); + } + } + strlcat(fwnames[j].path, mapping_table[i].fw_base, + INFF_FW_NAME_LEN); + strlcat(fwnames[j].path, fwnames[j].extension, + INFF_FW_NAME_LEN); + fwreq->items[j].path = fwnames[j].path; + + if (!strncmp(fwnames[j].extension, ".trxse", 6)) { + fwreq->items[j].type = INFF_FW_TYPE_TRXSE; + } else if (!strncmp(fwnames[j].extension, ".trxs", 5)) { + fwreq->items[j].type = INFF_FW_TYPE_TRXS; + } else if (!strncmp(fwnames[j].extension, ".txt", 4)) { + fwreq->items[j].type = INFF_FW_TYPE_NVRAM; + fwreq->items[j].flags = INFF_FW_REQF_OPTIONAL; + } else if (!strncmp(fwnames[j].extension, 
".clm_blob", 9)) { + fwreq->items[j].type = INFF_FW_TYPE_CLM; + fwreq->items[j].flags = INFF_FW_REQF_OPTIONAL; + } + } + + return fwreq; +} + +struct inff_fw_request * +inff_prepare_fw_request(char mp_path[], struct inff_chip *ci, + const struct inff_firmware_mapping *name_map, + int map_size, const char *board_type) +{ + struct inff_fw_request *fwreq; + struct inff_fw_name fwnames[INFF_FW_BIN_MAX_TYPE]; + struct inff_chip_specific *chip_spec = &ci->chip_spec; + struct inff_fw_dataset *fw_data = &chip_spec->fwdata[0]; + + if (fw_data[INFF_FW_CODE].fwnames.extension) { + fwnames[INFF_FW_CODE].extension = fw_data[INFF_FW_CODE].fwnames.extension; + fwnames[INFF_FW_CODE].path = fw_data[INFF_FW_CODE].fwnames.path; + } else { + inff_err("chip 0x%x uninitialized with fw code file extension\n", + ci->chip); + return NULL; + } + + if (fw_data[INFF_FW_NVRAM].fwnames.extension) { + fwnames[INFF_FW_NVRAM].extension = fw_data[INFF_FW_NVRAM].fwnames.extension; + fwnames[INFF_FW_NVRAM].path = fw_data[INFF_FW_NVRAM].fwnames.path; + } + + if (fw_data[INFF_FW_CLM].fwnames.extension) { + fwnames[INFF_FW_CLM].extension = fw_data[INFF_FW_CLM].fwnames.extension; + fwnames[INFF_FW_CLM].path = fw_data[INFF_FW_CLM].fwnames.path; + } + + fwreq = inff_fw_alloc_request(mp_path, ci->chip, ci->chiprev, name_map, + map_size, fwnames, ARRAY_SIZE(fwnames)); + if (!fwreq) + return NULL; + + fwreq->board_types[0] = board_type; + + return fwreq; +} diff --git a/drivers/net/wireless/infineon/inffmac/firmware.h b/drivers/net/wireless/infineon/inffmac/firmware.h new file mode 100644 index 000000000000..444a463b8140 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/firmware.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2013 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_FIRMWARE_H +#define INFF_FIRMWARE_H + +#define INFF_FW_REQF_OPTIONAL 0x0001 + +#define INFF_FW_NAME_LEN 320 + +#define INFF_FW_MAX_BOARD_TYPES 8 + +#define INFF_FW_DEFAULT_PATH "infineon/" + +/* forward declaration */ +struct inff_chip; + +/** + * struct inff_firmware_mapping - Used to map chipid/revmask to firmware + * filename and nvram filename. Each bus type implementation should create + * a table of firmware mappings (using the macros defined below). + * + * @chipid: ID of chip. + * @revmask: bitmask of revisions, e.g. 0x10 means rev 4 only, 0xf means rev 0-3 + * @fw: name of the firmware file. + * @nvram: name of nvram file. + */ +struct inff_firmware_mapping { + u32 chipid; + u32 revmask; + const char *fw_base; +}; + +/* Firmware and Country Local Matrix files */ +#define INFF_FW_ENTRY(chipid, mask, name) \ + { chipid, mask, INFF_ ## name ## _FIRMWARE_BASENAME } + +void inff_fw_nvram_free(void *nvram); + +enum inff_fw_type { + INFF_FW_TYPE_BINARY, + INFF_FW_TYPE_NVRAM, + INFF_FW_TYPE_TRXSE, + INFF_FW_TYPE_TRXS, + INFF_FW_TYPE_CLM, +}; + +struct inff_fw_item { + const char *path; + enum inff_fw_type type; + u16 flags; + union { + const struct firmware *binary; + struct { + void *data; + u32 len; + } nv_data; + }; +}; + +struct inff_fw_request { + u16 domain_nr; + u16 bus_nr; + u32 n_items; + const char *board_types[INFF_FW_MAX_BOARD_TYPES]; + struct inff_fw_item items[] __counted_by(n_items); +}; + +struct inff_fw_name { + const char *extension; + char *path; +}; + +struct inff_fw_request * +inff_fw_alloc_request(char mp_path[], u32 chip, u32 chiprev, + const struct inff_firmware_mapping mapping_table[], + u32 table_size, struct inff_fw_name *fwnames, + u32 n_fwnames); + +/* + * Request firmware(s) asynchronously. When the asynchronous request + * fails it will not use the callback, but call device_release_driver() + * instead which will call the driver .remove() callback. 
+ */ +int inff_fw_get_firmwares(struct device *dev, struct inff_fw_request *req, + void (*fw_cb)(struct device *dev, int err, + struct inff_fw_request *req)); + +/** + * Request single firmware synchronously. + * Callback is called on a valid request + * whether it succeeds or not. + */ +int inff_fw_get_firmware_sync(struct device *dev, struct inff_fw_request *req, + void (*fw_cb)(struct device *dev, int err, + struct inff_fw_request *req)); + +struct inff_fw_request * +inff_prepare_fw_request(char mp_path[], struct inff_chip *ci, + const struct inff_firmware_mapping *name_map, + int map_size, const char *board_type); + +#endif /* INFF_FIRMWARE_H */ -- 2.25.1 Driver implementation to detect and maintain the feature capabilities of the currently loaded Device firmware. All other driver source files utilizes these firmware feature flags for finding if the firmware is supporting a specific operation or functionality. Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/feature.c | 377 ++++++++++++++++++ .../net/wireless/infineon/inffmac/feature.h | 168 ++++++++ 2 files changed, 545 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/feature.c create mode 100644 drivers/net/wireless/infineon/inffmac/feature.h diff --git a/drivers/net/wireless/infineon/inffmac/feature.c b/drivers/net/wireless/infineon/inffmac/feature.c new file mode 100644 index 000000000000..dcc6e0d93686 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/feature.c @@ -0,0 +1,377 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include +#include + +#include "hw_ids.h" +#include "core.h" +#include "bus.h" +#include "debug.h" +#include "fwil.h" +#include "fwil_types.h" +#include "feature.h" +#include "common.h" +#include "xtlv.h" +#include "twt.h" + +static const char * const inff_feat_names[] = { + "MBSS", + "MCHAN", + "PNO", + "WOWL", + "P2P", + "RSDB", + "TDLS", + "SCAN_RANDOM_MAC", + "WOWL_ND", + "WOWL_GTK", + "WOWL_ARP_ND", + "MFP", + "GSCAN", + "FWSUP", + "MONITOR", + "MONITOR_FLAG", + "MONITOR_FMT_RADIOTAP", + "MONITOR_FMT_HW_RX_HDR", + "DOT11H", + "SAE", + "FWAUTH", + "DUMP_OBSS", + "SCAN_V2", + "PMKID_V2", + "PMKID_V3", + "SURVEY_DUMP", + "SAE_EXT", + "FBT", + "OKC", + "GCMP", + "TWT", + "OFFLOADS", + "ULP", + "PROPTXSTATUS", + "OWE", + "WLAN_SENSE", + "FTM", + "GTKO", + "MCHAN_CONFIG", + "MLO", + "LAST" +}; + +struct inff_feat_fwcap { + enum inff_feat_id feature; + const char * const fwcap_id; +}; + +static const struct inff_feat_fwcap inff_fwcap_map[] = { + { INFF_FEAT_MBSS, "mbss" }, + { INFF_FEAT_MCHAN, "mchan" }, + { INFF_FEAT_P2P, "p2p" }, + { INFF_FEAT_MONITOR, "monitor" }, + { INFF_FEAT_MONITOR_FLAG, "rtap" }, + { INFF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" }, + { INFF_FEAT_DOT11H, "802.11h" }, + { INFF_FEAT_SAE, "sae " }, + { INFF_FEAT_FWAUTH, "idauth" }, + { INFF_FEAT_SAE_EXT, "sae_ext " }, + { INFF_FEAT_FBT, "fbt " }, + { INFF_FEAT_OKC, "okc" }, + { INFF_FEAT_GCMP, "gcmp" }, + { INFF_FEAT_OFFLOADS, "offloads" }, + { INFF_FEAT_ULP, "ulp" }, + { INFF_FEAT_PROPTXSTATUS, "proptxstatus" }, + { INFF_FEAT_OWE, "owe" }, + { INFF_FEAT_WLAN_SENSE, "wlan_sense" }, + { INFF_FEAT_FTM, "ftm" }, + { INFF_FEAT_GTKO, "gtko" }, + { INFF_FEAT_MCHAN_CONFIG, "mchan_config" }, + { INFF_FEAT_MLO, "mlo" }, +}; + +#ifdef DEBUG +static const char * const inff_quirk_names[] = { + "QUIRK_AUTO_AUTH", + "QUIRK_NEED_MPC", + "QUIRK_LAST" +}; + +/** + * inff_feat_debugfs_read() - expose feature info to debugfs. + * + * @seq: sequence for debugfs entry. + * @data: raw data pointer. 
+ */ +static int inff_feat_debugfs_read(struct seq_file *seq, void *data) +{ + struct inff_bus *bus_if = dev_get_drvdata(seq->private); + u8 feats[DIV_ROUND_UP(INFF_FEAT_LAST, 8)] = {0}; + u32 quirks = bus_if->drvr->chip_quirks; + int id, i; + u8 size = INFF_FEAT_LAST / 8; + + memcpy(feats, bus_if->drvr->feat_flags, sizeof(feats)); + + seq_puts(seq, "Features: "); + for (i = 0; i < size; i++) + seq_printf(seq, "%02x", feats[i]); + seq_puts(seq, "\n"); + + for (id = 0; id < INFF_FEAT_LAST; id++) + if (feats[id / 8] & BIT(id % 8)) + seq_printf(seq, "\t%s\n", inff_feat_names[id]); + + seq_printf(seq, "\nQuirks: %08x\n", quirks); + for (id = 0; id < INFF_FEAT_QUIRK_LAST; id++) + if (quirks & BIT(id)) + seq_printf(seq, "\t%s\n", inff_quirk_names[id]); + return 0; +} +#else +static int inff_feat_debugfs_read(struct seq_file *seq, void *data) +{ + return 0; +} +#endif /* DEBUG */ + +struct inff_feat_wlcfeat { + u16 min_ver_major; + u16 min_ver_minor; + u32 feat_flags; +}; + +static const struct inff_feat_wlcfeat inff_feat_wlcfeat_map[] = { + { 12, 0, BIT(INFF_FEAT_PMKID_V2) }, + { 13, 0, BIT(INFF_FEAT_PMKID_V3) }, +}; + +static void inff_feat_wlc_version_overrides(struct inff_pub *drv) +{ + struct inff_if *ifp = inff_get_ifp(drv, 0); + struct inff_wlc_version_le ver; + int err, major, minor; + + err = inff_fil_iovar_data_get(ifp, "wlc_ver", &ver, sizeof(ver)); + if (err) + return; + + major = le16_to_cpu(ver.wlc_ver_major); + minor = le16_to_cpu(ver.wlc_ver_minor); + + inff_dbg(INFO, "WLC version: %d.%d\n", major, minor); +} + +/** + * inff_feat_iovar_int_get() - determine feature through iovar query. + * + * @ifp: interface to query. + * @id: feature id. + * @name: iovar name. 
+ */ +static void inff_feat_iovar_int_get(struct inff_if *ifp, + enum inff_feat_id id, char *name) +{ + u32 data; + int err; + + /* we need to know firmware error */ + ifp->fwil_fwerr = true; + + err = inff_fil_iovar_int_get(ifp, name, &data); + if (err != -INFF_FW_UNSUPPORTED) { + inff_dbg(INFO, "enabling feature: %s\n", inff_feat_names[id]); + ifp->drvr->feat_flags[id / 8] |= BIT(id % 8); + } else { + inff_dbg(TRACE, "%s feature check failed: %d\n", + inff_feat_names[id], err); + } + + ifp->fwil_fwerr = false; +} + +static void inff_feat_iovar_enab_get(struct inff_if *ifp, + enum inff_feat_id id, char *name, + u16 subcmd_id) +{ + int err; + u8 val; + + /* we need to know firmware error */ + ifp->fwil_fwerr = true; + + err = inff_fil_xtlv_data_get(ifp, name, subcmd_id, + (void *)&val, sizeof(val)); + + if (!err) { + inff_dbg(INFO, "enabling feature: %s\n", inff_feat_names[id]); + ifp->drvr->feat_flags[id / 8] |= BIT(id % 8); + } else { + inff_dbg(TRACE, "%s feature check failed: %d\n", + inff_feat_names[id], err); + } + + ifp->fwil_fwerr = false; +} + +#define MAX_CAPS_BUFFER_SIZE 768 +static void inff_feat_firmware_capabilities(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + char caps[MAX_CAPS_BUFFER_SIZE]; + enum inff_feat_id id; + int i, err; + + err = inff_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps)); + if (err) { + iphy_err(drvr, "could not get firmware cap (%d)\n", err); + return; + } + + inff_dbg(INFO, "[ %s]\n", caps); + + for (i = 0; i < ARRAY_SIZE(inff_fwcap_map); i++) { + if (strnstr(caps, inff_fwcap_map[i].fwcap_id, sizeof(caps))) { + id = inff_fwcap_map[i].feature; + inff_dbg(INFO, "enabling feature: %s\n", + inff_feat_names[id]); + ifp->drvr->feat_flags[id / 8] |= BIT(id % 8); + } + } +} + +/** + * inff_feat_fwcap_debugfs_read() - expose firmware capabilities to debugfs. + * + * @seq: sequence for debugfs entry. + * @data: raw data pointer. 
+ */ +static int inff_feat_fwcap_debugfs_read(struct seq_file *seq, void *data) +{ + struct inff_bus *bus_if = dev_get_drvdata(seq->private); + struct inff_pub *drvr = bus_if->drvr; + struct inff_if *ifp = inff_get_ifp(drvr, 0); + char caps[MAX_CAPS_BUFFER_SIZE + 1] = { }; + char *tmp; + int err; + + err = inff_fil_iovar_data_get(ifp, "cap", caps, sizeof(caps)); + if (err) { + iphy_err(drvr, "could not get firmware cap (%d)\n", err); + return err; + } + + /* Put every capability in a new line */ + for (tmp = caps; *tmp; tmp++) { + if (*tmp == ' ') + *tmp = '\n'; + } + + /* Usually there is a space at the end of capabilities string */ + seq_printf(seq, "%s", caps); + /* So make sure we don't print two line breaks */ + if (tmp > caps && *(tmp - 1) != '\n') + seq_puts(seq, "\n"); + + return 0; +} + +void inff_feat_attach(struct inff_pub *drvr) +{ + struct inff_if *ifp = inff_get_ifp(drvr, 0); + struct inff_pno_macaddr_le pfn_mac; + struct inff_gscan_config gscan_cfg; + u32 wowl_cap; + s32 err; + int i; + + inff_feat_firmware_capabilities(ifp); + memset(&gscan_cfg, 0, sizeof(gscan_cfg)); + inff_feat_iovar_int_get(ifp, INFF_FEAT_PNO, "pfn"); + if (drvr->bus_if->wowl_supported) + inff_feat_iovar_int_get(ifp, INFF_FEAT_WOWL, "wowl"); + if (inff_feat_is_enabled(ifp, INFF_FEAT_WOWL)) { + err = inff_fil_iovar_int_get(ifp, "wowl_cap", &wowl_cap); + if (!err) { + ifp->drvr->feat_flags[INFF_FEAT_WOWL_ARP_ND / 8] |= + BIT(INFF_FEAT_WOWL_ARP_ND % 8); + if (wowl_cap & INFF_WOWL_PFN_FOUND) + ifp->drvr->feat_flags[INFF_FEAT_WOWL_ND / 8] |= + BIT(INFF_FEAT_WOWL_ND % 8); + if (wowl_cap & INFF_WOWL_GTK_FAILURE) + ifp->drvr->feat_flags[INFF_FEAT_WOWL_GTK / 8] |= + BIT(INFF_FEAT_WOWL_GTK % 8); + } + } + + inff_feat_iovar_int_get(ifp, INFF_FEAT_RSDB, "rsdb_mode"); + inff_feat_iovar_int_get(ifp, INFF_FEAT_TDLS, "tdls_enable"); + inff_feat_iovar_int_get(ifp, INFF_FEAT_MFP, "mfp"); + inff_feat_iovar_int_get(ifp, INFF_FEAT_DUMP_OBSS, "dump_obss"); + inff_feat_iovar_int_get(ifp, 
INFF_FEAT_SURVEY_DUMP, "cca_survey_dump"); + + pfn_mac.version = INFF_PFN_MACADDR_CFG_VER; + err = inff_fil_iovar_data_get(ifp, "pfn_macaddr", &pfn_mac, + sizeof(pfn_mac)); + if (!err) + ifp->drvr->feat_flags[INFF_FEAT_SCAN_RANDOM_MAC / 8] |= + BIT(INFF_FEAT_SCAN_RANDOM_MAC % 8); + + inff_feat_iovar_int_get(ifp, INFF_FEAT_FWSUP, "sup_wpa"); + inff_feat_iovar_int_get(ifp, INFF_FEAT_SCAN_V2, "scan_ver"); + inff_feat_iovar_enab_get(ifp, INFF_FEAT_TWT, "twt", INFF_TWT_CMD_ENAB); + inff_feat_iovar_int_get(ifp, INFF_FEAT_WLAN_SENSE, "csi"); + + for (i = 0; i < INFF_MAX_FEATURE_BYTES; i++) { + if (drvr->settings->feature_disable[i]) { + inff_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n", + ifp->drvr->feat_flags[i], + drvr->settings->feature_disable[i]); + ifp->drvr->feat_flags[i] &= ~drvr->settings->feature_disable[i]; + } + } + + inff_feat_wlc_version_overrides(drvr); +} + +void inff_feat_debugfs_create(struct inff_pub *drvr) +{ + inff_debugfs_add_entry(drvr, "features", inff_feat_debugfs_read); + inff_debugfs_add_entry(drvr, "fwcap", inff_feat_fwcap_debugfs_read); +} + +bool inff_feat_is_enabled(struct inff_if *ifp, enum inff_feat_id id) +{ + return (ifp->drvr->feat_flags[id / 8] & BIT(id % 8)); +} + +bool inff_feat_is_quirk_enabled(struct inff_if *ifp, + enum inff_feat_quirk quirk) +{ + return (ifp->drvr->chip_quirks & BIT(quirk)); +} + +bool inff_feat_is_6ghz_enabled(struct inff_if *ifp) +{ + return (!ifp->drvr->settings->disable_6ghz); +} + +bool inff_feat_is_sdio_rxf_in_kthread(struct inff_pub *drvr) +{ + if (drvr) + return drvr->settings->sdio_rxf_in_kthread_enabled; + else + return false; +} + +bool inff_feat_is_offloads_enabled(struct inff_if *ifp) +{ + if (ifp && ifp->drvr) + return ifp->drvr->settings->offload_prof; + + return false; +} diff --git a/drivers/net/wireless/infineon/inffmac/feature.h b/drivers/net/wireless/infineon/inffmac/feature.h new file mode 100644 index 000000000000..14bbe68b9c0a --- /dev/null +++ 
b/drivers/net/wireless/infineon/inffmac/feature.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_FEATURE_H +#define INFF_FEATURE_H + +/* + * Features: + * + * MBSS: multiple BSSID support (eg. guest network in AP mode). + * MCHAN: multi-channel for concurrent P2P. + * PNO: preferred network offload. + * WOWL: Wake-On-WLAN. + * P2P: peer-to-peer + * RSDB: Real Simultaneous Dual Band + * TDLS: Tunneled Direct Link Setup + * SCAN_RANDOM_MAC: Random MAC during (net detect) scheduled scan. + * WOWL_ND: WOWL net detect (PNO) + * WOWL_GTK: WOWL GTK rekey failure detect + * WOWL_ARP_ND: ARP and Neighbor Discovery offload support during WOWL. + * MFP: 802.11w Management Frame Protection. + * GSCAN: enhanced scan offload feature. + * FWSUP: Firmware supplicant. + * MONITOR: firmware can pass monitor packets to host. + * MONITOR_FLAG: firmware flags monitor packets. + * MONITOR_FMT_RADIOTAP: firmware provides monitor packets with radiotap header + * MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header + * DOT11H: firmware supports 802.11h + * SAE: simultaneous authentication of equals + * FWAUTH: Firmware authenticator + * DUMP_OBSS: Firmware has capable to dump obss info to support ACS + * SCAN_V2: Version 2 scan params + * SURVEY_DUMP: Firmware has capable to survey dump info + * SAE_EXT: SAE be handled by userspace supplicant + * GCMP: firmware has defined GCMP or not. + * TWT: Firmware has the TWT Module Support. + * OFFLOADS: Firmware can do the packet processing work offloaded by + * Host Driver, i.e, it can process specific types of RX packets like + * ARP, ND, etc and send out a suitable response packet from within + * Firmware. + * ULP: Firmware supports Ultra Low Power mode of operation. 
+ * WLAN_SENSE: Feature that supports Collecting Channel State Information (WLAN). + * GTKO: GTK rekey offload. + * MLO: IEEE 802.11be MLO operation + */ +enum inff_feat_id { + INFF_FEAT_MBSS, + INFF_FEAT_MCHAN, + INFF_FEAT_PNO, + INFF_FEAT_WOWL, + INFF_FEAT_P2P, + INFF_FEAT_RSDB, + INFF_FEAT_TDLS, + INFF_FEAT_SCAN_RANDOM_MAC, + INFF_FEAT_WOWL_ND, + INFF_FEAT_WOWL_GTK, + INFF_FEAT_WOWL_ARP_ND, + INFF_FEAT_MFP, + INFF_FEAT_GSCAN, + INFF_FEAT_FWSUP, + INFF_FEAT_MONITOR, + INFF_FEAT_MONITOR_FLAG, + INFF_FEAT_MONITOR_FMT_RADIOTAP, + INFF_FEAT_MONITOR_FMT_HW_RX_HDR, + INFF_FEAT_DOT11H, + INFF_FEAT_SAE, + INFF_FEAT_FWAUTH, + INFF_FEAT_DUMP_OBSS, + INFF_FEAT_SCAN_V2, + INFF_FEAT_PMKID_V2, + INFF_FEAT_PMKID_V3, + INFF_FEAT_SURVEY_DUMP, + INFF_FEAT_SAE_EXT, + INFF_FEAT_FBT, + INFF_FEAT_OKC, + INFF_FEAT_GCMP, + INFF_FEAT_TWT, + INFF_FEAT_OFFLOADS, + INFF_FEAT_ULP, + INFF_FEAT_PROPTXSTATUS, + INFF_FEAT_OWE, + INFF_FEAT_WLAN_SENSE, + INFF_FEAT_FTM, + INFF_FEAT_GTKO, + INFF_FEAT_MCHAN_CONFIG, + INFF_FEAT_MLO, + INFF_FEAT_LAST +}; + +/* + * Quirks: + * + * AUTO_AUTH: workaround needed for automatic authentication type. + * NEED_MPC: driver needs to disable MPC during scanning operation. + */ +enum inff_feat_quirk { + INFF_FEAT_QUIRK_AUTO_AUTH, + INFF_FEAT_QUIRK_NEED_MPC, + INFF_FEAT_QUIRK_LAST +}; + +/** + * inff_feat_attach() - determine features and quirks. + * + * @drvr: driver instance. + */ +void inff_feat_attach(struct inff_pub *drvr); + +/** + * inff_feat_debugfs_create() - create debugfs entries. + * + * @drvr: driver instance. + */ +void inff_feat_debugfs_create(struct inff_pub *drvr); + +/** + * inff_feat_is_enabled() - query feature. + * + * @ifp: interface instance. + * @id: feature id to check. + * + * Return: true is feature is enabled; otherwise false. + */ +bool inff_feat_is_enabled(struct inff_if *ifp, enum inff_feat_id id); + +/** + * inff_feat_is_quirk_enabled() - query chip quirk. + * + * @ifp: interface instance. + * @quirk: quirk id to check. 
+ * + * Return: true is quirk is enabled; otherwise false. + */ +bool inff_feat_is_quirk_enabled(struct inff_if *ifp, + enum inff_feat_quirk quirk); + +/** + * inff_feat_is_6ghz_enabled() - Find if 6GHZ Operation is allowed + * + * @ifp: interface instance. + * + * Return: true if 6GHz operation is allowed; otherwise false. + */ +bool inff_feat_is_6ghz_enabled(struct inff_if *ifp); + +/** + * inff_feat_is_sdio_rxf_in_kthread() - handle SDIO Rx frame in kthread. + * + * @drvr: driver instance. + */ +bool inff_feat_is_sdio_rxf_in_kthread(struct inff_pub *drvr); + +/** + * inff_feat_is_offloads_enabled() - Find if offload_prof power profile + * is given by user + * + * @ifp: interface instance. + * + * Return: true if offloads_prof is set otherwise false. + */ +bool inff_feat_is_offloads_enabled(struct inff_if *ifp); + +#endif /* INFF_FEATURE_H */ -- 2.25.1 Driver implementation for doing Device Firmware Updates (DFU) on the WLAN Device. Like for example Connecivity Processor (CP) chipsets have multiple offloaded functionalities, which can be upgraded using this DFU support. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/dfu.c | 416 ++++++++++++++++++++ drivers/net/wireless/infineon/inffmac/dfu.h | 39 ++ 2 files changed, 455 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/dfu.c create mode 100644 drivers/net/wireless/infineon/inffmac/dfu.h diff --git a/drivers/net/wireless/infineon/inffmac/dfu.c b/drivers/net/wireless/infineon/inffmac/dfu.c new file mode 100644 index 000000000000..bf53bdecab9f --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/dfu.c @@ -0,0 +1,416 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +/* + * New IOT specific chips supports new FW download flow to NON-Volatile and Volatile memory. + * This is different from the regular Combo chip flow which downloads to RAM/SRAM. 
+ * The update will be done to RRAM/FLASH/PSRAM portion of this category of chips.
+ * This would be done in
+ * 1. first time update to the product
+ * 2. dynamic update only to WRLS_Core
+ * 3. complete Product gets updated
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "defs.h"
+#include "bus.h"
+#include "sdio.h"
+#include "chip.h"
+#include "core.h"
+#include "common.h"
+#include "fwil.h"
+#include "feature.h"
+#include "dfu.h"
+
+struct inff_dfu_hex_data {
+	u32 dest_addr;	/* destination addr */
+	u8 *ds;		/* allocate memory for hex file data */
+};
+
+/* Post a host-to-device handshake message via SDIO function 3. */
+static int inff_dfu_msg_writeb(struct inff_sdio_dev *sdiodev, u8 msg)
+{
+	int err = 0;
+
+	inff_sdiod_func_wb(sdiodev, sdiodev->func3, SDIOD_H2D_MSG_0, msg, &err);
+	if (err) {
+		inff_err("err %d", err);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Poll the device-to-host message register until @msg is seen, giving up
+ * after INFF_DFU_IORDY_CNT polls.
+ */
+static int inff_dfu_wait_msg(struct inff_sdio_dev *sdiodev, u32 msg)
+{
+	u32 readmsg = 0;
+	u32 readycnt = 0;
+	int err = 0;
+
+	while (readmsg != msg) {
+		usleep_range(INFF_DFU_ROM_BOOT_DELAY, INFF_DFU_ROM_BOOT_DELAY + 10);
+
+		/* Read the REG to check if desired message is ready */
+		readmsg = inff_sdiod_func_rb(sdiodev, sdiodev->func3, SDIOD_D2H_MSG_0, &err);
+		if (err) {
+			inff_err("read fail %d", err);
+			return -EFAULT;
+		}
+
+		readycnt++;
+		if (readycnt == INFF_DFU_IORDY_CNT) {
+			inff_err("msg = 0x%x expected = 0x%x", readmsg, msg);
+			return -EFAULT;
+		}
+	}
+
+	inff_dbg(SDIO, "msg = 0x%x cnt = %d", msg, readycnt);
+
+	return 0;
+}
+
+/**
+ * inff_dfu_extract_fw_hex_field() - parse one hex field out of a record line.
+ *
+ * @fw_data: start of the current record line.
+ * @sp: start position of the field within the line.
+ * @cn: number of characters in the field.
+ *
+ * Return: the field value, or 0 when the field does not parse.
+ */
+static u16 inff_dfu_extract_fw_hex_field(const char *fw_data, u16 sp, u16 cn)
+{
+	char field[8];
+	u16 v;
+	int result;
+
+	if (cn >= sizeof(field))
+		return 0;
+
+	/* copy exactly @cn characters: strscpy(..., cn) copies at most
+	 * cn - 1 and would silently truncate every field
+	 */
+	memcpy(field, fw_data + sp, cn);
+	field[cn] = '\0';
+
+	/* the fields are bare hex digits with no "0x" prefix, so base
+	 * auto-detection (base 0) would parse them as decimal: force base 16
+	 */
+	result = kstrtou16(field, 16, &v);
+	if (result)
+		return 0;	/* parse error: v is not valid here */
+
+	return v;
+}
+
+/* Return the length of the next record line (up to but excluding '\n'),
+ * or 0 when @fw_data is not positioned on a ':' record start.
+ */
+static int inff_dfu_get_next_line_len(const u8 *fw_data)
+{
+	u32 str_len = 0;
+	u8 *line_end = NULL;
+
+	line_end = strchr(fw_data, '\n');
+	if (*fw_data != ':' || !line_end)
+		return 0;
+
+	str_len = (u32)(line_end - fw_data);
+	inff_dbg(SDIO, "next line len = %d", str_len);
+
+	return str_len;
+}
+
+/* Parse the next data record from @*fw_data into @hfd and advance the
+ * cursor past it.
+ *
+ * Return: number of payload bytes extracted, 0 when no record remains.
+ */
+static u32 inff_dfu_get_hfd_from_fw_data(const u8 **fw_data,
+					 struct inff_dfu_hex_data *hfd)
+{
+	int str_len;
+	u16 num_bytes, addr, data_pos, w, i;
+	u32 nbytes = 0;
+
+	if (!hfd->ds) {
+		inff_err("String or data string NULL");
+		return nbytes;
+	}
+
+	while (nbytes == 0) {
+		str_len = inff_dfu_get_next_line_len(*fw_data);
+
+		if (str_len == 0) {
+			break;
+		} else if (str_len > 9) {
+			num_bytes = inff_dfu_extract_fw_hex_field(*fw_data, 1, 2);
+			addr = inff_dfu_extract_fw_hex_field(*fw_data, 3, 4);
+
+			/* NOTE(review): offset 7 is where an Intel HEX record
+			 * type field would sit; confirm the payload really
+			 * starts here rather than at offset 9.
+			 */
+			data_pos = 7;
+
+			for (i = 0; i < num_bytes; i++) {
+				w = inff_dfu_extract_fw_hex_field(*fw_data, data_pos, 2);
+				hfd->ds[i] = (u8)(w & 0x00FF);
+				data_pos += 2;
+			}
+
+			hfd->dest_addr = addr;
+			/* report the payload length: this was never assigned
+			 * before, so the caller's download loop never executed
+			 */
+			nbytes = num_bytes;
+		}
+
+		/* move to next line */
+		*fw_data += str_len + 1;
+	}
+
+	return nbytes;
+}
+
+/* Fetch the DFU image and stream it to the device record by record over
+ * the SDIO handshake protocol.
+ */
+static int inff_dfu_start_download(struct inff_sdio_dev *sdiodev)
+{
+	struct inff_dfu_hex_data hfd = {0, NULL};
+	const struct firmware *fw = NULL;
+	char dfu_path[INFF_FW_ALTPATH_LEN] = INFF_FW_DEFAULT_PATH;
+	const u8 *dfu_data = NULL;
+	u8 *mem_blk = NULL;
+	u8 *mem_ptr = NULL;
+	u32 rd;
+	int ret = 0;
+
+	inff_dbg(SDIO, "image path = %s", sdiodev->settings->firmware_path);
+
+	/* bounded concatenation: firmware_path comes from module settings
+	 * and a plain strcat() could overrun dfu_path
+	 */
+	strlcat(dfu_path, sdiodev->settings->firmware_path, sizeof(dfu_path));
+
+	ret = request_firmware_direct(&fw, dfu_path, sdiodev->dev);
+	if (ret) {
+		inff_err("failed to load file %s ret %d\n", dfu_path, ret);
+		goto err;
+	}
+
+	dfu_data = fw->data;
+
+	mem_blk = kmalloc(INFF_DFU_BLK_SIZE + INFF_DFU_SD_ALIGN, GFP_KERNEL);
+	if (!mem_blk) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	mem_ptr = mem_blk;
+
+	/* align the transfer buffer for the SDIO host controller */
+	if ((u32)(uintptr_t)mem_ptr % INFF_DFU_SD_ALIGN)
+		mem_ptr += (INFF_DFU_SD_ALIGN - ((u32)(uintptr_t)mem_blk % INFF_DFU_SD_ALIGN));
+
+	hfd.ds = kmalloc(INFF_DFU_MAX_STR_LEN, GFP_KERNEL);
+	if (!hfd.ds) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* Wait for ready message from Device */
+	ret = inff_dfu_wait_msg(sdiodev,
INFF_DFU_D2H_MSG_READY); + if (ret) + goto err; + + while ((rd = inff_dfu_get_hfd_from_fw_data(&dfu_data, &hfd)) > 0) { + u32 wbc; + + inff_dbg(SDIO, "\tread %d bytes at address %08x", rd, hfd.dest_addr); + wbc = 0; + + /* length */ + mem_ptr[0] = rd; + mem_ptr[1] = rd >> 8; + + wbc += 2; + + /* address */ + mem_ptr[2] = hfd.dest_addr; + mem_ptr[3] = hfd.dest_addr >> 8; + mem_ptr[4] = hfd.dest_addr >> 16; + mem_ptr[5] = hfd.dest_addr >> 24; + + wbc += 4; + + inff_dbg(SDIO, "addr :0x%x\n", hfd.dest_addr); + + memcpy(&mem_ptr[wbc], hfd.ds, rd); + wbc += rd; + + inff_dbg(SDIO, "data copied %d bytes\n", rd); + inff_dbg(SDIO, "SDIO PKT size %d bytes\n", wbc); + + ret = inff_sdiod_ramrw(sdiodev, true, INFF_DFU_MEM_OFFSET, mem_ptr, wbc); + if (ret) { + inff_err("error %d on writing %d membytes", ret, rd); + goto err; + } + + ret = inff_dfu_msg_writeb(sdiodev, INFF_DFU_H2D_MSG_DATA); + if (ret) + goto err; + + /* Wait for ready message from Device before next chunk */ + ret = inff_dfu_wait_msg(sdiodev, INFF_DFU_D2H_MSG_READY); + if (ret) + goto err; + + /* clear H2D message to prevent minidriver start to process chunk of data */ + ret = inff_dfu_msg_writeb(sdiodev, 0); + if (ret) + goto err; + } + + ret = inff_dfu_wait_msg(sdiodev, INFF_DFU_D2H_MSG_FW_VALIDAT); + if (ret) + goto err; +err: + release_firmware(fw); + kfree(mem_blk); + kfree(hfd.ds); + return ret; +} + +int inff_dfu_start(struct inff_sdio_dev *sdiodev) +{ + int ret = 0; + + ret = sdio_enable_func(sdiodev->func3); + if (ret) { + inff_err("enable func3 err %d", ret); + /* Disable F3 again */ + sdio_disable_func(sdiodev->func3); + goto fail; + } + + ret = inff_dfu_msg_writeb(sdiodev, INFF_DFU_H2D_MSG_START); + if (ret) + goto fail; + + /* download start */ + ret = inff_dfu_start_download(sdiodev); + if (ret) + goto fail; + + inff_dbg(SDIO, "Download Image Done!"); + + return 0; +fail: + inff_err("Download Image Fail! 
%d", ret); + + return ret; +} + +static enum fw_upload_err inff_dfu_cp_prepare(struct fw_upload *fw_ul, + const u8 *data, u32 size) +{ + struct inff_dfu *dfu = fw_ul->dd_handle; + struct inff_pub *drvr = dfu->drvr; + int ret = 0; + + dfu->cancel_request = false; + + ret = inff_fil_iovar_data_set(drvr->iflist[0], "dfu_pre_download", NULL, 0); + if (ret) { + inff_err("pre download failed, ret=%d\n", ret); + return FW_UPLOAD_ERR_BUSY; + } + + return FW_UPLOAD_ERR_NONE; +} + +static enum fw_upload_err inff_dfu_cp_write(struct fw_upload *fw_ul, + const u8 *data, u32 offset, u32 size, u32 *written) +{ + struct inff_dfu *dfu = fw_ul->dd_handle; + struct inff_pub *drvr = dfu->drvr; + int ret = 0; + + if (dfu->cancel_request) + return FW_UPLOAD_ERR_CANCELED; + + ret = inff_fil_iovar_data_set(drvr->iflist[0], "dfu_write_size", &size, sizeof(size)); + if (ret) { + inff_err("write size failed, ret=%d\n", ret); + return FW_UPLOAD_ERR_INVALID_SIZE; + } + + ret = inff_fil_iovar_data_set(drvr->iflist[0], "dfu_write_data", data, size); + if (ret) { + inff_err("write data failed, ret=%d\n", ret); + return FW_UPLOAD_ERR_FW_INVALID; + } + + *written = size; + + return FW_UPLOAD_ERR_NONE; +} + +static enum fw_upload_err inff_dfu_cp_poll_complete(struct fw_upload *fw_ul) +{ + struct inff_dfu *dfu = fw_ul->dd_handle; + struct inff_pub *drvr = dfu->drvr; + int ret = 0; + + if (dfu->cancel_request) + return FW_UPLOAD_ERR_CANCELED; + + ret = inff_fil_iovar_data_set(drvr->iflist[0], "dfu_verify", NULL, 0); + if (ret) { + inff_err("verify failed, ret=%d\n", ret); + return FW_UPLOAD_ERR_FW_INVALID; + } + + return FW_UPLOAD_ERR_NONE; +} + +static void inff_dfu_cp_cancel(struct fw_upload *fw_ul) +{ + struct inff_dfu *dfu = fw_ul->dd_handle; + + dfu->cancel_request = true; +} + +static const struct fw_upload_ops inffmac_dfu_cp_ops = { + .prepare = inff_dfu_cp_prepare, + .write = inff_dfu_cp_write, + .poll_complete = inff_dfu_cp_poll_complete, + .cancel = inff_dfu_cp_cancel, +}; + +int 
inff_dfu_attach(struct inff_pub *drvr) +{ + struct device *dev = drvr->bus_if->dev; + struct inff_dfu *dfu = NULL; + const struct fw_upload_ops *dfu_ops = NULL; + struct fw_upload *fw_ul = NULL; + + switch (drvr->bus_if->chip_pub->socitype) { + case SOCI_AI: + /* TODO */ + return 0; + case SOCI_CP: + dfu_ops = &inffmac_dfu_cp_ops; + break; + default: + inff_err("chip type %u is not supported\n", + drvr->bus_if->chip_pub->socitype); + return -ENODEV; + } + + /* allocate memory for DFU struct */ + dfu = kzalloc(sizeof(*dfu), GFP_KERNEL); + if (!dfu) + return -ENOMEM; + + /* register firmware upload feature */ + fw_ul = firmware_upload_register(THIS_MODULE, dev, dev_name(dev), + dfu_ops, dfu); + if (IS_ERR(fw_ul)) { + kfree(dfu); + return PTR_ERR(fw_ul); + } + + dfu->drvr = drvr; + dfu->fw_ul = fw_ul; + dfu->cancel_request = false; + + drvr->dfu = dfu; + + return 0; +} + +void inff_dfu_detach(struct inff_pub *drvr) +{ + if (!drvr->dfu) + return; + + firmware_upload_unregister(drvr->dfu->fw_ul); + kfree(drvr->dfu); + drvr->dfu = NULL; +} diff --git a/drivers/net/wireless/infineon/inffmac/dfu.h b/drivers/net/wireless/infineon/inffmac/dfu.h new file mode 100644 index 000000000000..a12c8d58ffc9 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/dfu.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_DFU_H +#define INFF_DFU_H + +#define INFF_DFU_ROM_BOOT_DELAY 100 /* in usecs */ +#define INFF_DFU_IORDY_CNT 1000 /* wait for 1000 write/read cycles till F3 is enabled */ + +/* host to device message */ +#define SDIOD_H2D_MSG_0 0x10031 +#define INFF_DFU_H2D_MSG_START BIT(0) +#define INFF_DFU_H2D_MSG_DATA BIT(1) + +/* device to Host message */ +#define SDIOD_D2H_MSG_0 0x10039 +#define INFF_DFU_D2H_MSG_READY BIT(0) +#define INFF_DFU_D2H_MSG_FW_VALIDAT BIT(1) + +#define INFF_DFU_MEM_OFFSET 0x19000000 + +#define INFF_DFU_MAX_STR_LEN 600 +#define INFF_DFU_BLK_SIZE (INFF_DFU_MAX_STR_LEN / 2 + 8) +#define INFF_DFU_SD_ALIGN 32 + +struct inff_dfu { + struct inff_pub *drvr; + struct fw_upload *fw_ul; + bool cancel_request; +}; + +int inff_dfu_start(struct inff_sdio_dev *sdiodev); +int inff_dfu_attach(struct inff_pub *drvr); +void inff_dfu_detach(struct inff_pub *drvr); + +#endif /* INFF_DFU_H */ -- 2.25.1 Provides the generic bus abstraction layer that supports PCIe and SDIO. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/bus.h | 344 ++++++++++++++++++++ 1 file changed, 344 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/bus.h diff --git a/drivers/net/wireless/infineon/inffmac/bus.h b/drivers/net/wireless/infineon/inffmac/bus.h new file mode 100644 index 000000000000..322de7de1d6c --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/bus.h @@ -0,0 +1,344 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_BUS_H +#define INFF_BUS_H + +#include +#include +#include +#include +#include "debug.h" +#include "msgbuf.h" + +/* The interval to poll console */ +#define INFF_CONSOLE 500 + +/* The maximum console interval value (5 mins) */ +#define MAX_CONSOLE_INTERVAL (5 * 60 * 1000) + +/* State of bus for communication with the dongle */ +enum inff_bus_state { + INFF_BUS_DOWN, /* Not ready for frame transfers */ + INFF_BUS_UP /* Ready for frame transfers */ +}; + +/* Protocol of bus communication with the dongle */ +enum inff_bus_protocol_type { + INFF_PROTO_BCDC, + INFF_PROTO_MSGBUF +}; + +/* Types of bus communication with the dongle */ +enum inff_bus_type { + INFF_BUSTYPE_SDIO, + INFF_BUSTYPE_USB, + INFF_BUSTYPE_PCIE +}; + +/* Firmware blobs that may be available */ +enum inff_blob_type { + INFF_BLOB_CLM, +}; + +struct inff_mp_device; + +struct inff_bus_dcmd { + char *name; + char *param; + int param_len; + struct list_head list; +}; + +/** + * struct inff_bus_ops - bus callback operations. + * + * @preinit: execute bus/device specific dongle init commands (optional). + * @init: prepare for communication with dongle. + * @stop: clear pending frames, disable data flow. + * @txdata: send a data frame to the dongle. When the data + * has been transferred, the common driver must be + * notified using inff_txcomplete(). The common + * driver calls this function with interrupts + * disabled. + * @txctl: transmit a control request message to dongle. + * @rxctl: receive a control response message from dongle. + * @gettxq: obtain a reference of bus transmit queue (optional). + * @wowl_config: specify if dongle is configured for wowl when going to suspend + * @get_ramsize: obtain size of device memory. + * @get_memdump: obtain device memory dump in provided buffer. + * @get_blob: obtain a firmware blob. + * @remove: initiate unbind of the device. + * @interrupt_enable: enable the interrupts from the device. 
+ * @interrupt_disable: disable the interrupts from the device. + * + * This structure provides an abstract interface towards the + * bus specific driver. For control messages to common driver + * will assure there is only one active transaction. Unless + * indicated otherwise these callbacks are mandatory. + */ +struct inff_bus_ops { + int (*preinit)(struct device *dev); + void (*stop)(struct device *dev); + int (*txdata)(struct device *dev, struct sk_buff *skb); + int (*txctl)(struct device *dev, unsigned char *msg, uint len); + int (*rxctl)(struct device *dev, unsigned char *msg, uint len); + struct pktq * (*gettxq)(struct device *dev); + void (*wowl_config)(struct device *dev, bool enabled); + size_t (*get_ramsize)(struct device *dev); + int (*get_memdump)(struct device *dev, void *data, size_t len); + int (*get_blob)(struct device *dev, const struct firmware **fw, + enum inff_blob_type type); + void (*debugfs_create)(struct device *dev); + int (*reset)(struct device *dev); + void (*remove)(struct device *dev); + int (*set_fcmode)(struct device *dev); + int (*napi_poll)(struct napi_struct *napi, int budget); + void (*interrupt_enable)(struct device *dev); + void (*interrupt_disable)(struct device *dev); +}; + +/** + * struct inff_bus_stats - bus statistic counters. + * + * @pktcowed: packets cowed for extra headroom/unorphan. + * @pktcow_failed: packets dropped due to failed cow-ing. + */ +struct inff_bus_stats { + atomic_t pktcowed; + atomic_t pktcow_failed; +}; + +/** + * struct inff_bt_dev - bt shared SDIO device. 
+ * + * @ bt_data: bt internal structure data + * @ bt_sdio_int_cb: bt registered interrupt callback function + * @ bt_use_count: Counter that tracks whether BT is using the bus + */ +struct inff_bt_dev { + void *bt_data; + void (*bt_sdio_int_cb)(void *data); + u32 use_count; /* Counter for tracking if BT is using the bus */ +}; + +/** + * struct inff_bus - interface structure between common and bus layer + * + * @bus_priv: pointer to private bus device. + * @proto_type: protocol type, bcdc or msgbuf + * @dev: device pointer of bus device. + * @drvr: public driver information. + * @state: operational state of the bus interface. + * @stats: statistics shared between common and bus layer. + * @maxctl: maximum size for rxctl request message. + * @chip: device identifier of the dongle chip. + * @always_use_fws_queue: bus wants use queue also when fwsignal is inactive. + * @wowl_supported: is wowl supported by bus driver. + * @chiprev: revision of the dongle chip. + * @msgbuf: msgbuf protocol parameters provided by bus layer. 
+ * @bt_dev: bt shared SDIO device + */ +struct inff_bus { + union { + struct inff_sdio_dev *sdio; + struct inff_pciedev *pcie; + } bus_priv; + enum inff_bus_protocol_type proto_type; + struct device *dev; + struct inff_pub *drvr; + enum inff_bus_state state; + struct inff_bus_stats stats; + uint maxctl; + u32 chip; + u32 chiprev; + bool always_use_fws_queue; + bool wowl_supported; + + const struct inff_bus_ops *ops; + struct inff_bus_msgbuf *msgbuf; +#ifdef CONFIG_INFFMAC_BT_SHARED_SDIO + struct inff_bt_if *bt_if; +#endif /* CONFIG_INFFMAC_BT_SHARED_SDIO */ + struct inff_chip *chip_pub; +}; + +/* + * callback wrappers + */ +static inline int inff_bus_preinit(struct inff_bus *bus) +{ + if (!bus->ops->preinit) + return 0; + return bus->ops->preinit(bus->dev); +} + +static inline void inff_bus_stop(struct inff_bus *bus) +{ + bus->ops->stop(bus->dev); +} + +static inline int inff_bus_txdata(struct inff_bus *bus, struct sk_buff *skb) +{ + return bus->ops->txdata(bus->dev, skb); +} + +static inline +int inff_bus_txctl(struct inff_bus *bus, unsigned char *msg, uint len) +{ + return bus->ops->txctl(bus->dev, msg, len); +} + +static inline +int inff_bus_rxctl(struct inff_bus *bus, unsigned char *msg, uint len) +{ + return bus->ops->rxctl(bus->dev, msg, len); +} + +static inline +struct pktq *inff_bus_gettxq(struct inff_bus *bus) +{ + if (!bus->ops->gettxq) + return ERR_PTR(-ENOENT); + + return bus->ops->gettxq(bus->dev); +} + +static inline +void inff_bus_wowl_config(struct inff_bus *bus, bool enabled) +{ + if (bus->ops->wowl_config) + bus->ops->wowl_config(bus->dev, enabled); +} + +static inline size_t inff_bus_get_ramsize(struct inff_bus *bus) +{ + if (!bus->ops->get_ramsize) + return 0; + + return bus->ops->get_ramsize(bus->dev); +} + +static inline +int inff_bus_get_memdump(struct inff_bus *bus, void *data, size_t len) +{ + if (!bus->ops->get_memdump) + return -EOPNOTSUPP; + + return bus->ops->get_memdump(bus->dev, data, len); +} + +static inline +int 
inff_bus_get_blob(struct inff_bus *bus, const struct firmware **fw, + enum inff_blob_type type) +{ + return bus->ops->get_blob(bus->dev, fw, type); +} + +static inline +void inff_bus_debugfs_create(struct inff_bus *bus) +{ + if (!bus->ops->debugfs_create) + return; + + return bus->ops->debugfs_create(bus->dev); +} + +static inline +int inff_bus_reset(struct inff_bus *bus) +{ + if (!bus->ops->reset) + return -EOPNOTSUPP; + + return bus->ops->reset(bus->dev); +} + +static inline void inff_bus_remove(struct inff_bus *bus) +{ + if (!bus->ops->remove) { + device_release_driver(bus->dev); + return; + } + + bus->ops->remove(bus->dev); +} + +static inline +int inff_bus_set_fcmode(struct inff_bus *bus) +{ + if (!bus->ops->set_fcmode) + return -EOPNOTSUPP; + + return bus->ops->set_fcmode(bus->dev); +} + +static inline +void inff_bus_interrupt_enable(struct inff_bus *bus) +{ + if (!bus->ops->interrupt_enable) + return; + + bus->ops->interrupt_enable(bus->dev); +} + +static inline +void inff_bus_interrupt_disable(struct inff_bus *bus) +{ + if (!bus->ops->interrupt_disable) + return; + + bus->ops->interrupt_disable(bus->dev); +} + +/* + * interface functions from common layer + */ + +/* Receive frame for delivery to OS. Callee disposes of rxp. */ +struct sk_buff *inff_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event, + bool inirq); +/* Receive async event packet from firmware. Callee disposes of rxp. */ +void inff_rx_event(struct device *dev, struct sk_buff *rxp); + +int inff_alloc(struct device *dev, struct inff_mp_device *settings); +/* Indication from bus module regarding presence/insertion of dongle. 
*/ +int inff_attach(struct device *dev, bool start_bus); +/* Indication from bus module regarding removal/absence of dongle */ +void inff_detach(struct device *dev); +void inff_free(struct device *dev); +/* Indication from bus module that dongle should be reset */ +void inff_dev_reset(struct device *dev); +/* Request from bus module to initiate a coredump */ +void inff_dev_coredump(struct device *dev); +/* Indication that firmware has halted or crashed */ +void inff_fw_crashed(struct device *dev); + +/* Configure the "global" bus state used by upper layers */ +void inff_bus_change_state(struct inff_bus *bus, enum inff_bus_state state); + +s32 inff_iovar_data_set(struct device *dev, char *name, void *data, u32 len); +void inff_bus_add_txhdrlen(struct device *dev, uint len); +int inff_fwlog_attach(struct device *dev); + +#ifdef CONFIG_INFFMAC_SDIO +void inff_sdio_exit(void); +int inff_sdio_register(void); +#else +static inline void inff_sdio_exit(void) { } +static inline int inff_sdio_register(void) { return 0; } +#endif + +#ifdef CONFIG_INFFMAC_PCIE +void inff_pcie_exit(void); +int inff_pcie_register(void); +#else +static inline void inff_pcie_exit(void) { } +static inline int inff_pcie_register(void) { return 0; } +#endif + +#endif /* INFF_BUS_H */ -- 2.25.1 Driver implementation of the BUS protocol abstraction, which is used for the Control and Data communication with the Infineon's WLAN Device. 
Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/proto.c | 71 +++++++ drivers/net/wireless/infineon/inffmac/proto.h | 192 ++++++++++++++++++ 2 files changed, 263 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/proto.c create mode 100644 drivers/net/wireless/infineon/inffmac/proto.h diff --git a/drivers/net/wireless/infineon/inffmac/proto.c b/drivers/net/wireless/infineon/inffmac/proto.c new file mode 100644 index 000000000000..63f07407dc47 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/proto.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2013 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include +#include +#include + +#include "core.h" +#include "bus.h" +#include "debug.h" +#include "proto.h" +#include "bcdc.h" +#include "msgbuf.h" + +int inff_proto_attach(struct inff_pub *drvr) +{ + struct inff_proto *proto; + + inff_dbg(TRACE, "Enter\n"); + + proto = kzalloc(sizeof(*proto), GFP_ATOMIC); + if (!proto) + goto fail; + + drvr->proto = proto; + + if (drvr->bus_if->proto_type == INFF_PROTO_BCDC) { + if (inff_proto_bcdc_attach(drvr)) + goto fail; + } else if (drvr->bus_if->proto_type == INFF_PROTO_MSGBUF) { + if (inff_proto_msgbuf_attach(drvr)) + goto fail; + } else { + iphy_err(drvr, "Unsupported proto type %d\n", + drvr->bus_if->proto_type); + goto fail; + } + if (!proto->tx_queue_data || !proto->hdrpull || + !proto->query_dcmd || !proto->set_dcmd || + !proto->configure_addr_mode || + !proto->delete_peer || !proto->add_tdls_peer || + !proto->debugfs_create) { + iphy_err(drvr, "Not all proto handlers have been installed\n"); + goto fail; + } + return 0; + +fail: + kfree(proto); + drvr->proto = NULL; + return -ENOMEM; +} + +void inff_proto_detach(struct inff_pub *drvr) +{ + inff_dbg(TRACE, "Enter\n"); + + if (drvr->proto) { + if (drvr->bus_if->proto_type == INFF_PROTO_BCDC) + 
inff_proto_bcdc_detach(drvr); + else if (drvr->bus_if->proto_type == INFF_PROTO_MSGBUF) + inff_proto_msgbuf_detach(drvr); + kfree(drvr->proto); + drvr->proto = NULL; + } +} diff --git a/drivers/net/wireless/infineon/inffmac/proto.h b/drivers/net/wireless/infineon/inffmac/proto.h new file mode 100644 index 000000000000..e072c973ec79 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/proto.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2013 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_PROTO_H +#define INFF_PROTO_H + +enum proto_addr_mode { + ADDR_INDIRECT = 0, + ADDR_DIRECT +}; + +struct inff_skb_reorder_data { + u8 *reorder; +}; + +struct inff_proto { + int (*hdrpull)(struct inff_pub *drvr, bool do_fws, + struct sk_buff *skb, struct inff_if **ifp); + int (*query_dcmd)(struct inff_pub *drvr, int ifidx, uint cmd, + void *buf, uint len, int *fwerr); + int (*set_dcmd)(struct inff_pub *drvr, int ifidx, uint cmd, void *buf, + uint len, int *fwerr); + int (*tx_queue_data)(struct inff_pub *drvr, int ifidx, + struct sk_buff *skb); + int (*txdata)(struct inff_pub *drvr, int ifidx, u8 offset, + struct sk_buff *skb); + void (*configure_addr_mode)(struct inff_pub *drvr, int ifidx, + enum proto_addr_mode addr_mode); + void (*delete_peer)(struct inff_pub *drvr, int ifidx, + u8 peer[ETH_ALEN]); + void (*add_tdls_peer)(struct inff_pub *drvr, int ifidx, + u8 peer[ETH_ALEN]); + void (*rxreorder)(struct inff_if *ifp, struct sk_buff *skb, bool inirq); + void (*add_if)(struct inff_if *ifp); + void (*del_if)(struct inff_if *ifp); + void (*reset_if)(struct inff_if *ifp); + void (*cleanup_if)(struct inff_if *ifp); + int (*init_done)(struct inff_pub *drvr); + void (*debugfs_create)(struct inff_pub *drvr); + int (*xdp_init)(struct inff_pub *drvr, struct inff_if *ifp); + void (*xdp_deinit)(struct inff_pub *drvr); + void *pd; +}; + +int 
inff_proto_attach(struct inff_pub *drvr); +void inff_proto_detach(struct inff_pub *drvr); + +static inline int inff_proto_hdrpull(struct inff_pub *drvr, bool do_fws, + struct sk_buff *skb, + struct inff_if **ifp) +{ + struct inff_if *tmp = NULL; + + /* assure protocol is always called with + * non-null initialized pointer. + */ + if (ifp) + *ifp = NULL; + else + ifp = &tmp; + return drvr->proto->hdrpull(drvr, do_fws, skb, ifp); +} + +static inline int inff_proto_query_dcmd(struct inff_pub *drvr, int ifidx, + uint cmd, void *buf, uint len, + int *fwerr) +{ + return drvr->proto->query_dcmd(drvr, ifidx, cmd, buf, len, fwerr); +} + +static inline int inff_proto_set_dcmd(struct inff_pub *drvr, int ifidx, + uint cmd, void *buf, uint len, + int *fwerr) +{ + return drvr->proto->set_dcmd(drvr, ifidx, cmd, buf, len, fwerr); +} + +static inline int inff_proto_tx_queue_data(struct inff_pub *drvr, int ifidx, + struct sk_buff *skb) +{ + return drvr->proto->tx_queue_data(drvr, ifidx, skb); +} + +static inline int inff_proto_txdata(struct inff_pub *drvr, int ifidx, + u8 offset, struct sk_buff *skb) +{ + return drvr->proto->txdata(drvr, ifidx, offset, skb); +} + +static inline void +inff_proto_configure_addr_mode(struct inff_pub *drvr, int ifidx, + enum proto_addr_mode addr_mode) +{ + drvr->proto->configure_addr_mode(drvr, ifidx, addr_mode); +} + +static inline void +inff_proto_delete_peer(struct inff_pub *drvr, int ifidx, u8 peer[ETH_ALEN]) +{ + drvr->proto->delete_peer(drvr, ifidx, peer); +} + +static inline void +inff_proto_add_tdls_peer(struct inff_pub *drvr, int ifidx, u8 peer[ETH_ALEN]) +{ + drvr->proto->add_tdls_peer(drvr, ifidx, peer); +} + +static inline bool inff_proto_is_reorder_skb(struct sk_buff *skb) +{ + struct inff_skb_reorder_data *rd; + + rd = (struct inff_skb_reorder_data *)skb->cb; + return !!rd->reorder; +} + +static inline void +inff_proto_rxreorder(struct inff_if *ifp, struct sk_buff *skb, bool inirq) +{ + ifp->drvr->proto->rxreorder(ifp, skb, inirq); +} + 
+static inline void +inff_proto_add_if(struct inff_pub *drvr, struct inff_if *ifp) +{ + if (!drvr->proto->add_if) + return; + drvr->proto->add_if(ifp); +} + +static inline void +inff_proto_del_if(struct inff_pub *drvr, struct inff_if *ifp) +{ + if (!drvr->proto->del_if) + return; + drvr->proto->del_if(ifp); +} + +static inline void +inff_proto_reset_if(struct inff_pub *drvr, struct inff_if *ifp) +{ + if (!drvr->proto->reset_if) + return; + drvr->proto->reset_if(ifp); +} + +static inline void +inff_proto_cleanup_if(struct inff_pub *drvr, struct inff_if *ifp) +{ + if (!drvr->proto->cleanup_if) + return; + drvr->proto->cleanup_if(ifp); +} + +static inline int +inff_proto_init_done(struct inff_pub *drvr) +{ + if (!drvr->proto->init_done) + return 0; + return drvr->proto->init_done(drvr); +} + +static inline void +inff_proto_debugfs_create(struct inff_pub *drvr) +{ + drvr->proto->debugfs_create(drvr); +} + +static inline int +inff_proto_xdp_init(struct inff_pub *drvr, struct inff_if *ifp) +{ + if (!drvr->proto || + !drvr->proto->xdp_init) + return 0; + return drvr->proto->xdp_init(drvr, ifp); +} + +static inline void +inff_proto_xdp_deinit(struct inff_pub *drvr) +{ + if (!drvr->proto || + !drvr->proto->xdp_deinit) + return; + drvr->proto->xdp_deinit(drvr); +} + +#endif /* INFF_PROTO_H */ -- 2.25.1 Driver implementation of the Ring buffers used for Control and Data path communication with the Infineon WLAN Device via the PCIe bus using shared memory. 
Signed-off-by: Gokul Sivakumar --- .../wireless/infineon/inffmac/commonring.c | 237 ++++++++++++++++++ .../wireless/infineon/inffmac/commonring.h | 63 +++++ 2 files changed, 300 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/commonring.c create mode 100644 drivers/net/wireless/infineon/inffmac/commonring.h diff --git a/drivers/net/wireless/infineon/inffmac/commonring.c b/drivers/net/wireless/infineon/inffmac/commonring.c new file mode 100644 index 000000000000..16cccdba186e --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/commonring.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include +#include + +#include "utils.h" + +#include "core.h" +#include "commonring.h" +#include "debug.h" + +void inff_commonring_register_cb(struct inff_commonring *commonring, + int (*cr_ring_bell)(void *ctx), + int (*cr_update_rptr)(void *ctx), + int (*cr_update_wptr)(void *ctx), + int (*cr_write_rptr)(void *ctx), + int (*cr_write_wptr)(void *ctx), void *ctx) +{ + commonring->cr_ring_bell = cr_ring_bell; + commonring->cr_update_rptr = cr_update_rptr; + commonring->cr_update_wptr = cr_update_wptr; + commonring->cr_write_rptr = cr_write_rptr; + commonring->cr_write_wptr = cr_write_wptr; + commonring->cr_ctx = ctx; +} + +void inff_commonring_config(struct inff_commonring *commonring, u16 depth, + u16 item_len, void *buf_addr) +{ + commonring->depth = depth; + commonring->item_len = item_len; + commonring->buf_addr = buf_addr; + if (!commonring->inited) { + spin_lock_init(&commonring->lock); + commonring->inited = true; + } + commonring->r_ptr = 0; + if (commonring->cr_write_rptr) + commonring->cr_write_rptr(commonring->cr_ctx); + commonring->w_ptr = 0; + if (commonring->cr_write_wptr) + commonring->cr_write_wptr(commonring->cr_ctx); + commonring->f_ptr = 0; +} + +void 
inff_commonring_lock(struct inff_commonring *commonring) + __acquires(&commonring->lock) +{ + unsigned long flags; + + spin_lock_irqsave(&commonring->lock, flags); + commonring->flags = flags; +} + +void inff_commonring_unlock(struct inff_commonring *commonring) + __releases(&commonring->lock) +{ + spin_unlock_irqrestore(&commonring->lock, commonring->flags); +} + +bool inff_commonring_write_available(struct inff_commonring *commonring) +{ + u16 available; + bool retry = true; + +again: + if (commonring->r_ptr <= commonring->w_ptr) + available = commonring->depth - commonring->w_ptr + + commonring->r_ptr; + else + available = commonring->r_ptr - commonring->w_ptr; + + if (available > 1) { + if (!commonring->was_full) + return true; + if (available > commonring->depth / 8) { + commonring->was_full = false; + return true; + } + if (retry) { + if (commonring->cr_update_rptr) + commonring->cr_update_rptr(commonring->cr_ctx); + retry = false; + goto again; + } + return false; + } + + if (retry) { + if (commonring->cr_update_rptr) + commonring->cr_update_rptr(commonring->cr_ctx); + retry = false; + goto again; + } + + commonring->was_full = true; + return false; +} + +void *inff_commonring_reserve_for_write(struct inff_commonring *commonring) +{ + void *ret_ptr; + u16 available; + bool retry = true; + +again: + if (commonring->cr_update_rptr) + commonring->cr_update_rptr(commonring->cr_ctx); + + if (commonring->r_ptr <= commonring->w_ptr) { + if (commonring->r_ptr == commonring->w_ptr) + inff_dbg(MSGBUF, "r_ptr == w_ptr"); + available = commonring->depth - commonring->w_ptr + + commonring->r_ptr; + } else { + available = commonring->r_ptr - commonring->w_ptr; + } + + if (available > 1) { + ret_ptr = commonring->buf_addr + + (commonring->w_ptr * commonring->item_len); + commonring->w_ptr++; + if (commonring->w_ptr == commonring->depth) + commonring->w_ptr = 0; + return ret_ptr; + } + + if (retry) { + retry = false; + goto again; + } + + commonring->was_full = true; + 
return NULL; +} + +void * +inff_commonring_reserve_for_write_multiple(struct inff_commonring *commonring, + u16 n_items, u16 *alloced) +{ + void *ret_ptr; + u16 available; + bool retry = true; + +again: + if (commonring->cr_update_rptr) + commonring->cr_update_rptr(commonring->cr_ctx); + + if (commonring->r_ptr <= commonring->w_ptr) { + if (commonring->r_ptr == commonring->w_ptr) + inff_dbg(MSGBUF, "r_ptr == w_ptr"); + available = commonring->depth - commonring->w_ptr + + commonring->r_ptr; + } else { + available = commonring->r_ptr - commonring->w_ptr; + } + + if (available > 1) { + ret_ptr = commonring->buf_addr + + (commonring->w_ptr * commonring->item_len); + *alloced = min_t(u16, n_items, available - 1); + if (*alloced + commonring->w_ptr > commonring->depth) + *alloced = commonring->depth - commonring->w_ptr; + commonring->w_ptr += *alloced; + if (commonring->w_ptr == commonring->depth) + commonring->w_ptr = 0; + return ret_ptr; + } + + if (retry) { + retry = false; + goto again; + } + + commonring->was_full = true; + return NULL; +} + +int inff_commonring_write_complete(struct inff_commonring *commonring) +{ + if (commonring->f_ptr > commonring->w_ptr) + commonring->f_ptr = 0; + + commonring->f_ptr = commonring->w_ptr; + + if (commonring->cr_write_wptr) + commonring->cr_write_wptr(commonring->cr_ctx); + if (commonring->cr_ring_bell) + return commonring->cr_ring_bell(commonring->cr_ctx); + + return -EIO; +} + +void inff_commonring_write_cancel(struct inff_commonring *commonring, + u16 n_items) +{ + if (commonring->w_ptr == 0) + commonring->w_ptr = commonring->depth - n_items; + else + commonring->w_ptr -= n_items; +} + +void *inff_commonring_get_read_ptr(struct inff_commonring *commonring, + u16 *n_items) +{ + if (commonring->cr_update_wptr) + commonring->cr_update_wptr(commonring->cr_ctx); + + *n_items = (commonring->w_ptr >= commonring->r_ptr) ? 
+ (commonring->w_ptr - commonring->r_ptr) : + (commonring->depth - commonring->r_ptr); + + if (*n_items == 0) + return NULL; + + return commonring->buf_addr + + (commonring->r_ptr * commonring->item_len); +} + +int inff_commonring_read_complete(struct inff_commonring *commonring, + u16 n_items) +{ + commonring->r_ptr += n_items; + if (commonring->r_ptr == commonring->depth) + commonring->r_ptr = 0; + + if (commonring->cr_write_rptr) + return commonring->cr_write_rptr(commonring->cr_ctx); + + return -EIO; +} diff --git a/drivers/net/wireless/infineon/inffmac/commonring.h b/drivers/net/wireless/infineon/inffmac/commonring.h new file mode 100644 index 000000000000..6e40b2b07486 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/commonring.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_COMMONRING_H +#define INFF_COMMONRING_H + +struct inff_commonring { + u16 r_ptr; + u16 w_ptr; + u16 f_ptr; + u16 depth; + u16 item_len; + + void *buf_addr; + + int (*cr_ring_bell)(void *ctx); + int (*cr_update_rptr)(void *ctx); + int (*cr_update_wptr)(void *ctx); + int (*cr_write_rptr)(void *ctx); + int (*cr_write_wptr)(void *ctx); + + void *cr_ctx; + + spinlock_t lock; /* used to protect common ring */ + unsigned long flags; + bool inited; + bool was_full; + + atomic_t outstanding_tx; +}; + +void inff_commonring_register_cb(struct inff_commonring *commonring, + int (*cr_ring_bell)(void *ctx), + int (*cr_update_rptr)(void *ctx), + int (*cr_update_wptr)(void *ctx), + int (*cr_write_rptr)(void *ctx), + int (*cr_write_wptr)(void *ctx), void *ctx); +void inff_commonring_config(struct inff_commonring *commonring, u16 depth, + u16 item_len, void *buf_addr); +void inff_commonring_lock(struct inff_commonring *commonring); +void inff_commonring_unlock(struct inff_commonring *commonring); 
+bool inff_commonring_write_available(struct inff_commonring *commonring); +void *inff_commonring_reserve_for_write(struct inff_commonring *commonring); +void * +inff_commonring_reserve_for_write_multiple(struct inff_commonring *commonring, + u16 n_items, u16 *alloced); +int inff_commonring_write_complete(struct inff_commonring *commonring); +void inff_commonring_write_cancel(struct inff_commonring *commonring, + u16 n_items); +void *inff_commonring_get_read_ptr(struct inff_commonring *commonring, + u16 *n_items); +int inff_commonring_read_complete(struct inff_commonring *commonring, + u16 n_items); + +#define inff_commonring_n_items(commonring) ((commonring)->depth) +#define inff_commonring_len_item(commonring) ((commonring)->item_len) + +#endif /* INFF_COMMONRING_H */ -- 2.25.1 Driver implementation of the Ring buffers used for TX Data path communication with the Infineon WLAN Device via the PCIe bus using shared memory. Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/flowring.c | 492 ++++++++++++++++++ .../net/wireless/infineon/inffmac/flowring.h | 74 +++ 2 files changed, 566 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/flowring.c create mode 100644 drivers/net/wireless/infineon/inffmac/flowring.h diff --git a/drivers/net/wireless/infineon/inffmac/flowring.c b/drivers/net/wireless/infineon/inffmac/flowring.c new file mode 100644 index 000000000000..5724904e995c --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/flowring.c @@ -0,0 +1,492 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include +#include +#include +#include "utils.h" + +#include "core.h" +#include "debug.h" +#include "bus.h" +#include "proto.h" +#include "flowring.h" +#include "msgbuf.h" +#include "common.h" + +#define INFF_FLOWRING_HIGH 1024 +#define INFF_FLOWRING_LOW (INFF_FLOWRING_HIGH - 256) +#define INFF_FLOWRING_INVALID_IFIDX 0xff + +#define INFF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + (fifo) + (ifidx) * 16) +#define INFF_FLOWRING_HASH_STA(fifo, ifidx) ((fifo) + (ifidx) * 16) + +static const u8 inff_flowring_prio2fifo[] = { + 0, + 1, + 1, + 0, + 2, + 2, + 3, + 3 +}; + +static const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + +static bool +inff_flowring_is_tdls_mac(struct inff_flowring *flow, u8 mac[ETH_ALEN]) +{ + struct inff_flowring_tdls_entry *search; + + search = flow->tdls_entry; + + while (search) { + if (memcmp(search->mac, mac, ETH_ALEN) == 0) + return true; + search = search->next; + } + + return false; +} + +u32 inff_flowring_lookup(struct inff_flowring *flow, u8 da[ETH_ALEN], + u8 prio, u8 ifidx) +{ + struct inff_flowring_hash *hash; + u16 hash_idx; + u32 i; + bool found; + bool sta; + u8 fifo; + u8 *mac; + + fifo = inff_flowring_prio2fifo[prio]; + sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); + mac = da; + if (!sta && (is_multicast_ether_addr(da))) { + mac = (u8 *)ALLFFMAC; + fifo = 0; + } + if ((sta) && flow->tdls_active && + (inff_flowring_is_tdls_mac(flow, da))) { + sta = false; + } + hash_idx = sta ? 
INFF_FLOWRING_HASH_STA(fifo, ifidx) : + INFF_FLOWRING_HASH_AP(mac, fifo, ifidx); + hash_idx &= (INFF_FLOWRING_HASHSIZE - 1); + found = false; + hash = flow->hash; + for (i = 0; i < INFF_FLOWRING_HASHSIZE; i++) { + if ((sta || (memcmp(hash[hash_idx].mac, mac, ETH_ALEN) == 0)) && + hash[hash_idx].fifo == fifo && + hash[hash_idx].ifidx == ifidx) { + found = true; + break; + } + hash_idx++; + hash_idx &= (INFF_FLOWRING_HASHSIZE - 1); + } + if (found) + return hash[hash_idx].flowid; + + return INFF_FLOWRING_INVALID_ID; +} + +u32 inff_flowring_create(struct inff_flowring *flow, u8 da[ETH_ALEN], + u8 prio, u8 ifidx) +{ + struct inff_flowring_ring *ring; + struct inff_flowring_hash *hash; + u16 hash_idx; + u32 i; + bool found; + u8 fifo; + bool sta; + u8 *mac; + + fifo = inff_flowring_prio2fifo[prio]; + sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); + mac = da; + if (!sta && (is_multicast_ether_addr(da))) { + mac = (u8 *)ALLFFMAC; + fifo = 0; + } + if ((sta) && flow->tdls_active && + (inff_flowring_is_tdls_mac(flow, da))) { + sta = false; + } + hash_idx = sta ? 
INFF_FLOWRING_HASH_STA(fifo, ifidx) : + INFF_FLOWRING_HASH_AP(mac, fifo, ifidx); + hash_idx &= (INFF_FLOWRING_HASHSIZE - 1); + found = false; + hash = flow->hash; + for (i = 0; i < INFF_FLOWRING_HASHSIZE; i++) { + if (hash[hash_idx].ifidx == INFF_FLOWRING_INVALID_IFIDX && + (is_zero_ether_addr(hash[hash_idx].mac))) { + found = true; + break; + } + hash_idx++; + hash_idx &= (INFF_FLOWRING_HASHSIZE - 1); + } + if (found) { + for (i = 0; i < flow->nrofrings; i++) { + if (!flow->rings[i]) + break; + } + if (i == flow->nrofrings) + return -ENOMEM; + + ring = kzalloc(sizeof(*ring), GFP_ATOMIC); + if (!ring) + return -ENOMEM; + + memcpy(hash[hash_idx].mac, mac, ETH_ALEN); + hash[hash_idx].fifo = fifo; + hash[hash_idx].ifidx = ifidx; + hash[hash_idx].flowid = i; + + ring->hash_id = hash_idx; + ring->status = RING_CLOSED; + skb_queue_head_init(&ring->skblist); + flow->rings[i] = ring; + + return i; + } + return INFF_FLOWRING_INVALID_ID; +} + +u8 inff_flowring_tid(struct inff_flowring *flow, u16 flowid) +{ + struct inff_flowring_ring *ring; + + ring = flow->rings[flowid]; + + return flow->hash[ring->hash_id].fifo; +} + +static void inff_flowring_block(struct inff_flowring *flow, u16 flowid, + bool blocked) +{ + struct inff_flowring_ring *ring; + struct inff_bus *bus_if; + struct inff_pub *drvr; + struct inff_if *ifp; + bool currently_blocked; + int i; + u8 ifidx; + unsigned long flags; + + spin_lock_irqsave(&flow->block_lock, flags); + + ring = flow->rings[flowid]; + if (ring->blocked == blocked) { + spin_unlock_irqrestore(&flow->block_lock, flags); + return; + } + ifidx = inff_flowring_ifidx_get(flow, flowid); + + currently_blocked = false; + for (i = 0; i < flow->nrofrings; i++) { + if (flow->rings[i] && i != flowid) { + ring = flow->rings[i]; + if (ring->status == RING_OPEN && + (inff_flowring_ifidx_get(flow, i) == ifidx)) { + if (ring->blocked) { + currently_blocked = true; + break; + } + } + } + } + flow->rings[flowid]->blocked = blocked; + if (currently_blocked) { + 
spin_unlock_irqrestore(&flow->block_lock, flags); + return; + } + + bus_if = dev_get_drvdata(flow->dev); + drvr = bus_if->drvr; + ifp = inff_get_ifp(drvr, ifidx); + inff_txflowblock_if(ifp, INFF_NETIF_STOP_REASON_FLOW, blocked); + + spin_unlock_irqrestore(&flow->block_lock, flags); +} + +void inff_flowring_delete(struct inff_flowring *flow, u16 flowid) +{ + struct inff_bus *bus_if = dev_get_drvdata(flow->dev); + struct inff_flowring_ring *ring; + struct inff_if *ifp; + u16 hash_idx; + u8 ifidx; + struct sk_buff *skb; + + ring = flow->rings[flowid]; + if (!ring) + return; + + ifidx = inff_flowring_ifidx_get(flow, flowid); + ifp = inff_get_ifp(bus_if->drvr, ifidx); + + inff_flowring_block(flow, flowid, false); + hash_idx = ring->hash_id; + flow->hash[hash_idx].ifidx = INFF_FLOWRING_INVALID_IFIDX; + eth_zero_addr(flow->hash[hash_idx].mac); + flow->rings[flowid] = NULL; + + skb = skb_dequeue(&ring->skblist); + while (skb) { + inff_txfinalize(ifp, skb, false); + skb = skb_dequeue(&ring->skblist); + } + + kfree(ring); +} + +u32 inff_flowring_enqueue(struct inff_flowring *flow, u16 flowid, + struct sk_buff *skb) +{ + struct inff_flowring_ring *ring; + + ring = flow->rings[flowid]; + + skb_queue_tail(&ring->skblist, skb); + + if (!ring->blocked && + (skb_queue_len(&ring->skblist) > INFF_FLOWRING_HIGH)) { + inff_flowring_block(flow, flowid, true); + inff_dbg(MSGBUF, "Flowcontrol: BLOCK for ring %d\n", flowid); + /* To prevent (work around) possible race condition, check + * queue len again. It is also possible to use locking to + * protect, but that is undesirable for every enqueue and + * dequeue. This simple check will solve a possible race + * condition if it occurs. 
+ */ + if (skb_queue_len(&ring->skblist) < INFF_FLOWRING_LOW) + inff_flowring_block(flow, flowid, false); + } + return skb_queue_len(&ring->skblist); +} + +struct sk_buff *inff_flowring_dequeue(struct inff_flowring *flow, u16 flowid) +{ + struct inff_flowring_ring *ring; + struct sk_buff *skb; + + ring = flow->rings[flowid]; + if (ring->status != RING_OPEN) + return NULL; + + skb = skb_dequeue(&ring->skblist); + + if (ring->blocked && + (skb_queue_len(&ring->skblist) < INFF_FLOWRING_LOW)) { + inff_flowring_block(flow, flowid, false); + inff_dbg(MSGBUF, "Flowcontrol: OPEN for ring %d\n", flowid); + } + + return skb; +} + +void inff_flowring_reinsert(struct inff_flowring *flow, u16 flowid, + struct sk_buff *skb) +{ + struct inff_flowring_ring *ring; + + ring = flow->rings[flowid]; + + skb_queue_head(&ring->skblist, skb); +} + +u32 inff_flowring_qlen(struct inff_flowring *flow, u16 flowid) +{ + struct inff_flowring_ring *ring; + + ring = flow->rings[flowid]; + if (!ring) + return 0; + + if (ring->status != RING_OPEN) + return 0; + + return skb_queue_len(&ring->skblist); +} + +void inff_flowring_open(struct inff_flowring *flow, u16 flowid) +{ + struct inff_flowring_ring *ring; + + ring = flow->rings[flowid]; + if (!ring) { + inff_err("Ring NULL, for flowid %d\n", flowid); + return; + } + + ring->status = RING_OPEN; +} + +u8 inff_flowring_ifidx_get(struct inff_flowring *flow, u16 flowid) +{ + struct inff_flowring_ring *ring; + u16 hash_idx; + + ring = flow->rings[flowid]; + hash_idx = ring->hash_id; + + return flow->hash[hash_idx].ifidx; +} + +struct inff_flowring *inff_flowring_attach(struct device *dev, u16 nrofrings) +{ + struct inff_flowring *flow; + u32 i; + + flow = kzalloc(sizeof(*flow), GFP_KERNEL); + if (flow) { + flow->dev = dev; + flow->nrofrings = nrofrings; + spin_lock_init(&flow->block_lock); + for (i = 0; i < ARRAY_SIZE(flow->addr_mode); i++) + flow->addr_mode[i] = ADDR_INDIRECT; + for (i = 0; i < ARRAY_SIZE(flow->hash); i++) + flow->hash[i].ifidx = 
INFF_FLOWRING_INVALID_IFIDX; + flow->rings = kcalloc(nrofrings, sizeof(*flow->rings), + GFP_KERNEL); + if (!flow->rings) { + kfree(flow); + flow = NULL; + } + } + + return flow; +} + +void inff_flowring_detach(struct inff_flowring *flow) +{ + struct inff_bus *bus_if = dev_get_drvdata(flow->dev); + struct inff_pub *drvr = bus_if->drvr; + struct inff_flowring_tdls_entry *search; + struct inff_flowring_tdls_entry *remove; + u16 flowid; + + for (flowid = 0; flowid < flow->nrofrings; flowid++) { + if (flow->rings[flowid]) + inff_msgbuf_delete_flowring(drvr, flowid); + } + + search = flow->tdls_entry; + while (search) { + remove = search; + search = search->next; + kfree(remove); + } + kfree(flow->rings); + kfree(flow); +} + +void inff_flowring_configure_addr_mode(struct inff_flowring *flow, int ifidx, + enum proto_addr_mode addr_mode) +{ + struct inff_bus *bus_if = dev_get_drvdata(flow->dev); + struct inff_pub *drvr = bus_if->drvr; + u32 i; + u16 flowid; + + if (flow->addr_mode[ifidx] != addr_mode) { + for (i = 0; i < ARRAY_SIZE(flow->hash); i++) { + if (flow->hash[i].ifidx == ifidx) { + flowid = flow->hash[i].flowid; + if (flow->rings[flowid]->status != RING_OPEN) + continue; + inff_msgbuf_delete_flowring(drvr, flowid); + } + } + flow->addr_mode[ifidx] = addr_mode; + } +} + +void inff_flowring_delete_peer(struct inff_flowring *flow, int ifidx, + u8 peer[ETH_ALEN]) +{ + struct inff_bus *bus_if = dev_get_drvdata(flow->dev); + struct inff_pub *drvr = bus_if->drvr; + struct inff_flowring_hash *hash; + struct inff_flowring_tdls_entry *prev; + struct inff_flowring_tdls_entry *search; + u32 i; + u16 flowid; + bool sta; + + sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); + + search = flow->tdls_entry; + prev = NULL; + while (search) { + if (memcmp(search->mac, peer, ETH_ALEN) == 0) { + sta = false; + break; + } + prev = search; + search = search->next; + } + + hash = flow->hash; + for (i = 0; i < INFF_FLOWRING_HASHSIZE; i++) { + if ((sta || (memcmp(hash[i].mac, peer, 
ETH_ALEN) == 0)) && + hash[i].ifidx == ifidx) { + flowid = flow->hash[i].flowid; + if (flow->rings[flowid]->status == RING_OPEN) + inff_msgbuf_delete_flowring(drvr, flowid); + } + } + + if (search) { + if (prev) + prev->next = search->next; + else + flow->tdls_entry = search->next; + kfree(search); + if (!flow->tdls_entry) + flow->tdls_active = false; + } +} + +void inff_flowring_add_tdls_peer(struct inff_flowring *flow, int ifidx, + u8 peer[ETH_ALEN]) +{ + struct inff_flowring_tdls_entry *tdls_entry; + struct inff_flowring_tdls_entry *search; + + tdls_entry = kzalloc(sizeof(*tdls_entry), GFP_ATOMIC); + if (!tdls_entry) + return; + + memcpy(tdls_entry->mac, peer, ETH_ALEN); + tdls_entry->next = NULL; + if (!flow->tdls_entry) { + flow->tdls_entry = tdls_entry; + } else { + search = flow->tdls_entry; + if (memcmp(search->mac, peer, ETH_ALEN) == 0) + goto free_entry; + while (search->next) { + search = search->next; + if (memcmp(search->mac, peer, ETH_ALEN) == 0) + goto free_entry; + } + search->next = tdls_entry; + } + + flow->tdls_active = true; + return; + +free_entry: + kfree(tdls_entry); +} diff --git a/drivers/net/wireless/infineon/inffmac/flowring.h b/drivers/net/wireless/infineon/inffmac/flowring.h new file mode 100644 index 000000000000..ad502aa123f1 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/flowring.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_FLOWRING_H +#define INFF_FLOWRING_H + +#define INFF_FLOWRING_HASHSIZE 512 /* has to be 2^x */ +#define INFF_FLOWRING_INVALID_ID 0xFFFFFFFF + +struct inff_flowring_hash { + u8 mac[ETH_ALEN]; + u8 fifo; + u8 ifidx; + u16 flowid; +}; + +enum ring_status { + RING_CLOSED, + RING_CLOSING, + RING_OPEN +}; + +struct inff_flowring_ring { + u16 hash_id; + bool blocked; + enum ring_status status; + struct sk_buff_head skblist; +}; + +struct inff_flowring_tdls_entry { + u8 mac[ETH_ALEN]; + struct inff_flowring_tdls_entry *next; +}; + +struct inff_flowring { + struct device *dev; + struct inff_flowring_hash hash[INFF_FLOWRING_HASHSIZE]; + struct inff_flowring_ring **rings; + spinlock_t block_lock; /* used to protect flow ring */ + enum proto_addr_mode addr_mode[INFF_MAX_IFS]; + u16 nrofrings; + bool tdls_active; + struct inff_flowring_tdls_entry *tdls_entry; +}; + +u32 inff_flowring_lookup(struct inff_flowring *flow, u8 da[ETH_ALEN], + u8 prio, u8 ifidx); +u32 inff_flowring_create(struct inff_flowring *flow, u8 da[ETH_ALEN], + u8 prio, u8 ifidx); +void inff_flowring_delete(struct inff_flowring *flow, u16 flowid); +void inff_flowring_open(struct inff_flowring *flow, u16 flowid); +u8 inff_flowring_tid(struct inff_flowring *flow, u16 flowid); +u32 inff_flowring_enqueue(struct inff_flowring *flow, u16 flowid, + struct sk_buff *skb); +struct sk_buff *inff_flowring_dequeue(struct inff_flowring *flow, u16 flowid); +void inff_flowring_reinsert(struct inff_flowring *flow, u16 flowid, + struct sk_buff *skb); +u32 inff_flowring_qlen(struct inff_flowring *flow, u16 flowid); +u8 inff_flowring_ifidx_get(struct inff_flowring *flow, u16 flowid); +struct inff_flowring *inff_flowring_attach(struct device *dev, u16 nrofrings); +void inff_flowring_detach(struct inff_flowring *flow); +void inff_flowring_configure_addr_mode(struct inff_flowring *flow, int ifidx, + enum proto_addr_mode addr_mode); +void inff_flowring_delete_peer(struct inff_flowring *flow, int ifidx, + u8 
peer[ETH_ALEN]); +void inff_flowring_add_tdls_peer(struct inff_flowring *flow, int ifidx, + u8 peer[ETH_ALEN]); + +#endif /* INFF_FLOWRING_H */ -- 2.25.1 Implements the specific bus logic for Infineon devices connected to the Linux machine via an SDIO interface. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/sdio.c | 5909 ++++++++++++++++++ drivers/net/wireless/infineon/inffmac/sdio.h | 553 ++ 2 files changed, 6462 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/sdio.c create mode 100644 drivers/net/wireless/infineon/inffmac/sdio.h diff --git a/drivers/net/wireless/infineon/inffmac/sdio.c b/drivers/net/wireless/infineon/inffmac/sdio.c new file mode 100644 index 000000000000..93183545e987 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/sdio.c @@ -0,0 +1,5909 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "defs.h" +#include "utils.h" +#include "hw_ids.h" +#include "sdio.h" +#include "chip.h" +#include "firmware.h" +#include "core.h" +#include "common.h" +#include "bcdc.h" +#include "fwil.h" +#include "bt_shared_sdio.h" +#include "trxhdr.h" +#include "feature.h" +#include "chip_43022.h" +#include "chip_5557x.h" +#include "chip_5591x.h" +#include "dfu.h" + +#define DCMD_RESP_TIMEOUT msecs_to_jiffies(2500) +#define CTL_DONE_TIMEOUT msecs_to_jiffies(2500) +#define ULP_HUDI_PROC_DONE_TIME msecs_to_jiffies(2500) + +/* watermark expressed in number of words */ +#define DEFAULT_F2_WATERMARK 0x8 + +#ifdef DEBUG + +#define INFF_TRAP_INFO_SIZE 80 + +#define CBUF_LEN (128) + +/* Device console log buffer state */ +#define CONSOLE_BUFFER_MAX 2024 + +struct rte_log_le { + __le32 buf; /* Can't be pointer on (64-bit) hosts */ + __le32 buf_size; + __le32 idx; + char *_buf_compat; /* Redundant pointer for backward compat. */ +}; + +struct rte_console { + /* Virtual UART + * When there is no UART (e.g. Quickturn), + * the host should write a complete + * input line directly into cbuf and then write + * the length into vcons_in. + * This may also be used when there is a real UART + * (at risk of conflicting with + * the real UART). vcons_out is currently unused. + */ + uint vcons_in; + uint vcons_out; + + /* Output (logging) buffer + * Console output is written to a ring buffer log_buf at index log_idx. + * The host may read the output when it sees log_idx advance. + * Output will be lost if the output wraps around faster than the host + * polls. + */ + struct rte_log_le log_le; + + /* Console input line buffer + * Characters are read one at a time into cbuf + * until is received, then + * the buffer is processed as a command line. 
+ * Also used for virtual UART. + */ + uint cbuf_idx; + char cbuf[CBUF_LEN]; +}; + +#endif /* DEBUG */ +#include "chipcommon.h" + +#include "bus.h" +#include "debug.h" +#include "tracepoint.h" + +#define TXQLEN 2048 /* bulk tx queue length */ +#define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */ +#define TXLOW (TXHI - 256) /* turn off flow control below TXLOW */ +#define PRIOMASK 7 + +#define TXRETRIES 2 /* # of retries for tx frames */ + +#define INFF_RXBOUND 50 /* Default for max rx frames in + * one scheduling + */ + +#define INFF_TXBOUND 20 /* Default for max tx frames in + * one scheduling + */ + +#define INFF_TXMINMAX 1 /* Max tx frames if rx still pending */ + +#define MEMBLOCK 2048 /* Block size used for downloading + * of dongle image + */ +#define MAX_DATA_BUF (32 * 1024) /* Must be large enough to hold + * biggest possible glom + */ + +#define INFF_FIRSTREAD BIT(6) + +/* SBSDIO_DEVICE_CTL */ + +/* 1: device will assert busy signal when receiving CMD53 */ +#define SBSDIO_DEVCTL_SETBUSY 0x01 +/* 1: assertion of sdio interrupt is synchronous to the sdio clock */ +#define SBSDIO_DEVCTL_SPI_INTR_SYNC 0x02 +/* 1: mask all interrupts to host except the chipActive (rev 8) */ +#define SBSDIO_DEVCTL_CA_INT_ONLY 0x04 +/* 1: isolate internal sdio signals, put external pads in tri-state; requires + * sdio bus power cycle to clear (rev 9) + */ +#define SBSDIO_DEVCTL_PADS_ISO 0x08 +/* 1: enable F2 Watermark */ +#define SBSDIO_DEVCTL_F2WM_ENAB 0x10 +/* Force SD->SB reset mapping (rev 11) */ +#define SBSDIO_DEVCTL_SB_RST_CTL 0x30 +/* Determined by CoreControl bit */ +#define SBSDIO_DEVCTL_RST_CORECTL 0x00 +/* Force backplane reset */ +#define SBSDIO_DEVCTL_RST_BPRESET 0x10 +/* Force no backplane reset */ +#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20 +/* Reset SB Address to default value */ +#define SBSDIO_DEVCTL_ADDR_RESET 0x40 + +/* direct(mapped) cis space */ + +/* MAPPED common CIS address */ +#define SBSDIO_CIS_BASE_COMMON 0x1000 +/* maximum bytes in one CIS 
*/ +#define SBSDIO_CIS_SIZE_LIMIT 0x200 +/* cis offset addr is < 17 bits */ +#define SBSDIO_CIS_OFT_ADDR_MASK 0x1FFFF + +/* manfid tuple length, include tuple, link bytes */ +#define SBSDIO_CIS_MANFID_TUPLE_LEN 6 + +#define SD_REG(field) \ + (offsetof(struct sdpcmd_regs, field)) + +/* SDIO function 1 register CHIPCLKCSR */ +/* Force ALP request to backplane */ +#define SBSDIO_FORCE_ALP 0x01 +/* Force HT request to backplane */ +#define SBSDIO_FORCE_HT 0x02 +/* Force ILP request to backplane */ +#define SBSDIO_FORCE_ILP 0x04 +/* Make ALP ready (power up xtal) */ +#define SBSDIO_ALP_AVAIL_REQ 0x08 +/* Make HT ready (power up PLL) */ +#define SBSDIO_HT_AVAIL_REQ 0x10 +/* Squelch clock requests from HW */ +#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20 +/* Status: ALP is ready */ +#define SBSDIO_ALP_AVAIL 0x40 +/* Status: HT is ready */ +#define SBSDIO_HT_AVAIL 0x80 +#define SBSDIO_CSR_MASK 0x1F +#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL) +#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS) +#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS) +#define SBSDIO_ALPONLY(regval) ({ \ + typeof(regval) _regval = (regval); \ + (SBSDIO_ALPAV(_regval) && !SBSDIO_HTAV(_regval)); \ + }) +#define SBSDIO_CLKAV(regval, alponly) ({ \ + typeof(regval) _regval = (regval); \ + (SBSDIO_ALPAV(_regval) && (alponly ? 
1 : SBSDIO_HTAV(_regval))); \ + }) +#define ALP_WAIT_MIN 65 +#define ALP_WAIT_MAX 85 + +/* intstatus */ +#define I_SMB_SW0 BIT(0) /* To SB Mail S/W interrupt 0 */ +#define I_SMB_SW1 BIT(1) /* To SB Mail S/W interrupt 1 */ +#define I_SMB_SW2 BIT(2) /* To SB Mail S/W interrupt 2 */ +#define I_SMB_SW3 BIT(3) /* To SB Mail S/W interrupt 3 */ +#define I_SMB_SW_MASK 0x0000000f /* To SB Mail S/W interrupts mask */ +#define I_SMB_SW_SHIFT 0 /* To SB Mail S/W interrupts shift */ +#define I_HMB_SW0 BIT(4) /* To Host Mail S/W interrupt 0 */ +#define I_HMB_SW1 BIT(5) /* To Host Mail S/W interrupt 1 */ +#define I_HMB_SW2 BIT(6) /* To Host Mail S/W interrupt 2 */ +#define I_HMB_SW3 BIT(7) /* To Host Mail S/W interrupt 3 */ +#define I_HMB_SW_MASK 0x000000f0 /* To Host Mail S/W interrupts mask */ +#define I_HMB_SW_SHIFT 4 /* To Host Mail S/W interrupts shift */ +#define I_WR_OOSYNC BIT(8) /* Write Frame Out Of Sync */ +#define I_RD_OOSYNC BIT(9) /* Read Frame Out Of Sync */ +#define I_PC BIT(10) /* descriptor error */ +#define I_PD BIT(11) /* data error */ +#define I_DE BIT(12) /* Descriptor protocol Error */ +#define I_RU BIT(13) /* Receive descriptor Underflow */ +#define I_RO BIT(14) /* Receive fifo Overflow */ +#define I_XU BIT(15) /* Transmit fifo Underflow */ +#define I_RI BIT(16) /* Receive Interrupt */ +#define I_BUSPWR BIT(17) /* SDIO Bus Power Change (rev 9) */ +#define I_XMTDATA_AVAIL BIT(23) /* bits in fifo */ +#define I_XI BIT(24) /* Transmit Interrupt */ +#define I_RF_TERM BIT(25) /* Read Frame Terminate */ +#define I_WF_TERM BIT(26) /* Write Frame Terminate */ +#define I_PCMCIA_XU BIT(27) /* PCMCIA Transmit FIFO Underflow */ +#define I_SBINT BIT(28) /* sbintstatus Interrupt */ +#define I_CHIPACTIVE BIT(29) /* chip from doze to active state */ +#define I_SRESET BIT(30) /* CCCR RES interrupt */ +#define I_IOE2 BIT(31) /* CCCR IOE2 Bit Changed */ +#define I_ERRORS (I_PC | I_PD | I_DE | I_RU | I_RO | I_XU) +#define I_DMA (I_RI | I_XI | I_ERRORS) + +/* corecontrol */ 
+#define CC_CISRDY BIT(0) /* CIS Ready */ +#define CC_BPRESEN BIT(1) /* CCCR RES signal */ +#define CC_F2RDY BIT(2) /* set CCCR IOR2 bit */ +#define CC_CLRPADSISO BIT(3) /* clear SDIO pads isolation */ +#define CC_XMTDATAAVAIL_MODE BIT(4) +#define CC_XMTDATAAVAIL_CTRL BIT(5) + +/* SDA_FRAMECTRL */ +#define SFC_RF_TERM BIT(0) /* Read Frame Terminate */ +#define SFC_WF_TERM BIT(1) /* Write Frame Terminate */ +#define SFC_CRC4WOOS BIT(2) /* CRC error for write out of sync */ +#define SFC_ABORTALL BIT(3) /* Abort all in-progress frames */ + +/* + * Software allocation of To SB Mailbox resources + */ + +/* tosbmailbox bits corresponding to intstatus bits */ +#define SMB_NAK BIT(0) /* Frame NAK */ +#define SMB_INT_ACK BIT(1) /* Host Interrupt ACK */ +#define SMB_USE_OOB BIT(2) /* Use OOB Wakeup */ +#define SMB_DEV_INT BIT(3) /* Miscellaneous Interrupt */ + +/* tosbmailboxdata */ +#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version */ + +/* + * Software allocation of To Host Mailbox resources + */ + +/* intstatus bits */ +#define I_HMB_FC_STATE I_HMB_SW0 /* Flow Control State */ +#define I_HMB_FC_CHANGE I_HMB_SW1 /* Flow Control State Changed */ +#define I_HMB_FRAME_IND I_HMB_SW2 /* Frame Indication */ +#define I_HMB_HOST_INT I_HMB_SW3 /* Miscellaneous Interrupt */ + +/* tohostmailboxdata */ +#define HMB_DATA_NAKHANDLED 0x0001 /* retransmit NAK'd frame */ +#define HMB_DATA_DEVREADY 0x0002 /* talk to host after enable */ +#define HMB_DATA_FC 0x0004 /* per prio flowcontrol update flag */ +#define HMB_DATA_FWREADY 0x0008 /* fw ready for protocol activity */ +#define HMB_DATA_FWHALT 0x0010 /* firmware halted */ + +#define HMB_DATA_FCDATA_MASK 0xff000000 +#define HMB_DATA_FCDATA_SHIFT 24 + +#define HMB_DATA_VERSION_MASK 0x00ff0000 +#define HMB_DATA_VERSION_SHIFT 16 + +/* + * Software-defined protocol header + */ + +/* Current protocol version */ +#define SDPCM_PROT_VERSION 4 + +/* + * Shared structure between dongle and the host. 
+ * The structure contains pointers to trap or assert information. + */ +#define SDPCM_SHARED_VERSION 0x0003 +#define SDPCM_SHARED_VERSION_MASK 0x00FF +#define SDPCM_SHARED_ASSERT_BUILT 0x0100 +#define SDPCM_SHARED_ASSERT 0x0200 +#define SDPCM_SHARED_TRAP 0x0400 + +/* Space for header read, limit for data packets */ +#define MAX_HDR_READ BIT(6) +#define MAX_RX_DATASZ 2048 + +/* Bump up limit on waiting for HT to account for first startup; + * if the image is doing a CRC calculation before programming the PMU + * for HT availability, it could take a couple hundred ms more, so + * max out at a 1 second (1000000us). + */ +#undef PMU_MAX_TRANSITION_DLY +#define PMU_MAX_TRANSITION_DLY 1000000 + +/* Value for ChipClockCSR during initial setup */ +#define INFF_INIT_CLKCTL1 (SBSDIO_FORCE_HW_CLKREQ_OFF | \ + SBSDIO_ALP_AVAIL_REQ) + +/* Flags for SDH calls */ +#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED) + +#define INFF_IDLE_ACTIVE 0 /* Do not request any SD clock change + * when idle + */ +#define INFF_IDLE_STOP (-1) /* Request SD clock be stopped */ +#define INFF_IDLE_INTERVAL 1 + +#define KSO_WAIT_US 50 +#define KSO_MAX_SEQ_TIME_NS (1000000 * 10) /* Ideal time for kso sequence 10ms in ns*/ +#define MAX_KSO_ATTEMPTS (PMU_MAX_TRANSITION_DLY / KSO_WAIT_US) + +static void inff_sdio_firmware_callback(struct device *dev, int err, + struct inff_fw_request *fwreq); +static int inff_sdio_f2_ready(struct inff_sdio *bus); +static int inff_ulp_event_notify(struct inff_if *ifp, + const struct inff_event_msg *evtmsg, + void *data); +static void +inff_sched_rxf(struct inff_sdio *bus, struct sk_buff *skb); + +#ifdef DEBUG +/* Device console log buffer state */ +struct inff_console { + uint count; /* Poll interval msec counter */ + uint log_addr; /* Log struct address (fixed) */ + struct rte_log_le log_le; /* Log struct (host copy) */ + uint bufsize; /* Size of log buffer */ + u8 *buf; /* Log buffer (host copy) */ + uint last; /* Last buffer read index */ +}; + +struct inff_trap_info 
{ + __le32 type; + __le32 epc; + __le32 cpsr; + __le32 spsr; + __le32 r0; /* a1 */ + __le32 r1; /* a2 */ + __le32 r2; /* a3 */ + __le32 r3; /* a4 */ + __le32 r4; /* v1 */ + __le32 r5; /* v2 */ + __le32 r6; /* v3 */ + __le32 r7; /* v4 */ + __le32 r8; /* v5 */ + __le32 r9; /* sb/v6 */ + __le32 r10; /* sl/v7 */ + __le32 r11; /* fp/v8 */ + __le32 r12; /* ip */ + __le32 r13; /* sp */ + __le32 r14; /* lr */ + __le32 pc; /* r15 */ +}; +#endif /* DEBUG */ + +struct sdpcm_shared { + u32 flags; + u32 trap_addr; + u32 assert_exp_addr; + u32 assert_file_addr; + u32 assert_line; + u32 console_addr; /* Address of struct rte_console */ + u32 msgtrace_addr; + u8 tag[32]; + u32 brpt_addr; +}; + +struct sdpcm_shared_le { + __le32 flags; + __le32 trap_addr; + __le32 assert_exp_addr; + __le32 assert_file_addr; + __le32 assert_line; + __le32 console_addr; /* Address of struct rte_console */ + __le32 msgtrace_addr; + u8 tag[32]; + __le32 brpt_addr; +}; + +/* dongle SDIO bus specific header info */ +struct inff_sdio_hdrinfo { + u8 seq_num; + u8 channel; + u16 len; + u16 len_left; + u16 len_nxtfrm; + u8 dat_offset; + bool lastfrm; + u16 tail_pad; +}; + +struct task_ctl { + struct task_struct *p_task; + struct completion comp; +}; + +/* + * hold counter variables + */ +struct inff_sdio_count { + uint intrcount; /* Count of device interrupt callbacks */ + uint lastintrs; /* Count as of last watchdog timer */ + uint pollcnt; /* Count of active polls */ + uint regfails; /* Count of R_REG failures */ + uint tx_sderrs; /* Count of tx attempts with sd errors */ + uint fcqueued; /* Tx packets that got queued */ + uint rxrtx; /* Count of rtx requests (NAK to dongle) */ + uint rx_toolong; /* Receive frames too long to receive */ + uint rxc_errors; /* SDIO errors when reading control frames */ + uint rx_hdrfail; /* SDIO errors on header reads */ + uint rx_badhdr; /* Bad received headers (roosync?) 
*/ + uint rx_badseq; /* Mismatched rx sequence number */ + uint fc_rcvd; /* Number of flow-control events received */ + uint fc_xoff; /* Number which turned on flow-control */ + uint fc_xon; /* Number which turned off flow-control */ + uint rxglomfail; /* Failed deglom attempts */ + uint rxglomframes; /* Number of glom frames (superframes) */ + uint rxglompkts; /* Number of packets from glom frames */ + uint f2rxhdrs; /* Number of header reads */ + uint f2rxdata; /* Number of frame data reads */ + uint f2txdata; /* Number of f2 frame writes */ + uint f1regdata; /* Number of f1 register accesses */ + uint tickcnt; /* Number of watchdog been schedule */ + ulong tx_ctlerrs; /* Err of sending ctrl frames */ + ulong tx_ctlpkts; /* Ctrl frames sent to dongle */ + ulong rx_ctlerrs; /* Err of processing rx ctrl frames */ + ulong rx_ctlpkts; /* Ctrl frames processed from dongle */ + ulong rx_readahead_cnt; /* packets where header read-ahead was used */ +}; + +/* misc chip info needed by some of the routines */ +/* Private data for SDIO bus interaction */ +struct inff_sdio { + struct inff_sdio_dev *sdiodev; /* sdio device handler */ + struct inff_chip *ci; /* Chip info struct */ + struct inff_core *sdio_core; /* sdio core info struct */ + + u32 hostintmask; /* Copy of Host Interrupt Mask */ + atomic_t intstatus; /* Intstatus bits (events) pending */ + atomic_t fcstate; /* State of dongle flow-control */ + + uint blocksize; /* Block size of SDIO transfers */ + uint roundup; /* Max roundup limit */ + + struct pktq txq; /* Queue length used for flow-control */ + u8 flowcontrol; /* per prio flow control bitmask */ + u8 tx_seq; /* Transmit sequence number (next) */ + u8 tx_max; /* Maximum transmit sequence allowed */ + + u8 *hdrbuf; /* buffer for handling rx frame */ + u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */ + u8 rx_seq; /* Receive sequence number (expected) */ + struct inff_sdio_hdrinfo cur_read; + /* info of current read frame */ + bool rxskip; /* Skip receive 
(awaiting NAK ACK) */ + bool rxpending; /* Data frame pending in dongle */ + + uint rxbound; /* Rx frames to read before resched */ + uint txbound; /* Tx frames to send before resched */ + uint txminmax; + + struct sk_buff *glomd; /* Packet containing glomming descriptor */ + struct sk_buff_head glom; /* Packet list for glommed superframe */ + + u8 *rxbuf; /* Buffer for receiving control packets */ + uint rxblen; /* Allocated length of rxbuf */ + u8 *rxctl; /* Aligned pointer into rxbuf */ + u8 *rxctl_orig; /* pointer for freeing rxctl */ + uint rxlen; /* Length of valid data in buffer */ + spinlock_t rxctl_lock; /* protection lock for ctrl frame resources */ + + u8 sdpcm_ver; /* Bus protocol reported by dongle */ + + atomic_t ipend; /* Device interrupt is pending */ + uint spurious; /* Count of spurious interrupts */ + +#ifdef DEBUG + uint console_interval; + struct inff_console console; /* Console output polling support */ + uint console_addr; /* Console address from shared struct */ +#endif /* DEBUG */ + + uint clkstate; /* State of sd and backplane clock(s) */ + s32 idletime; /* Control for activity timeout */ + s32 idlecount; /* Activity timeout counter */ + s32 idleclock; /* How to set bus driver when idle */ + bool rxflow_mode; /* Rx flow control mode */ + bool rxflow; /* Is rx flow control on */ + bool alp_only; /* Don't use HT clock (ALP only) */ + + u8 *ctrl_frame_buf; + u16 ctrl_frame_len; + bool ctrl_frame_stat; + int ctrl_frame_err; + + spinlock_t txq_lock; /* protect bus->txq */ + wait_queue_head_t ctrl_wait; + wait_queue_head_t dcmd_resp_wait; + + struct timer_list timer; + struct completion watchdog_wait; + struct task_struct *watchdog_tsk; + bool wd_active; + + struct workqueue_struct *inff_wq; + struct work_struct datawork; + bool dpc_triggered; + bool dpc_running; + + bool txoff; /* Transmit flow-controlled */ + struct inff_sdio_count sdcnt; + bool sr_enabled; /* SaveRestore enabled */ + bool sleeping; + + u8 tx_hdrlen; /* sdio bus header length 
for tx packet */ + bool txglom; /* host tx glomming enable flag */ + u16 head_align; /* buffer pointer alignment */ + u16 sgentry_align; /* scatter-gather buffer alignment */ + struct mutex sdsem; + bool chipid_preset; + #define MAXSKBPEND 1024 + struct sk_buff *skbbuf[MAXSKBPEND]; + u32 store_idx; + u32 sent_idx; + struct task_ctl thr_rxf_ctl; + spinlock_t rxf_lock; /* lock for rxf idx protection */ + bool h1_ddr50_mode; /* H1 DDR50 Mode enabled*/ +}; + +/* clkstate */ +#define CLK_NONE 0 +#define CLK_SDONLY 1 +#define CLK_PENDING 2 +#define CLK_AVAIL 3 + +#ifdef DEBUG +static int qcount[NUMPRIO]; +#endif /* DEBUG */ + +#define DEFAULT_SDIO_DRIVE_STRENGTH 6 /* in milliamps */ + +#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL) + +/* Limit on rounding up frames */ +static const uint max_roundup = 512; + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +#define ALIGNMENT 8 +#else +#define ALIGNMENT 4 +#endif + +enum inff_sdio_frmtype { + INFF_SDIO_FT_NORMAL, + INFF_SDIO_FT_SUPER, + INFF_SDIO_FT_SUB, +}; + +#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu)) + +/* SDIO Pad drive strength to select value mappings */ +struct sdiod_drive_str { + u8 strength; /* Pad Drive Strength in mA */ + u8 sel; /* Chip-specific select value */ +}; + +/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */ +static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = { + {32, 0x6}, + {26, 0x7}, + {22, 0x4}, + {16, 0x5}, + {12, 0x2}, + {8, 0x3}, + {4, 0x0}, + {0, 0x1} +}; + +/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */ +static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = { + {6, 0x7}, + {5, 0x6}, + {4, 0x5}, + {3, 0x4}, + {2, 0x2}, + {1, 0x1}, + {0, 0x0} +}; + +/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */ +static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = { + {3, 0x3}, + {2, 0x2}, + {1, 0x1}, + {0, 0x0} }; + +/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */ 
static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
	{16, 0x7},
	{12, 0x5},
	{8, 0x3},
	{4, 0x1}
};

/* per-board firmware binaries */
#define INFF_55572_FIRMWARE_BASENAME	INFF_FW_DEFAULT_PATH "inffmac55572-sdio"
#define INFF_55500_FIRMWARE_BASENAME	INFF_FW_DEFAULT_PATH "inffmac55500-sdio"
#define INFF_43022_FIRMWARE_BASENAME	INFF_FW_DEFAULT_PATH "inffmac43022-sdio"

MODULE_FIRMWARE(INFF_55572_FIRMWARE_BASENAME ".trxse");
MODULE_FIRMWARE(INFF_55500_FIRMWARE_BASENAME ".trxse");
MODULE_FIRMWARE(INFF_43022_FIRMWARE_BASENAME ".trxs");

/* chip-id -> firmware basename lookup consumed by the firmware loader */
static const struct inff_firmware_mapping inff_sdio_fwnames[] = {
	INFF_FW_ENTRY(INF_CC_5557X_CHIP_ID, 0xFFFFFFFF, 55572),
	INFF_FW_ENTRY(INF_CC_5551X_CHIP_ID, 0xFFFFFFFF, 55500),
	INFF_FW_ENTRY(INF_CC_43022_CHIP_ID, 0xFFFFFFFF, 43022),
};

/* tx-window credits held back for control frames (see data_ok()) */
#define TXCTL_CREDITS	2

/* Report whether the SDIO bus is currently in the sleep state. */
bool inff_sdio_bus_sleep_state(struct inff_sdio *bus)
{
	return bus->sleeping;
}

/* While the bus sleeps, only the F1 SleepCSR register may be accessed
 * (it is the register used to wake the device back up).
 */
static inline bool inff_sdio_bus_access_allowed(u32 addr)
{
	return (addr == SBSDIO_FUNC1_SLEEPCSR) ?
	       true : false;
}

/*
 * Sleep-gated register accessors: each wrapper below refuses the access
 * with -EPERM while the bus is asleep (unless the address is explicitly
 * allowed, or ignore_bus_error is set), otherwise it forwards to the
 * matching *_ext() low-level accessor.  Reads return 0xFF on a denied
 * access, mirroring what a dead SDIO bus would return.
 */

/* Read a byte from an F0 (CCCR) register, honouring the sleep gate. */
u8 inff_sdiod_func0_rb(struct inff_sdio_dev *sdiodev, u32 addr, int *ret)
{
	inff_dbg(SDIOEXT, "addr 0x%x\n", addr);

	if (!inff_sdio_bus_sleep_state(sdiodev->bus) || sdiodev->ignore_bus_error)
		return inff_sdiod_func0_rb_ext(sdiodev, addr, ret);

	inff_err(" Error Access Not allowed\n");
	if (ret)
		*ret = -EPERM;
	return 0xFF;
}

/* Write a byte to an F0 (CCCR) register, honouring the sleep gate. */
void inff_sdiod_func0_wb(struct inff_sdio_dev *sdiodev, u32 addr, u32 data,
			 int *ret)
{
	inff_dbg(SDIOEXT, "addr 0x%x val 0x%x\n", addr, data);

	if (!inff_sdio_bus_sleep_state(sdiodev->bus) || sdiodev->ignore_bus_error) {
		inff_sdiod_func0_wb_ext(sdiodev, addr, data, ret);
	} else {
		inff_err(" Error Access Not allowed\n");
		if (ret)
			*ret = -EPERM;
	}
}

/* Read a byte from an F1 register; SleepCSR stays accessible in sleep. */
u8 inff_sdiod_readb(struct inff_sdio_dev *sdiodev, u32 addr, int *ret)
{
	inff_dbg(SDIOEXT, "addr 0x%x\n", addr);

	if (!inff_sdio_bus_sleep_state(sdiodev->bus) || inff_sdio_bus_access_allowed(addr))
		return inff_sdiod_readb_ext(sdiodev, addr, ret);

	inff_err(" Error Access Not allowed\n");
	if (ret)
		*ret = -EPERM;
	return 0xFF;
}

/* Write a byte to an F1 register; SleepCSR stays accessible in sleep. */
void inff_sdiod_writeb(struct inff_sdio_dev *sdiodev, u32 addr, u32 data,
		       int *ret)
{
	inff_dbg(SDIOEXT, "addr 0x%x val 0x%x\n", addr, data);

	if (!inff_sdio_bus_sleep_state(sdiodev->bus) || inff_sdio_bus_access_allowed(addr)) {
		inff_sdiod_writeb_ext(sdiodev, addr, data, ret);
	} else {
		inff_err(" Error Access Not allowed\n");
		if (ret)
			*ret = -EPERM;
	}
}

/* Read a byte from an arbitrary SDIO function; denied while asleep. */
u8 inff_sdiod_func_rb(struct inff_sdio_dev *sdiodev, struct sdio_func *func, u32 addr, int *ret)
{
	inff_dbg(SDIOEXT, "addr 0x%x\n", addr);

	if (!inff_sdio_bus_sleep_state(sdiodev->bus))
		return inff_sdiod_func_rb_ext(func, addr, ret);

	inff_err(" Error Access Not allowed\n");
	if (ret)
		*ret = -EPERM;
	return 0xFF;
}

/* Write a byte to an arbitrary SDIO function; denied while asleep. */
void inff_sdiod_func_wb(struct inff_sdio_dev *sdiodev, struct sdio_func *func, u32 addr,
			u32 data, int *ret)
{
	inff_dbg(SDIOEXT, "addr 0x%x val 0x%x\n", addr, data);

	if
(!inff_sdio_bus_sleep_state(sdiodev->bus)) {
		inff_sdiod_func_wb_ext(func, addr, data, ret);
	} else {
		inff_err(" Error Access Not allowed\n");
		if (ret)
			*ret = -EPERM;
	}
}

/* Advance skb->data to the requested alignment, then trim to len bytes. */
static void pkt_align(struct sk_buff *p, int len, int align)
{
	uint datalign;

	datalign = (unsigned long)(p->data);
	datalign = roundup(datalign, (align)) - datalign;
	if (datalign)
		skb_pull(p, datalign);
	__skb_trim(p, len);
}

/* To check if there's window offered for data frames; keeps
 * TXCTL_CREDITS in reserve for control frames.  The u8 casts make the
 * tx_max - tx_seq arithmetic wrap mod 256 like the on-wire sequence.
 */
static bool data_ok(struct inff_sdio *bus)
{
	return (u8)(bus->tx_max - bus->tx_seq) > TXCTL_CREDITS &&
	       ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
}

/* To check if there's window offered for a control frame */
static bool txctl_ok(struct inff_sdio *bus)
{
	return (bus->tx_max - bus->tx_seq) != 0 &&
	       ((bus->tx_max - bus->tx_seq) & 0x80) == 0;
}

/*
 * Set or clear the Keep-SDIO-On (KSO) bit in the F1 SleepCSR register:
 * on=true wakes the device (and polls until both the KSO and device-on
 * status bits read back set), on=false allows it to sleep (write once
 * and bail out, since the chip may power down immediately).
 * Returns 0 on success or the last register-access error.
 */
static int
inff_sdio_kso_control(struct inff_sdio *bus, bool on)
{
	u8 wr_val = 0, rd_val, cmp_val, bmask;
	int err = 0;
	int err_cnt = 0;
	int try_cnt = 0;
	unsigned long kso_loop_time = 0;
	struct timespec64 ts_start, ts_end, ts_delta;
	struct inff_sdio_dev *sdiod = bus->sdiodev;

	inff_dbg(SDIO, "Enter: on=%d\n", on);

	sdio_retune_crc_disable(bus->sdiodev->func1);

	/* Cannot re-tune if device is asleep; defer till we're awake */
	if (on)
		sdio_retune_hold_now(bus->sdiodev->func1);

	wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT);

	/* Start time of kso_sequence */
	ktime_get_ts64(&ts_start);

	/* Change bus width to 1-bit mode before kso 0 */
	if (!on && bus->idleclock == INFF_IDLE_STOP)
		inff_sdio_set_sdbus_clk_width(bus, SDIO_SDMODE_1BIT);

	/* 1st KSO write goes to AOS wake up core if device is asleep */
	inff_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);

	/* The chip could go down immediately after
	 * KSO bit is cleared. So the further reads of KSO register could
	 * fail. Thereby just bailing out immediately after clearing KSO
	 * bit, to avoid polling of KSO bit.
	 *
	 * NOTE(review): this early return also skips the
	 * sdio_retune_release()/sdio_retune_crc_enable() calls at the end
	 * of this function — confirm that leaving retune CRC disabled on
	 * the sleep path is intentional.
	 */
	if (!on) {
		bus->sdiodev->sbwad_valid = 0;
		return err;
	}

	/* device WAKEUP through KSO:
	 * write bit 0 & read back until
	 * both bits 0 (kso bit) & 1 (dev on status) are set
	 */
	cmp_val = SBSDIO_FUNC1_SLEEPCSR_KSO_MASK |
		  SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK;
	bmask = cmp_val;

	do {
		/* reliable KSO bit set/clr:
		 * the sdiod sleep write access is synced to PMU 32khz clk
		 * just one write attempt may fail,
		 * read it back until it matches written value
		 */
		rd_val = inff_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR,
					  &err);
		if (!err) {
			if ((rd_val & bmask) == cmp_val)
				break;
		} else {
			err_cnt++;
		}

		/* Do one KSO write-read-check without any delay in between the steps,
		 * if Device is already up KSO sequence will complete immediately
		 * without any delay for Host
		 */
		if (try_cnt == 0) {
			/* If Device is already up then it will not reach here,
			 * if the control reaches here it means device is in sleep
			 * so delay for some time to let Device wake up before
			 * starting subsequent KSO wr-rd-check sequence
			 * Delay should be less than the time device takes to wakeup
			 * in normal case, because giving more delay than what device needs
			 * will lead to host being unnecessarily blocked here while device is
			 * already up and ready, leading to more power consumptions of both
			 * host and device, as well as overall increased response delays
			 */
			usleep_range(2500, 3000);
		} else {
			/* Initial delay is done, now do continuous KSO wr-rd-check
			 * sequence with some small delay
			 */
			usleep_range(KSO_WAIT_US / 2, KSO_WAIT_US);
		}

		inff_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val,
				  &err);

	} while (try_cnt++ < MAX_KSO_ATTEMPTS);

	/* Measure how long the whole wake sequence took for diagnostics */
	ktime_get_ts64(&ts_end);
	ts_delta = timespec64_sub(ts_end, ts_start);
	kso_loop_time = timespec64_to_ns(&ts_delta);

	if (try_cnt > MAX_KSO_ATTEMPTS)
		inff_err("ERR: KSO=%d sequence failed after max tries=%d and err_cnt=%d\n"
			 "kso_seq_time=%luns rd_val=0x%x err=%d\n",
			 on, try_cnt, err_cnt, kso_loop_time, rd_val, err);

	if (bus->idleclock == INFF_IDLE_STOP) {
		/* Change the bus width to 4-bit mode on kso 1 */
		sdiod->ignore_bus_error = true;
		inff_sdio_set_sdbus_clk_width(bus, SDIO_SDMODE_4BIT);
		sdiod->ignore_bus_error = false;
	}

	/* New KSO Sequence for H1 DDR50 Mode */
	if (bus->h1_ddr50_mode) {
		u32 ret, chipid;

		/* Set Flag to ignore SDIO Bus access error during KSO */
		sdiod->ignore_bus_error = true;
		chipid = inff_sdiod_readl(sdiod,
					  bus->ci->ccsec->bus_corebase + SD_REG(chipid),
					  &ret);
		/* Clear Flag to ignore SDIO Bus access error during KSO */
		sdiod->ignore_bus_error = false;
		inff_dbg(SDIO, "chipid: 0x%x ret = 0x%x\n", chipid, ret);
	}

	sdio_retune_release(bus->sdiodev->func1);

	if (kso_loop_time > KSO_MAX_SEQ_TIME_NS)
		inff_err("ERR: KSO=%d sequence took %luns > expected %uns try_cnt=%d\n"
			 "err_cnt=%d rd_val=0x%x err=%d\n",
			 on, kso_loop_time, KSO_MAX_SEQ_TIME_NS, try_cnt, err_cnt, rd_val, err);

	inff_dbg(SDIO, "INFO: KSO=%d try_cnt=%d err_cnt=%d kso_seq_time=%luns\n"
		 "rd_val=0x%x err=%d\n", on, try_cnt, err_cnt, kso_loop_time, rd_val, err);

	sdio_retune_crc_enable(bus->sdiodev->func1);

	return err;
}

/* Host interrupts we care about: mailbox software ints + chip-active */
#define HOSTINTMASK	(I_HMB_SW_MASK | I_CHIPACTIVE)

/* Turn backplane clock on or off */
static int inff_sdio_htclk(struct inff_sdio *bus, bool on, bool pendok)
{
	int err;
	u8 clkctl, clkreq, devctl;
	unsigned long timeout;

	inff_dbg(SDIO, "Enter\n");

	clkctl = 0;

	/* With SaveRestore enabled the clock is managed by KSO instead;
	 * just track the logical state.
	 */
	if (bus->sr_enabled) {
		bus->clkstate = (on ? CLK_AVAIL : CLK_SDONLY);
		return 0;
	}

	if (on) {
		/* Request HT Avail (or only ALP when alp_only is set) */
		clkreq =
			bus->alp_only ?
			SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;

		inff_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				  clkreq, &err);
		if (err) {
			inff_err("HT Avail request error: %d\n", err);
			return -EBADE;
		}

		/* Check current status */
		clkctl = inff_sdiod_readb(bus->sdiodev,
					  SBSDIO_FUNC1_CHIPCLKCSR, &err);
		if (err) {
			inff_err("HT Avail read error: %d\n", err);
			return -EBADE;
		}

		/* Go to pending and await interrupt if appropriate */
		if (!SBSDIO_CLKAV(clkctl, bus->alp_only) && pendok) {
			/* Allow only clock-available interrupt */
			devctl = inff_sdiod_readb(bus->sdiodev,
						  SBSDIO_DEVICE_CTL, &err);
			if (err) {
				inff_err("Devctl error setting CA: %d\n", err);
				return -EBADE;
			}

			devctl |= SBSDIO_DEVCTL_CA_INT_ONLY;
			inff_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
					  devctl, &err);
			inff_dbg(SDIO, "CLKCTL: set PENDING\n");
			bus->clkstate = CLK_PENDING;

			return 0;
		} else if (bus->clkstate == CLK_PENDING) {
			/* Cancel CA-only interrupt filter */
			devctl = inff_sdiod_readb(bus->sdiodev,
						  SBSDIO_DEVICE_CTL, &err);
			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
			inff_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
					  devctl, &err);
		}

		/* Otherwise, wait here (polling) for HT Avail */
		timeout = jiffies +
			  msecs_to_jiffies(PMU_MAX_TRANSITION_DLY / 1000);
		while (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
			clkctl = inff_sdiod_readb(bus->sdiodev,
						  SBSDIO_FUNC1_CHIPCLKCSR,
						  &err);
			if (time_after(jiffies, timeout))
				break;

			usleep_range(5000, 10000);
		}
		if (err) {
			inff_err("HT Avail request error: %d\n", err);
			return -EBADE;
		}
		if (!SBSDIO_CLKAV(clkctl, bus->alp_only)) {
			inff_err("HT Avail timeout (%d): clkctl 0x%02x\n",
				 PMU_MAX_TRANSITION_DLY, clkctl);
			return -EBADE;
		}

		/* Mark clock available */
		bus->clkstate = CLK_AVAIL;
		inff_dbg(SDIO, "CLKCTL: turned ON\n");

#if defined(DEBUG)
		if (!bus->alp_only) {
			if (SBSDIO_ALPONLY(clkctl))
				inff_err("HT Clock should be on\n");
		}
#endif /* defined (DEBUG) */

	} else {
		clkreq = 0;

		if (bus->clkstate == CLK_PENDING) {
			/* Cancel CA-only interrupt filter */
			devctl = inff_sdiod_readb(bus->sdiodev,
						  SBSDIO_DEVICE_CTL, &err);
			devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
			inff_sdiod_writeb(bus->sdiodev, SBSDIO_DEVICE_CTL,
					  devctl, &err);
		}

		bus->clkstate = CLK_SDONLY;
		inff_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
				  clkreq, &err);
		inff_dbg(SDIO, "CLKCTL: turned OFF\n");
		if (err) {
			inff_err("Failed access turning clock off: %d\n",
				 err);
			return -EBADE;
		}
	}
	return 0;
}

/**
 * inff_sdio_set_sdbus_clk_width - set SD clock enable/disable and sd_mode
 * @bus: SDIO bus layer handle (provides func1 and the mmc host)
 * @flags: reusing existing mmc->pm_flags to pass idle clk disable/enable or
 *	change sdbus width through mmc.
 *
 * Either toggles the SD clock in the host controller (SDIO_IDLECLOCK_*)
 * or switches the bus between 1-bit and 4-bit mode via CCCR and the host
 * controller (SDIO_SDMODE_*).  Returns 0 or a register-access error.
 */
int inff_sdio_set_sdbus_clk_width(struct inff_sdio *bus, unsigned int flags)
{
	struct mmc_host *host;
	u8 ctrl;
	int ret = 0;

	if (WARN_ON(!bus))
		return -EINVAL;

	host = bus->sdiodev->func1->card->host;

	inff_dbg(SDIO, "Enter\n");

	if (flags == SDIO_IDLECLOCK_DIS || flags == SDIO_IDLECLOCK_EN) {
		/* Switch OFF/ON SD CLOCK in sdio Host Controller */
		host->pm_caps |= flags;
		/* Call SDHCI interface function from ops */
		host->ops->set_ios(host, &host->ios);
	} else if (flags == SDIO_SDMODE_1BIT || flags == SDIO_SDMODE_4BIT) {
		ctrl = inff_sdiod_func0_rb(bus->sdiodev, SDIO_CCCR_IF, &ret);
		/* Check for Error */
		if (ret)
			return ret;

		/* Clear first two bits
		 * 00 - 1 bit wide
		 * 10 - 4 bit wide
		 */
		ctrl &= ~SDIO_BUS_WIDTH_MASK;
		/* set as 4-bit bus width */
		if (flags == SDIO_SDMODE_4BIT)
			ctrl |= SDIO_BUS_WIDTH_4BIT;

		inff_sdiod_func0_wb(bus->sdiodev, SDIO_CCCR_IF, ctrl, &ret);
		/* Update HOST CTRL register with 1 bit or 4 bit mode */
		host->pm_caps |= flags;
		/* Call SDHCI interface function from ops */
		host->ops->set_ios(host, &host->ios);
	}
	return ret;
}

/* Change idle/active SD state (CLK_NONE <-> CLK_SDONLY); also gates the
 * host controller clock when idleclock == INFF_IDLE_STOP.
 */
static int inff_sdio_sdclk(struct
			   inff_sdio *bus, bool on)
{
	inff_dbg(SDIO, "Enter\n");

	if (bus->idleclock == INFF_IDLE_STOP)
		inff_sdio_set_sdbus_clk_width(bus, (on ?
						    SDIO_IDLECLOCK_DIS :
						    SDIO_IDLECLOCK_EN));

	if (on)
		bus->clkstate = CLK_SDONLY;
	else
		bus->clkstate = CLK_NONE;

	return 0;
}

/* Transition SD and backplane clock readiness to the requested target
 * state (CLK_NONE / CLK_SDONLY / CLK_AVAIL), stepping through the
 * intermediate states as needed.
 */
int inff_sdio_clkctl(struct inff_sdio *bus, uint target, bool pendok)
{
#ifdef DEBUG
	uint oldstate = bus->clkstate;
#endif /* DEBUG */

	inff_dbg(SDIO, "Enter\n");

	/* Early exit if we're already there */
	if (bus->clkstate == target)
		return 0;

	switch (target) {
	case CLK_AVAIL:
		/* Make sure SD clock is available */
		if (bus->clkstate == CLK_NONE)
			inff_sdio_sdclk(bus, true);
		/* Now request HT Avail on the backplane */
		inff_sdio_htclk(bus, true, pendok);
		break;

	case CLK_SDONLY:
#ifdef CONFIG_INFFMAC_BT_SHARED_SDIO
		if (inff_btsdio_is_active(bus->sdiodev->bus_if)) {
			inff_dbg(SDIO, "BT is active, not switching to CLK_SDONLY\n");
			inff_sdio_wd_timer(bus, true);
			break;
		}
#endif /* CONFIG_INFFMAC_BT_SHARED_SDIO */
		/* Remove HT request, or bring up SD clock */
		if (bus->clkstate == CLK_NONE)
			inff_sdio_sdclk(bus, true);
		else if (bus->clkstate == CLK_AVAIL)
			inff_sdio_htclk(bus, false, false);
		else
			inff_err("request for %d -> %d\n",
				 bus->clkstate, target);
		break;

	case CLK_NONE:
#ifdef CONFIG_INFFMAC_BT_SHARED_SDIO
		if (inff_btsdio_is_active(bus->sdiodev->bus_if)) {
			inff_dbg(SDIO, "BT is active, not switching to CLK_NONE\n");
			break;
		}
#endif /* CONFIG_INFFMAC_BT_SHARED_SDIO */

		/* Make sure to remove HT request */
		if (bus->clkstate == CLK_AVAIL)
			inff_sdio_htclk(bus, false, false);
		/* Now remove the SD clock */
		inff_sdio_sdclk(bus, false);
		break;
	}
#ifdef DEBUG
	inff_dbg(SDIO, "%d -> %d\n", oldstate, bus->clkstate);
#endif /* DEBUG */

	return 0;
}

/* Move the bus into or out of the sleep state: via KSO when SaveRestore
 * is enabled, otherwise by dropping/raising the clocks directly.
 */
static int
inff_sdio_bus_sleep(struct inff_sdio *bus, bool sleep, bool pendok)
{
	int err = 0;
	u8
	   clkcsr;

#ifdef CONFIG_INFFMAC_BT_SHARED_SDIO
	/* The following is the assumption based on which the hook is placed.
	 * From WLAN driver, either from the active contexts OR from the
	 * watchdog contexts, we will be attempting to go to sleep. At that
	 * moment if we see that BT is still actively using the bus, we will
	 * return -EBUSY from here, and the bus sleep state would not have
	 * changed, so the caller can then schedule the watchdog again
	 * which will come and attempt to sleep at a later point.
	 *
	 * In case if BT is the only one and is the last user, we don't switch
	 * off the clock immediately, we allow the WLAN to decide when to sleep
	 * i.e from the watchdog.
	 * Now if the watchdog becomes active and attempts to switch off the
	 * clock and if another WLAN context is active they are any way
	 * serialized with sdlock.
	 */
	if (sleep && inff_btsdio_is_active(bus->sdiodev->bus_if)) {
		inff_dbg(SDIO, "Bus cannot sleep when BT is active\n");
		return -EBUSY;
	}
#endif /* CONFIG_INFFMAC_BT_SHARED_SDIO */

	if (bus->ci->socitype == SOCI_CP) {
		inff_dbg(SDIO, "CP chip don't support sleep yet\n");
		return -ENODEV;
	}

	inff_dbg(SDIO, "Enter: request %s currently %s\n",
		 (sleep ? "SLEEP" : "WAKE"),
		 (bus->sleeping ?
		  "SLEEP" : "WAKE"));

	/* If SR is enabled control bus state with KSO */
	if (bus->sr_enabled) {
		/* Done if we're already in the requested state */
		if (sleep == bus->sleeping)
			goto end;

		/* Going to sleep */
		if (sleep) {
			/* Make sure at least ALP is requested before
			 * clearing KSO, so the device can still wake.
			 */
			clkcsr = inff_sdiod_readb(bus->sdiodev,
						  SBSDIO_FUNC1_CHIPCLKCSR,
						  &err);
			if ((clkcsr & SBSDIO_CSR_MASK) == 0) {
				inff_dbg(SDIO, "no clock, set ALP\n");
				inff_sdiod_writeb(bus->sdiodev,
						  SBSDIO_FUNC1_CHIPCLKCSR,
						  SBSDIO_ALP_AVAIL_REQ, &err);
			}

			err = inff_sdio_kso_control(bus, false);

			if (bus->idleclock == INFF_IDLE_STOP)
				inff_sdio_sdclk(bus, false);
		} else {
			if (bus->idleclock == INFF_IDLE_STOP && bus->clkstate == CLK_NONE)
				inff_sdio_clkctl(bus, CLK_SDONLY, false);

			err = inff_sdio_kso_control(bus, true);
		}
		if (err) {
			inff_err("error while changing bus sleep state %d\n",
				 err);
			goto done;
		}
	}

end:
	/* control clocks */
	if (sleep) {
		if (!bus->sr_enabled)
			inff_sdio_clkctl(bus, CLK_NONE, pendok);
	} else {
		inff_sdio_clkctl(bus, CLK_AVAIL, pendok);
		inff_sdio_wd_timer(bus, true);
	}
	bus->sleeping = sleep;
	inff_dbg(SDIO, "new state %s\n",
		 (sleep ? "SLEEP" : "WAKE"));
done:
	inff_dbg(SDIO, "Exit: err=%d\n", err);
	return err;
}

#ifdef DEBUG
/* A shared-struct pointer is considered valid unless it is zero or its
 * upper halfword is the complement of its lower halfword (the pattern
 * left behind by the NVRAM length word).
 */
static inline bool inff_sdio_valid_shared_address(u32 addr)
{
	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
}

/* Locate and read the sdpcm_shared structure from device RAM into @sh,
 * converting from little-endian.  Wakes the bus and holds the host
 * claim across the RAM accesses.  Returns 0 or a negative error.
 */
static int inff_sdio_readshared(struct inff_sdio *bus,
				struct sdpcm_shared *sh)
{
	u32 addr = 0;
	int rv;
	u32 shaddr = 0;
	struct sdpcm_shared_le sh_le;
	__le32 addr_le;

	sdio_claim_host(bus->sdiodev->func1);
	inff_sdio_bus_sleep(bus, false, false);

	/*
	 * Read last word in socram to determine
	 * address of sdpcm_shared structure
	 *
	 * In 43022:secure-mode shared console address will be present at
	 * (512KB-4) location because other RAM area will be Read-blocked
	 * for host. Host can only read/write (384Kb-512Kb) RAM area.
	 * Read block is controlled by OTP bit.
	 */
	if (bus->ci->chip == INF_CC_43022_CHIP_ID) {
		shaddr = bus->ci->rambase + CM3_SOCRAM_WRITE_END_LOCATION - 4;
	} else {
		shaddr = bus->ci->rambase + bus->ci->ramsize - 4;
		/* can't access PMU register in 43022 and bus->ci->srsize is zero
		 * for 43022. So, skip the below statement for 43022.
		 *
		 * NOTE(review): the guard tests !bus->ci->rambase rather
		 * than a chip id — confirm a zero rambase really implies
		 * the srsize adjustment applies.
		 */
		if (!bus->ci->rambase && inff_chip_sr_capable(bus->ci))
			shaddr -= bus->ci->srsize;
	}

	rv = inff_sdiod_ramrw(bus->sdiodev, false, shaddr,
			      (u8 *)&addr_le, 4);
	if (rv < 0)
		goto fail;

	/*
	 * Check if addr is valid.
	 * NVRAM length at the end of memory should have been overwritten.
	 */
	addr = le32_to_cpu(addr_le);
	if (!inff_sdio_valid_shared_address(addr)) {
		inff_err("invalid sdpcm_shared address 0x%08X\n", addr);
		rv = -EINVAL;
		goto fail;
	}

	inff_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);

	/* Read hndrte_shared structure */
	rv = inff_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
			      sizeof(struct sdpcm_shared_le));
	if (rv < 0)
		goto fail;

	sdio_release_host(bus->sdiodev->func1);

	/* Endianness */
	sh->flags = le32_to_cpu(sh_le.flags);
	sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
	sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
	sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
	sh->assert_line = le32_to_cpu(sh_le.assert_line);
	sh->console_addr = le32_to_cpu(sh_le.console_addr);
	sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);

	/* NOTE(review): "is is" typo in the log string below (left as-is
	 * here; fixing it changes runtime output).
	 */
	inff_dbg(INFO, "rte_console address is is 0x%08x\n", sh->console_addr);

	if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
		inff_err("sdpcm_shared version %d unsupported, drivers expects version <= %d\n",
			 sh->flags & SDPCM_SHARED_VERSION_MASK,
			 SDPCM_SHARED_VERSION);
		return -EPROTO;
	}
	return 0;

fail:
	inff_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
		 rv, addr);
	sdio_release_host(bus->sdiodev->func1);
	return rv;
}

static void inff_sdio_get_console_addr(struct inff_sdio *bus)
{
struct sdpcm_shared sh; + + if (inff_sdio_readshared(bus, &sh) == 0) { + /* reset the last read count when buffer address is updated */ + bus->console.last = 0; + bus->console_addr = sh.console_addr; + } +} +#else +static void inff_sdio_get_console_addr(struct inff_sdio *bus) +{ +} +#endif /* DEBUG */ + +static u32 inff_sdio_hostmail(struct inff_sdio *bus, u32 *hmbd) +{ + struct inff_sdio_dev *sdiod = bus->sdiodev; + struct inff_core *core = bus->sdio_core; + u32 intstatus = 0; + u32 hmb_data; + u8 fcbits; + int ret; + + inff_dbg(SDIO, "Enter\n"); + + /* Read mailbox data and ack that we did so */ + hmb_data = inff_sdiod_readl(sdiod, + core->base + SD_REG(tohostmailboxdata), + &ret); + /* skip generating SMB_INT_ACK if there is no MB data */ + if (!ret && hmb_data) + inff_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox), + SMB_INT_ACK, &ret); + + bus->sdcnt.f1regdata += 2; + + /* dongle indicates the firmware has halted/crashed */ + if (hmb_data & HMB_DATA_FWHALT) { + inff_dbg(SDIO, "mailbox indicates firmware halted\n"); + inff_fw_crashed(&sdiod->func1->dev); + } + + /* Dongle recomposed rx frames, accept them again */ + if (hmb_data & HMB_DATA_NAKHANDLED) { + inff_dbg(SDIO, "Dongle reports NAK handled, expect rtx of %d\n", + bus->rx_seq); + if (!bus->rxskip) + inff_err("unexpected NAKHANDLED!\n"); + + bus->rxskip = false; + intstatus |= I_HMB_FRAME_IND; + } + + /* + * DEVREADY does not occur with gSPI. + */ + if (hmb_data & (HMB_DATA_DEVREADY | HMB_DATA_FWREADY)) { + bus->sdpcm_ver = + (hmb_data & HMB_DATA_VERSION_MASK) >> + HMB_DATA_VERSION_SHIFT; + if (bus->sdpcm_ver != SDPCM_PROT_VERSION) + inff_err("ver mismatch, %d, expecting %d, hmb_data 0x%x\n", + bus->sdpcm_ver, SDPCM_PROT_VERSION, hmb_data); + else + inff_dbg(SDIO, "Dongle ready, protocol version %d\n", + bus->sdpcm_ver); + + /* + * Retrieve console state address now that firmware should have + * updated it. 
+ */ + inff_sdio_get_console_addr(bus); + } + + /* + * Flow Control has been moved into the RX headers and this out of band + * method isn't used any more. + * remaining backward compatible with older dongles. + */ + if (hmb_data & HMB_DATA_FC) { + fcbits = (hmb_data & HMB_DATA_FCDATA_MASK) >> + HMB_DATA_FCDATA_SHIFT; + + if (fcbits & ~bus->flowcontrol) + bus->sdcnt.fc_xoff++; + + if (bus->flowcontrol & ~fcbits) + bus->sdcnt.fc_xon++; + + bus->sdcnt.fc_rcvd++; + bus->flowcontrol = fcbits; + } + + /* Shouldn't be any others */ + if (hmb_data & ~(HMB_DATA_DEVREADY | + HMB_DATA_NAKHANDLED | + HMB_DATA_FC | + HMB_DATA_FWREADY | + HMB_DATA_FWHALT | + HMB_DATA_FCDATA_MASK | HMB_DATA_VERSION_MASK)) + inff_err("Unknown mailbox data content: 0x%02x\n", + hmb_data); + /* Populate hmb_data if argument is passed for DS1 check later */ + if (hmbd) + *hmbd = hmb_data; + + return intstatus; +} + +static void inff_sdio_rxfail(struct inff_sdio *bus, bool abort, bool rtx) +{ + struct inff_sdio_dev *sdiod = bus->sdiodev; + struct inff_core *core = bus->sdio_core; + uint retries = 0; + u16 lastrbc; + u8 hi, lo; + int err; + + inff_err("%sterminate frame%s\n", + abort ? "abort command, " : "", + rtx ? 
", send NAK" : ""); + + if (abort) + inff_sdiod_abort(bus->sdiodev, bus->sdiodev->func2); + + inff_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, + &err); + bus->sdcnt.f1regdata++; + + /* Wait until the packet has been flushed (device/FIFO stable) */ + for (lastrbc = retries = 0xffff; retries > 0; retries--) { + hi = inff_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCHI, + &err); + lo = inff_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCLO, + &err); + bus->sdcnt.f1regdata += 2; + + if (hi == 0 && lo == 0) + break; + + if ((hi > (lastrbc >> 8)) && (lo > (lastrbc & 0x00ff))) { + inff_err("count growing: last 0x%04x now 0x%04x\n", + lastrbc, (hi << 8) + lo); + } + lastrbc = (hi << 8) + lo; + } + + if (!retries) + inff_err("count never zeroed: last 0x%04x\n", lastrbc); + else + inff_dbg(SDIO, "flush took %d iterations\n", 0xffff - retries); + + if (rtx) { + bus->sdcnt.rxrtx++; + inff_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox), + SMB_NAK, &err); + + bus->sdcnt.f1regdata++; + if (err == 0) + bus->rxskip = true; + } + + /* Clear partial in any case */ + bus->cur_read.len = 0; +} + +static void inff_sdio_txfail(struct inff_sdio *bus) +{ + struct inff_sdio_dev *sdiodev = bus->sdiodev; + u8 i, hi, lo; + + /* On failure, abort the command and terminate the frame */ + inff_err("sdio error, abort command and terminate frame\n"); + bus->sdcnt.tx_sderrs++; + + inff_sdiod_abort(sdiodev, sdiodev->func2); + inff_sdiod_writeb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL); + bus->sdcnt.f1regdata++; + + for (i = 0; i < 3; i++) { + hi = inff_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCHI, NULL); + lo = inff_sdiod_readb(sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL); + bus->sdcnt.f1regdata += 2; + if (hi == 0 && lo == 0) + break; + } +} + +/* return total length of buffer chain */ +static uint inff_sdio_glom_len(struct inff_sdio *bus) +{ + struct sk_buff *p; + uint total; + + total = 0; + skb_queue_walk(&bus->glom, p) + total += p->len; + return total; +} + 
+static void inff_sdio_free_glom(struct inff_sdio *bus) +{ + struct sk_buff *cur, *next; + + skb_queue_walk_safe(&bus->glom, cur, next) { + skb_unlink(cur, &bus->glom); + inff_pkt_buf_free_skb(cur); + } +} + +/* + * inffmac sdio bus specific header + * This is the lowest layer header wrapped on the packets transmitted between + * host and WiFi dongle which contains information needed for SDIO core and + * firmware + * + * It consists of 3 parts: hardware header, hardware extension header and + * software header + * hardware header (frame tag) - 4 bytes + * Byte 0~1: Frame length + * Byte 2~3: Checksum, bit-wise inverse of frame length + * hardware extension header - 8 bytes + * Tx glom mode only, N/A for Rx or normal Tx + * Byte 0~1: Packet length excluding hw frame tag + * Byte 2: Reserved + * Byte 3: Frame flags, bit 0: last frame indication + * Byte 4~5: Reserved + * Byte 6~7: Tail padding length + * software header - 8 bytes + * Byte 0: Rx/Tx sequence number + * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag + * Byte 2: Length of next data frame, reserved for Tx + * Byte 3: Data offset + * Byte 4: Flow control bits, reserved for Tx + * Byte 5: Maximum Sequence number allowed by firmware for Tx, N/A for Tx packet + * Byte 6~7: Reserved + */ +#define SDPCM_HWHDR_LEN 4 +#define SDPCM_HWEXT_LEN 8 +#define SDPCM_SWHDR_LEN 8 +#define SDPCM_HDRLEN (SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN) +/* software header */ +#define SDPCM_SEQ_MASK 0x000000ff +#define SDPCM_SEQ_WRAP 256 +#define SDPCM_CHANNEL_MASK 0x00000f00 +#define SDPCM_CHANNEL_SHIFT 8 +#define SDPCM_CONTROL_CHANNEL 0 /* Control */ +#define SDPCM_EVENT_CHANNEL 1 /* Asyc Event Indication */ +#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv */ +#define SDPCM_GLOM_CHANNEL 3 /* Coalesced packets */ +#define SDPCM_TEST_CHANNEL 15 /* Test/debug packets */ +#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80) +#define SDPCM_NEXTLEN_MASK 0x00ff0000 +#define SDPCM_NEXTLEN_SHIFT 16 +#define SDPCM_DOFFSET_MASK 0xff000000 +#define 
SDPCM_DOFFSET_SHIFT 24 +#define SDPCM_FCMASK_MASK 0x000000ff +#define SDPCM_WINDOW_MASK 0x0000ff00 +#define SDPCM_WINDOW_SHIFT 8 + +static inline u8 inff_sdio_getdatoffset(u8 *swheader) +{ + u32 hdrvalue; + + hdrvalue = le32_to_cpu(*(__le32 *)swheader); + return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT); +} + +static inline bool inff_sdio_fromevntchan(u8 *swheader) +{ + u32 hdrvalue; + u8 ret; + + hdrvalue = le32_to_cpu(*(__le32 *)swheader); + ret = (u8)((hdrvalue & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT); + + return (ret == SDPCM_EVENT_CHANNEL); +} + +static int inff_sdio_hdparse(struct inff_sdio *bus, u8 *header, + struct inff_sdio_hdrinfo *rd, + enum inff_sdio_frmtype type) +{ + u16 len, checksum; + u8 rx_seq, fc, tx_seq_max; + u32 swheader; + + trace_inff_sdpcm_hdr(SDPCM_RX, header); + + /* hw header */ + len = get_unaligned_le16(header); + checksum = get_unaligned_le16(header + sizeof(u16)); + /* All zero means no more to read */ + if (!(len | checksum)) { + bus->rxpending = false; + return -ENODATA; + } + if ((u16)(~(len ^ checksum))) { + inff_err("HW header checksum error\n"); + bus->sdcnt.rx_badhdr++; + inff_sdio_rxfail(bus, false, false); + return -EIO; + } + if (len < SDPCM_HDRLEN) { + inff_err("HW header length error\n"); + return -EPROTO; + } + if (type == INFF_SDIO_FT_SUPER && + (roundup(len, bus->blocksize) != rd->len)) { + inff_err("HW superframe header length error\n"); + return -EPROTO; + } + if (type == INFF_SDIO_FT_SUB && len > rd->len) { + inff_err("HW subframe header length error\n"); + return -EPROTO; + } + rd->len = len; + + /* software header */ + header += SDPCM_HWHDR_LEN; + swheader = le32_to_cpu(*(__le32 *)header); + if (type == INFF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) { + inff_err("Glom descriptor found in superframe head\n"); + rd->len = 0; + return -EINVAL; + } + rx_seq = (u8)(swheader & SDPCM_SEQ_MASK); + rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT; + if (len > MAX_RX_DATASZ && 
rd->channel != SDPCM_CONTROL_CHANNEL && + type != INFF_SDIO_FT_SUPER) { + inff_err("HW header length too long\n"); + bus->sdcnt.rx_toolong++; + inff_sdio_rxfail(bus, false, false); + rd->len = 0; + return -EPROTO; + } + if (type == INFF_SDIO_FT_SUPER && rd->channel != SDPCM_GLOM_CHANNEL) { + inff_err("Wrong channel for superframe\n"); + rd->len = 0; + return -EINVAL; + } + if (type == INFF_SDIO_FT_SUB && rd->channel != SDPCM_DATA_CHANNEL && + rd->channel != SDPCM_EVENT_CHANNEL) { + inff_err("Wrong channel for subframe\n"); + rd->len = 0; + return -EINVAL; + } + rd->dat_offset = inff_sdio_getdatoffset(header); + if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) { + inff_err("seq %d: bad data offset\n", rx_seq); + bus->sdcnt.rx_badhdr++; + inff_sdio_rxfail(bus, false, false); + rd->len = 0; + return -ENXIO; + } + if (rd->seq_num != rx_seq) { + inff_dbg(SDIO, "seq %d, expected %d\n", rx_seq, rd->seq_num); + bus->sdcnt.rx_badseq++; + rd->seq_num = rx_seq; + } + /* no need to check the reset for subframe */ + if (type == INFF_SDIO_FT_SUB) + return 0; + rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT; + if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) { + /* only warm for NON glom packet */ + if (rd->channel != SDPCM_GLOM_CHANNEL) + inff_err("seq %d: next length error\n", rx_seq); + rd->len_nxtfrm = 0; + } + swheader = le32_to_cpu(*(__le32 *)(header + 4)); + fc = swheader & SDPCM_FCMASK_MASK; + if (bus->flowcontrol != fc) { + if (~bus->flowcontrol & fc) + bus->sdcnt.fc_xoff++; + if (bus->flowcontrol & ~fc) + bus->sdcnt.fc_xon++; + bus->sdcnt.fc_rcvd++; + bus->flowcontrol = fc; + } + tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT; + if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) { + inff_err("seq %d: max tx seq number error\n", rx_seq); + tx_seq_max = bus->tx_seq + 2; + } + bus->tx_max = tx_seq_max; + + return 0; +} + +static inline void inff_sdio_update_hwhdr(u8 *header, u16 frm_length) +{ + *(__le16 *)header = 
cpu_to_le16(frm_length); + *(((__le16 *)header) + 1) = cpu_to_le16(~frm_length); +} + +static void inff_sdio_hdpack(struct inff_sdio *bus, u8 *header, + struct inff_sdio_hdrinfo *hd_info) +{ + u32 hdrval; + u8 hdr_offset; + + inff_sdio_update_hwhdr(header, hd_info->len); + hdr_offset = SDPCM_HWHDR_LEN; + + if (bus->txglom) { + hdrval = (hd_info->len - hdr_offset) | (hd_info->lastfrm << 24); + *((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval); + hdrval = (u16)hd_info->tail_pad << 16; + *(((__le32 *)(header + hdr_offset)) + 1) = cpu_to_le32(hdrval); + hdr_offset += SDPCM_HWEXT_LEN; + } + + hdrval = hd_info->seq_num; + hdrval |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) & + SDPCM_CHANNEL_MASK; + hdrval |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) & + SDPCM_DOFFSET_MASK; + *((__le32 *)(header + hdr_offset)) = cpu_to_le32(hdrval); + *(((__le32 *)(header + hdr_offset)) + 1) = 0; + trace_inff_sdpcm_hdr(SDPCM_TX + !!(bus->txglom), header); +} + +static inline int inff_rxf_enqueue(struct inff_sdio *bus, struct sk_buff *skb) +{ + u32 store_idx; + u32 sent_idx; + + if (!skb) { + inff_err("NULL skb!!!\n"); + return -EINVAL; + } + + spin_lock_bh(&bus->rxf_lock); + store_idx = bus->store_idx; + sent_idx = bus->sent_idx; + if (bus->skbbuf[store_idx]) { + /* Make sure the previous packets are processed */ + spin_unlock_bh(&bus->rxf_lock); + inff_err("pktbuf not consumed %p, store idx %d sent idx %d\n", + skb, store_idx, sent_idx); + msleep(1000); + return -EINVAL; + } + inff_dbg(DATA, "Store SKB %p. 
idx %d -> %d\n", + skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)); + bus->skbbuf[store_idx] = skb; + bus->store_idx = (store_idx + 1) & (MAXSKBPEND - 1); + spin_unlock_bh(&bus->rxf_lock); + + return 0; +} + +static struct sk_buff *inff_rxf_dequeue(struct inff_sdio *bus) +{ + u32 store_idx; + u32 sent_idx; + struct sk_buff *skb; + + spin_lock_bh(&bus->rxf_lock); + + store_idx = bus->store_idx; + sent_idx = bus->sent_idx; + skb = bus->skbbuf[sent_idx]; + + if (!skb) { + spin_unlock_bh(&bus->rxf_lock); + inff_err("Dequeued packet is NULL, store idx %d sent idx %d\n", + store_idx, sent_idx); + return NULL; + } + + bus->skbbuf[sent_idx] = NULL; + bus->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1); + + inff_dbg(DATA, "dequeue (%p), sent idx %d\n", + skb, sent_idx); + + spin_unlock_bh(&bus->rxf_lock); + + return skb; +} + +static u8 inff_sdio_rxglom(struct inff_sdio *bus, u8 rxseq) +{ + u16 dlen, totlen; + u8 *dptr, num = 0; + u16 sublen; + struct sk_buff *pfirst, *pnext; + struct sk_buff *skb_head = NULL, *skb_prev = NULL, *skb_to_rxfq = NULL; + int errcode; + u8 doff; + struct inff_sdio_hdrinfo rd_new; + struct inff_mp_device *settings = bus->sdiodev->settings; + + /* If packets, issue read(s) and send up packet chain */ + /* Return sequence numbers consumed? */ + + inff_dbg(SDIO, "start: glomd %p glom %p\n", + bus->glomd, skb_peek(&bus->glom)); + + /* If there's a descriptor, generate the packet chain */ + if (bus->glomd) { + pfirst = NULL; + pnext = NULL; + /* it is a u32 len to u16 dlen, should have a sanity check here. 
*/ + if (bus->glomd->len <= 0xFFFF) { + dlen = (u16)(bus->glomd->len); + if (!dlen || (dlen & 1)) { + inff_err("bad glomd len(%d), ignore descriptor\n", + dlen); + dlen = 0; + } + } else { + inff_err("overflowed glomd len(%d), ignore descriptor\n", + bus->glomd->len); + dlen = 0; + } + dptr = bus->glomd->data; + + for (totlen = num = 0; dlen; num++) { + /* Get (and move past) next length */ + sublen = get_unaligned_le16(dptr); + dlen -= sizeof(u16); + dptr += sizeof(u16); + if (sublen < SDPCM_HDRLEN || + (num == 0 && (sublen < (2 * SDPCM_HDRLEN)))) { + inff_err("descriptor len %d bad: %d\n", + num, sublen); + pnext = NULL; + break; + } + if (sublen % bus->sgentry_align) { + inff_err("sublen %d not multiple of %d\n", + sublen, bus->sgentry_align); + } + totlen += sublen; + + /* For last frame, adjust read len so total + * is a block multiple + */ + if (!dlen) { + sublen += + (roundup(totlen, bus->blocksize) - totlen); + totlen = roundup(totlen, bus->blocksize); + } + + /* Allocate/chain packet for next subframe */ + pnext = inff_pkt_buf_get_skb(sublen + bus->sgentry_align); + if (!pnext) { + inff_err("inff_pkt_buf_get_skb failed, num %d len %d\n", + num, sublen); + break; + } + skb_queue_tail(&bus->glom, pnext); + + /* Adhere to start alignment requirements */ + pkt_align(pnext, sublen, bus->sgentry_align); + } + + /* If all allocations succeeded, save packet chain + * in bus structure + */ + if (pnext) { + inff_dbg(GLOM, "allocated %d-byte packet chain for %d subframes\n", + totlen, num); + if (INFF_GLOM_ON() && bus->cur_read.len && + totlen != bus->cur_read.len) { + inff_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n", + bus->cur_read.len, totlen, rxseq); + } + pfirst = NULL; + pnext = NULL; + } else { + inff_sdio_free_glom(bus); + num = 0; + } + + /* Done with descriptor packet */ + inff_pkt_buf_free_skb(bus->glomd); + bus->glomd = NULL; + bus->cur_read.len = 0; + } + + /* Ok -- either we just generated a packet chain, + * or had one from before 
+ */ + if (!skb_queue_empty(&bus->glom)) { + u32 len_glom = 0; + + if (INFF_GLOM_ON()) { + inff_dbg(GLOM, "try superframe read, packet chain:\n"); + skb_queue_walk(&bus->glom, pnext) { + inff_dbg(GLOM, " %p: %p len 0x%04x (%d)\n", + pnext, (u8 *)(pnext->data), + pnext->len, pnext->len); + } + } + + pfirst = skb_peek(&bus->glom); + len_glom = inff_sdio_glom_len(bus); + if (len_glom > 0xFFFF) { + inff_err("glom_len is %d bytes, overflowed\n", + len_glom); + goto frame_error_handle; + } else { + dlen = (u16)len_glom; + } + + /* Do an SDIO read for the superframe. Configurable iovar to + * read directly into the chained packet, or allocate a large + * packet and copy into the chain. + */ + sdio_claim_host(bus->sdiodev->func1); + errcode = inff_sdiod_recv_chain(bus->sdiodev, + &bus->glom, dlen); + sdio_release_host(bus->sdiodev->func1); + bus->sdcnt.f2rxdata++; + + /* On failure, kill the superframe */ + if (errcode < 0) { + inff_err("glom read of %d bytes failed: %d\n", + dlen, errcode); + goto frame_error_handle; + } + + inff_dbg_hex_dump(INFF_GLOM_ON(), + pfirst->data, min_t(int, pfirst->len, 48), + "SUPERFRAME:\n"); + + rd_new.seq_num = rxseq; + rd_new.len = dlen; + sdio_claim_host(bus->sdiodev->func1); + errcode = inff_sdio_hdparse(bus, pfirst->data, &rd_new, + INFF_SDIO_FT_SUPER); + sdio_release_host(bus->sdiodev->func1); + bus->cur_read.len = rd_new.len_nxtfrm << 4; + + /* Remove superframe header, remember offset */ + skb_pull(pfirst, rd_new.dat_offset); + num = 0; + + /* Validate all the subframe headers */ + skb_queue_walk(&bus->glom, pnext) { + /* leave when invalid subframe is found */ + if (errcode) + break; + + rd_new.len = pnext->len; + rd_new.seq_num = rxseq++; + sdio_claim_host(bus->sdiodev->func1); + errcode = inff_sdio_hdparse(bus, pnext->data, &rd_new, + INFF_SDIO_FT_SUB); + sdio_release_host(bus->sdiodev->func1); + inff_dbg_hex_dump(INFF_GLOM_ON(), + pnext->data, 32, "subframe:\n"); + + num++; + } + + /* Terminate frame on error */ + if (errcode) + 
goto frame_error_handle; + + /* Basic SD framing looks ok - process each packet (header) */ + + skb_queue_walk_safe(&bus->glom, pfirst, pnext) { + dptr = (u8 *)(pfirst->data); + sublen = get_unaligned_le16(dptr); + doff = inff_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]); + + inff_dbg_hex_dump(INFF_BYTES_ON() && INFF_DATA_ON(), + dptr, pfirst->len, + "Rx Subframe Data:\n"); + + __skb_trim(pfirst, sublen); + skb_pull(pfirst, doff); + + if (pfirst->len == 0) { + skb_unlink(pfirst, &bus->glom); + inff_pkt_buf_free_skb(pfirst); + continue; + } + + inff_dbg_hex_dump(INFF_GLOM_ON(), + pfirst->data, + min_t(int, pfirst->len, 32), + "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n", + bus->glom.qlen, pfirst, pfirst->data, + pfirst->len, pfirst->next, + pfirst->prev); + skb_unlink(pfirst, &bus->glom); + if (inff_sdio_fromevntchan(&dptr[SDPCM_HWHDR_LEN])) { + inff_rx_event(bus->sdiodev->dev, pfirst); + skb_to_rxfq = NULL; + } else { + skb_to_rxfq = inff_rx_frame(bus->sdiodev->dev, pfirst, + false, false); + } + + if (settings && settings->sdio_rxf_in_kthread_enabled && skb_to_rxfq) { + if (!skb_head) + skb_head = skb_to_rxfq; + else + skb_prev->next = skb_to_rxfq; + + skb_prev = skb_to_rxfq; + } + bus->sdcnt.rxglompkts++; + } + + bus->sdcnt.rxglomframes++; + } + + if (settings && settings->sdio_rxf_in_kthread_enabled && skb_head) + inff_sched_rxf(bus, skb_head); + + return num; + +frame_error_handle: + sdio_claim_host(bus->sdiodev->func1); + inff_sdio_rxfail(bus, true, false); + bus->sdcnt.rxglomfail++; + inff_sdio_free_glom(bus); + sdio_release_host(bus->sdiodev->func1); + bus->cur_read.len = 0; + + return 0; +} + +static int inff_sdio_dcmd_resp_wait(struct inff_sdio *bus, uint *condition, + bool *pending) +{ + DECLARE_WAITQUEUE(wait, current); + int timeout = DCMD_RESP_TIMEOUT; + + /* Wait until control frame is available */ + add_wait_queue(&bus->dcmd_resp_wait, &wait); + set_current_state(TASK_INTERRUPTIBLE); + + while (!(*condition) && (!signal_pending(current) && 
timeout)) + timeout = schedule_timeout(timeout); + + if (signal_pending(current)) + *pending = true; + + set_current_state(TASK_RUNNING); + remove_wait_queue(&bus->dcmd_resp_wait, &wait); + + return timeout; +} + +static int inff_sdio_dcmd_resp_wake(struct inff_sdio *bus) +{ + wake_up_interruptible(&bus->dcmd_resp_wait); + + return 0; +} + +static void +inff_sdio_read_control(struct inff_sdio *bus, u8 *hdr, uint len, uint doff) +{ + uint rdlen, pad; + u8 *buf = NULL, *rbuf; + int sdret; + + inff_dbg(SDIO, "Enter\n"); + if (bus->rxblen) + buf = vzalloc(bus->rxblen); + if (!buf) + goto done; + + rbuf = bus->rxbuf; + pad = ((unsigned long)rbuf % bus->head_align); + if (pad) + rbuf += (bus->head_align - pad); + + /* Copy the already-read portion over */ + memcpy(buf, hdr, INFF_FIRSTREAD); + if (len <= INFF_FIRSTREAD) + goto gotpkt; + + /* Raise rdlen to next SDIO block to avoid tail command */ + rdlen = len - INFF_FIRSTREAD; + if (bus->roundup && bus->blocksize && rdlen > bus->blocksize) { + pad = bus->blocksize - (rdlen % bus->blocksize); + if (pad <= bus->roundup && pad < bus->blocksize && + ((len + pad) < bus->sdiodev->bus_if->maxctl)) + rdlen += pad; + } else if (rdlen % bus->head_align) { + rdlen += bus->head_align - (rdlen % bus->head_align); + } + + /* Drop if the read is too big or it exceeds our maximum */ + if ((rdlen + INFF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) { + inff_err("%d-byte control read exceeds %d-byte buffer\n", + rdlen, bus->sdiodev->bus_if->maxctl); + inff_sdio_rxfail(bus, false, false); + goto done; + } + + if ((len - doff) > bus->sdiodev->bus_if->maxctl) { + inff_err("%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", + len, len - doff, bus->sdiodev->bus_if->maxctl); + bus->sdcnt.rx_toolong++; + inff_sdio_rxfail(bus, false, false); + goto done; + } + + /* Read remain of frame body */ + sdret = inff_sdiod_recv_buf(bus->sdiodev, SDIO_FUNC_2, rbuf, rdlen); + bus->sdcnt.f2rxdata++; + + /* Control frame failures need retransmission 
*/ + if (sdret < 0) { + inff_err("read %d control bytes failed: %d\n", + rdlen, sdret); + bus->sdcnt.rxc_errors++; + inff_sdio_rxfail(bus, true, true); + goto done; + } else { + memcpy(buf + INFF_FIRSTREAD, rbuf, rdlen); + } + +gotpkt: + + inff_dbg_hex_dump(INFF_BYTES_ON() && INFF_CTL_ON(), + buf, len, "RxCtrl:\n"); + + /* Point to valid data and indicate its length */ + spin_lock_bh(&bus->rxctl_lock); + if (bus->rxctl) { + inff_err("last control frame is being processed.\n"); + spin_unlock_bh(&bus->rxctl_lock); + vfree(buf); + goto done; + } + bus->rxctl = buf + doff; + bus->rxctl_orig = buf; + bus->rxlen = len - doff; + spin_unlock_bh(&bus->rxctl_lock); + +done: + /* Awake any waiters */ + inff_sdio_dcmd_resp_wake(bus); +} + +/* Pad read to blocksize for efficiency */ +static void inff_sdio_pad(struct inff_sdio *bus, u16 *pad, u16 *rdlen) +{ + if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) { + *pad = bus->blocksize - (*rdlen % bus->blocksize); + if (*pad <= bus->roundup && *pad < bus->blocksize && + *rdlen + *pad + INFF_FIRSTREAD < MAX_RX_DATASZ) + *rdlen += *pad; + } else if (*rdlen % bus->head_align) { + *rdlen += bus->head_align - (*rdlen % bus->head_align); + } +} + +#ifdef CONFIG_INFFMAC_BT_SHARED_SDIO +static bool inff_sdio_rx_pkt_is_avail(struct inff_sdio *bus) +{ + struct inff_sdio_dev *sdiod = bus->sdiodev; + u32 newstatus = 0; + u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus); + int err = 0; + bool ret = true; + + if (!inff_btsdio_is_active(bus->sdiodev->bus_if)) + return true; + + /* read interrupt to get fifo status*/ + newstatus = inff_sdiod_readl(sdiod, intstat_addr, &err); + if (err < 0) { + inff_err("read int status failed %d\n", err); + ret = false; + } else if (!(newstatus & I_XMTDATA_AVAIL)) { + /* no more frames */ + inff_dbg(DATA, "no more frames, int status: 0x%08x\n", newstatus); + ret = false; + } + + return ret; +} +#endif /* CONFIG_INFFMAC_BT_SHARED_SDIO */ + +static uint inff_sdio_readframes(struct inff_sdio 
*bus, uint maxframes) +{ + struct sk_buff *pkt; /* Packet for event or data frames */ + u16 pad; /* Number of pad bytes to read */ + uint rxleft = 0; /* Remaining number of frames allowed */ + int ret; /* Return code from calls */ + uint rxcount = 0; /* Total frames read */ + struct inff_sdio_hdrinfo *rd = &bus->cur_read, rd_new; + u8 head_read = 0; + struct sk_buff *skb_to_rxfq = NULL, *skb_head = NULL, *skb_prev = NULL; + struct inff_mp_device *settings = bus->sdiodev->settings; + + inff_dbg(SDIO, "Enter\n"); + + /* Not finished unless we encounter no more frames indication */ + bus->rxpending = true; + + for (rd->seq_num = bus->rx_seq, rxleft = maxframes; + !bus->rxskip && rxleft && bus->sdiodev->state == INFF_SDIOD_DATA; + rd->seq_num++, rxleft--) { + /* Handle glomming separately */ + if (bus->glomd || !skb_queue_empty(&bus->glom)) { + u8 cnt; + + inff_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n", + bus->glomd, skb_peek(&bus->glom)); + cnt = inff_sdio_rxglom(bus, rd->seq_num); + inff_dbg(GLOM, "rxglom returned %d\n", cnt); + rd->seq_num += cnt - 1; + rxleft = (rxleft > cnt) ? 
(rxleft - cnt) : 1; + continue; + } + + sdio_claim_host(bus->sdiodev->func1); + +#ifdef CONFIG_INFFMAC_BT_SHARED_SDIO + /* make sure rx pkt check and rece buf in the same critical section */ + if (!inff_sdio_rx_pkt_is_avail(bus)) { + bus->rxpending = false; + sdio_release_host(bus->sdiodev->func1); + break; + } +#endif /* CONFIG_INFFMAC_BT_SHARED_SDIO */ + + rd->len_left = rd->len; + /* read header first for unknown frame length */ + if (!rd->len) { + ret = inff_sdiod_recv_buf(bus->sdiodev, SDIO_FUNC_2, + bus->rxhdr, INFF_FIRSTREAD); + bus->sdcnt.f2rxhdrs++; + if (ret < 0) { + inff_err("RXHEADER FAILED: %d\n", + ret); + bus->sdcnt.rx_hdrfail++; + inff_sdio_rxfail(bus, true, true); + sdio_release_host(bus->sdiodev->func1); + continue; + } + + inff_dbg_hex_dump(INFF_BYTES_ON() || INFF_HDRS_ON(), + bus->rxhdr, SDPCM_HDRLEN, + "RxHdr:\n"); + + if (inff_sdio_hdparse(bus, bus->rxhdr, rd, + INFF_SDIO_FT_NORMAL)) { + sdio_release_host(bus->sdiodev->func1); + if (!bus->rxpending) + break; + + continue; + } + + if (rd->channel == SDPCM_CONTROL_CHANNEL) { + inff_sdio_read_control(bus, bus->rxhdr, + rd->len, + rd->dat_offset); + /* prepare the descriptor for the next read */ + rd->len = rd->len_nxtfrm << 4; + rd->len_nxtfrm = 0; + /* treat all packet as event if we don't know */ + rd->channel = SDPCM_EVENT_CHANNEL; + sdio_release_host(bus->sdiodev->func1); + continue; + } + rd->len_left = rd->len > INFF_FIRSTREAD ? 
+ rd->len - INFF_FIRSTREAD : 0; + head_read = INFF_FIRSTREAD; + } + + inff_sdio_pad(bus, &pad, &rd->len_left); + + pkt = inff_pkt_buf_get_skb(rd->len_left + head_read + + bus->head_align); + if (!pkt) { + /* Give up on data, request rtx of events */ + inff_err("inff_pkt_buf_get_skb failed\n"); + inff_sdio_rxfail(bus, false, + RETRYCHAN(rd->channel)); + sdio_release_host(bus->sdiodev->func1); + continue; + } + skb_pull(pkt, head_read); + pkt_align(pkt, rd->len_left, bus->head_align); + + ret = inff_sdiod_recv_pkt(bus->sdiodev, SDIO_FUNC_2, pkt); + bus->sdcnt.f2rxdata++; + sdio_release_host(bus->sdiodev->func1); + + if (ret < 0) { + inff_err("read %d bytes from channel %d failed: %d\n", + rd->len, rd->channel, ret); + inff_pkt_buf_free_skb(pkt); + sdio_claim_host(bus->sdiodev->func1); + inff_sdio_rxfail(bus, true, + RETRYCHAN(rd->channel)); + sdio_release_host(bus->sdiodev->func1); + continue; + } + + if (head_read) { + skb_push(pkt, head_read); + memcpy(pkt->data, bus->rxhdr, head_read); + head_read = 0; + } else { + memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN); + rd_new.seq_num = rd->seq_num; + sdio_claim_host(bus->sdiodev->func1); + if (inff_sdio_hdparse(bus, bus->rxhdr, &rd_new, + INFF_SDIO_FT_NORMAL)) { + rd->len = 0; + inff_sdio_rxfail(bus, true, true); + sdio_release_host(bus->sdiodev->func1); + inff_pkt_buf_free_skb(pkt); + continue; + } + bus->sdcnt.rx_readahead_cnt++; + if (rd->len != roundup(rd_new.len, 16)) { + inff_err("frame length mismatch:read %d, should be %d\n", + rd->len, + roundup(rd_new.len, 16) >> 4); + rd->len = 0; + inff_sdio_rxfail(bus, true, true); + sdio_release_host(bus->sdiodev->func1); + inff_pkt_buf_free_skb(pkt); + continue; + } + sdio_release_host(bus->sdiodev->func1); + rd->len_nxtfrm = rd_new.len_nxtfrm; + rd->channel = rd_new.channel; + rd->dat_offset = rd_new.dat_offset; + + inff_dbg_hex_dump(!(INFF_BYTES_ON() && + INFF_DATA_ON()) && + INFF_HDRS_ON(), + bus->rxhdr, SDPCM_HDRLEN, + "RxHdr:\n"); + + if (rd_new.channel == 
SDPCM_CONTROL_CHANNEL) { + inff_err("readahead on control packet %d?\n", + rd_new.seq_num); + /* Force retry w/normal header read */ + rd->len = 0; + sdio_claim_host(bus->sdiodev->func1); + inff_sdio_rxfail(bus, false, true); + sdio_release_host(bus->sdiodev->func1); + inff_pkt_buf_free_skb(pkt); + continue; + } + } + + inff_dbg_hex_dump(INFF_BYTES_ON() && INFF_DATA_ON(), + pkt->data, rd->len, "Rx Data:\n"); + + /* Save superframe descriptor and allocate packet frame */ + if (rd->channel == SDPCM_GLOM_CHANNEL) { + if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) { + inff_dbg(GLOM, "glom descriptor, %d bytes:\n", + rd->len); + inff_dbg_hex_dump(INFF_GLOM_ON(), + pkt->data, rd->len, + "Glom Data:\n"); + __skb_trim(pkt, rd->len); + skb_pull(pkt, SDPCM_HDRLEN); + bus->glomd = pkt; + } else { + inff_err("%s: glom superframe w/o descriptor!\n", __func__); + sdio_claim_host(bus->sdiodev->func1); + inff_sdio_rxfail(bus, false, false); + sdio_release_host(bus->sdiodev->func1); + } + /* prepare the descriptor for the next read */ + rd->len = rd->len_nxtfrm << 4; + rd->len_nxtfrm = 0; + /* treat all packet as event if we don't know */ + rd->channel = SDPCM_EVENT_CHANNEL; + continue; + } + + /* Fill in packet len and prio, deliver upward */ + __skb_trim(pkt, rd->len); + skb_pull(pkt, rd->dat_offset); + + if (pkt->len == 0) { + inff_pkt_buf_free_skb(pkt); + skb_to_rxfq = NULL; + } else if (rd->channel == SDPCM_EVENT_CHANNEL) { + inff_rx_event(bus->sdiodev->dev, pkt); + skb_to_rxfq = NULL; + } else { + skb_to_rxfq = inff_rx_frame(bus->sdiodev->dev, pkt, + false, false); + } + + if (settings && settings->sdio_rxf_in_kthread_enabled && skb_to_rxfq) { + if (!skb_head) + skb_head = skb_to_rxfq; + else + skb_prev->next = skb_to_rxfq; + + skb_prev = skb_to_rxfq; + } + + /* prepare the descriptor for the next read */ + rd->len = rd->len_nxtfrm << 4; + rd->len_nxtfrm = 0; + /* treat all packet as event if we don't know */ + rd->channel = SDPCM_EVENT_CHANNEL; + } + + if (settings && 
settings->sdio_rxf_in_kthread_enabled && skb_head) + inff_sched_rxf(bus, skb_head); + + rxcount = maxframes - rxleft; + /* Message if we hit the limit */ + if (!rxleft) + inff_dbg(DATA, "hit rx limit of %d frames\n", maxframes); + else + inff_dbg(DATA, "processed %d frames\n", rxcount); + /* Back off rxseq if awaiting rtx, update rx_seq */ + if (bus->rxskip) + rd->seq_num--; + bus->rx_seq = rd->seq_num; + + return rxcount; +} + +static void +inff_sdio_wait_event_wakeup(struct inff_sdio *bus) +{ + wake_up(&bus->ctrl_wait); +} + +static int inff_sdio_txpkt_hdalign(struct inff_sdio *bus, struct sk_buff *pkt) +{ + struct inff_bus_stats *stats; + u16 head_pad; + u8 *dat_buf; + + dat_buf = (u8 *)(pkt->data); + + /* Check head padding */ + head_pad = ((unsigned long)dat_buf % bus->head_align); + if (head_pad) { + if (skb_headroom(pkt) < head_pad) { + stats = &bus->sdiodev->bus_if->stats; + atomic_inc(&stats->pktcowed); + if (skb_cow_head(pkt, head_pad)) { + atomic_inc(&stats->pktcow_failed); + return -ENOMEM; + } + head_pad = 0; + } + skb_push(pkt, head_pad); + dat_buf = (u8 *)(pkt->data); + } + memset(dat_buf, 0, head_pad + bus->tx_hdrlen); + return head_pad; +} + +/* + * struct inff_skbuff_cb reserves first two bytes in sk_buff::cb for + * bus layer usage. 
+ */ +/* flag marking a dummy skb added for DMA alignment requirement */ +#define ALIGN_SKB_FLAG 0x8000 +/* bit mask of data length chopped from the previous packet */ +#define ALIGN_SKB_CHOP_LEN_MASK 0x7fff + +static int inff_sdio_txpkt_prep_sg(struct inff_sdio *bus, + struct sk_buff_head *pktq, + struct sk_buff *pkt, u16 total_len) +{ + struct inff_sdio_dev *sdiodev; + struct sk_buff *pkt_pad; + u16 tail_pad, tail_chop, chain_pad; + unsigned int blksize; + bool lastfrm; + int ntail, ret; + + sdiodev = bus->sdiodev; + blksize = sdiodev->func2->cur_blksize; + /* sg entry alignment should be a divisor of block size */ + WARN_ON(blksize % bus->sgentry_align); + + /* Check tail padding */ + lastfrm = skb_queue_is_last(pktq, pkt); + tail_pad = 0; + tail_chop = pkt->len % bus->sgentry_align; + if (tail_chop) + tail_pad = bus->sgentry_align - tail_chop; + chain_pad = (total_len + tail_pad) % blksize; + if (lastfrm && chain_pad) + tail_pad += blksize - chain_pad; + if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) { + pkt_pad = inff_pkt_buf_get_skb(tail_pad + tail_chop + + bus->head_align); + if (!pkt_pad) + return -ENOMEM; + ret = inff_sdio_txpkt_hdalign(bus, pkt_pad); + if (unlikely(ret < 0)) { + kfree_skb(pkt_pad); + return ret; + } + memcpy(pkt_pad->data, + pkt->data + pkt->len - tail_chop, + tail_chop); + *(u16 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop; + skb_trim(pkt, pkt->len - tail_chop); + skb_trim(pkt_pad, tail_pad + tail_chop); + __skb_queue_after(pktq, pkt, pkt_pad); + } else { + ntail = pkt->data_len + tail_pad - + (pkt->end - pkt->tail); + if (skb_cloned(pkt) || ntail > 0) + if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC)) + return -ENOMEM; + if (skb_linearize(pkt)) + return -ENOMEM; + __skb_put(pkt, tail_pad); + } + + return tail_pad; +} + +/** + * inff_sdio_txpkt_prep - packet preparation for transmit + * @bus: inff_sdio structure pointer + * @pktq: packet list pointer + * @chan: virtual channel to transmit the packet + * + * Processes to be 
applied to the packet + * - Align data buffer pointer + * - Align data buffer length + * - Prepare header + * Return: negative value if there is error + */ +static int +inff_sdio_txpkt_prep(struct inff_sdio *bus, struct sk_buff_head *pktq, + uint chan) +{ + u16 head_pad, total_len; + struct sk_buff *pkt_next; + u8 txseq; + int ret; + struct inff_sdio_hdrinfo hd_info = {0}; + + txseq = bus->tx_seq; + total_len = 0; + skb_queue_walk(pktq, pkt_next) { + /* alignment packet inserted in previous + * loop cycle can be skipped as it is + * already properly aligned and does not + * need an sdpcm header. + */ + if (*(u16 *)pkt_next->cb & ALIGN_SKB_FLAG) + continue; + + /* align packet data pointer */ + ret = inff_sdio_txpkt_hdalign(bus, pkt_next); + if (ret < 0) + return ret; + head_pad = (u16)ret; + if (head_pad) + memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad); + + total_len += pkt_next->len; + + hd_info.len = pkt_next->len; + hd_info.lastfrm = skb_queue_is_last(pktq, pkt_next); + if (bus->txglom && pktq->qlen > 1) { + ret = inff_sdio_txpkt_prep_sg(bus, pktq, + pkt_next, total_len); + if (ret < 0) + return ret; + hd_info.tail_pad = (u16)ret; + total_len += (u16)ret; + } + + hd_info.channel = chan; + hd_info.dat_offset = head_pad + bus->tx_hdrlen; + hd_info.seq_num = txseq++; + + /* Now fill the header */ + inff_sdio_hdpack(bus, pkt_next->data, &hd_info); + + if (INFF_BYTES_ON() && + ((INFF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) || + (INFF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL))) + inff_dbg_hex_dump(true, pkt_next->data, hd_info.len, + "Tx Frame:\n"); + else if (INFF_HDRS_ON()) + inff_dbg_hex_dump(true, pkt_next->data, + head_pad + bus->tx_hdrlen, + "Tx Header:\n"); + } + /* Hardware length tag of the first packet should be total + * length of the chain (including padding) + */ + if (bus->txglom) + inff_sdio_update_hwhdr(__skb_peek(pktq)->data, total_len); + return 0; +} + +/** + * inff_sdio_txpkt_postp - packet post processing for transmit + * @bus: 
inff_sdio structure pointer + * @pktq: packet list pointer + * + * Processes to be applied to the packet + * - Remove head padding + * - Remove tail padding + */ +static void +inff_sdio_txpkt_postp(struct inff_sdio *bus, struct sk_buff_head *pktq) +{ + u8 *hdr; + u32 dat_offset; + u16 tail_pad; + u16 dummy_flags, chop_len; + struct sk_buff *pkt_next, *tmp, *pkt_prev; + + skb_queue_walk_safe(pktq, pkt_next, tmp) { + dummy_flags = *(u16 *)(pkt_next->cb); + if (dummy_flags & ALIGN_SKB_FLAG) { + chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK; + if (chop_len) { + pkt_prev = pkt_next->prev; + skb_put(pkt_prev, chop_len); + } + __skb_unlink(pkt_next, pktq); + inff_pkt_buf_free_skb(pkt_next); + } else { + hdr = pkt_next->data + bus->tx_hdrlen - SDPCM_SWHDR_LEN; + dat_offset = le32_to_cpu(*(__le32 *)hdr); + dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >> + SDPCM_DOFFSET_SHIFT; + skb_pull(pkt_next, dat_offset); + if (bus->txglom) { + tail_pad = le16_to_cpu(*(__le16 *)(hdr - 2)); + skb_trim(pkt_next, pkt_next->len - tail_pad); + } + } + } +} + +/* Writes a HW/SW header into the packet and sends it. 
*/ +/* Assumes: (a) header space already there, (b) caller holds lock */ +static int inff_sdio_txpkt(struct inff_sdio *bus, struct sk_buff_head *pktq, + uint chan) +{ + int ret; + struct sk_buff *pkt_next, *tmp; + + inff_dbg(TRACE, "Enter\n"); + + ret = inff_sdio_txpkt_prep(bus, pktq, chan); + if (ret) + goto done; + + sdio_claim_host(bus->sdiodev->func1); + ret = inff_sdiod_send_pkt(bus->sdiodev, pktq); + bus->sdcnt.f2txdata++; + + if (ret < 0) + inff_sdio_txfail(bus); + + sdio_release_host(bus->sdiodev->func1); + +done: + inff_sdio_txpkt_postp(bus, pktq); + if (ret == 0) + bus->tx_seq = (bus->tx_seq + pktq->qlen) % SDPCM_SEQ_WRAP; + skb_queue_walk_safe(pktq, pkt_next, tmp) { + __skb_unlink(pkt_next, pktq); + inff_proto_bcdc_txcomplete(bus->sdiodev->dev, pkt_next, + ret == 0); + } + return ret; +} + +static uint inff_sdio_sendfromq(struct inff_sdio *bus, uint maxframes) +{ + struct sk_buff *pkt; + struct sk_buff_head pktq; + int ret = 0, prec_out, i; + uint cnt = 0; + u8 tx_prec_map, pkt_num, que_cnt; + + inff_dbg(TRACE, "Enter\n"); + + tx_prec_map = ~bus->flowcontrol; + + /* Send frames until the limit or some other event */ + for (cnt = 0; (cnt < maxframes) && data_ok(bus);) { + pkt_num = 1; + que_cnt = bus->tx_max - bus->tx_seq; + if (bus->txglom) { + pkt_num = min_t(u8, que_cnt - TXCTL_CREDITS, + bus->sdiodev->txglomsz); + } + + pkt_num = min_t(u32, pkt_num, + inff_pktq_mlen(&bus->txq, ~bus->flowcontrol)); + __skb_queue_head_init(&pktq); + spin_lock_bh(&bus->txq_lock); + for (i = 0; i < pkt_num; i++) { + pkt = inff_pktq_mdeq(&bus->txq, tx_prec_map, + &prec_out); + if (!pkt) + break; + __skb_queue_tail(&pktq, pkt); + } + spin_unlock_bh(&bus->txq_lock); + if (i == 0) + break; + + ret = inff_sdio_txpkt(bus, &pktq, SDPCM_DATA_CHANNEL); + + cnt += i; + } + + /* Deflow-control stack if needed */ + if (bus->sdiodev->state == INFF_SDIOD_DATA && + bus->txoff && (pktq_len(&bus->txq) < TXLOW)) { + bus->txoff = false; + inff_proto_bcdc_txflowblock(bus->sdiodev->dev, 
false); + } + + return cnt; +} + +static int inff_sdio_tx_ctrlframe(struct inff_sdio *bus, u8 *frame, u16 len) +{ + u8 doff; + u16 pad; + uint retries = 0; + struct inff_sdio_hdrinfo hd_info = {0}; + int ret; + + inff_dbg(SDIO, "Enter\n"); + + /* Back the pointer to make room for bus header */ + frame -= bus->tx_hdrlen; + len += bus->tx_hdrlen; + + /* Add alignment padding (optional for ctl frames) */ + doff = ((unsigned long)frame % bus->head_align); + if (doff) { + frame -= doff; + len += doff; + memset(frame + bus->tx_hdrlen, 0, doff); + } + + /* Round send length to next SDIO block */ + pad = 0; + if (bus->roundup && bus->blocksize && len > bus->blocksize) { + pad = bus->blocksize - (len % bus->blocksize); + if (pad > bus->roundup || pad >= bus->blocksize) + pad = 0; + } else if (len % bus->head_align) { + pad = bus->head_align - (len % bus->head_align); + } + len += pad; + + hd_info.len = len - pad; + hd_info.channel = SDPCM_CONTROL_CHANNEL; + hd_info.dat_offset = doff + bus->tx_hdrlen; + hd_info.seq_num = bus->tx_seq; + hd_info.lastfrm = true; + hd_info.tail_pad = pad; + inff_sdio_hdpack(bus, frame, &hd_info); + + if (bus->txglom) + inff_sdio_update_hwhdr(frame, len); + + inff_dbg_hex_dump(INFF_BYTES_ON() && INFF_CTL_ON(), + frame, len, "Tx Frame:\n"); + inff_dbg_hex_dump(!(INFF_BYTES_ON() && INFF_CTL_ON()) && + INFF_HDRS_ON(), + frame, min_t(u16, len, 16), "TxHdr:\n"); + + do { + ret = inff_sdiod_send_buf(bus->sdiodev, SDIO_FUNC_2, frame, len); + + if (ret < 0) + inff_sdio_txfail(bus); + else + bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP; + } while (ret < 0 && retries++ < TXRETRIES); + + return ret; +} + +static bool inff_chip_is_ulp(struct inff_chip *ci) +{ + if (ci->chip == INF_CC_43022_CHIP_ID) + return true; + else + return false; +} + +static bool inff_sdio_use_ht_avail(struct inff_chip *ci) +{ + if (ci->chip == INF_CC_5551X_CHIP_ID || + ci->chip == INF_CC_5557X_CHIP_ID) + return true; + else + return false; +} + +static void 
inff_sdio_bus_stop(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct inff_sdio *bus = sdiodev->bus; + struct inff_core *core = bus->sdio_core; + u32 local_hostintmask; + u8 saveclk, bpreq; + int err; + + inff_dbg(TRACE, "Enter\n"); + + if (bus->watchdog_tsk) { + send_sig(SIGTERM, bus->watchdog_tsk, 1); + kthread_stop(bus->watchdog_tsk); + bus->watchdog_tsk = NULL; + } + + if (bus->thr_rxf_ctl.p_task) { + send_sig(SIGTERM, bus->thr_rxf_ctl.p_task, 1); + kthread_stop(bus->thr_rxf_ctl.p_task); + bus->thr_rxf_ctl.p_task = NULL; + } + + if (sdiodev->state != INFF_SDIOD_NOMEDIUM) { + sdio_claim_host(sdiodev->func1); + + /* Enable clock for device interrupts */ + inff_sdio_bus_sleep(bus, false, false); + + /* Disable and clear interrupts at the chip level also */ + inff_sdiod_writel(sdiodev, core->base + SD_REG(hostintmask), + 0, NULL); + + local_hostintmask = bus->hostintmask; + bus->hostintmask = 0; + + /* Force backplane clocks to assure F2 interrupt propagates */ + saveclk = inff_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + &err); + if (!err) { + bpreq = saveclk; + bpreq |= (inff_sdio_use_ht_avail(bus->ci) || + inff_chip_is_ulp(bus->ci)) ? 
+ SBSDIO_HT_AVAIL_REQ : SBSDIO_FORCE_HT; + inff_sdiod_writeb(sdiodev, + SBSDIO_FUNC1_CHIPCLKCSR, + bpreq, &err); + } + if (err) + inff_err("Failed to force clock for F2: err %d\n", + err); + + /* Turn off the bus (F2), free any pending packets */ + inff_dbg(INTR, "disable SDIO interrupts\n"); + sdio_disable_func(sdiodev->func2); + + /* Clear any pending interrupts now that F2 is disabled */ + inff_sdiod_writel(sdiodev, core->base + SD_REG(intstatus), + local_hostintmask, NULL); + + sdio_release_host(sdiodev->func1); + } + /* Clear the data packet queues */ + inff_pktq_flush(&bus->txq, true, NULL, NULL); + + /* Clear any held glomming stuff */ + inff_pkt_buf_free_skb(bus->glomd); + inff_sdio_free_glom(bus); + + /* Clear rx control and wake any waiters */ + spin_lock_bh(&bus->rxctl_lock); + bus->rxlen = 0; + spin_unlock_bh(&bus->rxctl_lock); + inff_sdio_dcmd_resp_wake(bus); + + /* Reset some F2 state stuff */ + bus->rxskip = false; + bus->tx_seq = 0; + bus->rx_seq = 0; +} + +static inline void inff_sdio_clrintr(struct inff_sdio *bus) +{ + struct inff_sdio_dev *sdiodev; + unsigned long flags; + + sdiodev = bus->sdiodev; + if (sdiodev->oob_irq_requested) { + spin_lock_irqsave(&sdiodev->irq_en_lock, flags); + if (!sdiodev->irq_en && !atomic_read(&bus->ipend)) { + enable_irq(sdiodev->settings->bus.sdio.oob_irq_nr); + sdiodev->irq_en = true; + } + spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags); + } +} + +static int inff_sdio_intr_rstatus(struct inff_sdio *bus) +{ + struct inff_core *core = bus->sdio_core; + u32 addr; + unsigned long val; + int ret; + + addr = core->base + SD_REG(intstatus); + + val = inff_sdiod_readl(bus->sdiodev, addr, &ret); + bus->sdcnt.f1regdata++; + if (ret != 0) + return ret; + + val &= bus->hostintmask; + atomic_set(&bus->fcstate, !!(val & I_HMB_FC_STATE)); + + /* Clear interrupts */ + if (val) { + inff_sdiod_writel(bus->sdiodev, addr, val, &ret); + bus->sdcnt.f1regdata++; + atomic_or(val, &bus->intstatus); + } + + return ret; +} + +/* This 
Function is used to retrieve important + * details from dongle related to ULP mode. Mostly + * values/SHM details that will vary depending + * on the firmware branches + */ +static void +inff_sdio_ulp_preinit(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct inff_if *ifp = bus_if->drvr->iflist[0]; + + inff_dbg(ULP, "Enter\n"); + + /* Query ulp_sdioctrl iovar to get the ULP related SHM offsets */ + inff_fil_iovar_data_get(ifp, "ulp_sdioctrl", + &sdiodev->fmac_ulp.ulp_shm_offset, + sizeof(sdiodev->fmac_ulp.ulp_shm_offset)); + + sdiodev->ulp = false; + + inff_dbg(ULP, "m_ulp_ctrl_sdio[%x] m_ulp_wakeevt_ind [%x]\n", + M_DS1_CTRL_SDIO(sdiodev->fmac_ulp), + M_WAKEEVENT_IND(sdiodev->fmac_ulp)); + inff_dbg(ULP, "m_ulp_wakeind [%x]\n", + M_ULP_WAKE_IND(sdiodev->fmac_ulp)); +} + +/* Reinitialize ARM because in DS1 mode the ARM is powered off */ +static int +inff_sdio_ulp_reinit_fw(struct inff_sdio *bus) +{ + struct inff_sdio_dev *sdiodev = bus->sdiodev; + struct inff_fw_request *fwreq; + int err = 0; + + /* After firmware redownload, tx/rx seq are reset accordingly; + * these values are reset on the FMAC side. tx_max is initially set to 4, + * which is later updated by FW. 
+ */ + bus->tx_seq = 0; + bus->rx_seq = 0; + bus->tx_max = 4; + + fwreq = inff_prepare_fw_request(sdiodev->settings->firmware_path, + bus->ci, inff_sdio_fwnames, + ARRAY_SIZE(inff_sdio_fwnames), + bus->sdiodev->settings->board_type); + if (!fwreq) + return -ENOMEM; + + /* stop the watch dog -> idle time until reinit is done */ + inff_sdio_wd_timer(bus, false); + + err = inff_fw_get_firmwares(sdiodev->dev, fwreq, + inff_sdio_firmware_callback); + if (err != 0) { + inff_err("async firmware request failed: %d\n", err); + kfree(fwreq); + } + + return err; +} + +/* Check if device is in DS1 mode and handshake with ULP UCODE */ +static bool +inff_sdio_ulp_pre_redownload_check(struct inff_sdio *bus, u32 hmb_data) +{ + struct inff_sdio_dev *sdiod = bus->sdiodev; + int err = 0; + u32 value = 0; + u32 val32, ulp_wake_ind, wowl_wake_ind; + int reg_addr; + unsigned long timeout; + struct inff_ulp *fmac_ulp = &bus->sdiodev->fmac_ulp; + int i = 0; + + /* If any host mail box data is present, ignore DS1 exit sequence */ + if (hmb_data) + return false; + /* Skip if DS1 Exit is already in progress + * This can happen if firmware download is taking more time + */ + if (fmac_ulp->ulp_state == FMAC_ULP_TRIGGERED) + return false; + + value = inff_sdiod_func0_rb(sdiod, SDIO_CCCR_IOEx, &err); + + if (value != SDIO_FUNC_ENABLE_1) + return false; + + inff_dbg(ULP, "GOT THE INTERRUPT FROM UCODE\n"); + sdiod->ulp = true; + fmac_ulp->ulp_state = FMAC_ULP_TRIGGERED; + + /* D11 SHM and PMU can not be accessed from host in case of 43022. + * so, this logic may have to be moved to DS2 FW. Until then, skip it + * for DS2. + */ + if (bus->ci->chip != INF_CC_43022_CHIP_ID) { + ulp_wake_ind = D11SHM_RDW(sdiod, + M_ULP_WAKE_IND(sdiod->fmac_ulp), + &err); + wowl_wake_ind = D11SHM_RDW(sdiod, + M_WAKEEVENT_IND(sdiod->fmac_ulp), + &err); + + inff_dbg(ULP, "wowl_wake_ind: 0x%08x, ulp_wake_ind: 0x%08x state %s\n", + wowl_wake_ind, ulp_wake_ind, (fmac_ulp->ulp_state) ? 
+ "DS1 Exit Triggered" : "IDLE State"); + + if (wowl_wake_ind || ulp_wake_ind) { + /* RX wake Don't do anything. + * Just bail out and re-download firmware. + */ + /* Print out PHY TX error block when bit 9 set */ + if ((ulp_wake_ind & C_DS1_PHY_TXERR) && + M_DS1_PHYTX_ERR_BLK(sdiod->fmac_ulp)) { + inff_err("Dump PHY TX Error SHM Locations\n"); + for (i = 0; i < PHYTX_ERR_BLK_SIZE; i++) { + u32 tx_err; + + tx_err = D11SHM_RDW(sdiod, + (M_DS1_PHYTX_ERR_BLK(sdiod->fmac_ulp) + + (i * 2)), &err); + pr_err("0x%x", tx_err); + } + inff_err("\n"); + } + } else { + /* TX wake negotiate with MAC */ + inff_dbg(ULP, "M_DS1_CTRL_SDIO: 0x%08x\n", + (u32)D11SHM_RDW(sdiod, + M_DS1_CTRL_SDIO(sdiod->fmac_ulp), + &err)); + val32 = D11SHM_RD(sdiod, + M_DS1_CTRL_SDIO(sdiod->fmac_ulp), + &err); + D11SHM_WR(sdiod, M_DS1_CTRL_SDIO(sdiod->fmac_ulp), + val32, (C_DS1_CTRL_SDIO_DS1_EXIT | + C_DS1_CTRL_REQ_VALID), &err); + val32 = D11REG_RD(sdiod, D11_MACCONTROL_REG, &err); + val32 = val32 | D11_MACCONTROL_REG_WAKE; + D11REG_WR(sdiod, D11_MACCONTROL_REG, val32, &err); + + /* Poll for PROC_DONE to be set by ucode */ + value = D11SHM_RDW(sdiod, + M_DS1_CTRL_SDIO(sdiod->fmac_ulp), + &err); + /* Wait here (polling) for C_DS1_CTRL_PROC_DONE */ + timeout = jiffies + ULP_HUDI_PROC_DONE_TIME; + while (!(value & C_DS1_CTRL_PROC_DONE)) { + value = D11SHM_RDW(sdiod, + M_DS1_CTRL_SDIO(sdiod->fmac_ulp), + &err); + if (time_after(jiffies, timeout)) + break; + usleep_range(1000, 2000); + } + inff_dbg(ULP, "M_DS1_CTRL_SDIO: 0x%08x\n", + (u32)D11SHM_RDW(sdiod, + M_DS1_CTRL_SDIO(sdiod->fmac_ulp), &err)); + value = D11SHM_RDW(sdiod, + M_DS1_CTRL_SDIO(sdiod->fmac_ulp), + &err); + if (!(value & C_DS1_CTRL_PROC_DONE)) { + inff_err("Timeout Failed to enter DS1 Exit state!\n"); + return false; + } + } + ulp_wake_ind = D11SHM_RDW(sdiod, + M_ULP_WAKE_IND(sdiod->fmac_ulp), + &err); + wowl_wake_ind = D11SHM_RDW(sdiod, + M_WAKEEVENT_IND(sdiod->fmac_ulp), + &err); + inff_dbg(ULP, "wowl_wake_ind: 0x%08x, ulp_wake_ind: 
0x%08x\n", + wowl_wake_ind, ulp_wake_ind); + + /* skip setting min resource mask for secure chip */ + reg_addr = CORE_CC_REG(inff_chip_get_pmu(bus->ci)->base, + min_res_mask); + inff_sdiod_writel(sdiod, reg_addr, + DEFAULT_43022_MIN_RES_MASK, &err); + if (err) + inff_err("min_res_mask failed\n"); + } + return true; +} + +static void inff_sdio_dpc(struct inff_sdio *bus) +{ + struct inff_sdio_dev *sdiod = bus->sdiodev; + u32 newstatus = 0; + u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus); + unsigned long intstatus; + uint txlimit = bus->txbound; /* Tx frames to send before resched */ + uint framecnt; /* Temporary counter of tx/rx frames */ + int err = 0; + + inff_dbg(SDIO, "Enter\n"); + + sdio_claim_host(bus->sdiodev->func1); + + /* If waiting for HTAVAIL, check status */ + if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) { + u8 clkctl, devctl = 0; + +#ifdef DEBUG + /* Check for inconsistent device control */ + devctl = inff_sdiod_readb(bus->sdiodev, SBSDIO_DEVICE_CTL, + &err); +#endif /* DEBUG */ + + /* Read CSR, if clock on switch to AVAIL, else ignore */ + clkctl = inff_sdiod_readb(bus->sdiodev, + SBSDIO_FUNC1_CHIPCLKCSR, &err); + + inff_dbg(SDIO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n", + devctl, clkctl); + + if (SBSDIO_HTAV(clkctl)) { + devctl = inff_sdiod_readb(bus->sdiodev, + SBSDIO_DEVICE_CTL, &err); + devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY; + inff_sdiod_writeb(bus->sdiodev, + SBSDIO_DEVICE_CTL, devctl, &err); + bus->clkstate = CLK_AVAIL; + } + } + + /* Make sure backplane clock is on */ + inff_sdio_bus_sleep(bus, false, true); + + /* Pending interrupt indicates new device status */ + if (atomic_read(&bus->ipend) > 0) { + atomic_set(&bus->ipend, 0); + err = inff_sdio_intr_rstatus(bus); + } + + /* Start with leftover status bits */ + intstatus = atomic_xchg(&bus->intstatus, 0); + + /* Handle flow-control change: read new state in case our ack + * crossed another change interrupt. 
If change still set, assume + * FC ON for safety, let next loop through do the debounce. + */ + if (intstatus & I_HMB_FC_CHANGE) { + intstatus &= ~I_HMB_FC_CHANGE; + inff_sdiod_writel(sdiod, intstat_addr, I_HMB_FC_CHANGE, &err); + + newstatus = inff_sdiod_readl(sdiod, intstat_addr, &err); + + bus->sdcnt.f1regdata += 2; + atomic_set(&bus->fcstate, + !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE))); + intstatus |= (newstatus & bus->hostintmask); + } + + /* Handle host mailbox indication */ + if (intstatus & I_HMB_HOST_INT) { + u32 hmb_data = 0; + + intstatus &= ~I_HMB_HOST_INT; + intstatus |= inff_sdio_hostmail(bus, &hmb_data); + if (inff_sdio_ulp_pre_redownload_check(bus, hmb_data)) { + /* Will toggle redownload_fw, after F2 enabled and + * register IRQ. + */ + sdiod->redownload_fw = true; + inff_sdio_ulp_reinit_fw(bus); + } + } + + sdio_release_host(bus->sdiodev->func1); + + /* Generally don't ask for these, can get CRC errors... */ + if (intstatus & I_WR_OOSYNC) { + inff_err("Dongle reports WR_OOSYNC\n"); + intstatus &= ~I_WR_OOSYNC; + } + + if (intstatus & I_RD_OOSYNC) { + inff_err("Dongle reports RD_OOSYNC\n"); + intstatus &= ~I_RD_OOSYNC; + } + + if (intstatus & I_SBINT) { + inff_err("Dongle reports SBINT\n"); + intstatus &= ~I_SBINT; + } + + /* Would be active due to wake-wlan in gSPI */ + if (intstatus & I_CHIPACTIVE) { + inff_dbg(SDIO, "Dongle reports CHIPACTIVE\n"); + intstatus &= ~I_CHIPACTIVE; + } + + if (intstatus & I_HMB_FC_STATE) { + inff_dbg(INFO, "Dongle reports HMB_FC_STATE\n"); + intstatus &= ~I_HMB_FC_STATE; + } + + /* Ignore frame indications if rxskip is set */ + if (bus->rxskip) + intstatus &= ~I_HMB_FRAME_IND; + + /* On frame indication, read available frames */ + if ((intstatus & I_HMB_FRAME_IND) && bus->clkstate == CLK_AVAIL && + !sdiod->redownload_fw) { + inff_sdio_readframes(bus, bus->rxbound); + if (!bus->rxpending) + intstatus &= ~I_HMB_FRAME_IND; + } + + /* Keep still-pending events for next scheduling */ + if (intstatus) + 
atomic_or(intstatus, &bus->intstatus); + + inff_sdio_clrintr(bus); + + if (bus->clkstate == CLK_AVAIL && inff_sdio_f2_ready(bus)) { + if (bus->ctrl_frame_stat && txctl_ok(bus)) { + sdio_claim_host(bus->sdiodev->func1); + if (bus->ctrl_frame_stat) { + err = inff_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf, + bus->ctrl_frame_len); + bus->ctrl_frame_err = err; + wmb(); /*Ensure tx ctrlframe cache line entry is flushed*/ + bus->ctrl_frame_stat = false; + if (err) + inff_err("sdio ctrlframe tx failed err=%d\n", err); + } + sdio_release_host(bus->sdiodev->func1); + inff_sdio_wait_event_wakeup(bus); + } + /* Send queued frames (limit 1 if rx may still be pending) */ + if (!atomic_read(&bus->fcstate) && data_ok(bus) && + inff_pktq_mlen(&bus->txq, ~bus->flowcontrol) && txlimit) { + framecnt = bus->rxpending ? min(txlimit, bus->txminmax) : + txlimit; + inff_sdio_sendfromq(bus, framecnt); + } + } + + if (bus->sdiodev->state != INFF_SDIOD_DATA || err != 0) { + inff_err("failed backplane access over SDIO, halting operation\n"); + atomic_set(&bus->intstatus, 0); + if (bus->ctrl_frame_stat) { + sdio_claim_host(bus->sdiodev->func1); + if (bus->ctrl_frame_stat) { + bus->ctrl_frame_err = -ENODEV; + /* Ensure err is written to variable before wakeup */ + wmb(); + bus->ctrl_frame_stat = false; + inff_sdio_wait_event_wakeup(bus); + } + sdio_release_host(bus->sdiodev->func1); + } + } else if (atomic_read(&bus->intstatus) || + atomic_read(&bus->ipend) > 0 || + (!atomic_read(&bus->fcstate) && + inff_pktq_mlen(&bus->txq, ~bus->flowcontrol) && + data_ok(bus))) { + bus->dpc_triggered = true; + } +} + +static struct pktq *inff_sdio_bus_gettxq(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct inff_sdio *bus = sdiodev->bus; + + return &bus->txq; +} + +static bool inff_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec) +{ + struct sk_buff *p; + int eprec = -1; /* precedence to evict from */ + + /* 
Fast case, precedence queue is not full and we are also not + * exceeding total queue length + */ + if (!pktq_pfull(q, prec) && !pktq_full(q)) { + inff_pktq_penq(q, prec, pkt); + return true; + } + + /* Determine precedence from which to evict packet, if any */ + if (pktq_pfull(q, prec)) { + eprec = prec; + } else if (pktq_full(q)) { + p = inff_pktq_peek_tail(q, &eprec); + if (eprec > prec) + return false; + } + + /* Evict if needed */ + if (eprec >= 0) { + /* Detect queueing to unconfigured precedence */ + if (eprec == prec) + return false; /* refuse newer (incoming) packet */ + /* Evict packet according to discard policy */ + p = inff_pktq_pdeq_tail(q, eprec); + if (!p) + inff_err("inff_pktq_pdeq_tail() failed\n"); + inff_pkt_buf_free_skb(p); + } + + /* Enqueue */ + p = inff_pktq_penq(q, prec, pkt); + if (!p) + inff_err("inff_pktq_penq() failed\n"); + + return p ? true : false; +} + +static int inff_sdio_bus_txdata(struct device *dev, struct sk_buff *pkt) +{ + int ret = -EBADE; + uint prec; + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct inff_sdio *bus = sdiodev->bus; + + inff_dbg(TRACE, "Enter: pkt: data %p len %d\n", pkt->data, pkt->len); + if (sdiodev->state != INFF_SDIOD_DATA) + return -EIO; + + /* Add space for the header */ + skb_push(pkt, bus->tx_hdrlen); + /* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */ + + /* In WLAN, priority is always set by the AP using WMM parameters + * and this need not always follow the standard 802.1d priority. + * Based on AP WMM config, map from 802.1d priority to corresponding + * precedence level. 
+ */ + prec = inff_map_prio_to_prec(bus_if->drvr->config, + (pkt->priority & PRIOMASK)); + + /* Check for existing queue, current flow-control, + * pending event, or pending clock + */ + inff_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq)); + bus->sdcnt.fcqueued++; + + skb_tx_timestamp(pkt); + + /* Priority based enq */ + spin_lock_bh(&bus->txq_lock); + /* reset bus_flags in packet cb */ + *(u16 *)(pkt->cb) = 0; + if (!inff_sdio_prec_enq(&bus->txq, pkt, prec)) { + skb_pull(pkt, bus->tx_hdrlen); + inff_err("out of bus->txq !!!\n"); + ret = -ENOSR; + } else { + ret = 0; + } + + if (pktq_len(&bus->txq) >= TXHI) { + bus->txoff = true; + inff_proto_bcdc_txflowblock(dev, true); + } + spin_unlock_bh(&bus->txq_lock); + +#ifdef DEBUG + if (pktq_plen(&bus->txq, prec) > qcount[prec]) + qcount[prec] = pktq_plen(&bus->txq, prec); +#endif + + inff_sdio_trigger_dpc(bus); + return ret; +} + +#ifdef DEBUG +#define CONSOLE_LINE_MAX 192 + +static int inff_sdio_readconsole(struct inff_sdio *bus) +{ + struct inff_console *c = &bus->console; + u8 line[CONSOLE_LINE_MAX], ch; + u32 n, idx, addr; + int rv; + + /* Don't do anything until FWREADY updates console address */ + if (bus->console_addr == 0) + return 0; + + /* Read console log struct */ + addr = bus->console_addr + offsetof(struct rte_console, log_le); + rv = inff_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&c->log_le, + sizeof(c->log_le)); + if (rv < 0) + return rv; + + /* Allocate console buffer (one time only) */ + if (!c->buf) { + c->bufsize = le32_to_cpu(c->log_le.buf_size); + c->buf = kmalloc(c->bufsize, GFP_ATOMIC); + if (!c->buf) + return -ENOMEM; + } + + idx = le32_to_cpu(c->log_le.idx); + + /* Protect against corrupt value */ + if (idx > c->bufsize) + return -EBADE; + + /* Skip reading the console buffer if the index pointer + * has not moved + */ + if (idx == c->last) + return 0; + + /* Read the console buffer */ + addr = le32_to_cpu(c->log_le.buf); + + /* During FW Control Switch from Bootloader to Ram + * 
Console address read will return all 0's, which is not valid. + * When we try to access RAM address 0 we get an SDIO error. + */ + if (addr == 0) + return 0; + + rv = inff_sdiod_ramrw(bus->sdiodev, false, addr, c->buf, c->bufsize); + if (rv < 0) + return rv; + + while (c->last != idx) { + for (n = 0; n < CONSOLE_LINE_MAX - 2; n++) { + if (c->last == idx) { + /* This would output a partial line. + * Instead, back up + * the buffer pointer and output this + * line next time around. + */ + if (c->last >= n) + c->last -= n; + else + c->last = c->bufsize - n; + goto break2; + } + ch = c->buf[c->last]; + c->last = (c->last + 1) % c->bufsize; + if (ch == '\n') + break; + line[n] = ch; + } + + if (n > 0) { + if (line[n - 1] == '\r') + n--; + line[n] = 0; + pr_debug("CONSOLE: %s\n", line); + } + } +break2: + + return 0; +} +#endif /* DEBUG */ + +static int +inff_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct inff_sdio *bus = sdiodev->bus; + int ret; + + inff_dbg(TRACE, "Enter\n"); + if (sdiodev->state != INFF_SDIOD_DATA) + return -EIO; + + /* Send from dpc */ + bus->ctrl_frame_buf = msg; + bus->ctrl_frame_len = msglen; + /* Ensure msg is written to variable before trigger dpc */ + wmb(); + bus->ctrl_frame_stat = true; + + inff_sdio_trigger_dpc(bus); + wait_event_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat, + CTL_DONE_TIMEOUT); + + ret = 0; + if (bus->ctrl_frame_stat) { + sdio_claim_host(bus->sdiodev->func1); + if (bus->ctrl_frame_stat) { + inff_dbg(SDIO, "ctrl_frame timeout\n"); + bus->ctrl_frame_stat = false; + ret = -ETIMEDOUT; + } + sdio_release_host(bus->sdiodev->func1); + } + if (!ret) { + inff_dbg(SDIO, "ctrl_frame complete, err=%d\n", + bus->ctrl_frame_err); + /* Ensure read completed */ + rmb(); + ret = bus->ctrl_frame_err; + } + + if (ret) + bus->sdcnt.tx_ctlerrs++; + else + bus->sdcnt.tx_ctlpkts++; + + return ret; +} + 
+#ifdef DEBUG +static int inff_sdio_dump_console(struct seq_file *seq, struct inff_sdio *bus, + struct sdpcm_shared *sh) +{ + u32 addr, console_ptr, console_size, console_index; + char *conbuf = NULL; + __le32 sh_val; + int rv; + + /* obtain console information from device memory */ + addr = sh->console_addr + offsetof(struct rte_console, log_le); + rv = inff_sdiod_ramrw(bus->sdiodev, false, addr, + (u8 *)&sh_val, sizeof(u32)); + if (rv < 0) + return rv; + console_ptr = le32_to_cpu(sh_val); + + addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size); + rv = inff_sdiod_ramrw(bus->sdiodev, false, addr, + (u8 *)&sh_val, sizeof(u32)); + if (rv < 0) + return rv; + console_size = le32_to_cpu(sh_val); + + addr = sh->console_addr + offsetof(struct rte_console, log_le.idx); + rv = inff_sdiod_ramrw(bus->sdiodev, false, addr, + (u8 *)&sh_val, sizeof(u32)); + if (rv < 0) + return rv; + console_index = le32_to_cpu(sh_val); + + /* allocate buffer for console data */ + if (console_size <= CONSOLE_BUFFER_MAX) + conbuf = vzalloc(console_size + 1); + + if (!conbuf) + return -ENOMEM; + + /* obtain the console data from device */ + conbuf[console_size] = '\0'; + rv = inff_sdiod_ramrw(bus->sdiodev, false, console_ptr, (u8 *)conbuf, + console_size); + if (rv < 0) + goto done; + + rv = seq_write(seq, conbuf + console_index, + console_size - console_index); + if (rv < 0) + goto done; + + if (console_index > 0) + rv = seq_write(seq, conbuf, console_index - 1); + +done: + vfree(conbuf); + return rv; +} + +static int inff_sdio_trap_info(struct seq_file *seq, struct inff_sdio *bus, + struct sdpcm_shared *sh) +{ + int error; + struct inff_trap_info tr; + + if ((sh->flags & SDPCM_SHARED_TRAP) == 0) { + inff_dbg(INFO, "no trap in firmware\n"); + return 0; + } + + error = inff_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr, + sizeof(struct inff_trap_info)); + if (error < 0) + return error; + + if (seq) + seq_printf(seq, + "dongle trap info: type 0x%x @ epc 0x%08x\n" + " 
cpsr 0x%08x spsr 0x%08x sp 0x%08x\n" + " lr 0x%08x pc 0x%08x offset 0x%x\n" + " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n" + " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n", + le32_to_cpu(tr.type), le32_to_cpu(tr.epc), + le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr), + le32_to_cpu(tr.r13), le32_to_cpu(tr.r14), + le32_to_cpu(tr.pc), sh->trap_addr, + le32_to_cpu(tr.r0), le32_to_cpu(tr.r1), + le32_to_cpu(tr.r2), le32_to_cpu(tr.r3), + le32_to_cpu(tr.r4), le32_to_cpu(tr.r5), + le32_to_cpu(tr.r6), le32_to_cpu(tr.r7)); + else + pr_debug("dongle trap info: type 0x%x @ epc 0x%08x\n" + " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n" + " lr 0x%08x pc 0x%08x offset 0x%x\n" + " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n" + " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n", + le32_to_cpu(tr.type), le32_to_cpu(tr.epc), + le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr), + le32_to_cpu(tr.r13), le32_to_cpu(tr.r14), + le32_to_cpu(tr.pc), sh->trap_addr, + le32_to_cpu(tr.r0), le32_to_cpu(tr.r1), + le32_to_cpu(tr.r2), le32_to_cpu(tr.r3), + le32_to_cpu(tr.r4), le32_to_cpu(tr.r5), + le32_to_cpu(tr.r6), le32_to_cpu(tr.r7)); + return 0; +} + +static int inff_sdio_assert_info(struct seq_file *seq, struct inff_sdio *bus, + struct sdpcm_shared *sh) +{ + int error = 0; + char file[80] = "?"; + char expr[80] = ""; + + if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) { + inff_dbg(INFO, "firmware not built with -assert\n"); + return 0; + } else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) { + inff_dbg(INFO, "no assert in dongle\n"); + return 0; + } + + sdio_claim_host(bus->sdiodev->func1); + if (sh->assert_file_addr != 0) { + error = inff_sdiod_ramrw(bus->sdiodev, false, + sh->assert_file_addr, (u8 *)file, 80); + if (error < 0) + return error; + } + if (sh->assert_exp_addr != 0) { + error = inff_sdiod_ramrw(bus->sdiodev, false, + sh->assert_exp_addr, (u8 *)expr, 80); + if (error < 0) + return error; + } + sdio_release_host(bus->sdiodev->func1); + + seq_printf(seq, "dongle assert: %s:%d: assert(%s)\n", + file, 
sh->assert_line, expr); + return 0; +} + +static int inff_sdio_checkdied(struct inff_sdio *bus) +{ + int error; + struct sdpcm_shared sh; + + error = inff_sdio_readshared(bus, &sh); + + if (error < 0) + return error; + + if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) + inff_dbg(INFO, "firmware not built with -assert\n"); + else if (sh.flags & SDPCM_SHARED_ASSERT) + inff_err("assertion in dongle\n"); + + if (sh.flags & SDPCM_SHARED_TRAP) { + inff_err("firmware trap in dongle\n"); + inff_sdio_trap_info(NULL, bus, &sh); + } + + return 0; +} + +static int inff_sdio_died_dump(struct seq_file *seq, struct inff_sdio *bus) +{ + int error = 0; + struct sdpcm_shared sh; + + error = inff_sdio_readshared(bus, &sh); + if (error < 0) + goto done; + + error = inff_sdio_assert_info(seq, bus, &sh); + if (error < 0) + goto done; + + error = inff_sdio_trap_info(seq, bus, &sh); + if (error < 0) + goto done; + + error = inff_sdio_dump_console(seq, bus, &sh); + +done: + return error; +} + +static int inff_sdio_forensic_read(struct seq_file *seq, void *data) +{ + struct inff_bus *bus_if = dev_get_drvdata(seq->private); + struct inff_sdio *bus = bus_if->bus_priv.sdio->bus; + + return inff_sdio_died_dump(seq, bus); +} + +static int inff_debugfs_sdio_count_read(struct seq_file *seq, void *data) +{ + struct inff_bus *bus_if = dev_get_drvdata(seq->private); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct inff_sdio_count *sdcnt = &sdiodev->bus->sdcnt; + + seq_printf(seq, + "intrcount: %u\nlastintrs: %u\n" + "pollcnt: %u\nregfails: %u\n" + "tx_sderrs: %u\nfcqueued: %u\n" + "rxrtx: %u\nrx_toolong: %u\n" + "rxc_errors: %u\nrx_hdrfail: %u\n" + "rx_badhdr: %u\nrx_badseq: %u\n" + "fc_rcvd: %u\nfc_xoff: %u\n" + "fc_xon: %u\nrxglomfail: %u\n" + "rxglomframes: %u\nrxglompkts: %u\n" + "f2rxhdrs: %u\nf2rxdata: %u\n" + "f2txdata: %u\nf1regdata: %u\n" + "tickcnt: %u\ntx_ctlerrs: %lu\n" + "tx_ctlpkts: %lu\nrx_ctlerrs: %lu\n" + "rx_ctlpkts: %lu\nrx_readahead: %lu\n", + sdcnt->intrcount, 
sdcnt->lastintrs, + sdcnt->pollcnt, sdcnt->regfails, + sdcnt->tx_sderrs, sdcnt->fcqueued, + sdcnt->rxrtx, sdcnt->rx_toolong, + sdcnt->rxc_errors, sdcnt->rx_hdrfail, + sdcnt->rx_badhdr, sdcnt->rx_badseq, + sdcnt->fc_rcvd, sdcnt->fc_xoff, + sdcnt->fc_xon, sdcnt->rxglomfail, + sdcnt->rxglomframes, sdcnt->rxglompkts, + sdcnt->f2rxhdrs, sdcnt->f2rxdata, + sdcnt->f2txdata, sdcnt->f1regdata, + sdcnt->tickcnt, sdcnt->tx_ctlerrs, + sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs, + sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt); + + return 0; +} + +static void inff_sdio_debugfs_create(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pub *drvr = bus_if->drvr; + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct inff_sdio *bus = sdiodev->bus; + struct dentry *dentry = inff_debugfs_get_devdir(drvr); + + if (IS_ERR_OR_NULL(dentry)) + return; + + bus->console_interval = INFF_CONSOLE; + + inff_debugfs_add_entry(drvr, "forensics", inff_sdio_forensic_read); + inff_debugfs_add_entry(drvr, "counters", + inff_debugfs_sdio_count_read); + debugfs_create_u32("console_interval", 0644, dentry, + &bus->console_interval); +} +#else +static int inff_sdio_checkdied(struct inff_sdio *bus) +{ + return 0; +} + +static void inff_sdio_debugfs_create(struct device *dev) +{ +} +#endif /* DEBUG */ + +static int +inff_sdio_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen) +{ + int timeleft; + uint rxlen = 0; + bool pending; + u8 *buf; + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct inff_sdio *bus = sdiodev->bus; + + inff_dbg(TRACE, "Enter\n"); + if (sdiodev->state != INFF_SDIOD_DATA) + return -EIO; + + /* Wait until control frame is available */ + timeleft = inff_sdio_dcmd_resp_wait(bus, &bus->rxlen, &pending); + + spin_lock_bh(&bus->rxctl_lock); + rxlen = bus->rxlen; + memcpy(msg, bus->rxctl, min(msglen, rxlen)); + bus->rxctl = NULL; + buf = bus->rxctl_orig; + bus->rxctl_orig = NULL; + 
bus->rxlen = 0; + spin_unlock_bh(&bus->rxctl_lock); + vfree(buf); + + if (rxlen) { + inff_dbg(CTL, "resumed on rxctl frame, got %d expected %d\n", + rxlen, msglen); + } else if (timeleft == 0) { + inff_err("resumed on timeout\n"); + inff_sdio_checkdied(bus); + } else if (!pending) { + inff_dbg(CTL, "resumed for unknown reason?\n"); + inff_sdio_checkdied(bus); + } else { + inff_dbg(CTL, "cancelled\n"); + return -ERESTARTSYS; + } + + if (rxlen) + bus->sdcnt.rx_ctlpkts++; + else + bus->sdcnt.rx_ctlerrs++; + + return rxlen ? (int)rxlen : -ETIMEDOUT; +} + +#ifdef DEBUG +static bool +inff_sdio_verifymemory(struct inff_sdio_dev *sdiodev, u32 ram_addr, + u8 *ram_data, uint ram_sz) +{ + char *ram_cmp; + int err; + bool ret = true; + int address; + int offset; + int len; + + /* read back and verify */ + inff_dbg(INFO, "Compare RAM dl & ul at 0x%08x; size=%d\n", ram_addr, + ram_sz); + ram_cmp = kmalloc(MEMBLOCK, GFP_KERNEL); + /* do not proceed while no memory but */ + if (!ram_cmp) + return true; + + address = ram_addr; + offset = 0; + while (offset < ram_sz) { + len = ((offset + MEMBLOCK) < ram_sz) ? 
MEMBLOCK : + ram_sz - offset; + err = inff_sdiod_ramrw(sdiodev, false, address, ram_cmp, len); + if (err) { + inff_err("error %d on reading %d membytes at 0x%08x\n", + err, len, address); + ret = false; + break; + } else if (memcmp(ram_cmp, &ram_data[offset], len)) { + inff_err("Downloaded RAM image is corrupted, block offset is %d, len is %d\n", + offset, len); + ret = false; + break; + } + offset += len; + address += len; + } + + kfree(ram_cmp); + + return ret; +} +#else /* DEBUG */ +static bool +inff_sdio_verifymemory(struct inff_sdio_dev *sdiodev, u32 ram_addr, + u8 *ram_data, uint ram_sz) +{ + return true; +} +#endif /* DEBUG */ + +/* In 43022:secure-mode TRX header should be copied at 512Kb RAM location + * Because for CM3 based chip rtecdc.bin(fw) should be at starting of RAM + */ +#define TRX_HDR_START_ADDR 0x7fd4c /* TRX header start address */ +#define TRX_HDR_SZ 0x2b4 /* TRX header size */ + +static int inff_sdio_download_code_file(struct inff_sdio *bus, + const struct firmware *fw) +{ + struct trx_header_le *trx = (struct trx_header_le *)fw->data; + u32 fw_size; + u32 address; + u8 *image = NULL; + int err; + + inff_dbg(TRACE, "Enter\n"); + + address = bus->ci->rambase; + fw_size = fw->size; + + if (bus->ci->chip == INF_CC_43022_CHIP_ID) { + if (trx->magic == cpu_to_le32(TRX_MAGIC)) { + err = inff_sdiod_ramrw(bus->sdiodev, true, TRX_HDR_START_ADDR, + (u8 *)fw->data, sizeof(struct trx_header_le)); + fw_size -= sizeof(struct trx_header_le); + image = (u8 *)fw->data; + image = image + TRX_HDR_SZ; + } + err = inff_sdiod_ramrw(bus->sdiodev, true, address, + image, fw_size); + if (err) + inff_err("error %d on writing %d membytes at 0x%08x\n", + err, (int)fw_size, address); + else if (!inff_sdio_verifymemory(bus->sdiodev, address, + image, fw_size)) + err = -EIO; + } else { + if (trx->magic == cpu_to_le32(TRX_MAGIC)) { + if ((trx->flag_version >> 16) == TRX_VERSION5) + address -= sizeof(struct trxv5_header_le); + else + address -= sizeof(struct trx_header_le); 
+ fw_size = le32_to_cpu(trx->len); + } + + err = inff_sdiod_ramrw(bus->sdiodev, true, address, + (u8 *)fw->data, fw_size); + + if (err) + inff_err("error %d on writing %d membytes at 0x%08x\n", + err, (int)fw_size, address); + else if (!inff_sdio_verifymemory(bus->sdiodev, address, + (u8 *)fw->data, fw_size)) + err = -EIO; + } + + return err; +} + +static int inff_sdio_download_nvram(struct inff_sdio *bus, + void *vars, u32 varsz) +{ + int address; + int err; + + inff_dbg(TRACE, "Enter\n"); + + /* In 43022:secure-mode NVRAM should be copied to 512KB RAM area */ + if (bus->ci->chip == INF_CC_43022_CHIP_ID) + address = CM3_SOCRAM_WRITE_END_LOCATION - varsz + bus->ci->rambase; + else + address = bus->ci->ramsize - varsz + bus->ci->rambase; + + err = inff_sdiod_ramrw(bus->sdiodev, true, address, vars, varsz); + if (err) + inff_err("error %d on writing %d nvram bytes at 0x%08x\n", + err, varsz, address); + else if (!inff_sdio_verifymemory(bus->sdiodev, address, vars, varsz)) + err = -EIO; + + return err; +} + +static int inff_sdio_download_firmware_43022(struct inff_sdio *bus, + const struct firmware *fw, + void *nvram, u32 nvlen) +{ + int inferror; + u32 rstvec; + + sdio_claim_host(bus->sdiodev->func1); + inff_sdio_clkctl(bus, CLK_AVAIL, false); + + rstvec = get_unaligned_le32(fw->data); + inff_dbg(SDIO, "firmware rstvec: %x\n", rstvec); + + inferror = bus->ci->blhs->pre_nvramdl(bus->ci); + if (inferror) { + inff_err("NVRAM download preparation failed\n"); + goto err; + } + + inferror = inff_sdio_download_nvram(bus, nvram, nvlen); + if (inferror) { + inff_err("dongle nvram file download failed\n"); + goto err; + } + + inferror = bus->ci->blhs->post_nvramdl(bus->ci); + if (inferror) { + inff_err("error in post NVRAM download hs err=%d\n", + inferror); + goto err; + } + inferror = bus->ci->blhs->prep_fwdl(bus->ci); + + inferror = inff_sdio_download_code_file(bus, fw); + release_firmware(fw); + if (inferror) { + inff_err("dongle image file download failed\n"); + 
inff_fw_nvram_free(nvram);
+		goto err;
+	}
+
+	inferror = bus->ci->blhs->post_fwdl(bus->ci);
+	if (inferror) {
+		inff_err("FW download failed, err=%d\n", inferror);
+		inff_fw_nvram_free(nvram);
+		goto err;
+	}
+	inferror = bus->ci->blhs->chk_validation(bus->ci);
+	if (inferror) {
+		inff_err("FW validation failed, err=%d\n", inferror);
+		inff_fw_nvram_free(nvram);
+		goto err;
+	}
+
+	/* success path must release the nvram buffer too, matching
+	 * inff_sdio_download_firmware() which frees it on all paths
+	 */
+	inff_fw_nvram_free(nvram);
+
+err:
+	inff_sdio_clkctl(bus, CLK_SDONLY, false);
+	sdio_release_host(bus->sdiodev->func1);
+	return inferror;
+}
+
+/* Download firmware image and nvram to the dongle RAM via the bootloader
+ * handshake (blhs). Consumes @fw and @nvram on all paths.
+ */
+static int inff_sdio_download_firmware(struct inff_sdio *bus,
+				       const struct firmware *fw,
+				       void *nvram, u32 nvlen)
+{
+	int inferror;
+	u32 rstvec;
+
+	sdio_claim_host(bus->sdiodev->func1);
+	inff_sdio_clkctl(bus, CLK_AVAIL, false);
+
+	rstvec = get_unaligned_le32(fw->data);
+	inff_dbg(SDIO, "firmware rstvec: %x\n", rstvec);
+
+	inferror = bus->ci->blhs->prep_fwdl(bus->ci);
+	if (inferror) {
+		inff_err("FW download preparation failed\n");
+		release_firmware(fw);
+		inff_fw_nvram_free(nvram);
+		goto err;
+	}
+
+	inferror = inff_sdio_download_code_file(bus, fw);
+	release_firmware(fw);
+	if (inferror) {
+		inff_err("dongle image file download failed\n");
+		inff_fw_nvram_free(nvram);
+		goto err;
+	}
+
+	inferror = bus->ci->blhs->post_fwdl(bus->ci);
+	if (inferror) {
+		inff_err("FW download failed, err=%d\n", inferror);
+		inff_fw_nvram_free(nvram);
+		goto err;
+	}
+
+	inferror = bus->ci->blhs->chk_validation(bus->ci);
+	if (inferror) {
+		inff_err("FW validation failed, err=%d\n", inferror);
+		inff_fw_nvram_free(nvram);
+		goto err;
+	}
+
+	inferror = inff_sdio_download_nvram(bus, nvram, nvlen);
+	inff_fw_nvram_free(nvram);
+	if (inferror) {
+		inff_err("dongle nvram file download failed\n");
+		goto err;
+	}
+
+	bus->ci->blhs->post_nvramdl(bus->ci);
+
+err:
+	inff_sdio_clkctl(bus, CLK_SDONLY, false);
+	sdio_release_host(bus->sdiodev->func1);
+	return inferror;
+}
+
+static bool inff_sdio_aos_no_decode(struct inff_sdio *bus)
+{
+	if (bus->ci->chip == INF_CC_5557X_CHIP_ID ||
+
bus->ci->chip == INF_CC_5551X_CHIP_ID || + bus->ci->chip == INF_CC_43022_CHIP_ID) + return true; + else + return false; +} + +static void inff_sdio_sr_init(struct inff_sdio *bus) +{ + int err = 0; + u8 val; + u8 wakeupctrl; + u8 cardcap; + u8 chipclkcsr; + + inff_dbg(TRACE, "Enter\n"); + + if (inff_sdio_use_ht_avail(bus->ci) || + inff_chip_is_ulp(bus->ci)) { + wakeupctrl = SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT; + chipclkcsr = SBSDIO_HT_AVAIL_REQ; + } else { + wakeupctrl = SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT; + chipclkcsr = SBSDIO_FORCE_HT; + } + + if (inff_sdio_aos_no_decode(bus)) { + cardcap = SDIO_CCCR_INFF_CARDCAP_CMD_NODEC; + } else { + cardcap = (SDIO_CCCR_INFF_CARDCAP_CMD14_SUPPORT | + SDIO_CCCR_INFF_CARDCAP_CMD14_EXT); + } + + val = inff_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err); + if (err) { + inff_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n"); + return; + } + val |= 1 << wakeupctrl; + inff_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err); + if (err) { + inff_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n"); + return; + } + + /* Add CMD14 Support */ + inff_sdiod_func0_wb(bus->sdiodev, SDIO_CCCR_INFF_CARDCAP, + cardcap, + &err); + if (err) { + inff_err("error writing SDIO_CCCR_INFF_CARDCAP\n"); + return; + } + + inff_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + chipclkcsr, &err); + if (err) { + inff_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n"); + return; + } + + /* set flag */ + bus->sr_enabled = true; + inff_dbg(INFO, "SR enabled\n"); +} + +/* enable KSO bit */ +static int inff_sdio_kso_init(struct inff_sdio *bus) +{ + struct inff_core *core = bus->sdio_core; + u8 kso_mask, kso_shift; + u8 val; + int err = 0; + + inff_dbg(TRACE, "Enter\n"); + + /* KSO bit added in SDIO core rev 12 */ + if (core->rev < 12) + return 0; + + val = inff_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err); + if (err) { + inff_err("error reading SBSDIO_FUNC1_SLEEPCSR\n"); + return err; + } + + switch (bus->ci->socitype) { + case SOCI_AI: + kso_mask 
= SBSDIO_FUNC1_SLEEPCSR_KSO_MASK; + kso_shift = SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT; + break; + case SOCI_CP: + kso_mask = SBSDIO_FUNC1_SLEEPCSR_BT_KSO_MASK; + kso_shift = SBSDIO_FUNC1_SLEEPCSR_BT_KSO_SHIFT; + break; + default: + inff_err("chip type %u is not supported\n", + bus->ci->socitype); + return -ENODEV; + } + + if (!(val & SBSDIO_FUNC1_SLEEPCSR_KSO_MASK)) { + val |= (SBSDIO_FUNC1_SLEEPCSR_KSO_EN << + SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); + inff_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, + val, &err); + if (err) { + inff_err("error writing SBSDIO_FUNC1_SLEEPCSR\n"); + return err; + } + } + + return 0; +} + +static int inff_sdio_bus_preinit(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct inff_sdio *bus = sdiodev->bus; + struct inff_core *core = bus->sdio_core; + u32 value; + __le32 iovar; + int err; + + /* maxctl provided by common layer */ + if (WARN_ON(!bus_if->maxctl)) + return -EINVAL; + + /* Allocate control receive buffer */ + bus_if->maxctl += bus->roundup; + value = roundup((bus_if->maxctl + SDPCM_HDRLEN), ALIGNMENT); + value += bus->head_align; + bus->rxbuf = kmalloc(value, GFP_ATOMIC); + if (bus->rxbuf) + bus->rxblen = value; + + /* the commands below use the terms tx and rx from + * a device perspective, ie. bus:txglom affects the + * bus transfers from device to host. 
+ */ + if (core->rev < 12) { + /* for sdio core rev < 12, disable txgloming */ + iovar = 0; + err = inff_iovar_data_set(dev, "bus:txglom", &iovar, + sizeof(iovar)); + } else { + /* otherwise, set txglomalign */ + value = sdiodev->settings->bus.sdio.sd_sgentry_align; + /* SDIO ADMA requires at least 32 bit alignment */ + iovar = cpu_to_le32(max_t(u32, value, ALIGNMENT)); + err = inff_iovar_data_set(dev, "bus:txglomalign", &iovar, + sizeof(iovar)); + } + + if (err < 0) + goto done; + + /* initialize SHM address from firmware for DS1 */ + if (!bus->sdiodev->ulp) + inff_sdio_ulp_preinit(dev); + + bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN; + if (sdiodev->sg_support) { + bus->txglom = false; + iovar = cpu_to_le32(1); + err = inff_iovar_data_set(bus->sdiodev->dev, "bus:rxglom", + &iovar, sizeof(iovar)); + if (err < 0) { + /* bus:rxglom is allowed to fail */ + err = 0; + } else { + bus->txglom = true; + bus->tx_hdrlen += SDPCM_HWEXT_LEN; + } + } + inff_bus_add_txhdrlen(bus->sdiodev->dev, bus->tx_hdrlen); + +done: + return err; +} + +static size_t inff_sdio_bus_get_ramsize(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct inff_sdio *bus = sdiodev->bus; + + return bus->ci->ramsize - bus->ci->srsize; +} + +static int inff_sdio_bus_get_memdump(struct device *dev, void *data, + size_t mem_size) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct inff_sdio *bus = sdiodev->bus; + int err; + int address; + int offset; + int len; + int shift; + + inff_dbg(INFO, "dump at 0x%08x: size=%zu\n", bus->ci->rambase, + mem_size); + + address = bus->ci->rambase; + shift = address % MEMBLOCK; + offset = 0; + err = 0; + sdio_claim_host(sdiodev->func1); + while (offset < mem_size) { + if (unlikely(shift)) { + /* if starting address is not 2K alignment, read to 2K boundary */ + len = ((offset + MEMBLOCK - shift) < mem_size) ? 
MEMBLOCK - shift :
+			      mem_size - offset;
+			shift = 0;
+		} else {
+			len = ((offset + MEMBLOCK) < mem_size) ? MEMBLOCK :
+			      mem_size - offset;
+		}
+
+		err = inff_sdiod_ramrw(sdiodev, false, address, data, len);
+		if (err) {
+			inff_err("error %d on reading %d membytes at 0x%08x\n",
+				 err, len, address);
+			/* Probably in non-readable region, ignore error */
+			if (err == -EILSEQ)
+				err = 0;
+			else
+				goto done;
+		}
+		data += len;
+		offset += len;
+		address += len;
+	}
+
+done:
+	sdio_release_host(sdiodev->func1);
+	return err;
+}
+
+void inff_sdio_trigger_dpc(struct inff_sdio *bus)
+{
+	bus->dpc_triggered = true;
+	queue_work(bus->inff_wq, &bus->datawork);
+}
+
+void inff_sdio_isr(struct inff_sdio *bus, bool in_isr)
+{
+	struct inff_sdio_dev *sdiod;
+	struct inff_pub *drvr;
+	struct inff_if *ifp;
+
+	inff_dbg(SDIO, "Enter\n");
+
+	/* Validate bus before touching it; previously bus->sdiodev was
+	 * dereferenced ahead of this check, making the check dead code.
+	 */
+	if (!bus) {
+		inff_err("bus is null pointer, exiting\n");
+		return;
+	}
+
+	sdiod = bus->sdiodev;
+	drvr = sdiod->bus_if->drvr;
+	ifp = inff_get_ifp(drvr, 0); /* wlan0 */
+
+	/* Count the interrupt call */
+	bus->sdcnt.intrcount++;
+	if (in_isr) {
+		atomic_set(&bus->ipend, 1);
+	} else {
+		/* Wake up the bus if in sleep */
+		if (inff_sdio_bus_sleep_state(bus))
+			inff_sdio_bus_sleep(bus, false, false);
+
+		if (inff_sdio_intr_rstatus(bus))
+			inff_err("failed backplane access\n");
+	}
+
+	if (bus->sdiodev->settings->sdio_in_isr) {
+		if (!mutex_trylock(&bus->sdsem)) {
+			bus->dpc_triggered = true;
+			queue_work(bus->inff_wq, &bus->datawork);
+		} else {
+			bus->dpc_triggered = true;
+
+			/* make sure dpc_triggered is true */
+			wmb();
+			while (READ_ONCE(bus->dpc_triggered)) {
+				bus->dpc_triggered = false;
+				if (ifp && ifp->drvr->settings->napi_enable && ifp->napi.poll) {
+					if (napi_schedule_prep(&ifp->napi)) {
+						__napi_schedule(&ifp->napi);
+						inff_dbg(SDIO, "NAPI scheduled on active if (%s)\n",
+							 ifp->ndev->name);
+					}
+				}
+				inff_sdio_dpc(bus);
+				bus->idlecount = 0;
+			}
+			mutex_unlock(&bus->sdsem);
+		}
+	} else {
+		bus->dpc_triggered = true;
+		queue_work(bus->inff_wq, &bus->datawork);
+ } +} + +static void inff_sdio_bus_watchdog(struct inff_sdio *bus) +{ + inff_dbg(TIMER, "Enter\n"); + +#ifdef DEBUG + /* Poll for console output periodically */ + if (bus->sdiodev->state == INFF_SDIOD_DATA && INFF_FWCON_ON() && + bus->console_interval != 0) { + bus->console.count += jiffies_to_msecs(INFF_WD_POLL); + if (bus->console.count >= bus->console_interval) { + bus->console.count -= bus->console_interval; + sdio_claim_host(bus->sdiodev->func1); + /* Make sure backplane clock is on */ + inff_sdio_bus_sleep(bus, false, false); + if (inff_sdio_readconsole(bus) < 0) + /* stop on error */ + bus->console_interval = 0; + sdio_release_host(bus->sdiodev->func1); + } + } +#endif /* DEBUG */ + + /* On idle timeout clear activity flag and/or turn off clock */ + if (!bus->dpc_triggered && + #ifdef CONFIG_INFFMAC_BT_SHARED_SDIO + !inff_btsdio_is_active(bus->sdiodev->bus_if)) { + #else + true) { + #endif + /* Ensure read completed */ + rmb(); + if (!bus->dpc_running && bus->idletime > 0 && + bus->clkstate == CLK_AVAIL) { + bus->idlecount++; + if (bus->idlecount >= bus->idletime) { + inff_dbg(SDIO, "idle\n"); + sdio_claim_host(bus->sdiodev->func1); +#ifdef DEBUG + if (!INFF_FWCON_ON() || + bus->console_interval == 0) +#endif + inff_sdio_wd_timer(bus, false); + bus->idlecount = 0; + + if (!bus->dpc_triggered && !bus->dpc_running) + inff_sdio_bus_sleep(bus, true, false); + else + inff_err("DPC active Skip sleep"); + + sdio_release_host(bus->sdiodev->func1); + } + } else { + bus->idlecount = 0; + } + } else { + bus->idlecount = 0; + } +} + +static void inff_sdio_dataworker(struct work_struct *work) +{ + struct inff_sdio *bus = container_of(work, struct inff_sdio, + datawork); + + if (bus->sdiodev->settings->sdio_in_isr) { + if (mutex_trylock(&bus->sdsem)) { + bus->dpc_running = true; + + /* make sure dpc_running is true */ + wmb(); + while (READ_ONCE(bus->dpc_triggered)) { + bus->dpc_triggered = false; + inff_sdio_dpc(bus); + bus->idlecount = 0; + } + 
mutex_unlock(&bus->sdsem); + bus->dpc_running = false; + if (inff_sdiod_freezing(bus->sdiodev)) { + inff_sdiod_change_state(bus->sdiodev, INFF_SDIOD_DOWN); + inff_sdiod_try_freeze(bus->sdiodev); + inff_sdiod_change_state(bus->sdiodev, INFF_SDIOD_DATA); + } + } + } else { + bus->dpc_running = true; + + /* make sure dpc_running is true */ + wmb(); + while (READ_ONCE(bus->dpc_triggered)) { + bus->dpc_triggered = false; + inff_sdio_dpc(bus); + bus->idlecount = 0; + } + bus->dpc_running = false; + if (inff_sdiod_freezing(bus->sdiodev)) { + inff_sdiod_change_state(bus->sdiodev, INFF_SDIOD_DOWN); + inff_sdiod_try_freeze(bus->sdiodev); + inff_sdiod_change_state(bus->sdiodev, INFF_SDIOD_DATA); + } + } +} + +static u32 +inff_sdio_ccsec_get_buscorebase(struct inff_sdio_dev *sdiodev) +{ + u8 devctl = 0; + u32 addr = 0; + int err = 0; + + devctl = inff_sdiod_readb(sdiodev, SBSDIO_DEVICE_CTL, NULL); + inff_sdiod_writeb(sdiodev, SBSDIO_DEVICE_CTL, devctl | SBSDIO_DEVCTL_ADDR_RESET, &err); + if (err) + goto exit; + + addr |= (inff_sdiod_readb(sdiodev, SBSDIO_FUNC1_SBADDRLOW, NULL) << 8) | + (inff_sdiod_readb(sdiodev, SBSDIO_FUNC1_SBADDRMID, NULL) << 16) | + (inff_sdiod_readb(sdiodev, SBSDIO_FUNC1_SBADDRHIGH, NULL) << 24); + + inff_dbg(INFO, "sdiod core address is 0x%x\n", addr); +exit: + if (err) { + inff_err("Get SDIO core base address failed, err=%d", err); + addr = 0; + } + inff_sdiod_writeb(sdiodev, SBSDIO_DEVICE_CTL, devctl, &err); + + return addr; +} + +static u32 inff_sdio_buscore_blhs_read(void *ctx, u32 reg_offset) +{ + struct inff_sdio_dev *sdiodev = (struct inff_sdio_dev *)ctx; + + return (u32)inff_sdiod_readb(sdiodev, reg_offset, NULL); +} + +static void inff_sdio_buscore_blhs_write(void *ctx, u32 reg_offset, u32 value) +{ + struct inff_sdio_dev *sdiodev = (struct inff_sdio_dev *)ctx; + + inff_sdiod_writeb(sdiodev, reg_offset, (u8)value, NULL); +} + +static int inff_sdio_buscoreprep(void *ctx) +{ + struct inff_sdio_dev *sdiodev = ctx; + int err = 0; + u8 clkval, 
clkset; + + /* Try forcing SDIO core to do ALPAvail request only */ + clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ; + inff_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); + if (err) { + inff_err("error writing for HT off\n"); + return err; + } + + /* If register supported, wait for ALPAvail and then force ALP */ + /* This may take up to 15 milliseconds */ + clkval = inff_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, NULL); + + if ((clkval & ~SBSDIO_AVBITS) != clkset) { + inff_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n", + clkset, clkval); + return -EACCES; + } + + SPINWAIT(((clkval = inff_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + NULL)), + !SBSDIO_ALPAV(clkval)), + PMU_MAX_TRANSITION_DLY); + + if (!SBSDIO_ALPAV(clkval)) { + inff_err("timeout on ALPAV wait, clkval 0x%02x\n", + clkval); + return -EBUSY; + } + + clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP; + inff_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err); + usleep_range(ALP_WAIT_MIN, ALP_WAIT_MAX); + + /* Also, disable the extra SDIO pull-ups */ + inff_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL); + + return 0; +} + +static void inff_sdio_buscore_activate(void *ctx, struct inff_chip *chip, + u32 rstvec) +{ + struct inff_sdio_dev *sdiodev = ctx; + struct inff_core *core = sdiodev->bus->sdio_core; + u32 reg_addr; + + /* clear all interrupts */ + reg_addr = core->base + SD_REG(intstatus); + inff_sdiod_writel(sdiodev, reg_addr, 0xFFFFFFFF, NULL); + + if (rstvec) + /* Write reset vector to address 0 */ + inff_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec, + sizeof(rstvec)); +} + +static u32 inff_sdio_buscore_read32(void *ctx, u32 addr) +{ + struct inff_sdio_dev *sdiodev = ctx; + u32 val; + + val = inff_sdiod_readl(sdiodev, addr, NULL); + + return val; +} + +static void inff_sdio_buscore_write32(void *ctx, u32 addr, u32 val) +{ + struct inff_sdio_dev *sdiodev = ctx; + + inff_sdiod_writel(sdiodev, addr, val, NULL); +} + +static int 
+inff_sdio_buscore_sec_attach(void *ctx, struct inff_blhs **blhs, struct inff_ccsec **ccsec, + u32 flag, uint timeout, uint interval) +{ + struct inff_sdio_dev *sdiodev = (struct inff_sdio_dev *)ctx; + struct inff_blhs *blhsh = NULL; + struct inff_ccsec *ccsech = NULL; + u32 reg_addr; + u32 regdata; + u8 cardcap; + int err = 0, secure_mode; + + if (sdiodev->func1->vendor != SDIO_VENDOR_ID_CYPRESS) + return 0; + + /* 43022: Secure-mode OTP bit reading */ + secure_mode = !inff_sdiod_readb(sdiodev, SBSDIO_FUNC1_SECURE_MODE, &err); + if (err) + inff_err("Failed to read SecureModeRegister = %d\n", err); + + if (secure_mode) { + blhsh = kzalloc(sizeof(*blhsh), GFP_KERNEL); + if (!blhsh) + return -ENOMEM; + /* TODO : Get the address from si_backplane */ + blhsh->d2h = INFF_SDIO_REG_D2H_MSG_0; + blhsh->h2d = INFF_SDIO_REG_H2D_MSG_0; + blhsh->read = inff_sdio_buscore_read32; + blhsh->write = inff_sdio_buscore_write32; + + *blhs = blhsh; + } + + cardcap = inff_sdiod_func0_rb(sdiodev, SDIO_CCCR_INFF_CARDCAP, NULL); + if (cardcap & SDIO_CCCR_INFF_CARDCAP_SECURE_MODE) { + blhsh = kzalloc(sizeof(*blhsh), GFP_KERNEL); + if (!blhsh) + return -ENOMEM; + + blhsh->d2h = INFF_SDIO_REG_DAR_D2H_MSG_0; + blhsh->h2d = INFF_SDIO_REG_DAR_H2D_MSG_0; + blhsh->read = inff_sdio_buscore_blhs_read; + blhsh->write = inff_sdio_buscore_blhs_write; + + blhsh->write(ctx, blhsh->h2d, 0); + + SPINWAIT_MS((blhsh->read(ctx, blhsh->d2h) & flag) == 0, + timeout, interval); + + regdata = blhsh->read(ctx, blhsh->d2h); + if (!(regdata & flag)) { + inff_err("Timeout waiting for bootloader ready\n"); + kfree(blhsh); + return -EPERM; + } + *blhs = blhsh; + } + + if (cardcap & SDIO_CCCR_INFF_CARDCAP_CHIPID_PRESENT) { + ccsech = kzalloc(sizeof(*ccsech), GFP_KERNEL); + if (!ccsech) { + kfree(blhsh); + return -ENOMEM; + } + ccsech->bus_corebase = inff_sdio_ccsec_get_buscorebase(sdiodev); + reg_addr = ccsech->bus_corebase + SD_REG(eromptr); + ccsech->erombase = inff_sdio_buscore_read32(ctx, reg_addr); + reg_addr = 
ccsech->bus_corebase + SD_REG(chipid); + ccsech->chipid = inff_sdio_buscore_read32(ctx, reg_addr); + *ccsec = ccsech; + } + + if (cardcap & SDIO_CCCR_INFF_CARDCAP_CHIPID_PRESENT) { + u32 reg_val; + u32 err; + + /* Get SDIO Bus Mode*/ + reg_val = inff_sdiod_func0_rb(sdiodev, SDIO_CCCR_SPEED, &err); + if (err) { + inff_err("error getting sdio bus speed\n"); + } else { + if (reg_val & SDIO_SPEED_DDR50) + sdiodev->bus->h1_ddr50_mode = true; + } + } + + return 0; +} + +#define LOOP_TO_CHECK_FOR_BP_ENABLE 50000 /* Wait for 500msec */ +#define INTR_F1_WAIT_MIN 10 +#define INTR_F1_WAIT_MAX 20 + +static int inff_get_intr_pending_data(void *ctx) +{ + struct inff_sdio_dev *sdiodev = (struct inff_sdio_dev *)ctx; + int loop = 0, status = 0, err = 0; + u32 reg_val = 0; + + for (loop = 0; loop < LOOP_TO_CHECK_FOR_BP_ENABLE; loop++) { + sdio_claim_host(sdiodev->func1); + reg_val = inff_sdiod_func0_rb(sdiodev, SDIO_CCCR_INTx, &err); + sdio_release_host(sdiodev->func1); + status = reg_val & INTR_STATUS_FUNC1; + + if (status) { + inff_err("[%d]: Backplane enabled.\n", loop); + break; + } + usleep_range(INTR_F1_WAIT_MIN, INTR_F1_WAIT_MAX); + } + + /* Bootloader hung after backplane disable */ + if (loop == LOOP_TO_CHECK_FOR_BP_ENABLE) { + err = -EBUSY; + inff_err("Device hung, return failure. 
time out %d ms\n", + (LOOP_TO_CHECK_FOR_BP_ENABLE * 10) / 1000); + } + + return 0; +} + +static const struct inff_buscore_ops inff_sdio_buscore_ops = { + .prepare = inff_sdio_buscoreprep, + .activate = inff_sdio_buscore_activate, + .read32 = inff_sdio_buscore_read32, + .write32 = inff_sdio_buscore_write32, + .sec_attach = inff_sdio_buscore_sec_attach, + .get_intr_pend = inff_get_intr_pending_data, +}; + +static int +inff_sdio_probe_attach(struct inff_sdio *bus) +{ + struct inff_sdio_dev *sdiodev; + u8 clkctl = 0; + int err = 0; + u32 reg_val; + u32 enum_base; + int ret = -EBADE; + + sdiodev = bus->sdiodev; + sdio_claim_host(sdiodev->func1); + + enum_base = inff_chip_enum_base(sdiodev->func1->device); + + /* + * Force PLL off until inff_chip_attach() + * programs PLL control regs + */ + + inff_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, INFF_INIT_CLKCTL1, + &err); + if (!err) + clkctl = inff_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + &err); + + if (err || ((clkctl & ~SBSDIO_AVBITS) != INFF_INIT_CLKCTL1)) { + inff_err("ChipClkCSR access: err %d wrote 0x%02x read 0x%02x\n", + err, INFF_INIT_CLKCTL1, clkctl); + goto fail; + } + + bus->ci = inff_chip_attach(sdiodev, sdiodev->func1->device, + &inff_sdio_buscore_ops); + if (IS_ERR(bus->ci)) { + inff_err("inff_chip_attach failed!\n"); + bus->ci = NULL; + goto fail; + } + + if (!bus->ci->ccsec) + pr_debug("F1 signature read @0x18000000=0x%4x\n", + inff_sdiod_readl(sdiodev, enum_base, NULL)); + + /* Pick up the SDIO core info struct from chip.c */ + bus->sdio_core = inff_chip_get_core(bus->ci, INF_CORE_SDIO_DEV); + if (!bus->sdio_core) + goto fail; + + /* Pick up the CHIPCOMMON core info struct, for bulk IO in infsdh.c */ + sdiodev->cc_core = inff_chip_get_core(bus->ci, INF_CORE_CHIPCOMMON); + if (!sdiodev->cc_core) + goto fail; + + sdiodev->settings = inff_get_module_param(sdiodev->dev, + INFF_BUSTYPE_SDIO, + bus->ci->chip, + bus->ci->chiprev); + if (IS_ERR_OR_NULL(sdiodev->settings)) { + inff_err("Failed to get 
device parameters\n"); + ret = PTR_ERR_OR_ZERO(sdiodev->settings); + goto fail; + } + + if (sdiodev->settings->bus.sdio.oob_irq_supported) { + /*Below Module Params are not supported in OOB mode*/ + sdiodev->settings->sdio_in_isr = 0; + sdiodev->settings->sdio_rxf_in_kthread_enabled = 0; + inff_dbg(TRACE, "OOB Enabled, Disable sdio_in_isr\n"); + } + + /* platform specific configuration: + * alignments must be at least 4 bytes for ADMA + */ + bus->head_align = ALIGNMENT; + bus->sgentry_align = ALIGNMENT; + if (sdiodev->settings->bus.sdio.sd_head_align > ALIGNMENT) + bus->head_align = sdiodev->settings->bus.sdio.sd_head_align; + if (sdiodev->settings->bus.sdio.sd_sgentry_align > ALIGNMENT) + bus->sgentry_align = + sdiodev->settings->bus.sdio.sd_sgentry_align; + + /* allocate scatter-gather table. sg support + * will be disabled upon allocation failure. + */ + inff_sdiod_sgtable_alloc(sdiodev); + + /* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ + * is true or when platform data OOB irq is true). 
+ */ + if (IS_ENABLED(CONFIG_PM_SLEEP) && + (sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) && + ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_WAKE_SDIO_IRQ) || + sdiodev->settings->bus.sdio.oob_irq_supported)) + sdiodev->bus_if->wowl_supported = true; + + if (inff_sdio_kso_init(bus)) { + inff_err("error enabling KSO\n"); + goto fail; + } + + if (bus->ci->socitype == SOCI_AI) { + /* Set card control so an SDIO card reset does a WLAN backplane reset */ + reg_val = inff_sdiod_func0_rb(sdiodev, SDIO_CCCR_INFF_CARDCTRL, &err); + if (err) + goto fail; + + reg_val |= SDIO_CCCR_INFF_CARDCTRL_WLANRESET; + + inff_sdiod_func0_wb(sdiodev, SDIO_CCCR_INFF_CARDCTRL, reg_val, &err); + if (err) + goto fail; + } + + sdio_release_host(sdiodev->func1); + + inff_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN); + + /* allocate header buffer */ + bus->hdrbuf = kzalloc(MAX_HDR_READ + bus->head_align, GFP_KERNEL); + if (!bus->hdrbuf) + return -ENOMEM; + /* Locate an appropriately-aligned portion of hdrbuf */ + bus->rxhdr = (u8 *)roundup((unsigned long)&bus->hdrbuf[0], + bus->head_align); + + sdiodev->bus_if->chip_pub = bus->ci; + + return 0; + +fail: + sdio_release_host(sdiodev->func1); + return ret; +} + +static int inff_sdio_napi_poll(struct napi_struct *napi, int budget) +{ + struct inff_if *ifp; + unsigned int work_done = 0; + + ifp = container_of(napi, struct inff_if, napi); + if (!ifp) + return budget; + + inff_dbg(SDIO, "NAPI poll called, work_done %d, budget %d\n", work_done, budget); + napi_complete_done(napi, 0); + + return work_done; +} + +static void +inff_sched_rxf(struct inff_sdio *bus, struct sk_buff *skb) +{ + inff_dbg(SDIO, "Enter\n"); + do { + if (!inff_rxf_enqueue(bus, skb)) + break; + + inff_err("inff_rxf_enqueue failed\n"); + goto done; + } while (1); + + if (bus->thr_rxf_ctl.p_task) + complete(&bus->thr_rxf_ctl.comp); + +done: + return; +} + +static int +inff_sdio_rxf_thread(void *data) +{ + struct inff_sdio *bus = (struct inff_sdio *)data; + struct 
sched_param param; + + allow_signal(SIGTERM); + /* This thread doesn't need any user-level access, + * so get rid of all our resources + */ + memset(¶m, 0, sizeof(struct sched_param)); + param.sched_priority = 1; + if (param.sched_priority >= MAX_RT_PRIO / 2) + /* If the priority is MAX_RT_PRIO/2 or higher, + * it is considered as high priority. + * sched_priority of FIFO task dosen't + * exceed MAX_RT_PRIO/2. + */ + sched_set_fifo(current); + else + /* For when you don't much care about FIFO, + * but want to be above SCHED_NORMAL. + */ + sched_set_fifo_low(current); + + while (1) { + if (kthread_should_stop()) + break; + + if (wait_for_completion_interruptible(&bus->thr_rxf_ctl.comp) == 0) { + struct sk_buff *skb = NULL; + + smp_mb();/* ensure skb null */ + skb = inff_rxf_dequeue(bus); + if (!skb) { + inff_err("nothing is dequeued, thread terminate\n"); + break; + } + + while (skb) { + struct sk_buff *skbnext = skb->next; + + skb->next = NULL; + netif_rx(skb); + skb = skbnext; + } + } else { + break; + } + } + return 0; +} + +static int +inff_sdio_watchdog_thread(void *data) +{ + struct inff_sdio *bus = (struct inff_sdio *)data; + int wait; + + allow_signal(SIGTERM); + /* Run until signal received */ + inff_sdiod_freezer_count(bus->sdiodev); + while (1) { + if (kthread_should_stop()) + break; + inff_sdiod_freezer_uncount(bus->sdiodev); + wait = wait_for_completion_interruptible(&bus->watchdog_wait); + inff_sdiod_freezer_count(bus->sdiodev); + inff_sdiod_try_freeze(bus->sdiodev); + if (!wait) { + inff_sdio_bus_watchdog(bus); + /* Count the tick for reference */ + bus->sdcnt.tickcnt++; + reinit_completion(&bus->watchdog_wait); + } else { + break; + } + } + return 0; +} + +static void +inff_sdio_watchdog(struct timer_list *t) +{ + struct inff_sdio *bus = timer_container_of(bus, t, timer); + + if (bus->watchdog_tsk) { + complete(&bus->watchdog_wait); + /* Reschedule the watchdog */ + if (bus->wd_active) + mod_timer(&bus->timer, + jiffies + INFF_WD_POLL); + } +} + 
+static int inff_sdio_get_blob(struct device *dev, const struct firmware **fw, + enum inff_blob_type type) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct inff_sdio *bus = sdiodev->bus; + struct inff_chip_specific *chip_spec = &bus->ci->chip_spec; + + switch (type) { + case INFF_BLOB_CLM: + *fw = chip_spec->clm_fw; + chip_spec->clm_fw = NULL; + break; + default: + return -ENOENT; + } + + if (!*fw) + return -ENOENT; + + return 0; +} + +static int inff_sdio_bus_reset(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + + inff_dbg(SDIO, "Enter\n"); + + /* start by unregistering irqs */ + inff_sdiod_intr_unregister(sdiodev); + + inff_sdiod_remove(sdiodev); + + /* reset the adapter */ + sdio_claim_host(sdiodev->func1); + mmc_hw_reset(sdiodev->func1->card); + sdio_release_host(sdiodev->func1); + + inff_bus_change_state(sdiodev->bus_if, INFF_BUS_DOWN); + return 0; +} + +static void inff_sdio_bus_remove(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiod = bus_if->bus_priv.sdio; + + device_release_driver(&sdiod->func2->dev); + device_release_driver(&sdiod->func1->dev); +} + +static int inff_sdio_bus_set_fcmode(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + + if (!inff_feat_is_enabled(bus_if->drvr->iflist[0], INFF_FEAT_PROPTXSTATUS)) { + bus_if->drvr->settings->fcmode = 0; + sdiodev->settings->fcmode = bus_if->drvr->settings->fcmode; + inff_dbg(INFO, "Set fcmode = %d\n", sdiodev->settings->fcmode); + } + + return sdiodev->settings->fcmode; +} + +static const struct inff_bus_ops inff_sdio_bus_ops = { + .stop = inff_sdio_bus_stop, + .preinit = inff_sdio_bus_preinit, + .txdata = inff_sdio_bus_txdata, + .txctl = inff_sdio_bus_txctl, + .rxctl = inff_sdio_bus_rxctl, + .gettxq = inff_sdio_bus_gettxq, 
+ .wowl_config = inff_sdio_wowl_config, + .get_ramsize = inff_sdio_bus_get_ramsize, + .get_memdump = inff_sdio_bus_get_memdump, + .get_blob = inff_sdio_get_blob, + .debugfs_create = inff_sdio_debugfs_create, + .reset = inff_sdio_bus_reset, + .remove = inff_sdio_bus_remove, + .set_fcmode = inff_sdio_bus_set_fcmode, + .napi_poll = inff_sdio_napi_poll +}; + +static void inff_sdio_reset_cp(struct inff_sdio *bus) +{ + struct inff_sdio_dev *sdiodev = bus->sdiodev; + u32 reg_val = 0; + int err = 0; + + /* Configure registers to trigger BT reset on + * "SDIO Soft Reset", and set RES bit to trigger + * SDIO as well as BT reset + */ + + /* Set card control so an SDIO card reset + * does a BT backplane reset + */ + reg_val = inff_sdiod_func0_rb(sdiodev, + SDIO_CCCR_INFF_CARDCTRL, + &err); + if (err) { + inff_err("Failed to read SDIO_CCCR_INFF_CARDCTRL: %d\n", err); + return; + } + + reg_val |= SDIO_CCCR_INFF_CARDCTRL_BTRESET; + + inff_dbg(INFO, "Write CARDCTRL = 0x%x\n", reg_val); + + inff_sdiod_func0_wb(sdiodev, SDIO_CCCR_INFF_CARDCTRL, + reg_val, &err); + if (err) { + inff_err("Failed to write SDIO_CCCR_INFF_CARDCTRL: %d\n", err); + return; + } + + inff_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, + sdiodev->func1->num | SDIO_IO_CARD_RESET, + &err); + if (err) { + inff_err("Failed to write SDIO_IO_CARD_RESET: %d\n", err); + return; + } +} + +static void inff_sdio_reset(struct inff_sdio *bus) +{ +#ifdef CONFIG_INFFMAC_BT_SHARED_SDIO + struct inff_bus *bus_if = bus->sdiodev->bus_if; +#endif + u32 reg_val, read_reg; + int err = 0; + + if (bus->ci->socitype == SOCI_CP) { + inff_sdio_reset_cp(bus); + } else if (bus->ci->chip == INF_CC_43022_CHIP_ID) { + /* Set card control so an SDIO card + * reset does a WLAN backplane reset + */ + reg_val = inff_sdiod_func0_rb(bus->sdiodev, + SDIO_CCCR_INFF_CARDCTRL, + &err); + if (!err) { + /* For 43022 bit 1 and bit 2 are required + * to be set for SDIO reset + */ + reg_val |= SDIO_CCCR_INFF_CARDCTRL_WLANRESET; +#ifdef 
CONFIG_INFFMAC_BT_SHARED_SDIO + if (inff_btsdio_set_bt_reset(bus_if)) + reg_val |= SDIO_CCCR_INFF_CARDCTRL_BTRESET; +#endif /* CONFIG_INFFMAC_BT_SHARED_SDIO */ + inff_sdiod_func0_wb(bus->sdiodev, + SDIO_CCCR_INFF_CARDCTRL, + reg_val, &err); + inff_dbg(INFO, "Write CARDCTRL = 0x%x err:%d\n", + reg_val, err); + read_reg = inff_sdiod_func0_rb(bus->sdiodev, + SDIO_CCCR_INFF_CARDCTRL, + &err); + inff_dbg(INFO, "Card CTRL reg dump [0x%x], read err:%d\n", + read_reg, err); + } else { + inff_err("Failed to read Card CTRL [err = %d]\n", err); + } + /* For 43022, bit 3 needs to be set + * for IO Card reset + */ + reg_val = inff_sdiod_func0_rb(bus->sdiodev, + SDIO_CCCR_IO_ABORT, + &err); + if (!err) { + reg_val |= SDIO_CCCR_IO_ABORT_RES; + /* Setting IO Card Reset (RES) bit + * in IOAbort register, after this point no + * SDIO access is allowed till full SDIO init + */ + inff_sdiod_func0_wb(bus->sdiodev, + SDIO_CCCR_IO_ABORT, + reg_val, &err); + } else { + inff_err("Failed to read IO Abort [err = %d]\n", err); + } + mdelay(20); + } else { + bus->ci->blhs->init(bus->ci); + + /* Configure registers to trigger WLAN reset on + * "SDIO Soft Reset", and set RES bit to trigger + * SDIO as well as WLAN reset + * (instead of using PMU/CC Watchdog register) + */ + if (bus->ci->ccsec) { + struct inff_sdio_dev *sdiodev; + int err = 0; + u32 reg_val = 0; + + sdiodev = bus->sdiodev; + /* Set card control so an SDIO card reset + * does a WLAN backplane reset + */ + reg_val = inff_sdiod_func0_rb(sdiodev, + SDIO_CCCR_INFF_CARDCTRL, + &err); + reg_val |= SDIO_CCCR_INFF_CARDCTRL_WLANRESET; +#ifdef CONFIG_INFFMAC_BT_SHARED_SDIO + if (inff_btsdio_set_bt_reset(bus_if)) + reg_val |= SDIO_CCCR_INFF_CARDCTRL_BTRESET; +#endif /* CONFIG_INFFMAC_BT_SHARED_SDIO */ + inff_sdiod_func0_wb(sdiodev, + SDIO_CCCR_INFF_CARDCTRL, + reg_val, &err); + inff_dbg(INFO, "Write CARDCTRL = 0x%x err:%d\n", + reg_val, err); + inff_sdiod_func0_wb(sdiodev, + SDIO_CCCR_ABORT, + sdiodev->func1->num | + SDIO_IO_CARD_RESET, + 
NULL); + } else { + /* Reset the PMU, backplane and all the + * cores by using the PMUWatchdogCounter. + */ + inff_chip_reset_watchdog(bus->ci); + } + + bus->ci->blhs->post_wdreset(bus->ci); + } +} + +#define INFF_SDIO_FW_CODE 0 +#define INFF_SDIO_FW_NVRAM 1 +#define INFF_SDIO_FW_CLM 2 + +static int inff_sdio_enable_func2(struct inff_bus *bus_if) +{ + struct inff_sdio_dev *sdiod = bus_if->bus_priv.sdio; + struct inff_sdio *bus = sdiod->bus; + struct inff_core *core = bus->sdio_core; + u8 saveclk, bpreq; + u8 devctl; + int err; + + sdio_claim_host(sdiod->func1); + + /* Make sure backplane clock is on, needed to generate F2 interrupt */ + inff_sdio_clkctl(bus, CLK_AVAIL, false); + if (bus->clkstate != CLK_AVAIL) + goto release; + + /* Force clocks on backplane to be sure F2 interrupt propagates */ + saveclk = inff_sdiod_readb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR, &err); + if (!err) { + bpreq = saveclk; + bpreq |= (inff_sdio_use_ht_avail(bus->ci) || + inff_chip_is_ulp(bus->ci)) ? + SBSDIO_HT_AVAIL_REQ : SBSDIO_FORCE_HT; + inff_sdiod_writeb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR, + bpreq, &err); + } + if (err) { + inff_err("Failed to force clock for F2: err %d\n", err); + goto release; + } + + /* Enable function 2 (frame transfers) */ + inff_sdiod_writel(sdiod, core->base + SD_REG(tosbmailboxdata), + SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT, NULL); + + err = sdio_enable_func(sdiod->func2); + + inff_dbg(INFO, "enable F2: err=%d\n", err); + + /* If F2 successfully enabled, set core and enable interrupts */ + if (!err) { + /* Set up the interrupt mask and enable interrupts */ + bus->hostintmask = HOSTINTMASK; + inff_sdiod_writel(sdiod, core->base + SD_REG(hostintmask), + bus->hostintmask, NULL); + + switch (sdiod->func1->device) { + case SDIO_DEVICE_ID_CYPRESS_55572: + case SDIO_DEVICE_ID_CYPRESS_55500: + inff_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n", + INF55572_F2_WATERMARK); + inff_sdiod_writeb(sdiod, SBSDIO_WATERMARK, + INF55572_F2_WATERMARK, &err); + devctl = 
inff_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL, + &err); + devctl |= SBSDIO_DEVCTL_F2WM_ENAB; + inff_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl, + &err); + inff_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL, + INF55572_F1_MESBUSYCTRL, &err); + break; + case SDIO_DEVICE_ID_CYPRESS_43022: + inff_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n", + INF43022_F2_WATERMARK); + inff_sdiod_writeb(sdiod, SBSDIO_WATERMARK, + INF43022_F2_WATERMARK, &err); + devctl = inff_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL, + &err); + devctl |= SBSDIO_DEVCTL_F2WM_ENAB; + inff_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl, + &err); + inff_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL, + INF43022_MESBUSYCTRL, &err); + break; + case SDIO_DEVICE_ID_CYPRESS_55900: + inff_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n", + INF55900_F2_WATERMARK); + inff_sdiod_writeb(sdiod, SBSDIO_WATERMARK, + INF55900_F2_WATERMARK, &err); + devctl = inff_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL, + &err); + devctl |= SBSDIO_DEVCTL_F2WM_ENAB; + inff_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl, + &err); + inff_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL, + INF55900_F1_MESBUSYCTRL, &err); + break; + default: + inff_sdiod_writeb(sdiod, SBSDIO_WATERMARK, + DEFAULT_F2_WATERMARK, &err); + break; + } + } else { + /* Disable F2 again */ + sdio_disable_func(sdiod->func2); + goto checkdied; + } + if (inff_chip_sr_capable(bus->ci)) { + inff_sdio_sr_init(bus); + } else { + /* Restore previous clock setting */ + inff_sdiod_writeb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR, + saveclk, &err); + } + + if (err == 0) { + /* Assign bus interface call back */ + sdiod->bus_if->dev = sdiod->dev; + sdiod->bus_if->ops = &inff_sdio_bus_ops; + sdiod->bus_if->chip = bus->ci->chip; + sdiod->bus_if->chiprev = bus->ci->chiprev; + + /* Allow full data communication using DPC from now on. 
*/ + inff_sdiod_change_state(bus->sdiodev, INFF_SDIOD_DATA); + + err = inff_sdiod_intr_register(sdiod); + if (err != 0) + inff_err("intr register failed:%d\n", err); + bus->sdiodev->redownload_fw = false; + } + + /* If we didn't come up, turn off backplane clock */ + if (err != 0) { + inff_sdio_clkctl(bus, CLK_NONE, false); + goto checkdied; + } + + /* Start the watchdog timer */ + bus->sdcnt.tickcnt = 0; + inff_sdio_wd_timer(bus, true); + sdio_release_host(sdiod->func1); + + if (!bus->sdiodev->ulp || + (bus->sdiodev->ulp && bus->sdiodev->fmac_ulp.ulp_state != FMAC_ULP_TRIGGERED)) { + err = inff_alloc(sdiod->dev, sdiod->settings); + if (err) { + inff_err("inff_alloc failed\n"); + goto claim; + } + + /* Attach to the common layer, reserve hdr space */ + err = inff_attach(sdiod->dev, !bus->sdiodev->ulp); + if (err != 0) { + inff_err("inff_attach failed\n"); + goto free; + } + +#ifdef CONFIG_INFFMAC_BT_SHARED_SDIO + inff_btsdio_init(bus_if); +#endif + + /* Register for ULP events */ + if (sdiod->func1->device == SDIO_DEVICE_ID_CYPRESS_43022) + inff_fweh_register(bus_if->drvr, INFF_E_ULP, + inff_ulp_event_notify); + } + + if (bus->sdiodev->ulp) { + /* For ULP, after firmware redownload complete + * set ULP state to IDLE + */ + if (bus->sdiodev->fmac_ulp.ulp_state == FMAC_ULP_TRIGGERED) + bus->sdiodev->fmac_ulp.ulp_state = FMAC_ULP_IDLE; + } + + return 0; + +free: + inff_free(sdiod->dev); +claim: + sdio_claim_host(sdiod->func1); +checkdied: + inff_sdio_checkdied(bus); +release: + sdio_release_host(sdiod->func1); + return err; +} + +static int inff_sdio_enable_func3(struct inff_bus *bus_if) +{ + struct inff_sdio_dev *sdiod = bus_if->bus_priv.sdio; + u8 val = 0; + int err = 0; + + sdio_claim_host(sdiod->func1); + + /* download CP FW first if user provide this module parameter */ + if (sdiod->settings->firmware_path[0] != '\0') { + err = inff_dfu_start(sdiod); + if (err) { + inff_err("DFU first download failed: %d\n", err); + goto release; + } + + /* wait for image ready 
*/ + msleep(5000); + } + + /* Enable SDIO function 3 */ + val = inff_sdiod_func0_rb(sdiod, SDIO_CCCR_IOEx, &err); + if (err) { + inff_err("Failed to read SDIO_CCCR_IOEx: err %d\n", err); + goto release; + } + + val |= SDIO_CCCR_IEN_FUNC3; + inff_sdiod_func0_wb(sdiod, SDIO_CCCR_IOEx, val, &err); + if (err) { + inff_err("Failed to write SDIO_CCCR_IOEx: err %d\n", err); + goto release; + } + + inff_dbg(INFO, "Enable F3 successfully\n"); + + sdio_release_host(sdiod->func1); + + err = inff_sdio_enable_func2(bus_if); + if (err) + goto fail; + + return 0; + +release: + sdio_release_host(sdiod->func1); +fail: + return err; +} + +static void inff_sdio_firmware_callback(struct device *dev, int err, + struct inff_fw_request *fwreq) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiod = bus_if->bus_priv.sdio; + struct inff_sdio *bus = sdiod->bus; + struct inff_chip_specific *chip_spec = &bus->ci->chip_spec; + const struct firmware *code; + void *nvram; + u32 nvram_len; + + inff_dbg(ULP, "Enter: dev=%s, err=%d\n", dev_name(dev), err); + + if (err) + goto fail; + + code = fwreq->items[INFF_SDIO_FW_CODE].binary; + nvram = fwreq->items[INFF_SDIO_FW_NVRAM].nv_data.data; + nvram_len = fwreq->items[INFF_SDIO_FW_NVRAM].nv_data.len; + chip_spec->clm_fw = fwreq->items[INFF_SDIO_FW_CLM].binary; + kfree(fwreq); + + /* try to download image and nvram to the dongle */ + bus->alp_only = true; + if (bus->ci->chip == INF_CC_43022_CHIP_ID) + err = inff_sdio_download_firmware_43022(bus, code, nvram, nvram_len); + else + err = inff_sdio_download_firmware(bus, code, nvram, nvram_len); + + if (err) + goto fail; + bus->alp_only = false; + + err = inff_sdio_enable_func2(bus_if); + if (err) + goto fail; + + /* ready */ + return; + +fail: + inff_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); + device_release_driver(&sdiod->func2->dev); + device_release_driver(dev); +} + +struct inff_sdio *inff_sdio_probe(struct inff_sdio_dev *sdiodev) +{ + int ret; + struct 
inff_sdio *bus; + struct workqueue_struct *wq; + struct inff_fw_request *fwreq; + + inff_dbg(TRACE, "Enter\n"); + + /* Allocate private bus interface state */ + bus = kzalloc(sizeof(*bus), GFP_ATOMIC); + if (!bus) { + ret = -ENOMEM; + goto fail; + } + + bus->sdiodev = sdiodev; + sdiodev->bus = bus; + skb_queue_head_init(&bus->glom); + bus->txbound = INFF_TXBOUND; + bus->rxbound = INFF_RXBOUND; + bus->txminmax = INFF_TXMINMAX; + bus->tx_seq = SDPCM_SEQ_WRAP - 1; + + /* single-threaded workqueue */ + wq = alloc_ordered_workqueue("inff_wq/%s", WQ_MEM_RECLAIM | WQ_HIGHPRI, + dev_name(&sdiodev->func1->dev)); + if (!wq) { + inff_err("insufficient memory to create txworkqueue\n"); + ret = -ENOMEM; + goto fail; + } + inff_sdiod_freezer_count(sdiodev); + INIT_WORK(&bus->datawork, inff_sdio_dataworker); + bus->inff_wq = wq; + + /* attempt to attach to the dongle */ + ret = inff_sdio_probe_attach(bus); + if (ret < 0) { + inff_err("inff_sdio_probe_attach failed\n"); + goto fail; + } + + spin_lock_init(&bus->rxctl_lock); + spin_lock_init(&bus->txq_lock); + init_waitqueue_head(&bus->ctrl_wait); + init_waitqueue_head(&bus->dcmd_resp_wait); + /* Initialize thread based operation and lock */ + mutex_init(&bus->sdsem); + + /* too early to have drvr */ + if (sdiodev->settings->sdio_rxf_in_kthread_enabled) { + memset(&bus->skbbuf[0], 0, sizeof(void *) * MAXSKBPEND); + init_completion(&bus->thr_rxf_ctl.comp); + spin_lock_init(&bus->rxf_lock); + bus->thr_rxf_ctl.p_task = kthread_run(inff_sdio_rxf_thread, + bus, "inff_rxf/%s", + dev_name(&sdiodev->func1->dev)); + if (IS_ERR(bus->thr_rxf_ctl.p_task)) { + inff_err("inff_sdio_rxf_thread failed to start\n"); + bus->thr_rxf_ctl.p_task = NULL; + } + } + + /* Set up the watchdog timer */ + timer_setup(&bus->timer, inff_sdio_watchdog, 0); + /* Initialize watchdog thread */ + init_completion(&bus->watchdog_wait); + bus->watchdog_tsk = kthread_run(inff_sdio_watchdog_thread, + bus, "inff_wdog/%s", + dev_name(&sdiodev->func1->dev)); + if 
(IS_ERR(bus->watchdog_tsk)) { + pr_warn("inff_watchdog thread failed to start\n"); + bus->watchdog_tsk = NULL; + } + /* Initialize DPC thread */ + bus->dpc_triggered = false; + bus->dpc_running = false; + + /* default sdio bus header length for tx packet */ + bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN; + + /* Query the F2 block size, set roundup accordingly */ + bus->blocksize = bus->sdiodev->func2->cur_blksize; + bus->roundup = min(max_roundup, bus->blocksize); + + sdio_claim_host(bus->sdiodev->func1); + + /* Disable F2 to clear any intermediate frame state on the dongle */ + sdio_disable_func(bus->sdiodev->func2); + + bus->rxflow = false; + + /* Done with backplane-dependent accesses, can drop clock... */ + inff_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL); + + sdio_release_host(bus->sdiodev->func1); + + /* ...and initialize clock/power states */ + bus->clkstate = CLK_SDONLY; + + bus->idletime = sdiodev->settings->sdio_bus_idle_time; + + if (sdiodev->settings->idleclk_disable == INFFMAC_DISABLE) { + bus->idleclock = INFF_IDLE_ACTIVE; + } else if (sdiodev->settings->idleclk_disable == INFFMAC_ENABLE) { + bus->idleclock = INFF_IDLE_STOP; + } else if (sdiodev->settings->idleclk_disable == INFFMAC_AUTO) { + if (sdiodev->func1->device == SDIO_DEVICE_ID_CYPRESS_43022) + bus->idleclock = INFF_IDLE_STOP; + else + bus->idleclock = INFF_IDLE_ACTIVE; + } else { + inff_err("unexpected idleclk_disable%d\n", sdiodev->settings->idleclk_disable); + } + + /* SR state */ + bus->sr_enabled = false; + + inff_dbg(INFO, "completed!!\n"); + + switch (bus->ci->socitype) { + case SOCI_AI: + fwreq = inff_prepare_fw_request(sdiodev->settings->firmware_path, + bus->ci, inff_sdio_fwnames, + ARRAY_SIZE(inff_sdio_fwnames), + bus->sdiodev->settings->board_type); + if (!fwreq) { + ret = -ENOMEM; + goto fail; + } + + ret = inff_fw_get_firmwares(sdiodev->dev, fwreq, + inff_sdio_firmware_callback); + if (ret != 0) { + inff_err("async firmware request failed: %d\n", ret); + 
kfree(fwreq); + goto fail; + } + break; + case SOCI_CP: + ret = inff_sdio_enable_func3(sdiodev->bus_if); + if (ret) { + inff_err("sdio function init failed: %d\n", ret); + goto fail; + } + break; + default: + inff_err("chip type %u is not supported\n", + bus->ci->socitype); + goto fail; + } + + return bus; + +fail: + inff_sdio_remove(bus); + return ERR_PTR(ret); +} + +/* Detach and free everything */ +void inff_sdio_remove(struct inff_sdio *bus) +{ + struct inff_chip_specific *chip_spec = &bus->ci->chip_spec; + + inff_dbg(TRACE, "Enter\n"); + if (bus) { + /* Stop watchdog task */ + if (bus->watchdog_tsk) { + send_sig(SIGTERM, bus->watchdog_tsk, 1); + kthread_stop(bus->watchdog_tsk); + bus->watchdog_tsk = NULL; + } + + if (bus->thr_rxf_ctl.p_task) { + send_sig(SIGTERM, bus->thr_rxf_ctl.p_task, 1); + kthread_stop(bus->thr_rxf_ctl.p_task); + bus->thr_rxf_ctl.p_task = NULL; + } + + /* De-register interrupt handler */ + inff_sdiod_intr_unregister(bus->sdiodev); + + inff_detach(bus->sdiodev->dev); + inff_free(bus->sdiodev->dev); + + cancel_work_sync(&bus->datawork); + if (bus->inff_wq) + destroy_workqueue(bus->inff_wq); + + if (bus->ci) { + if (bus->sdiodev->state != INFF_SDIOD_NOMEDIUM) { + sdio_claim_host(bus->sdiodev->func1); + inff_sdio_wd_timer(bus, false); + inff_sdio_clkctl(bus, CLK_AVAIL, false); + /* Leave the device in state where it is + * 'passive'. This is done by resetting all + * necessary cores. 
+ */ + msleep(20); + if (bus->sdiodev->fmac_ulp.ulp_state == + FMAC_ULP_ENTRY_RECV) { + inff_chip_ulp_reset_lhl_regs(bus->ci); + inff_chip_reset_pmu_regs(bus->ci); + } else { + inff_chip_set_passive(bus->ci); + } + + inff_sdio_reset(bus); + + inff_sdio_clkctl(bus, CLK_NONE, false); + sdio_release_host(bus->sdiodev->func1); + } + inff_chip_detach(bus->ci); + } + if (bus->sdiodev->settings) + inff_release_module_param(bus->sdiodev->settings); + + release_firmware(chip_spec->clm_fw); + chip_spec->clm_fw = NULL; +#ifdef CONFIG_INFFMAC_BT_SHARED_SDIO + inff_btsdio_deinit(bus->sdiodev->bus_if); +#endif /* CONFIG_INFFMAC_BT_SHARED_SDIO */ +#if IS_BUILTIN(CONFIG_MMC) + sdio_claim_host(bus->sdiodev->func1); + mmc_hw_reset(bus->sdiodev->func1->card); + sdio_release_host(bus->sdiodev->func1); +#endif + + kfree(bus->rxbuf); + kfree(bus->hdrbuf); + kfree(bus); + } + + inff_dbg(TRACE, "Disconnected\n"); +} + +void inff_sdio_wd_timer(struct inff_sdio *bus, bool active) +{ + /* Totally stop the timer */ + if (!active && bus->wd_active) { + timer_delete_sync(&bus->timer); + bus->wd_active = false; + return; + } + + /* don't start the wd until fw is loaded */ + if (bus->sdiodev->state != INFF_SDIOD_DATA) + return; + + if (active) { + if (!bus->wd_active) { + /* Create timer again when watchdog period is + * dynamically changed or in the first instance + */ + bus->timer.expires = jiffies + INFF_WD_POLL; + add_timer(&bus->timer); + bus->wd_active = true; + } else { + /* Re arm the timer, at last watchdog period */ + mod_timer(&bus->timer, jiffies + INFF_WD_POLL); + } + } +} + +int inff_sdio_sleep(struct inff_sdio *bus, bool sleep) +{ + int ret; + + sdio_claim_host(bus->sdiodev->func1); + ret = inff_sdio_bus_sleep(bus, sleep, false); + sdio_release_host(bus->sdiodev->func1); + + return ret; +} + +/* Check F2 Ready bit before sending data to Firmware */ +static int +inff_sdio_f2_ready(struct inff_sdio *bus) +{ + int ret = -1; + int iordy_status = 0; + + 
sdio_claim_host(bus->sdiodev->func1); + /* Read the status of IOR2 */ + iordy_status = inff_sdiod_func0_rb(bus->sdiodev, SDIO_CCCR_IORx, NULL); + + sdio_release_host(bus->sdiodev->func1); + ret = iordy_status & SDIO_FUNC_ENABLE_2; + return ret; +} + +static int inff_ulp_event_notify(struct inff_if *ifp, + const struct inff_event_msg *evtmsg, + void *data) +{ + int err = 0; + struct inff_bus *bus_if = ifp->drvr->bus_if; + struct inff_sdio_dev *sdiodev; + struct inff_sdio *bus; + struct inff_ulp_event *ulp_event = (struct inff_ulp_event *)data; + + sdiodev = bus_if->bus_priv.sdio; + bus = sdiodev->bus; + + inff_dbg(ULP, "Chip went to DS1 state : action %d\n", + ulp_event->ulp_dongle_action); + if (ulp_event->ulp_dongle_action == FMAC_ULP_ENTRY) + bus->sdiodev->fmac_ulp.ulp_state = FMAC_ULP_ENTRY_RECV; + + return err; +} + +u32 inff_sdio_get_enum_addr(struct inff_sdio *bus) +{ + return bus->sdio_core->base; +} diff --git a/drivers/net/wireless/infineon/inffmac/sdio.h b/drivers/net/wireless/infineon/inffmac/sdio.h new file mode 100644 index 000000000000..d9f7865b5a56 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/sdio.h @@ -0,0 +1,553 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_SDIO_H +#define INFF_SDIO_H + +#include +#include +#include "firmware.h" + +#define SDIOD_FBR_SIZE 0x100 + +/* io_en */ +#define SDIO_FUNC_ENABLE_1 0x02 +#define SDIO_FUNC_ENABLE_2 0x04 + +/* io_rdys */ +#define SDIO_FUNC_READY_1 0x02 +#define SDIO_FUNC_READY_2 0x04 + +/* intr_status */ +#define INTR_STATUS_FUNC1 0x2 +#define INTR_STATUS_FUNC2 0x4 + +/* mask of register map */ +#define REG_F0_REG_MASK 0x7FF +#define REG_F1_MISC_MASK 0x1FFFF + +#define INFF_SDIO_REG_DAR_H2D_MSG_0 0x10030 +#define INFF_SDIO_REG_DAR_D2H_MSG_0 0x10038 + +#define INFF_SDIO_REG_D2H_MSG_0 0x1800204C +#define INFF_SDIO_REG_H2D_MSG_0 0x18002048 + +#define CM3_SOCRAM_WRITE_END_LOCATION 0x80000 + +/* Sdio rev 27 only */ +/* To read secure-mode bit */ +#define SBSDIO_FUNC1_SECURE_MODE 0x10001 + +/* function 0 vendor specific CCCR registers */ + +#define SDIO_CCCR_INTR_PND 0x05 +#define SDIO_CCCR_IO_ABORT 0x06 +#define SDIO_CCCR_IO_ABORT_RES BIT(3) +#define SDIO_CCCR_INFF_CARDCAP 0xf0 +#define SDIO_CCCR_INFF_CARDCAP_CMD14_SUPPORT BIT(1) +#define SDIO_CCCR_INFF_CARDCAP_CMD14_EXT BIT(2) +#define SDIO_CCCR_INFF_CARDCAP_CMD_NODEC BIT(3) +#define SDIO_CCCR_INFF_CARDCAP_CHIPID_PRESENT BIT(6) +#define SDIO_CCCR_INFF_CARDCAP_SECURE_MODE BIT(7) + +/* Interrupt enable bits for each function */ +#define SDIO_CCCR_IEN_FUNC0 BIT(0) +#define SDIO_CCCR_IEN_FUNC1 BIT(1) +#define SDIO_CCCR_IEN_FUNC2 BIT(2) +#define SDIO_CCCR_IEN_FUNC3 BIT(3) + +#define SDIO_CCCR_INFF_CARDCTRL 0xf1 +#define SDIO_CCCR_INFF_CARDCTRL_WLANRESET BIT(1) +#define SDIO_CCCR_INFF_CARDCTRL_BTRESET BIT(2) + +#define SDIO_CCCR_INFF_SEPINT 0xf2 +#define SDIO_CCCR_INFF_SEPINT_MASK BIT(0) +#define SDIO_CCCR_INFF_SEPINT_OE BIT(1) +#define SDIO_CCCR_INFF_SEPINT_ACT_HI BIT(2) + +/* function 1 miscellaneous registers */ + +/* sprom command and status */ +#define SBSDIO_SPROM_CS 0x10000 +/* sprom info register */ +#define SBSDIO_SPROM_INFO 0x10001 +/* sprom indirect access data byte 0 */ +#define SBSDIO_SPROM_DATA_LOW 0x10002 
+/* sprom indirect access data byte 1 */ +#define SBSDIO_SPROM_DATA_HIGH 0x10003 +/* sprom indirect access addr byte 0 */ +#define SBSDIO_SPROM_ADDR_LOW 0x10004 +/* gpio select */ +#define SBSDIO_GPIO_SELECT 0x10005 +/* gpio output */ +#define SBSDIO_GPIO_OUT 0x10006 +/* gpio enable */ +#define SBSDIO_GPIO_EN 0x10007 +/* rev < 7, watermark for sdio device TX path */ +#define SBSDIO_WATERMARK 0x10008 +/* control busy signal generation */ +#define SBSDIO_DEVICE_CTL 0x10009 + +/* SB Address Window Low (b15) */ +#define SBSDIO_FUNC1_SBADDRLOW 0x1000A +/* SB Address Window Mid (b23:b16) */ +#define SBSDIO_FUNC1_SBADDRMID 0x1000B +/* SB Address Window High (b31:b24) */ +#define SBSDIO_FUNC1_SBADDRHIGH 0x1000C +/* Frame Control (frame term/abort) */ +#define SBSDIO_FUNC1_FRAMECTRL 0x1000D +/* ChipClockCSR (ALP/HT ctl/status) */ +#define SBSDIO_FUNC1_CHIPCLKCSR 0x1000E +/* SdioPullUp (on cmd, d0-d2) */ +#define SBSDIO_FUNC1_SDIOPULLUP 0x1000F +/* Write Frame Byte Count Low */ +#define SBSDIO_FUNC1_WFRAMEBCLO 0x10019 +/* Write Frame Byte Count High */ +#define SBSDIO_FUNC1_WFRAMEBCHI 0x1001A +/* Read Frame Byte Count Low */ +#define SBSDIO_FUNC1_RFRAMEBCLO 0x1001B +/* Read Frame Byte Count High */ +#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C +/* MesBusyCtl (rev 11) */ +#define SBSDIO_FUNC1_MESBUSYCTRL 0x1001D +/* Watermark for sdio device RX path */ +#define SBSDIO_MESBUSY_RXFIFO_WM_MASK 0x7F +#define SBSDIO_MESBUSY_RXFIFO_WM_SHIFT 0 +/* Enable busy capability for MES access */ +#define SBSDIO_MESBUSYCTRL_ENAB 0x80 +#define SBSDIO_MESBUSYCTRL_ENAB_SHIFT 7 + +/* Sdio Core Rev 12 */ +#define SBSDIO_FUNC1_WAKEUPCTRL 0x1001E +#define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK 0x1 +#define SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT 0 +#define SBSDIO_FUNC1_WCTRL_HTWAIT_MASK 0x2 +#define SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT 1 +#define SBSDIO_FUNC1_SLEEPCSR 0x1001F +#define SBSDIO_FUNC1_SLEEPCSR_KSO_MASK 0x1 +#define SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT 0 +#define SBSDIO_FUNC1_SLEEPCSR_KSO_EN 1 +#define 
SBSDIO_FUNC1_SLEEPCSR_DEVON_MASK 0x2 +#define SBSDIO_FUNC1_SLEEPCSR_DEVON_SHIFT 1 +#define SBSDIO_FUNC1_SLEEPCSR_BT_KSO_MASK 0x4 +#define SBSDIO_FUNC1_SLEEPCSR_BT_KSO_SHIFT 2 + +#define SBSDIO_FUNC1_MISC_REG_START 0x10000 /* f1 misc register start */ +#define SBSDIO_FUNC1_MISC_REG_LIMIT 0x1001F /* f1 misc register end */ + +/* Sdio Core Rev 31 */ +/* Hard Reset SDIO core, output soft reset signal which should cause backplane reset */ +#define SDIO_IO_CARD_RESET 0x08 + +/* function 1 OCP space */ + +/* sb offset addr is <= 15 bits, 32k */ +#define SBSDIO_SB_OFT_ADDR_MASK 0x07FFF +#define SBSDIO_SB_OFT_ADDR_LIMIT 0x08000 +/* with b15, maps to 32-bit SB access */ +#define SBSDIO_SB_ACCESS_2_4B_FLAG 0x08000 + +/* Address bits from SBADDR regs */ +#define SBSDIO_SBWINDOW_MASK 0xffff8000 + +#define SDIOH_READ 0 /* Read request */ +#define SDIOH_WRITE 1 /* Write request */ + +#define SDIOH_DATA_FIX 0 /* Fixed addressing */ +#define SDIOH_DATA_INC 1 /* Incremental addressing */ + +/* internal return code */ +#define SUCCESS 0 +#define ERROR 1 + +/* Packet alignment for most efficient SDIO (can change based on platform) */ +#define INFF_SDALIGN BIT(6) + +/* watchdog polling interval */ +#define INFF_WD_POLL msecs_to_jiffies(10) + +/* SDIO function number definition */ +#define SDIO_FUNC_0 0 +#define SDIO_FUNC_1 1 +#define SDIO_FUNC_2 2 +#define SDIO_FUNC_3 3 +#define SDIO_FUNC_4 4 +#define SDIO_FUNC_5 5 +#define SDIO_FUNC_6 6 +#define SDIO_FUNC_7 7 + +/** + * enum inff_sdiod_state - the state of the bus. + * + * @INFF_SDIOD_DOWN: Device can be accessed, no DPC. + * @INFF_SDIOD_DATA: Ready for data transfers, DPC enabled. + * @INFF_SDIOD_NOMEDIUM: No medium access to dongle possible. 
+ */ +enum inff_sdiod_state { + INFF_SDIOD_DOWN, + INFF_SDIOD_DATA, + INFF_SDIOD_NOMEDIUM +}; + +struct inff_sdreg { + int func; + int offset; + int value; +}; + +struct inff_sdio; +struct inff_sdiod_freezer; + +/* ULP SHM Offsets info */ +struct ulp_shm_info { + u32 m_ulp_ctrl_sdio; + u32 m_ulp_wakeevt_ind; + u32 m_ulp_wakeind; + u32 m_ulp_phytxblk; +}; + +/* FMAC ULP state machine */ +#define FMAC_ULP_IDLE (0) +#define FMAC_ULP_ENTRY_RECV (1) +#define FMAC_ULP_TRIGGERED (2) + +/* INFF_E_ULP event data */ +#define FMAC_ULP_EVENT_VERSION 1 +#define FMAC_ULP_DISABLE_CONSOLE 1 /* Disable console */ +#define FMAC_ULP_UCODE_DOWNLOAD 2 /* Download ULP ucode file */ +#define FMAC_ULP_ENTRY 3 /* Inform ulp entry to Host */ + +struct inff_ulp { + uint ulp_state; + struct ulp_shm_info ulp_shm_offset; +}; + +struct inff_ulp_event { + u16 version; + u16 ulp_dongle_action; +}; + +struct inff_sdio_dev { + struct sdio_func *func1; + struct sdio_func *func2; + struct sdio_func *func3; + u32 sbwad; /* Save backplane window address */ + bool sbwad_valid; /* Saved backplane window address is valid */ + struct inff_core *cc_core; /* chipcommon core info struct */ + struct inff_sdio *bus; + struct device *dev; + struct inff_bus *bus_if; + struct inff_mp_device *settings; + bool oob_irq_requested; + bool sd_irq_requested; + bool irq_en; /* irq enable flags */ + spinlock_t irq_en_lock; + bool sg_support; + uint max_request_size; + ushort max_segment_count; + uint max_segment_size; + uint txglomsz; + struct sg_table sgtable; + bool wowl_enabled; + bool func1_power_manageable; + bool func2_power_manageable; + enum inff_sdiod_state state; + struct inff_sdiod_freezer *freezer; + struct inff_ulp fmac_ulp; + bool ulp; + bool redownload_fw; + bool ignore_bus_error; /* Ignore SDIO Bus access error*/ +}; + +/* sdio core registers */ +struct sdpcmd_regs { + u32 corecontrol; /* 0x00, rev8 */ + u32 corestatus; /* rev8 */ + u32 PAD[1]; + u32 biststatus; /* rev8 */ + + /* PCMCIA access */ + u16 
pcmciamesportaladdr; /* 0x010, rev8 */ + u16 PAD[1]; + u16 pcmciamesportalmask; /* rev8 */ + u16 PAD[1]; + u16 pcmciawrframebc; /* rev8 */ + u16 PAD[1]; + u16 pcmciaunderflowtimer; /* rev8 */ + u16 PAD[1]; + + /* interrupt */ + u32 intstatus; /* 0x020, rev8 */ + u32 hostintmask; /* rev8 */ + u32 intmask; /* rev8 */ + u32 sbintstatus; /* rev8 */ + u32 sbintmask; /* rev8 */ + u32 funcintmask; /* rev4 */ + u32 PAD[2]; + u32 tosbmailbox; /* 0x040, rev8 */ + u32 tohostmailbox; /* rev8 */ + u32 tosbmailboxdata; /* rev8 */ + u32 tohostmailboxdata; /* rev8 */ + + /* synchronized access to registers in SDIO clock domain */ + u32 sdioaccess; /* 0x050, rev8 */ + u32 PAD[3]; + + /* PCMCIA frame control */ + u8 pcmciaframectrl; /* 0x060, rev8 */ + u8 PAD[3]; + u8 pcmciawatermark; /* rev8 */ + u8 PAD[155]; + + /* interrupt batching control */ + u32 intrcvlazy; /* 0x100, rev8 */ + u32 PAD[3]; + + /* counters */ + u32 cmd52rd; /* 0x110, rev8 */ + u32 cmd52wr; /* rev8 */ + u32 cmd53rd; /* rev8 */ + u32 cmd53wr; /* rev8 */ + u32 abort; /* rev8 */ + u32 datacrcerror; /* rev8 */ + u32 rdoutofsync; /* rev8 */ + u32 wroutofsync; /* rev8 */ + u32 writebusy; /* rev8 */ + u32 readwait; /* rev8 */ + u32 readterm; /* rev8 */ + u32 writeterm; /* rev8 */ + u32 PAD[40]; + u32 clockctlstatus; /* rev8 */ + u32 PAD[7]; + + u32 PAD[76]; /* DMA engines */ + + u32 chipid; /* SDIO ChipID Register, 0x330, rev31 */ + u32 eromptr; /* SDIO EromPtrOffset Register, 0x334, rev31 */ + u32 PAD[50]; + + /* SDIO/PCMCIA CIS region */ + char cis[512]; /* 0x400-0x5ff, rev6 */ + + /* PCMCIA function control registers */ + char pcmciafcr[256]; /* 0x600-6ff, rev6 */ + u16 PAD[55]; + + /* PCMCIA backplane access */ + u16 backplanecsr; /* 0x76E, rev6 */ + u16 backplaneaddr0; /* rev6 */ + u16 backplaneaddr1; /* rev6 */ + u16 backplaneaddr2; /* rev6 */ + u16 backplaneaddr3; /* rev6 */ + u16 backplanedata0; /* rev6 */ + u16 backplanedata1; /* rev6 */ + u16 backplanedata2; /* rev6 */ + u16 backplanedata3; /* rev6 */ + u16 
PAD[31]; + + /* sprom "size" & "blank" info */ + u16 spromstatus; /* 0x7BE, rev2 */ + u32 PAD[464]; + + u16 PAD[0x80]; +}; + +/* Register/deregister interrupt handler. */ +int inff_sdiod_intr_register(struct inff_sdio_dev *sdiodev); +void inff_sdiod_intr_unregister(struct inff_sdio_dev *sdiodev); + +/* SDIO device register access interface */ +/* Accessors for SDIO Function 0 */ +#define inff_sdiod_func0_rb_ext(sdiodev, addr, r) \ + sdio_f0_readb((sdiodev)->func1, (addr), (r)) + +#define inff_sdiod_func0_wb_ext(sdiodev, addr, v, ret) \ + sdio_f0_writeb((sdiodev)->func1, (v), (addr), (ret)) + +/* Accessors for SDIO Function 1 */ +#define inff_sdiod_readb_ext(sdiodev, addr, r) \ + sdio_readb((sdiodev)->func1, (addr), (r)) + +#define inff_sdiod_writeb_ext(sdiodev, addr, v, ret) \ + sdio_writeb((sdiodev)->func1, (v), (addr), (ret)) + +/* Accessors for SDIO specific function number */ +#define inff_sdiod_func_rb_ext(func, addr, r) \ + sdio_readb((func), (addr), (r)) + +#define inff_sdiod_func_wb_ext(func, addr, v, ret) \ + sdio_writeb((func), (v), (addr), (ret)) + +u8 inff_sdiod_func0_rb(struct inff_sdio_dev *sdiodev, u32 addr, int *ret); +void inff_sdiod_func0_wb(struct inff_sdio_dev *sdiodev, u32 addr, u32 data, + int *ret); +u8 inff_sdiod_readb(struct inff_sdio_dev *sdiodev, u32 addr, int *ret); +void inff_sdiod_writeb(struct inff_sdio_dev *sdiodev, u32 addr, u32 data, + int *ret); +u8 inff_sdiod_func_rb(struct inff_sdio_dev *sdiodev, struct sdio_func *func, u32 addr, int *ret); +void inff_sdiod_func_wb(struct inff_sdio_dev *sdiodev, struct sdio_func *func, u32 addr, + u32 data, int *ret); + +u32 inff_sdiod_readl(struct inff_sdio_dev *sdiodev, u32 addr, int *ret); +void inff_sdiod_writel(struct inff_sdio_dev *sdiodev, u32 addr, u32 data, + int *ret); + +/* Buffer transfer to/from device (client) core via cmd53. 
+ * fn: function number + * flags: backplane width, address increment, sync/async + * buf: pointer to memory data buffer + * nbytes: number of bytes to transfer to/from buf + * pkt: pointer to packet associated with buf (if any) + * complete: callback function for command completion (async only) + * handle: handle for completion callback (first arg in callback) + * Returns 0 or error code. + * NOTE: Async operation is not currently supported. + */ +int inff_sdiod_send_pkt(struct inff_sdio_dev *sdiodev, + struct sk_buff_head *pktq); +int inff_sdiod_send_buf(struct inff_sdio_dev *sdiodev, u8 fn, + u8 *buf, uint nbytes); + +int inff_sdiod_recv_pkt(struct inff_sdio_dev *sdiodev, u8 fn, + struct sk_buff *pkt); +int inff_sdiod_recv_buf(struct inff_sdio_dev *sdiodev, u8 fn, + u8 *buf, uint nbytes); +int inff_sdiod_recv_chain(struct inff_sdio_dev *sdiodev, + struct sk_buff_head *pktq, uint totlen); + +/* Flags bits */ + +/* Four-byte target (backplane) width (vs. two-byte) */ +#define SDIO_REQ_4BYTE 0x1 +/* Fixed address (FIFO) (vs. incrementing address) */ +#define SDIO_REQ_FIXED 0x2 + +/* Read/write to memory block (F1, no FIFO) via CMD53 (sync only). + * rw: read or write (0/1) + * addr: direct SDIO address + * buf: pointer to memory data buffer + * nbytes: number of bytes to transfer to/from buf + * Returns 0 or error code. 
+ */ +int inff_sdiod_ramrw(struct inff_sdio_dev *sdiodev, bool write, u32 address, + u8 *data, uint size); + +/* Issue an abort to the specified function */ +int inff_sdiod_abort(struct inff_sdio_dev *sdiodev, struct sdio_func *func); + +void inff_sdiod_sgtable_alloc(struct inff_sdio_dev *sdiodev); +void inff_sdiod_change_state(struct inff_sdio_dev *sdiodev, + enum inff_sdiod_state state); +bool inff_sdiod_freezing(struct inff_sdio_dev *sdiodev); +void inff_sdiod_try_freeze(struct inff_sdio_dev *sdiodev); +void inff_sdiod_freezer_count(struct inff_sdio_dev *sdiodev); +void inff_sdiod_freezer_uncount(struct inff_sdio_dev *sdiodev); + +int inff_sdiod_probe(struct inff_sdio_dev *sdiodev); +int inff_sdiod_remove(struct inff_sdio_dev *sdiodev); + +struct inff_sdio *inff_sdio_probe(struct inff_sdio_dev *sdiodev); +void inff_sdio_remove(struct inff_sdio *bus); +void inff_sdio_isr(struct inff_sdio *bus, bool in_isr); + +void inff_sdio_wd_timer(struct inff_sdio *bus, bool active); +void inff_sdio_wowl_config(struct device *dev, bool enabled); +int inff_sdio_sleep(struct inff_sdio *bus, bool sleep); +int inff_sdio_set_sdbus_clk_width(struct inff_sdio *bus, unsigned int flags); +int inff_sdio_clkctl(struct inff_sdio *bus, uint target, bool pendok); +bool inff_sdio_bus_sleep_state(struct inff_sdio *bus); +void inff_sdio_trigger_dpc(struct inff_sdio *bus); +u32 inff_sdio_get_enum_addr(struct inff_sdio *bus); + +/* SHM offsets */ +#define M_DS1_CTRL_SDIO(ptr) ((ptr).ulp_shm_offset.m_ulp_ctrl_sdio) +#define M_WAKEEVENT_IND(ptr) ((ptr).ulp_shm_offset.m_ulp_wakeevt_ind) +#define M_ULP_WAKE_IND(ptr) ((ptr).ulp_shm_offset.m_ulp_wakeind) +#define M_DS1_PHYTX_ERR_BLK(ptr) ((ptr).ulp_shm_offset.m_ulp_phytxblk) + +#define D11_BASE_ADDR 0x18001000 +#define D11_AXI_BASE_ADDR 0xE8000000 +#define D11_SHM_BASE_ADDR (D11_AXI_BASE_ADDR + 0x4000) + +#define D11REG_ADDR(offset) (D11_BASE_ADDR + (offset)) +#define D11IHR_ADDR(offset) (D11_AXI_BASE_ADDR + 0x400 + (2 * (offset))) +#define 
D11SHM_ADDR(offset) (D11_SHM_BASE_ADDR + (offset)) + +/* MacControl register */ +#define D11_MACCONTROL_REG D11REG_ADDR(0x120) +#define D11_MACCONTROL_REG_WAKE 0x4000000 + +/* HUDI Sequence SHM bits */ +#define C_DS1_CTRL_SDIO_DS1_SLEEP 0x1 +#define C_DS1_CTRL_SDIO_MAC_ON 0x2 +#define C_DS1_CTRL_SDIO_RADIO_PHY_ON 0x4 +#define C_DS1_CTRL_SDIO_DS1_EXIT 0x8 +#define C_DS1_CTRL_PROC_DONE 0x100 +#define C_DS1_CTRL_REQ_VALID 0x200 + +/* M_ULP_WAKEIND bits */ +#define C_WATCHDOG_EXPIRY BIT(0) +#define C_FCBS_ERROR BIT(1) +#define C_RETX_FAILURE BIT(2) +#define C_HOST_WAKEUP BIT(3) +#define C_INVALID_FCBS_BLOCK BIT(4) +#define C_HUDI_DS1_EXIT BIT(5) +#define C_LOB_SLEEP BIT(6) +#define C_DS1_PHY_TXERR BIT(9) +#define C_DS1_WAKE_TIMER BIT(10) + +#define PHYTX_ERR_BLK_SIZE 18 +#define D11SHM_FIRST2BYTE_MASK 0xFFFF0000 +#define D11SHM_SECOND2BYTE_MASK 0x0000FFFF +#define D11SHM_2BYTE_SHIFT 16 + +#define D11SHM_RD(sdh, offset, ret) \ + inff_sdiod_readl(sdh, D11SHM_ADDR(offset), ret) + +/* SHM Read is modified based on SHM 4 byte alignment as SHM size is 2 bytes and + * 2 byte is currently not working on FMAC + * If SHM address is not 4 byte aligned, then right shift by 16 + * otherwise, mask the first two MSB bytes + * Suppose data in address 7260 is 0x440002 and it is 4 byte aligned + * Correct SHM value is 0x2 for this SHM offset and next SHM value is 0x44 + */ +#define D11SHM_RDW(sdh, offset, ret) ({ \ + typeof(sdh) _sdh = (sdh); \ + typeof(offset) _offset = (offset); \ + typeof(ret) _ret = (ret); \ + ((_offset % 4) ? 
\ + (inff_sdiod_readl(_sdh, D11SHM_ADDR(_offset), _ret) \ + >> D11SHM_2BYTE_SHIFT) : \ + (inff_sdiod_readl(_sdh, D11SHM_ADDR(_offset), _ret) \ + & D11SHM_SECOND2BYTE_MASK)); \ + }) + +/* SHM is of size 2 bytes, 4 bytes write will overwrite other SHM's + * First read 4 bytes and then clear the required two bytes based on + * 4 byte alignment, then update the required value and write the + * 4 byte value now + */ +#define D11SHM_WR(sdh, offset, val, mask, ret) \ + do { \ + typeof(offset) _offet = (offset); \ + typeof(val) _val = (val); \ + typeof(mask) _mask = (mask); \ + if ((_offet) % 4) \ + _val = (_val & D11SHM_SECOND2BYTE_MASK) | \ + ((_mask) << D11SHM_2BYTE_SHIFT); \ + else \ + _val = (_mask) | (_val & D11SHM_FIRST2BYTE_MASK); \ + inff_sdiod_writel(sdh, D11SHM_ADDR(_offet), _val, ret); \ + } while (0) +#define D11REG_WR(sdh, addr, val, ret) \ + inff_sdiod_writel(sdh, addr, val, ret) + +#define D11REG_RD(sdh, addr, ret) \ + inff_sdiod_readl(sdh, addr, ret) + +#endif /* INFF_SDIO_H */ -- 2.25.1 Driver implementation of the BCDC protocol used for the Control and Data communication with Infineon's WLAN Device over the SDIO BUS. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/bcdc.c | 521 +++++++++++++++++++ drivers/net/wireless/infineon/inffmac/bcdc.h | 24 + 2 files changed, 545 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/bcdc.c create mode 100644 drivers/net/wireless/infineon/inffmac/bcdc.h diff --git a/drivers/net/wireless/infineon/inffmac/bcdc.c b/drivers/net/wireless/infineon/inffmac/bcdc.c new file mode 100644 index 000000000000..c6e78d8645d5 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/bcdc.c @@ -0,0 +1,521 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +/******************************************************************************* + * Communicates with the dongle by using dcmd codes. + * For certain dcmd codes, the dongle interprets string data from the host. + ******************************************************************************/ + +#include +#include + +#include "utils.h" +#include "core.h" +#include "bus.h" +#include "fwsignal.h" +#include "debug.h" +#include "tracepoint.h" +#include "proto.h" +#include "bcdc.h" +#include "common.h" + +struct inff_proto_bcdc_dcmd { + __le32 cmd; /* dongle command value */ + __le32 len; /* lower 16: output buflen; + * upper 16: input buflen (excludes header) + */ + __le32 flags; /* flag defns given below */ + __le32 status; /* status code returned from the device */ +}; + +/* BCDC flag definitions */ +#define BCDC_DCMD_ERROR 0x01 /* 1=cmd failed */ +#define BCDC_DCMD_SET 0x02 /* 0=get, 1=set cmd */ +#define BCDC_DCMD_IF_MASK 0xF000 /* I/F index */ +#define BCDC_DCMD_IF_SHIFT 12 +#define BCDC_DCMD_ID_MASK 0xFFFF0000 /* id and cmd pairing */ +#define BCDC_DCMD_ID_SHIFT 16 /* ID Mask shift bits */ +#define BCDC_DCMD_ID(flags) \ + (((flags) & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT) + +/* + * BCDC header - Infineon specific extension of CDC. + * Used on data packets to convey priority. 
+ */ +#define BCDC_HEADER_LEN 4 +#define BCDC_PROTO_VER 2 /* Protocol version */ +#define BCDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */ +#define BCDC_FLAG_VER_SHIFT 4 /* Protocol version shift */ +#define BCDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */ +#define BCDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums */ +#define BCDC_PRIORITY_MASK 0x7 +#define BCDC_FLAG2_IF_MASK 0x0f /* packet rx interface in APSTA */ +#define BCDC_FLAG2_IF_SHIFT 0 + +#define BCDC_GET_IF_IDX(hdr) \ + ((int)((((hdr)->flags2) & BCDC_FLAG2_IF_MASK) >> BCDC_FLAG2_IF_SHIFT)) +#define BCDC_SET_IF_IDX(hdr, idx) {\ + typeof(hdr) _hdr = (hdr); \ + ((_hdr)->flags2 = (((_hdr)->flags2 & ~BCDC_FLAG2_IF_MASK) | \ + ((idx) << BCDC_FLAG2_IF_SHIFT))); \ + } + +/** + * struct inff_proto_bcdc_header - BCDC header format + * + * @flags: flags contain protocol and checksum info. + * @priority: 802.1d priority. + * @flags2: additional flags containing dongle interface index. + * @data_offset: start of packet data. header is followed by firmware signals. + */ +struct inff_proto_bcdc_header { + u8 flags; + u8 priority; + u8 flags2; + u8 data_offset; +}; + +/* + * maximum length of firmware signal data between + * the BCDC header and packet data in the tx path. + */ +#define INFF_PROT_FW_SIGNAL_MAX_TXBYTES 12 + +#define RETRIES 2 /* # of retries to retrieve matching dcmd response */ +#define BUS_HEADER_LEN (16 + 64) /* Must be at least SDPCM_RESERVE + * (amount of header that might be added) + * plus any space that might be needed + * for bus alignment padding. 
+ */ +#define ROUND_UP_MARGIN 2048 + +struct inff_bcdc { + u16 reqid; + u8 bus_header[BUS_HEADER_LEN]; + struct inff_proto_bcdc_dcmd msg; + unsigned char buf[INFF_DCMD_MAXLEN]; + struct inff_fws_info *fws; +}; + +struct inff_fws_info *drvr_to_fws(struct inff_pub *drvr) +{ + struct inff_bcdc *bcdc = drvr->proto->pd; + + return bcdc->fws; +} + +static int +inff_proto_bcdc_msg(struct inff_pub *drvr, int ifidx, uint cmd, void *buf, + uint len, bool set) +{ + struct inff_bcdc *bcdc = (struct inff_bcdc *)drvr->proto->pd; + struct inff_proto_bcdc_dcmd *msg = &bcdc->msg; + u32 flags; + + inff_dbg(BCDC, "Enter\n"); + + memset(msg, 0, sizeof(struct inff_proto_bcdc_dcmd)); + + msg->cmd = cpu_to_le32(cmd); + msg->len = cpu_to_le32(len); + flags = (++bcdc->reqid << BCDC_DCMD_ID_SHIFT); + if (set) + flags |= BCDC_DCMD_SET; + flags = (flags & ~BCDC_DCMD_IF_MASK) | + (ifidx << BCDC_DCMD_IF_SHIFT); + msg->flags = cpu_to_le32(flags); + + if (buf) + memcpy(bcdc->buf, buf, len); + + len += sizeof(*msg); + if (len > INFF_TX_IOCTL_MAX_MSG_SIZE) + len = INFF_TX_IOCTL_MAX_MSG_SIZE; + + /* Send request */ + return inff_bus_txctl(drvr->bus_if, (unsigned char *)&bcdc->msg, len); +} + +static int inff_proto_bcdc_cmplt(struct inff_pub *drvr, u32 id, u32 len) +{ + int ret; + struct inff_bcdc *bcdc = (struct inff_bcdc *)drvr->proto->pd; + + inff_dbg(BCDC, "Enter\n"); + len += sizeof(struct inff_proto_bcdc_dcmd); + do { + ret = inff_bus_rxctl(drvr->bus_if, (unsigned char *)&bcdc->msg, + len); + if (ret < 0) + break; + } while (BCDC_DCMD_ID(le32_to_cpu(bcdc->msg.flags)) != id); + + return ret; +} + +static int +inff_proto_bcdc_query_dcmd(struct inff_pub *drvr, int ifidx, uint cmd, + void *buf, uint len, int *fwerr) +{ + struct inff_bcdc *bcdc = (struct inff_bcdc *)drvr->proto->pd; + struct inff_proto_bcdc_dcmd *msg = &bcdc->msg; + void *info; + int ret = 0, retries = 0; + u32 id, flags; + + inff_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len); + + *fwerr = 0; + ret = inff_proto_bcdc_msg(drvr, ifidx, 
cmd, buf, len, false); + if (ret < 0) { + iphy_err(drvr, "inff_proto_bcdc_msg failed w/status %d\n", + ret); + goto done; + } + +retry: + /* wait for interrupt and get first fragment */ + ret = inff_proto_bcdc_cmplt(drvr, bcdc->reqid, len); + if (ret < 0) + goto done; + + flags = le32_to_cpu(msg->flags); + id = (flags & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT; + + if (id < bcdc->reqid && (++retries < RETRIES)) + goto retry; + if (id != bcdc->reqid) { + iphy_err(drvr, "%s: unexpected request id %d (expected %d)\n", + inff_ifname(inff_get_ifp(drvr, ifidx)), id, + bcdc->reqid); + ret = -EINVAL; + goto done; + } + + /* Check info buffer */ + info = (void *)&bcdc->buf[0]; + + /* Copy info buffer */ + if (buf) { + if (ret < (int)len) + len = ret; + memcpy(buf, info, len); + } + + ret = 0; + + /* Check the ERROR flag */ + if (flags & BCDC_DCMD_ERROR) + *fwerr = le32_to_cpu(msg->status); +done: + return ret; +} + +static int +inff_proto_bcdc_set_dcmd(struct inff_pub *drvr, int ifidx, uint cmd, + void *buf, uint len, int *fwerr) +{ + struct inff_bcdc *bcdc = (struct inff_bcdc *)drvr->proto->pd; + struct inff_proto_bcdc_dcmd *msg = &bcdc->msg; + int ret; + u32 flags, id; + + inff_dbg(BCDC, "Enter, cmd %d len %d\n", cmd, len); + + *fwerr = 0; + ret = inff_proto_bcdc_msg(drvr, ifidx, cmd, buf, len, true); + if (ret < 0) + goto done; + + ret = inff_proto_bcdc_cmplt(drvr, bcdc->reqid, len); + if (ret < 0) + goto done; + + flags = le32_to_cpu(msg->flags); + id = (flags & BCDC_DCMD_ID_MASK) >> BCDC_DCMD_ID_SHIFT; + + if (id != bcdc->reqid) { + iphy_err(drvr, "%s: unexpected request id %d (expected %d)\n", + inff_ifname(inff_get_ifp(drvr, ifidx)), id, + bcdc->reqid); + ret = -EINVAL; + goto done; + } + + ret = 0; + + /* Check the ERROR flag */ + if (flags & BCDC_DCMD_ERROR) + *fwerr = le32_to_cpu(msg->status); + +done: + return ret; +} + +static void +inff_proto_bcdc_hdrpush(struct inff_pub *drvr, int ifidx, u8 offset, + struct sk_buff *pktbuf) +{ + struct inff_proto_bcdc_header 
*h; + + inff_dbg(BCDC, "Enter\n"); + + /* Push BDC header used to convey priority for buses that don't */ + skb_push(pktbuf, BCDC_HEADER_LEN); + + h = (struct inff_proto_bcdc_header *)(pktbuf->data); + + h->flags = (BCDC_PROTO_VER << BCDC_FLAG_VER_SHIFT); + if (pktbuf->ip_summed == CHECKSUM_PARTIAL) + h->flags |= BCDC_FLAG_SUM_NEEDED; + + h->priority = (pktbuf->priority & BCDC_PRIORITY_MASK); + h->flags2 = 0; + h->data_offset = offset; + BCDC_SET_IF_IDX(h, ifidx); + trace_inff_bcdchdr(pktbuf->data); +} + +static int +inff_proto_bcdc_hdrpull(struct inff_pub *drvr, bool do_fws, + struct sk_buff *pktbuf, struct inff_if **ifp) +{ + struct inff_proto_bcdc_header *h; + struct inff_if *tmp_if; + + inff_dbg(BCDC, "Enter\n"); + + /* Pop BCDC header used to convey priority for buses that don't */ + if (pktbuf->len <= BCDC_HEADER_LEN) { + inff_dbg(INFO, "rx data too short (%d <= %d)\n", + pktbuf->len, BCDC_HEADER_LEN); + return -EBADE; + } + + trace_inff_bcdchdr(pktbuf->data); + h = (struct inff_proto_bcdc_header *)(pktbuf->data); + + tmp_if = inff_get_ifp(drvr, BCDC_GET_IF_IDX(h)); + if (!tmp_if) { + inff_dbg(INFO, "no matching ifp found for ifidx: %d\n", + BCDC_GET_IF_IDX(h)); + return -EBADE; + } + if (((h->flags & BCDC_FLAG_VER_MASK) >> BCDC_FLAG_VER_SHIFT) != + BCDC_PROTO_VER) { + iphy_err(drvr, "%s: non-BCDC packet received, flags 0x%x\n", + inff_ifname(tmp_if), h->flags); + return -EBADE; + } + + if (h->flags & BCDC_FLAG_SUM_GOOD) { + inff_dbg(BCDC, "%s: BDC rcv, good checksum, flags 0x%x\n", + inff_ifname(tmp_if), h->flags); + pktbuf->ip_summed = CHECKSUM_UNNECESSARY; + } + + pktbuf->priority = h->priority & BCDC_PRIORITY_MASK; + + skb_pull(pktbuf, BCDC_HEADER_LEN); + if (do_fws) + inff_fws_hdrpull(tmp_if, h->data_offset << 2, pktbuf); + else + skb_pull(pktbuf, h->data_offset << 2); + + if (pktbuf->len == 0) + return -ENODATA; + + if (ifp) + *ifp = tmp_if; + return 0; +} + +static int inff_proto_bcdc_tx_queue_data(struct inff_pub *drvr, int ifidx, + struct sk_buff 
*skb) +{ + struct inff_if *ifp = inff_get_ifp(drvr, ifidx); + struct inff_bcdc *bcdc = drvr->proto->pd; + + if (!inff_fws_queue_skbs(bcdc->fws)) + return inff_proto_txdata(drvr, ifidx, 0, skb); + + return inff_fws_process_skb(ifp, skb); +} + +static int +inff_proto_bcdc_txdata(struct inff_pub *drvr, int ifidx, u8 offset, + struct sk_buff *pktbuf) +{ + inff_proto_bcdc_hdrpush(drvr, ifidx, offset, pktbuf); + return inff_bus_txdata(drvr->bus_if, pktbuf); +} + +void inff_proto_bcdc_txflowblock(struct device *dev, bool state) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pub *drvr = bus_if->drvr; + + inff_dbg(TRACE, "Enter\n"); + + inff_fws_bus_blocked(drvr, state); +} + +void +inff_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp, + bool success) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_bcdc *bcdc = bus_if->drvr->proto->pd; + struct inff_if *ifp; + + /* await txstatus signal for firmware if active */ + if (inff_fws_fc_active(bcdc->fws)) { + inff_fws_bustxcomplete(bcdc->fws, txp, success); + } else { + if (inff_proto_bcdc_hdrpull(bus_if->drvr, false, txp, &ifp)) + inff_pkt_buf_free_skb(txp); + else + inff_txfinalize(ifp, txp, success); + } +} + +static void +inff_proto_bcdc_configure_addr_mode(struct inff_pub *drvr, int ifidx, + enum proto_addr_mode addr_mode) +{ +} + +static void +inff_proto_bcdc_delete_peer(struct inff_pub *drvr, int ifidx, + u8 peer[ETH_ALEN]) +{ +} + +static void +inff_proto_bcdc_add_tdls_peer(struct inff_pub *drvr, int ifidx, + u8 peer[ETH_ALEN]) +{ +} + +static void inff_proto_bcdc_rxreorder(struct inff_if *ifp, + struct sk_buff *skb, bool inirq) +{ + inff_fws_rxreorder(ifp, skb, inirq); +} + +static void +inff_proto_bcdc_add_if(struct inff_if *ifp) +{ + struct net_device *ndev; + + inff_dbg(TRACE, "Enter\n"); + inff_fws_add_interface(ifp); + ndev = ifp->ndev; + /* Need to enable both napi_enable & sdio_in_isr to avoid duplicate processing */ + if (!ndev || 
!ifp->drvr->settings->napi_enable || !ifp->drvr->settings->sdio_in_isr) + return; + netif_napi_add(ndev, &ifp->napi, ifp->drvr->bus_if->ops->napi_poll); + napi_enable(&ifp->napi); + ifp->napi_gro = false; + inff_dbg(TRACE, "Exit\n"); +} + +static void +inff_proto_bcdc_del_if(struct inff_if *ifp) +{ + inff_dbg(TRACE, "Enter\n"); + if (ifp->drvr->settings->napi_enable && ifp->napi.poll) { + napi_synchronize(&ifp->napi); + napi_disable(&ifp->napi); + netif_napi_del(&ifp->napi); + } + + inff_fws_del_interface(ifp); + inff_dbg(TRACE, "Exit\n"); +} + +static void +inff_proto_bcdc_reset_if(struct inff_if *ifp) +{ + inff_fws_reset_interface(ifp); +} + +static void +inff_proto_bcdc_cleanup_if(struct inff_if *ifp) +{ + inff_fws_cleanup_interface(ifp); +} + +static int +inff_proto_bcdc_init_done(struct inff_pub *drvr) +{ + struct inff_bcdc *bcdc = drvr->proto->pd; + struct inff_fws_info *fws; + + fws = inff_fws_attach(drvr); + if (IS_ERR(fws)) + return PTR_ERR(fws); + + bcdc->fws = fws; + return 0; +} + +static void inff_proto_bcdc_debugfs_create(struct inff_pub *drvr) +{ + inff_fws_debugfs_create(drvr); +} + +int inff_proto_bcdc_attach(struct inff_pub *drvr) +{ + struct inff_bcdc *bcdc; + + bcdc = kzalloc(sizeof(*bcdc), GFP_ATOMIC); + if (!bcdc) + goto fail; + + /* ensure that the msg buf directly follows the cdc msg struct */ + if ((unsigned long)(&bcdc->msg + 1) != (unsigned long)bcdc->buf) { + iphy_err(drvr, "struct inff_proto_bcdc is not correctly defined\n"); + goto fail; + } + + drvr->proto->hdrpull = inff_proto_bcdc_hdrpull; + drvr->proto->query_dcmd = inff_proto_bcdc_query_dcmd; + drvr->proto->set_dcmd = inff_proto_bcdc_set_dcmd; + drvr->proto->tx_queue_data = inff_proto_bcdc_tx_queue_data; + drvr->proto->txdata = inff_proto_bcdc_txdata; + drvr->proto->configure_addr_mode = inff_proto_bcdc_configure_addr_mode; + drvr->proto->delete_peer = inff_proto_bcdc_delete_peer; + drvr->proto->add_tdls_peer = inff_proto_bcdc_add_tdls_peer; + drvr->proto->rxreorder = 
inff_proto_bcdc_rxreorder; + drvr->proto->add_if = inff_proto_bcdc_add_if; + drvr->proto->del_if = inff_proto_bcdc_del_if; + drvr->proto->reset_if = inff_proto_bcdc_reset_if; + drvr->proto->cleanup_if = inff_proto_bcdc_cleanup_if; + drvr->proto->init_done = inff_proto_bcdc_init_done; + drvr->proto->debugfs_create = inff_proto_bcdc_debugfs_create; + drvr->proto->pd = bcdc; + + drvr->hdrlen += BCDC_HEADER_LEN + INFF_PROT_FW_SIGNAL_MAX_TXBYTES; + drvr->bus_if->maxctl = INFF_DCMD_MAXLEN + + sizeof(struct inff_proto_bcdc_dcmd) + ROUND_UP_MARGIN; + return 0; + +fail: + kfree(bcdc); + return -ENOMEM; +} + +void inff_proto_bcdc_detach(struct inff_pub *drvr) +{ + struct inff_bcdc *bcdc = drvr->proto->pd; + + drvr->proto->pd = NULL; + inff_fws_detach(bcdc->fws); + kfree(bcdc); +} diff --git a/drivers/net/wireless/infineon/inffmac/bcdc.h b/drivers/net/wireless/infineon/inffmac/bcdc.h new file mode 100644 index 000000000000..a610639c6928 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/bcdc.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2013 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_BCDC_H +#define INFF_BCDC_H + +#ifdef CONFIG_INFFMAC_PROTO_BCDC +int inff_proto_bcdc_attach(struct inff_pub *drvr); +void inff_proto_bcdc_detach(struct inff_pub *drvr); +void inff_proto_bcdc_txflowblock(struct device *dev, bool state); +void inff_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp, + bool success); +struct inff_fws_info *drvr_to_fws(struct inff_pub *drvr); +#else +static inline int inff_proto_bcdc_attach(struct inff_pub *drvr) { return 0; } +static inline void inff_proto_bcdc_detach(struct inff_pub *drvr) {} +#endif + +#endif /* INFF_BCDC_H */ -- 2.25.1 Implementation for managing the SDIO Device through the SD Host Controller Driver. 
Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/infsdh.c | 1367 +++++++++++++++++ 1 file changed, 1367 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/infsdh.c diff --git a/drivers/net/wireless/infineon/inffmac/infsdh.c b/drivers/net/wireless/infineon/inffmac/infsdh.c new file mode 100644 index 000000000000..96b034443b3e --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/infsdh.c @@ -0,0 +1,1367 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ +/* ****************** SDIO CARD Interface Functions **************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "defs.h" +#include "hw_ids.h" +#include "utils.h" +#include "chipcommon.h" +#include "chip.h" +#include "bus.h" +#include "debug.h" +#include "sdio.h" +#include "core.h" +#include "common.h" +#include "cfg80211.h" +#include "fwsignal.h" +#include "chip_5557x.h" +#include "chip_5591x.h" + +#define SDIOH_API_ACCESS_RETRY_LIMIT 2 + +#define DMA_ALIGN_MASK 0x03 + +#define SDIO_FUNC1_BLOCKSIZE 64 +#define SDIO_FUNC2_BLOCKSIZE 512 + +/* Maximum milliseconds to wait for F2 to come up */ +#define SDIO_WAIT_F2RDY 3000 + +#define INFF_DEFAULT_RXGLOM_SIZE 32 /* max rx frames in glom chain */ + +struct inff_sdiod_freezer { + atomic_t freezing; + atomic_t thread_count; + u32 frozen_count; + wait_queue_head_t thread_freeze; + struct completion resumed; +}; + +static irqreturn_t inff_sdiod_oob_irqhandler(int irq, void *dev_id) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev_id); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + + inff_dbg(INTR, "OOB intr triggered\n"); + + /* out-of-band interrupt is level-triggered which won't + * be 
cleared until dpc + */ + if (sdiodev->irq_en) { + disable_irq_nosync(irq); + sdiodev->irq_en = false; + } + + inff_sdio_isr(sdiodev->bus, true); + + return IRQ_HANDLED; +} + +/* interrupt handler for SDIO function 1 interrupt */ +static void inff_sdiod_ib_irqhandler(struct sdio_func *func) +{ + struct inff_bus *bus_if = dev_get_drvdata(&func->dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + + inff_dbg(INTR, "F%d IB intr triggered\n", func->num); + + inff_sdio_isr(sdiodev->bus, false); +} + +/* dummy handler for SDIO function 2 interrupt */ +static void inff_sdiod_dummy_irqhandler(struct sdio_func *func) +{ +} + +int inff_sdiod_intr_register(struct inff_sdio_dev *sdiodev) +{ + struct inff_sdio_platform_data *pdata; + int ret = 0; + u8 data; + + pdata = &sdiodev->settings->bus.sdio; + if (pdata->oob_irq_supported) { + inff_dbg(SDIO, "Enter, register OOB IRQ %d\n", + pdata->oob_irq_nr); + if (!sdiodev->oob_irq_requested) { + spin_lock_init(&sdiodev->irq_en_lock); + sdiodev->irq_en = true; + + ret = request_irq(pdata->oob_irq_nr, + inff_sdiod_oob_irqhandler, + pdata->oob_irq_flags, "inff_oob_intr", + &sdiodev->func1->dev); + if (ret != 0) { + inff_err("request_irq failed %d\n", ret); + return ret; + } + sdiodev->oob_irq_requested = true; + + ret = enable_irq_wake(pdata->oob_irq_nr); + if (ret != 0) { + inff_err("enable_irq_wake failed %d\n", ret); + return ret; + } + disable_irq_wake(pdata->oob_irq_nr); + } + sdio_claim_host(sdiodev->func1); + + /* must configure SDIO_CCCR_IENx to enable irq */ + data = inff_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret); + data |= SDIO_CCCR_IEN_FUNC1 | SDIO_CCCR_IEN_FUNC2 | + SDIO_CCCR_IEN_FUNC0; + inff_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret); + + /* redirect, configure and enable io for interrupt signal */ + data = SDIO_CCCR_INFF_SEPINT_MASK | SDIO_CCCR_INFF_SEPINT_OE; + if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH) + data |= SDIO_CCCR_INFF_SEPINT_ACT_HI; + inff_sdiod_func0_wb(sdiodev, 
SDIO_CCCR_INFF_SEPINT, + data, &ret); + sdio_release_host(sdiodev->func1); + } else { + inff_dbg(SDIO, "Entering\n"); + sdio_claim_host(sdiodev->func1); + sdio_claim_irq(sdiodev->func1, inff_sdiod_ib_irqhandler); + sdio_claim_irq(sdiodev->func2, inff_sdiod_dummy_irqhandler); + sdio_release_host(sdiodev->func1); + sdiodev->sd_irq_requested = true; + } + + return 0; +} + +void inff_sdiod_intr_unregister(struct inff_sdio_dev *sdiodev) +{ + inff_dbg(SDIO, "Entering oob=%d sd=%d\n", + sdiodev->oob_irq_requested, + sdiodev->sd_irq_requested); + + if (sdiodev->oob_irq_requested) { + struct inff_sdio_platform_data *pdata; + + pdata = &sdiodev->settings->bus.sdio; + sdio_claim_host(sdiodev->func1); + inff_sdiod_func0_wb(sdiodev, SDIO_CCCR_INFF_SEPINT, 0, NULL); + inff_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, 0, NULL); + sdio_release_host(sdiodev->func1); + + sdiodev->oob_irq_requested = false; + free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev); + sdiodev->irq_en = false; + sdiodev->oob_irq_requested = false; + } + + if (sdiodev->sd_irq_requested) { + sdio_claim_host(sdiodev->func1); + sdio_release_irq(sdiodev->func2); + sdio_release_irq(sdiodev->func1); + sdio_release_host(sdiodev->func1); + sdiodev->sd_irq_requested = false; + } +} + +void inff_sdiod_change_state(struct inff_sdio_dev *sdiodev, + enum inff_sdiod_state state) +{ + if (sdiodev->state == INFF_SDIOD_NOMEDIUM || + state == sdiodev->state) + return; + + inff_dbg(TRACE, "%d -> %d\n", sdiodev->state, state); + switch (sdiodev->state) { + case INFF_SDIOD_DATA: + /* any other state means bus interface is down */ + inff_bus_change_state(sdiodev->bus_if, INFF_BUS_DOWN); + break; + case INFF_SDIOD_DOWN: + /* transition from DOWN to DATA means bus interface is up */ + if (state == INFF_SDIOD_DATA) + inff_bus_change_state(sdiodev->bus_if, INFF_BUS_UP); + break; + default: + break; + } + sdiodev->state = state; +} + +static int inff_sdiod_set_backplane_window(struct inff_sdio_dev *sdiodev, + u32 addr) +{ + u32 v, bar0 = 
	    addr & SBSDIO_SBWINDOW_MASK;
	int err = 0, i;

	if (inff_sdio_bus_sleep_state(sdiodev->bus)) {
		if (!sdiodev->ignore_bus_error)
			inff_err("ERROR: Write operation when bus is in sleep state\n");
		return -EPERM;
	}

	/* window already points at the right 32k region - nothing to do */
	if (sdiodev->sbwad_valid && bar0 == sdiodev->sbwad)
		return 0;

	v = bar0 >> 8;

	/* program the 24-bit window base, one byte per F1 register */
	for (i = 0 ; i < 3 && !err ; i++, v >>= 8)
		inff_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SBADDRLOW + i,
				  v & 0xff, &err);

	if (!err) {
		/* cache the window so repeated accesses skip the writes */
		sdiodev->sbwad_valid = 1;
		sdiodev->sbwad = bar0;
	}

	return err;
}

/*
 * inff_sdiod_readl() - read a 32-bit backplane register at @addr via F1.
 * Refuses with -EPERM while the bus is asleep. On any error the returned
 * data is 0 and *@ret (if non-NULL) carries the errno.
 */
u32 inff_sdiod_readl(struct inff_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u32 data = 0;
	int retval;

	inff_dbg(SDIOEXT, "addr 0x%x\n", addr);

	if (inff_sdio_bus_sleep_state(sdiodev->bus)) {
		if (!sdiodev->ignore_bus_error)
			inff_err("ERROR: Read operation when bus is in sleep state\n");
		if (ret)
			*ret = -EPERM;
		return data;
	}

	retval = inff_sdiod_set_backplane_window(sdiodev, addr);
	if (retval)
		goto out;

	inff_dbg(SDIO, "reading from addr 0x%x bar0 0x%08x ", addr, sdiodev->sbwad);

	/* fold address into the window and request a 4-byte access */
	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	data = sdio_readl(sdiodev->func1, addr, &retval);

	if (retval)
		data = 0;

	inff_dbg(SDIO, "data 0x%08x\n", data);
out:
	if (ret)
		*ret = retval;

	return data;
}

/*
 * inff_sdiod_writel() - write a 32-bit backplane register at @addr via F1.
 * Mirror of inff_sdiod_readl(); errors are reported through *@ret.
 */
void inff_sdiod_writel(struct inff_sdio_dev *sdiodev, u32 addr,
		       u32 data, int *ret)
{
	int retval;

	inff_dbg(SDIOEXT, "addr 0x%x val 0x%x\n", addr, data);

	if (inff_sdio_bus_sleep_state(sdiodev->bus)) {
		if (!sdiodev->ignore_bus_error)
			inff_err("ERROR: Write operation when bus is in sleep state\n");
		if (ret)
			*ret = -EPERM;
		return;
	}

	retval = inff_sdiod_set_backplane_window(sdiodev, addr);
	if (retval)
		goto out;

	inff_dbg(SDIO, "writing 0x%08x to addr 0x%x bar0 0x%08x\n", data, addr, sdiodev->sbwad);

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	sdio_writel(sdiodev->func1, data, addr, &retval);

out:
	if (ret)
		*ret = retval;
}

/*
 * inff_sdiod_skbuff_read() - fill one skb from the device.
 * Transfer length is the skb length rounded up to a 4-byte multiple, so
 * the caller must have sized the skb with that padding in mind. F1 uses
 * an incrementing-address copy, F2/F3 use fixed-address FIFO reads.
 */
static int inff_sdiod_skbuff_read(struct inff_sdio_dev *sdiodev,
				  struct sdio_func *func, u32 addr,
				  struct sk_buff *skb)
{
	unsigned int req_sz;
	int err;

	if (inff_sdio_bus_sleep_state(sdiodev->bus)) {
		if (!sdiodev->ignore_bus_error)
			inff_err("ERROR: Read operation when bus is in sleep state\n");
		return -EPERM;
	}

	/* Single skb use the standard mmc interface */
	req_sz = skb->len + 3;
	req_sz &= (uint)~3;

	switch (func->num) {
	case SDIO_FUNC_1:
		err = sdio_memcpy_fromio(func, ((u8 *)(skb->data)), addr,
					 req_sz);
		break;
	case SDIO_FUNC_2:
	case SDIO_FUNC_3:
		err = sdio_readsb(func, ((u8 *)(skb->data)), addr, req_sz);
		break;
	default:
		/* bail out as things are really fishy here */
		WARN(1, "invalid sdio function number: %d\n", func->num);
		err = -ENOMEDIUM;
	}

	if (err == -ENOMEDIUM)
		inff_sdiod_change_state(sdiodev, INFF_SDIOD_NOMEDIUM);

	/* NOTE(review): 55572-specific error accounting - presumably a
	 * chip quirk, confirm against firmware signalling docs
	 */
	if (err && sdiodev->func2->device == SDIO_DEVICE_ID_CYPRESS_55572)
		inff_fws_recv_err(sdiodev->bus_if->drvr);

	return err;
}

/*
 * inff_sdiod_skbuff_write() - push one skb to the device; same 4-byte
 * padding contract as inff_sdiod_skbuff_read().
 */
static int inff_sdiod_skbuff_write(struct inff_sdio_dev *sdiodev,
				   struct sdio_func *func, u32 addr,
				   struct sk_buff *skb)
{
	unsigned int req_sz;
	int err;

	if (inff_sdio_bus_sleep_state(sdiodev->bus)) {
		if (!sdiodev->ignore_bus_error)
			inff_err("ERROR: Write operation when bus is in sleep state\n");
		return -EPERM;
	}

	/* Single skb use the standard mmc interface */
	req_sz = skb->len + 3;
	req_sz &= (uint)~3;

	if (func->num == SDIO_FUNC_1 || func->num == SDIO_FUNC_2)
		err = sdio_memcpy_toio(func, addr, ((u8 *)(skb->data)), req_sz);
	else if (func->num == SDIO_FUNC_3)
		err = sdio_writesb(func, addr, ((u8 *)(skb->data)), req_sz);
	else
		return -EINVAL;

	if (err == -ENOMEDIUM)
		inff_sdiod_change_state(sdiodev, INFF_SDIOD_NOMEDIUM);

	return err;
}

/*
 * mmc_submit_one() - submit one prepared CMD53 scatter-gather request.
 * Fills in the per-request fields (sg count, block count, address bits
 * of the CMD53 argument) and waits synchronously for completion.
 */
static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr,
			  struct mmc_command *mc, int sg_cnt, int req_sz,
			  int func_blk_sz, u32 *addr,
			  struct
			  inff_sdio_dev *sdiodev,
			  struct sdio_func *func, int write)
{
	int ret;

	if (inff_sdio_bus_sleep_state(sdiodev->bus)) {
		if (!sdiodev->ignore_bus_error)
			inff_err("ERROR: %s operation when bus is in sleep state\n",
				 write ? "Write" : "Read");
		return -EPERM;
	}

	md->sg_len = sg_cnt;
	md->blocks = req_sz / func_blk_sz;
	/* CMD53 argument layout: address in bits 25:9, block count in 8:0 */
	mc->arg |= (*addr & 0x1FFFF) << 9;	/* address */
	mc->arg |= md->blocks & 0x1FF;	/* block count */
	/* incrementing addr for function 1 */
	if (func->num == SDIO_FUNC_1)
		*addr += req_sz;

	mmc_set_data_timeout(md, func->card);
	mmc_wait_for_req(func->card->host, mr);

	ret = mc->error ? mc->error : md->error;
	if (ret == -ENOMEDIUM) {
		inff_sdiod_change_state(sdiodev, INFF_SDIOD_NOMEDIUM);
	} else if (ret != 0) {
		inff_err("CMD53 sg block %s failed %d\n",
			 write ? "write" : "read", ret);
		ret = -EIO;
	}

	return ret;
}

/**
 * inff_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: inffmac sdio device
 * @func: SDIO function
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: skb buffer head pointer
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int inff_sdiod_sglist_rw(struct inff_sdio_dev *sdiodev,
				struct sdio_func *func,
				bool write, u32 addr,
				struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, src_offset, dst_offset;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff_head local_list, *target_list;
	struct sk_buff *pkt_next = NULL, *src;
	unsigned short max_seg_cnt;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		/* read into freshly-allocated page-sized skbs, then copy
		 * back into the caller's list after the transfer
		 */
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, func->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = inff_pkt_buf_get_skb(PAGE_SIZE);
			if (!pkt_next) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = inff_pkt_buf_get_skb(req_sz);
		if (!pkt_next) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = func->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sdiodev->sgtable.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1 << 31 : 0;	/* write flag */
	mmc_cmd.arg |= (func->num & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1 << 27;	/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (func->num == SDIO_FUNC_1) ? 1 << 26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	/* chop the skb list into sg entries, submitting a CMD53 whenever
	 * the request size or segment count limit is reached
	 */
	req_sz = 0;
	sg_cnt = 0;
	sgl = sdiodev->sgtable.sgl;
	skb_queue_walk(target_list, pkt_next) {
		pkt_offset = 0;
		while (pkt_offset < pkt_next->len) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			if (!sgl) {
				/* out of (pre-allocated) scatterlist entries */
				ret = -ENOMEM;
				goto exit;
			}
			sg_set_buf(sgl, pkt_data, sg_data_sz);
			sg_cnt++;

			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) {
				ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
						     sg_cnt, req_sz, func_blk_sz,
						     &addr, sdiodev, func, write);
				if (ret)
					goto exit_queue_walk;
				req_sz = 0;
				sg_cnt = 0;
				sgl = sdiodev->sgtable.sgl;
			}
		}
	}
	/* flush any remaining partial request */
	if (sg_cnt)
		ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
				     sg_cnt, req_sz, func_blk_sz,
				     &addr, sdiodev, func, write);
exit_queue_walk:
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		/* scatter the page-aligned bounce buffers back into the
		 * caller's original skb list
		 */
		src = __skb_peek(&local_list);
		src_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;

			/* This is safe because we must have enough SKB data
			 * in the local list to cover everything in pktlist.
			 */
			while (1) {
				req_sz = pkt_next->len - dst_offset;
				if (req_sz > src->len - src_offset)
					req_sz = src->len - src_offset;

				orig_data = src->data + src_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);

				src_offset += req_sz;
				if (src_offset == src->len) {
					src_offset = 0;
					src = skb_peek_next(src, &local_list);
				}
				dst_offset += req_sz;
				if (dst_offset == pkt_next->len)
					break;
			}
		}
	}

exit:
	sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
		inff_pkt_buf_free_skb(pkt_next);

	if (ret && sdiodev->func2->device == SDIO_DEVICE_ID_CYPRESS_55572)
		inff_fws_recv_err(sdiodev->bus_if->drvr);

	return ret;
}

/*
 * inff_sdiod_recv_buf() - receive @nbytes into a flat buffer by bouncing
 * through a temporary skb (the skb path handles padding/alignment).
 */
int inff_sdiod_recv_buf(struct inff_sdio_dev *sdiodev, u8 fn,
			u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt = NULL;
	int err = 0;

	mypkt = inff_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		inff_err("inff_pkt_buf_get_skb failed: len %d\n",
			 nbytes);
		return -EIO;
	}

	err = inff_sdiod_recv_pkt(sdiodev, fn, mypkt);
	if (!err)
		memcpy(buf, mypkt->data, nbytes);

	inff_pkt_buf_free_skb(mypkt);
	return err;
}

/*
 * inff_sdiod_recv_pkt() - receive a single skb over F2 or F3.
 * Derives the function-specific receive address, sets the backplane
 * window, then delegates to inff_sdiod_skbuff_read().
 */
int inff_sdiod_recv_pkt(struct inff_sdio_dev *sdiodev, u8 fn,
			struct sk_buff *pkt)
{
	struct sdio_func *func = NULL;
	u32 base_addr = 0;
	u32 recv_addr = 0;
	int err = 0;

	if (fn == SDIO_FUNC_2) {
		/* F2 is only DMA. HW ignore the address field in the cmd53 /cmd52. */
		base_addr = sdiodev->cc_core->base;
		recv_addr = base_addr & SBSDIO_SB_OFT_ADDR_MASK;
		recv_addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
		func = sdiodev->func2;
	} else if (fn == SDIO_FUNC_3) {
		/* F3 has registers and DMA. A DMA access is identified using the
		 * address value 0x0. If the address field has any other value, it
		 * won't be considered as F3 packet transfer. If the address corresponds
		 * to a valid F3 register address, driver will get proper response,
		 * otherwise driver will get error response.
		 */
		base_addr = 0;
		recv_addr = 0;
		func = sdiodev->func3;
	} else {
		inff_err("invalid function number: %d\n", fn);
		return -EINVAL;
	}

	err = inff_sdiod_set_backplane_window(sdiodev, base_addr);
	if (err)
		goto done;

	err = inff_sdiod_skbuff_read(sdiodev, func, recv_addr, pkt);

	inff_dbg(DATA, "F%d, base addr: 0x%x, recv addr: 0x%x, size: %d, err: %d\n",
		 fn, base_addr, recv_addr, pkt->len, err);
done:
	return err;
}

/*
 * inff_sdiod_recv_chain() - receive a glommed chain of skbs over F2.
 * Three strategies: single-skb direct read, bounce through one large
 * "glom" skb when the host lacks sg support, or a true scatter-gather
 * read via inff_sdiod_sglist_rw().
 */
int inff_sdiod_recv_chain(struct inff_sdio_dev *sdiodev,
			  struct sk_buff_head *pktq, uint totlen)
{
	struct sk_buff *glom_skb = NULL;
	struct sk_buff *skb;
	u32 addr = sdiodev->cc_core->base;
	int err = 0;

	inff_dbg(SDIO, "addr = 0x%x, size = %d\n",
		 addr, pktq->qlen);

	err = inff_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto done;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	if (pktq->qlen == 1) {
		err = inff_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
					     __skb_peek(pktq));
	} else if (!sdiodev->sg_support) {
		glom_skb = inff_pkt_buf_get_skb(totlen);
		if (!glom_skb)
			return -ENOMEM;
		err = inff_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
					     glom_skb);
		if (err)
			goto done;

		/* split the glom buffer back into the caller's skbs */
		skb_queue_walk(pktq, skb) {
			memcpy(skb->data, glom_skb->data, skb->len);
			skb_pull(glom_skb, skb->len);
		}
	} else {
		err = inff_sdiod_sglist_rw(sdiodev, sdiodev->func2, false,
					   addr, pktq);
	}

done:
	inff_pkt_buf_free_skb(glom_skb);
	return err;
}

/*
 * inff_sdiod_send_buf() - send @nbytes from a flat buffer over F2 or F3
 * by copying into a temporary skb; mirrors inff_sdiod_recv_buf().
 */
int inff_sdiod_send_buf(struct inff_sdio_dev *sdiodev, u8 fn,
			u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt = NULL;
	struct sdio_func *func = NULL;
	u32 base_addr = 0;
	u32 send_addr = 0;
	int err = 0;

	if (fn == 2) {
		/* F2 is only DMA. HW ignore the address field in the cmd53 /cmd52. */
		base_addr = sdiodev->cc_core->base;
		send_addr = base_addr & SBSDIO_SB_OFT_ADDR_MASK;
		send_addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
		func = sdiodev->func2;
	} else if (fn == 3) {
		/* F3 has registers and DMA. A DMA access is identified using the
		 * address value 0x0. If the address field has any other value, it
		 * won't be considered as F3 packet transfer. If the address corresponds
		 * to a valid F3 register address, driver will get proper response,
		 * otherwise driver will get error response.
		 */
		base_addr = 0;
		send_addr = 0;
		func = sdiodev->func3;
	} else {
		inff_err("invalid function number: %d\n", fn);
		return -EINVAL;
	}

	mypkt = inff_pkt_buf_get_skb(nbytes);

	if (!mypkt) {
		inff_err("inff_pkt_buf_get_skb failed: len %d\n",
			 nbytes);
		return -EIO;
	}

	memcpy(mypkt->data, buf, nbytes);

	err = inff_sdiod_set_backplane_window(sdiodev, base_addr);
	if (err)
		goto out;

	err = inff_sdiod_skbuff_write(sdiodev, func, send_addr, mypkt);

	inff_dbg(DATA, "F%d, base addr: 0x%x, send addr: 0x%x, size: %d, err: %d\n",
		 fn, base_addr, send_addr, mypkt->len, err);
out:
	inff_pkt_buf_free_skb(mypkt);

	return err;
}

/*
 * inff_sdiod_send_pkt() - send a queue of skbs over F2, either one CMD53
 * per skb (no sg support or single packet) or via scatter-gather.
 * On the per-skb path, transmission stops at the first failing skb.
 */
int inff_sdiod_send_pkt(struct inff_sdio_dev *sdiodev,
			struct sk_buff_head *pktq)
{
	struct sk_buff *skb;
	u32 addr = sdiodev->cc_core->base;
	int err;

	inff_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);

	err = inff_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		return err;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	if (pktq->qlen == 1 || !sdiodev->sg_support) {
		skb_queue_walk(pktq, skb) {
			err = inff_sdiod_skbuff_write(sdiodev, sdiodev->func2,
						      addr, skb);
			if (err)
				break;
		}
	} else {
		err = inff_sdiod_sglist_rw(sdiodev, sdiodev->func2, true,
					   addr, pktq);
	}

	return err;
}

/*
 * inff_sdiod_ramrw() - read or write @size bytes of dongle RAM at
 * @address over F1, walking the 32k backplane window as needed.
 * Transfers go through a bounce skb of at most one window's worth.
 */
int
inff_sdiod_ramrw(struct inff_sdio_dev *sdiodev, bool write, u32 address,
		 u8 *data, uint size)
{
	int err = 0;
	struct sk_buff *pkt;
	u32 sdaddr;
	uint dsize;

	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
	pkt = __dev_alloc_skb(dsize, GFP_KERNEL);
	if (!pkt) {
		inff_err("dev_alloc_skb failed: len %d\n", dsize);
		return -EIO;
	}
	pkt->priority = 0;

	/* Determine initial transfer parameters */
	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
	/* clamp the first chunk so it does not cross a window boundary */
	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
	else
		dsize = size;

	sdio_claim_host(sdiodev->func1);

	/* Do the transfer(s) */
	while (size) {
		/* Set the backplane window to include the start address */
		err = inff_sdiod_set_backplane_window(sdiodev, address);
		if (err)
			break;

		inff_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
			 write ? "write" : "read", dsize,
			 sdaddr, address & SBSDIO_SBWINDOW_MASK);

		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

		skb_put(pkt, dsize);

		if (write) {
			memcpy(pkt->data, data, dsize);
			err = inff_sdiod_skbuff_write(sdiodev, sdiodev->func1,
						      sdaddr, pkt);
		} else {
			err = inff_sdiod_skbuff_read(sdiodev, sdiodev->func1,
						     sdaddr, pkt);
		}

		if (err) {
			inff_err("membytes transfer failed write=%d err=%d\n", write, err);
			break;
		}
		if (!write)
			memcpy(data, pkt->data, dsize);
		/* reset the bounce skb for the next chunk */
		skb_trim(pkt, 0);

		/* Adjust for next transfer (if any) */
		size -= dsize;
		if (size) {
			data += dsize;
			address += dsize;
			sdaddr = 0;
			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
		}
	}

	dev_kfree_skb(pkt);

	sdio_release_host(sdiodev->func1);

	return err;
}

/*
 * inff_sdiod_abort() - abort an in-progress transfer on @func by writing
 * the function number to the F0 CCCR abort register.
 */
int inff_sdiod_abort(struct inff_sdio_dev *sdiodev, struct sdio_func *func)
{
	inff_dbg(SDIO, "Enter\n");

	/* Issue abort cmd52 command through F0 */
	inff_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, func->num, NULL);

	inff_dbg(SDIO, "Exit\n");
	return 0;
}

/*
 * inff_sdiod_sgtable_alloc() - size the driver's transfer limits from the
 * MMC host capabilities and pre-allocate the scatterlist table used by
 * inff_sdiod_sglist_rw(). Falls back to non-sg mode on allocation failure.
 */
void inff_sdiod_sgtable_alloc(struct inff_sdio_dev *sdiodev)
{
	struct sdio_func *func;
	struct mmc_host *host;
	uint max_blocks;
	uint nents;
	int err;

	func = sdiodev->func2;
	host = func->card->host;
	sdiodev->sg_support = host->max_segs > 1;
	/* 511 is the CMD53 block-count field limit */
	max_blocks = min_t(uint, host->max_blk_count, 511u);
	sdiodev->max_request_size = min_t(uint, host->max_req_size,
					  max_blocks * func->cur_blksize);
	sdiodev->max_segment_count = min_t(uint, host->max_segs,
					   SG_MAX_SINGLE_ALLOC);
	sdiodev->max_segment_size = host->max_seg_size;

	if (!sdiodev->sg_support)
		return;

	/* two sg entries per glom packet worst case (head + tail split) -
	 * NOTE(review): factor of 2 presumed, confirm against glom layout
	 */
	nents = max_t(uint, INFF_DEFAULT_RXGLOM_SIZE,
		      sdiodev->settings->bus.sdio.txglomsz);
	nents *= 2;

	WARN_ON(nents > sdiodev->max_segment_count);

	inff_dbg(TRACE, "nents=%d\n", nents);
	err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
	if (err < 0) {
		inff_err("allocation failed: disable scatter-gather");
		sdiodev->sg_support = false;
	}

	sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
}

/*
 * inff_sdiod_freezer_attach() - allocate and initialise the suspend/resume
 * "freezer" state used to park driver threads across system sleep.
 * No-op (returns 0) when CONFIG_PM_SLEEP is disabled.
 */
static int inff_sdiod_freezer_attach(struct inff_sdio_dev *sdiodev)
{
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return 0;

	sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
	if (!sdiodev->freezer)
		return -ENOMEM;
	atomic_set(&sdiodev->freezer->thread_count, 0);
	atomic_set(&sdiodev->freezer->freezing, 0);
	init_waitqueue_head(&sdiodev->freezer->thread_freeze);
	init_completion(&sdiodev->freezer->resumed);
	return 0;
}

/* free the freezer state; warns if torn down while a freeze is active */
static void inff_sdiod_freezer_detach(struct inff_sdio_dev *sdiodev)
{
	if (sdiodev->freezer) {
		WARN_ON(atomic_read(&sdiodev->freezer->freezing));
		kfree(sdiodev->freezer);
		sdiodev->freezer = NULL;
	}
}

/*
 * inff_sdiod_freezer_on() - freeze all registered driver threads and put
 * the bus to sleep. Blocks until every counted thread has reached
 * inff_sdiod_try_freeze().
 */
static int inff_sdiod_freezer_on(struct inff_sdio_dev *sdiodev)
{
	atomic_t *expect = &sdiodev->freezer->thread_count;
	int res = 0;

	sdiodev->freezer->frozen_count = 0;
	reinit_completion(&sdiodev->freezer->resumed);
	atomic_set(&sdiodev->freezer->freezing, 1);
	/* kick the dpc so running threads notice the freeze request */
	inff_sdio_trigger_dpc(sdiodev->bus);
	wait_event(sdiodev->freezer->thread_freeze,
		   atomic_read(expect) == sdiodev->freezer->frozen_count);
	sdio_claim_host(sdiodev->func1);
	res = inff_sdio_sleep(sdiodev->bus, true);
	sdio_release_host(sdiodev->func1);
	return res;
}

/* wake the bus and release all threads parked in inff_sdiod_try_freeze() */
static void inff_sdiod_freezer_off(struct inff_sdio_dev *sdiodev)
{
	sdio_claim_host(sdiodev->func1);
	inff_sdio_sleep(sdiodev->bus, false);
	sdio_release_host(sdiodev->func1);
	atomic_set(&sdiodev->freezer->freezing, 0);
	complete_all(&sdiodev->freezer->resumed);
}

/* true while a freeze is in progress (always false without PM_SLEEP) */
bool inff_sdiod_freezing(struct inff_sdio_dev *sdiodev)
{
	return IS_ENABLED(CONFIG_PM_SLEEP) &&
	       atomic_read(&sdiodev->freezer->freezing);
}

/*
 * inff_sdiod_try_freeze() - called from driver threads; if a freeze is in
 * progress, report this thread as frozen and block until resume.
 */
void inff_sdiod_try_freeze(struct inff_sdio_dev *sdiodev)
{
	if (!inff_sdiod_freezing(sdiodev))
		return;
	sdiodev->freezer->frozen_count++;
	wake_up(&sdiodev->freezer->thread_freeze);
	wait_for_completion(&sdiodev->freezer->resumed);
}

/* register a thread with the freezer accounting */
void inff_sdiod_freezer_count(struct inff_sdio_dev *sdiodev)
{
	if (IS_ENABLED(CONFIG_PM_SLEEP))
		atomic_inc(&sdiodev->freezer->thread_count);
}

/* unregister a thread from the freezer accounting */
void inff_sdiod_freezer_uncount(struct inff_sdio_dev *sdiodev)
{
	if (IS_ENABLED(CONFIG_PM_SLEEP))
		atomic_dec(&sdiodev->freezer->thread_count);
}

/*
 * inff_sdiod_remove() - tear down the sdio device: detach the higher bus
 * layer, free the freezer, disable both SDIO functions, release the sg
 * table and re-allow runtime PM on the host controller.
 */
int inff_sdiod_remove(struct inff_sdio_dev *sdiodev)
{
	sdiodev->state = INFF_SDIOD_DOWN;
	if (sdiodev->bus) {
		inff_sdio_remove(sdiodev->bus);
		sdiodev->bus = NULL;
	}

	inff_sdiod_freezer_detach(sdiodev);

	/* Disable functions 2 then 1.
*/ + sdio_claim_host(sdiodev->func1); + sdio_disable_func(sdiodev->func2); + sdio_disable_func(sdiodev->func1); + sdio_release_host(sdiodev->func1); + + sg_free_table(&sdiodev->sgtable); + sdiodev->sbwad = 0; + sdiodev->sbwad_valid = 0; + + pm_runtime_allow(sdiodev->func1->card->host->parent); + return 0; +} + +static void inff_sdiod_host_fixup(struct mmc_host *host) +{ + /* runtime-pm powers off the device */ + pm_runtime_forbid(host->parent); + /* avoid removal detection upon resume */ + host->caps |= MMC_CAP_NONREMOVABLE; +} + +int inff_sdiod_probe(struct inff_sdio_dev *sdiodev) +{ + int ret = 0; + unsigned int f2_blksz = SDIO_FUNC2_BLOCKSIZE; + + sdio_claim_host(sdiodev->func1); + + ret = sdio_set_block_size(sdiodev->func1, SDIO_FUNC1_BLOCKSIZE); + if (ret) { + inff_err("Failed to set F1 blocksize\n"); + sdio_release_host(sdiodev->func1); + return ret; + } + switch (sdiodev->func2->device) { + case SDIO_DEVICE_ID_CYPRESS_55572: + case SDIO_DEVICE_ID_CYPRESS_55500: + f2_blksz = SDIO_INF55572_FUNC2_BLOCKSIZE; + break; + case SDIO_DEVICE_ID_CYPRESS_55900: + f2_blksz = SDIO_INF55900_FUNC2_BLOCKSIZE; + break; + default: + break; + } + + ret = sdio_set_block_size(sdiodev->func2, f2_blksz); + if (ret) { + inff_err("Failed to set F2 blocksize\n"); + sdio_release_host(sdiodev->func1); + return ret; + } + inff_dbg(SDIO, "set F2 blocksize to %d\n", f2_blksz); + + /* increase F2 timeout */ + sdiodev->func2->enable_timeout = SDIO_WAIT_F2RDY; + + /* Enable Function 1 */ + ret = sdio_enable_func(sdiodev->func1); + sdio_release_host(sdiodev->func1); + if (ret) { + inff_err("Failed to enable F1: err=%d\n", ret); + goto out; + } + + ret = inff_sdiod_freezer_attach(sdiodev); + if (ret) + goto out; + + /* try to attach to the target device */ + sdiodev->bus = inff_sdio_probe(sdiodev); + if (IS_ERR(sdiodev->bus)) { + ret = PTR_ERR(sdiodev->bus); + goto out; + } + inff_sdiod_host_fixup(sdiodev->func2->card->host); +out: + if (ret) + inff_sdiod_remove(sdiodev); + + return ret; +} + 
#define INFF_SDIO_DEVICE(dev_id)	\
	{SDIO_DEVICE(SDIO_VENDOR_ID_CYPRESS, dev_id)}

/* devices we support, null terminated */
static const struct sdio_device_id inff_sdmmc_ids[] = {
	INFF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_55572),
	INFF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_55500),
	INFF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_43022),
	INFF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_55900),
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, inff_sdmmc_ids);

/* remember whether ACPI considers F1/F2 power-manageable, so the flag can
 * be restored after WOWL toggles it (see the _set_ variant below)
 */
static void inff_sdiod_acpi_save_power_manageable(struct inff_sdio_dev *sdiodev)
{
#if IS_ENABLED(CONFIG_ACPI)
	struct acpi_device *adev;

	adev = ACPI_COMPANION(&sdiodev->func1->dev);
	if (adev)
		sdiodev->func1_power_manageable = adev->flags.power_manageable;

	adev = ACPI_COMPANION(&sdiodev->func2->dev);
	if (adev)
		sdiodev->func2_power_manageable = adev->flags.power_manageable;
#endif
}

/* enable=0 clears ACPI power management (keeps the device powered for
 * WOWL); enable=1 restores the values saved at probe time
 */
static void inff_sdiod_acpi_set_power_manageable(struct inff_sdio_dev *sdiodev,
						 int enable)
{
#if IS_ENABLED(CONFIG_ACPI)
	struct acpi_device *adev;

	adev = ACPI_COMPANION(&sdiodev->func1->dev);
	if (adev)
		adev->flags.power_manageable = enable ? sdiodev->func1_power_manageable : 0;

	adev = ACPI_COMPANION(&sdiodev->func2->dev);
	if (adev)
		adev->flags.power_manageable = enable ? sdiodev->func2_power_manageable : 0;
#endif
}

/*
 * inff_ops_sdio_probe() - sdio_driver probe callback.
 * Called once per SDIO function; F1 and F3 are claimed but otherwise
 * ignored, and the real setup runs when F2 probes: allocate bus_if and
 * sdiodev, wire the drvdata pointers and call inff_sdiod_probe().
 */
static int inff_ops_sdio_probe(struct sdio_func *func,
			       const struct sdio_device_id *id)
{
	int err;
	struct inff_sdio_dev *sdiodev;
	struct inff_bus *bus_if;

	if (!id) {
		dev_err(&func->dev, "Error no sdio_device_id passed for %x:%x\n",
			func->vendor, func->device);
		return -ENODEV;
	}

	inff_dbg(SDIO, "Enter\n");
	inff_dbg(SDIO, "Class=%x\n", func->class);
	inff_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	inff_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	inff_dbg(SDIO, "Function#: %d\n", func->num);

	/* Set MMC_QUIRK_LENIENT_FN0 for this card */
	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;

	/* Set MMC_QUIRK_BLKSZ_FOR_BYTE_MODE for this card
	 * Use func->cur_blksize by default
	 */
	func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;

	/* Consume func num 1 but dont do anything with it. */
	if (func->num == SDIO_FUNC_1 || func->num == SDIO_FUNC_3)
		return 0;

	/* Ignore anything but func 2 */
	if (func->num != SDIO_FUNC_2)
		return -ENODEV;

	bus_if = kzalloc(sizeof(*bus_if), GFP_KERNEL);
	if (!bus_if)
		return -ENOMEM;
	sdiodev = kzalloc(sizeof(*sdiodev), GFP_KERNEL);
	if (!sdiodev) {
		kfree(bus_if);
		return -ENOMEM;
	}

	/* store refs to functions used. mmc_card does
	 * not hold the F0 function pointer.
	 */
	sdiodev->func1 = func->card->sdio_func[0];
	sdiodev->func2 = func;
	sdiodev->func3 = func->card->sdio_func[2];

	sdiodev->bus_if = bus_if;
	bus_if->bus_priv.sdio = sdiodev;
	bus_if->proto_type = INFF_PROTO_BCDC;
	dev_set_drvdata(&func->dev, bus_if);
	dev_set_drvdata(&sdiodev->func1->dev, bus_if);
	sdiodev->dev = &sdiodev->func1->dev;
	dev_set_drvdata(&sdiodev->func2->dev, bus_if);
	if (sdiodev->func3) {
		inff_dbg(SDIO, "Set F3 dev\n");
		dev_set_drvdata(&sdiodev->func3->dev, bus_if);
	}

	inff_sdiod_acpi_save_power_manageable(sdiodev);
	inff_sdiod_change_state(sdiodev, INFF_SDIOD_DOWN);

	inff_dbg(SDIO, "F2 found, calling inff_sdiod_probe...\n");
	err = inff_sdiod_probe(sdiodev);
	if (err) {
		inff_err("F2 error, probe failed %d...\n", err);
		goto fail;
	}

	inff_dbg(SDIO, "F2 init completed...\n");
	return 0;

fail:
	dev_set_drvdata(&func->dev, NULL);
	dev_set_drvdata(&sdiodev->func1->dev, NULL);
	dev_set_drvdata(&sdiodev->func2->dev, NULL);
	kfree(sdiodev);
	kfree(bus_if);
	return err;
}

/*
 * inff_ops_sdio_remove() - sdio_driver remove callback.
 * IRQs are unregistered for every function; full teardown and freeing
 * only happens for the F1 invocation.
 */
static void inff_ops_sdio_remove(struct sdio_func *func)
{
	struct inff_bus *bus_if;
	struct inff_sdio_dev *sdiodev;

	inff_dbg(SDIO, "Enter\n");
	inff_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	inff_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	inff_dbg(SDIO, "Function: %d\n", func->num);

	bus_if = dev_get_drvdata(&func->dev);
	if (bus_if) {
		sdiodev = bus_if->bus_priv.sdio;

		/* start by unregistering irqs */
		inff_sdiod_intr_unregister(sdiodev);

		if (func->num != SDIO_FUNC_1)
			return;

		/* only proceed with rest of cleanup if func 1 */
		inff_sdiod_remove(sdiodev);

		dev_set_drvdata(&sdiodev->func1->dev, NULL);
		dev_set_drvdata(&sdiodev->func2->dev, NULL);
		if (sdiodev->func3) {
			inff_dbg(SDIO, "Remove F3 dev\n");
			dev_set_drvdata(&sdiodev->func3->dev, NULL);
		}

		kfree(bus_if);
		kfree(sdiodev);
	}

	inff_dbg(SDIO, "Exit\n");
}

/*
 * inff_sdio_wowl_config() - enable/disable Wake-on-WLAN support.
 * Requires the host to keep the card powered in suspend; wake is
 * delivered either via the OOB GPIO IRQ or the SDIO in-band IRQ.
 */
void inff_sdio_wowl_config(struct device *dev,
bool enabled) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(sdiodev->func1); + + /* Power must be preserved to be able to support WOWL. */ + if (!(pm_caps & MMC_PM_KEEP_POWER)) + goto notsup; + + if (sdiodev->settings->bus.sdio.oob_irq_supported || + pm_caps & MMC_PM_WAKE_SDIO_IRQ) { + /* Stop ACPI from turning off the device when wowl is enabled */ + inff_sdiod_acpi_set_power_manageable(sdiodev, !enabled); + sdiodev->wowl_enabled = enabled; + inff_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled); + return; + } + +notsup: + inff_dbg(SDIO, "WOWL not supported\n"); +} + +static int inff_ops_sdio_suspend(struct device *dev) +{ + struct sdio_func *func; + struct inff_bus *bus_if; + struct inff_sdio_dev *sdiodev; + mmc_pm_flag_t sdio_flags; + struct inff_cfg80211_info *config; + int retry = INFF_PM_WAIT_MAXRETRY; + bool cap_power_off; + int ret = 0; + + func = container_of(dev, struct sdio_func, dev); + + cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD); + + bus_if = dev_get_drvdata(dev); + config = bus_if->drvr->config; + + inff_dbg(SDIO, "Enter: F%d\n", func->num); + + while (retry && + config->pm_state == INFF_CFG80211_PM_STATE_SUSPENDING) { + usleep_range(10000, 20000); + retry--; + } + if (!retry && config->pm_state == INFF_CFG80211_PM_STATE_SUSPENDING) + inff_err("timed out wait for cfg80211 suspended\n"); + + if (func->num != SDIO_FUNC_1) + return 0; + + sdiodev = bus_if->bus_priv.sdio; + + if (sdiodev->wowl_enabled || !cap_power_off) { + inff_sdiod_freezer_on(sdiodev); + inff_sdio_wd_timer(sdiodev->bus, 0); + + sdio_flags = MMC_PM_KEEP_POWER; + if (sdiodev->wowl_enabled) { + if (sdiodev->settings->bus.sdio.oob_irq_supported) + enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); + else + sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; + } + + if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags)) + inff_err("Failed to set pm_flags 
%x\n", sdio_flags); + + } else { + /* power will be cut so remove device, probe again in resume */ + inff_sdiod_intr_unregister(sdiodev); + ret = inff_sdiod_remove(sdiodev); + if (ret) + inff_err("Failed to remove device on suspend\n"); + } + + return ret; +} + +static int inff_ops_sdio_resume(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + struct sdio_func *func = container_of(dev, struct sdio_func, dev); + bool cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD); + int ret = 0; + + inff_dbg(SDIO, "Enter: F%d\n", func->num); + if (func->num != SDIO_FUNC_2) + return 0; + + if (!sdiodev->wowl_enabled && cap_power_off) { + /* bus was powered off and device removed, probe again */ + ret = inff_sdiod_probe(sdiodev); + if (ret) + inff_err("Failed to probe device on resume\n"); + } else { + if (sdiodev->wowl_enabled && sdiodev->settings->bus.sdio.oob_irq_supported) + disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); + + inff_sdiod_freezer_off(sdiodev); + } + + return ret; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(inff_sdio_pm_ops, + inff_ops_sdio_suspend, + inff_ops_sdio_resume); + +static struct sdio_driver inff_sdmmc_driver = { + .probe = inff_ops_sdio_probe, + .remove = inff_ops_sdio_remove, + .name = KBUILD_MODNAME, + .id_table = inff_sdmmc_ids, + .drv = { + .pm = pm_sleep_ptr(&inff_sdio_pm_ops), + .coredump = inff_dev_coredump, + }, +}; + +int inff_sdio_register(void) +{ + return sdio_register_driver(&inff_sdmmc_driver); +} + +void inff_sdio_exit(void) +{ + inff_dbg(SDIO, "Enter\n"); + + sdio_unregister_driver(&inff_sdmmc_driver); +} -- 2.25.1 Driver implementation of the Device Firmware interface layer used for the control path communication. 
Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/fwil.c | 539 +++++++++++++++++++ drivers/net/wireless/infineon/inffmac/fwil.h | 119 ++++ 2 files changed, 658 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/fwil.c create mode 100644 drivers/net/wireless/infineon/inffmac/fwil.h diff --git a/drivers/net/wireless/infineon/inffmac/fwil.c b/drivers/net/wireless/infineon/inffmac/fwil.c new file mode 100644 index 000000000000..de8090f871e0 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/fwil.c @@ -0,0 +1,539 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2012 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +/* FWIL is the Firmware Interface Layer. In this module the support functions + * are located to set and get variables to and from the firmware. + */ + +#include +#include +#include "utils.h" +#include "core.h" +#include "bus.h" +#include "debug.h" +#include "tracepoint.h" +#include "xtlv.h" +#include "fwil.h" +#include "proto.h" + +#define MAX_HEX_DUMP_LEN 64 +#define MAX_CMD_RESEND 3 + +#ifdef DEBUG +static const char * const inff_fil_errstr[] = { + "INFE_OK", + "INFE_ERROR", + "INFE_BADARG", + "INFE_BADOPTION", + "INFE_NOTUP", + "INFE_NOTDOWN", + "INFE_NOTAP", + "INFE_NOTSTA", + "INFE_BADKEYIDX", + "INFE_RADIOOFF", + "INFE_NOTBANDLOCKED", + "INFE_NOCLK", + "INFE_BADRATESET", + "INFE_BADBAND", + "INFE_BUFTOOSHORT", + "INFE_BUFTOOLONG", + "INFE_BUSY", + "INFE_NOTASSOCIATED", + "INFE_BADSSIDLEN", + "INFE_OUTOFRANGECHAN", + "INFE_BADCHAN", + "INFE_BADADDR", + "INFE_NORESOURCE", + "INFE_UNSUPPORTED", + "INFE_BADLEN", + "INFE_NOTREADY", + "INFE_EPERM", + "INFE_NOMEM", + "INFE_ASSOCIATED", + "INFE_RANGE", + "INFE_NOTFOUND", + "INFE_WME_NOT_ENABLED", + "INFE_TSPEC_NOTFOUND", + "INFE_ACM_NOTSUPPORTED", + "INFE_NOT_WME_ASSOCIATION", + "INFE_SDIO_ERROR", + "INFE_DONGLE_DOWN", + "INFE_VERSION", + 
"INFE_TXFAIL", + "INFE_RXFAIL", + "INFE_NODEVICE", + "INFE_NMODE_DISABLED", + "INFE_NONRESIDENT", + "INFE_SCANREJECT", + "INFE_USAGE_ERROR", + "INFE_IOCTL_ERROR", + "INFE_SERIAL_PORT_ERR", + "INFE_DISABLED", + "INFE_DECERR", + "INFE_ENCERR", + "INFE_MICERR", + "INFE_REPLAY", + "INFE_IE_NOTFOUND", +}; + +static const char *inff_fil_get_errstr(u32 err) +{ + if (err >= ARRAY_SIZE(inff_fil_errstr)) + return "(unknown)"; + + return inff_fil_errstr[err]; +} +#else +static const char *inff_fil_get_errstr(u32 err) +{ + return ""; +} +#endif /* DEBUG */ + +static s32 +inff_fil_cmd_data(struct inff_if *ifp, u32 cmd, void *data, u32 len, bool set) +{ + struct inff_pub *drvr = ifp->drvr; + s32 err, fwerr; + u8 resend_cnt = 1; + + if (drvr->bus_if->state != INFF_BUS_UP) { + iphy_err(drvr, "bus is down. we have nothing to do.\n"); + return -EIO; + } + + if (data) + len = min_t(uint, len, INFF_DCMD_MAXLEN); + + do { + if (set) + err = inff_proto_set_dcmd(drvr, ifp->ifidx, cmd, + data, len, &fwerr); + else + err = inff_proto_query_dcmd(drvr, ifp->ifidx, cmd, + data, len, &fwerr); + if (!err || resend_cnt > MAX_CMD_RESEND) + break; + + inff_err("cmd error %d, resend cmd count %d\n", err, resend_cnt++); + } while (true); + + if (err) { + inff_dbg(FIL, "Failed: error=%d\n", err); + } else if (fwerr < 0) { + inff_dbg(FIL, "Firmware error: %s (%d)\n", + inff_fil_get_errstr((u32)(-fwerr)), fwerr); + err = -EBADE; + } + if (ifp->fwil_fwerr) + return fwerr; + + return err; +} + +s32 +inff_fil_cmd_data_set(struct inff_if *ifp, u32 cmd, void *data, u32 len) +{ + s32 err; + + mutex_lock(&ifp->drvr->proto_block); + + inff_dbg(FIL, "ifidx=%d, cmd=%d, len=%d\n", ifp->ifidx, cmd, len); + inff_dbg_hex_dump(INFF_FIL_ON(), data, + min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); + + err = inff_fil_cmd_data(ifp, cmd, data, len, true); + mutex_unlock(&ifp->drvr->proto_block); + + return err; +} + +s32 +inff_fil_cmd_data_get(struct inff_if *ifp, u32 cmd, void *data, u32 len) +{ + s32 err; + + 
mutex_lock(&ifp->drvr->proto_block); + err = inff_fil_cmd_data(ifp, cmd, data, len, false); + + inff_dbg(FIL, "ifidx=%d, cmd=%d, len=%d, err=%d\n", ifp->ifidx, cmd, + len, err); + inff_dbg_hex_dump(INFF_FIL_ON(), data, + min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); + + mutex_unlock(&ifp->drvr->proto_block); + + return err; +} + +s32 +inff_fil_cmd_int_set(struct inff_if *ifp, u32 cmd, u32 data) +{ + s32 err; + __le32 data_le = cpu_to_le32(data); + + mutex_lock(&ifp->drvr->proto_block); + inff_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, data); + err = inff_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), true); + mutex_unlock(&ifp->drvr->proto_block); + + return err; +} + +s32 +inff_fil_cmd_int_get(struct inff_if *ifp, u32 cmd, u32 *data) +{ + s32 err; + __le32 data_le = cpu_to_le32(*data); + + mutex_lock(&ifp->drvr->proto_block); + err = inff_fil_cmd_data(ifp, cmd, &data_le, sizeof(data_le), false); + mutex_unlock(&ifp->drvr->proto_block); + *data = le32_to_cpu(data_le); + inff_dbg(FIL, "ifidx=%d, cmd=%d, value=%d\n", ifp->ifidx, cmd, *data); + + return err; +} + +static u32 +inff_create_iovar(const char *name, const char *data, u32 datalen, + char *buf, u32 buflen) +{ + u32 len; + + len = strlen(name) + 1; + + if ((len + datalen) > buflen) + return 0; + + memcpy(buf, name, len); + + /* append data onto the end of the name string */ + if (data && datalen) + memcpy(&buf[len], data, datalen); + + return len + datalen; +} + +s32 +inff_fil_iovar_data_set(struct inff_if *ifp, const char *name, const void *data, + u32 len) +{ + struct inff_pub *drvr = ifp->drvr; + s32 err; + u32 buflen; + + mutex_lock(&drvr->proto_block); + + inff_dbg(FIL, "ifidx=%d, name=%s, len=%d\n", ifp->ifidx, name, len); + inff_dbg_hex_dump(INFF_FIL_ON(), data, + min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); + + buflen = inff_create_iovar(name, data, len, drvr->proto_buf, + sizeof(drvr->proto_buf)); + if (buflen) { + err = inff_fil_cmd_data(ifp, INFF_C_SET_VAR, drvr->proto_buf, + 
buflen, true); + } else { + err = -EPERM; + iphy_err(drvr, "Creating iovar failed\n"); + } + + mutex_unlock(&drvr->proto_block); + return err; +} + +s32 +inff_fil_iovar_data_get(struct inff_if *ifp, const char *name, void *data, + u32 len) +{ + struct inff_pub *drvr = ifp->drvr; + s32 err; + u32 buflen; + + mutex_lock(&drvr->proto_block); + + buflen = inff_create_iovar(name, data, len, drvr->proto_buf, + sizeof(drvr->proto_buf)); + if (buflen) { + err = inff_fil_cmd_data(ifp, INFF_C_GET_VAR, drvr->proto_buf, + buflen, false); + if (err == 0) + memcpy(data, drvr->proto_buf, len); + } else { + err = -EPERM; + iphy_err(drvr, "Creating iovar failed\n"); + } + + inff_dbg(FIL, "ifidx=%d, name=%s, len=%d, err=%d\n", ifp->ifidx, name, + len, err); + inff_dbg_hex_dump(INFF_FIL_ON(), data, + min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); + + mutex_unlock(&drvr->proto_block); + return err; +} + +s32 +inff_fil_iovar_int_set(struct inff_if *ifp, const char *name, u32 data) +{ + __le32 data_le = cpu_to_le32(data); + + return inff_fil_iovar_data_set(ifp, name, &data_le, sizeof(data_le)); +} + +s32 +inff_fil_iovar_int_get(struct inff_if *ifp, const char *name, u32 *data) +{ + __le32 data_le = cpu_to_le32(*data); + s32 err; + + err = inff_fil_iovar_data_get(ifp, name, &data_le, sizeof(data_le)); + if (err == 0) + *data = le32_to_cpu(data_le); + return err; +} + +static u32 +inff_create_bsscfg(s32 bsscfgidx, const char *name, char *data, u32 datalen, + char *buf, u32 buflen) +{ + const s8 *prefix = "bsscfg:"; + s8 *p; + u32 prefixlen; + u32 namelen; + u32 iolen; + __le32 bsscfgidx_le; + + if (bsscfgidx == 0) + return inff_create_iovar(name, data, datalen, buf, buflen); + + prefixlen = strlen(prefix); + namelen = strlen(name) + 1; /* length of iovar name + null */ + iolen = prefixlen + namelen + sizeof(bsscfgidx_le) + datalen; + + if (buflen < iolen) { + inff_err("buffer is too short\n"); + return 0; + } + + p = buf; + + /* copy prefix, no null */ + memcpy(p, prefix, prefixlen); + p 
+= prefixlen; + + /* copy iovar name including null */ + memcpy(p, name, namelen); + p += namelen; + + /* bss config index as first data */ + bsscfgidx_le = cpu_to_le32(bsscfgidx); + memcpy(p, &bsscfgidx_le, sizeof(bsscfgidx_le)); + p += sizeof(bsscfgidx_le); + + /* parameter buffer follows */ + if (datalen) + memcpy(p, data, datalen); + + return iolen; +} + +s32 +inff_fil_bsscfg_data_set(struct inff_if *ifp, const char *name, + void *data, u32 len) +{ + struct inff_pub *drvr = ifp->drvr; + s32 err; + u32 buflen; + + mutex_lock(&drvr->proto_block); + + inff_dbg(FIL, "ifidx=%d, bsscfgidx=%d, name=%s, len=%d\n", ifp->ifidx, + ifp->bsscfgidx, name, len); + inff_dbg_hex_dump(INFF_FIL_ON(), data, + min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); + + buflen = inff_create_bsscfg(ifp->bsscfgidx, name, data, len, + drvr->proto_buf, sizeof(drvr->proto_buf)); + if (buflen) { + err = inff_fil_cmd_data(ifp, INFF_C_SET_VAR, drvr->proto_buf, + buflen, true); + } else { + err = -EPERM; + iphy_err(drvr, "Creating bsscfg failed\n"); + } + + mutex_unlock(&drvr->proto_block); + return err; +} + +s32 +inff_fil_bsscfg_data_get(struct inff_if *ifp, const char *name, + void *data, u32 len) +{ + struct inff_pub *drvr = ifp->drvr; + s32 err; + u32 buflen; + + mutex_lock(&drvr->proto_block); + + buflen = inff_create_bsscfg(ifp->bsscfgidx, name, data, len, + drvr->proto_buf, sizeof(drvr->proto_buf)); + if (buflen) { + err = inff_fil_cmd_data(ifp, INFF_C_GET_VAR, drvr->proto_buf, + buflen, false); + if (err == 0) + memcpy(data, drvr->proto_buf, len); + } else { + err = -EPERM; + iphy_err(drvr, "Creating bsscfg failed\n"); + } + inff_dbg(FIL, "ifidx=%d, bsscfgidx=%d, name=%s, len=%d, err=%d\n", + ifp->ifidx, ifp->bsscfgidx, name, len, err); + inff_dbg_hex_dump(INFF_FIL_ON(), data, + min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); + + mutex_unlock(&drvr->proto_block); + return err; +} + +s32 +inff_fil_bsscfg_int_set(struct inff_if *ifp, const char *name, u32 data) +{ + __le32 data_le = 
cpu_to_le32(data); + + return inff_fil_bsscfg_data_set(ifp, name, &data_le, + sizeof(data_le)); +} + +s32 +inff_fil_bsscfg_int_get(struct inff_if *ifp, const char *name, u32 *data) +{ + __le32 data_le = cpu_to_le32(*data); + s32 err; + + err = inff_fil_bsscfg_data_get(ifp, name, &data_le, + sizeof(data_le)); + if (err == 0) + *data = le32_to_cpu(data_le); + return err; +} + +static u32 inff_create_xtlv(const char *name, u16 id, char *data, u32 len, + char *buf, u32 buflen) +{ + u32 iolen; + u32 nmlen; + + nmlen = strlen(name) + 1; + iolen = nmlen + inff_xtlv_data_size(len, INFF_XTLV_OPTION_ALIGN32); + + if (iolen > buflen) { + inff_err("buffer is too short\n"); + return 0; + } + + memcpy(buf, name, nmlen); + inff_xtlv_pack_header((void *)(buf + nmlen), id, len, data, + INFF_XTLV_OPTION_ALIGN32); + + return iolen; +} + +s32 inff_fil_xtlv_data_set(struct inff_if *ifp, const char *name, u16 id, + void *data, u32 len) +{ + struct inff_pub *drvr = ifp->drvr; + s32 err; + u32 buflen; + + mutex_lock(&drvr->proto_block); + + inff_dbg(FIL, "ifidx=%d, name=%s, id=%u, len=%u\n", ifp->ifidx, name, + id, len); + inff_dbg_hex_dump(INFF_FIL_ON(), data, + min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); + + buflen = inff_create_xtlv(name, id, data, len, + drvr->proto_buf, sizeof(drvr->proto_buf)); + if (buflen) { + err = inff_fil_cmd_data(ifp, INFF_C_SET_VAR, drvr->proto_buf, + buflen, true); + } else { + err = -EPERM; + iphy_err(drvr, "Creating xtlv failed\n"); + } + + mutex_unlock(&drvr->proto_block); + return err; +} + +s32 inff_fil_xtlv_data_get(struct inff_if *ifp, const char *name, u16 id, + void *data, u32 len) +{ + struct inff_pub *drvr = ifp->drvr; + s32 err; + u32 buflen; + + mutex_lock(&drvr->proto_block); + + buflen = inff_create_xtlv(name, id, data, len, + drvr->proto_buf, sizeof(drvr->proto_buf)); + if (buflen) { + err = inff_fil_cmd_data(ifp, INFF_C_GET_VAR, drvr->proto_buf, + buflen, false); + if (err == 0) + memcpy(data, drvr->proto_buf, len); + } else { + err = 
-EPERM; + iphy_err(drvr, "Creating bsscfg failed\n"); + } + inff_dbg(FIL, "ifidx=%d, name=%s, id=%u, len=%u, err=%d\n", + ifp->ifidx, name, id, len, err); + inff_dbg_hex_dump(INFF_FIL_ON(), data, + min_t(uint, len, MAX_HEX_DUMP_LEN), "data\n"); + + mutex_unlock(&drvr->proto_block); + return err; +} + +s32 inff_fil_xtlv_int_set(struct inff_if *ifp, const char *name, u16 id, u32 data) +{ + __le32 data_le = cpu_to_le32(data); + + return inff_fil_xtlv_data_set(ifp, name, id, &data_le, + sizeof(data_le)); +} + +s32 inff_fil_xtlv_int_get(struct inff_if *ifp, const char *name, u16 id, u32 *data) +{ + __le32 data_le = cpu_to_le32(*data); + s32 err; + + err = inff_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le)); + if (err == 0) + *data = le32_to_cpu(data_le); + return err; +} + +s32 inff_fil_xtlv_int8_get(struct inff_if *ifp, const char *name, u16 id, u8 *data) +{ + return inff_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data)); +} + +s32 inff_fil_xtlv_int16_get(struct inff_if *ifp, const char *name, u16 id, u16 *data) +{ + __le16 data_le = cpu_to_le16(*data); + s32 err; + + err = inff_fil_xtlv_data_get(ifp, name, id, &data_le, sizeof(data_le)); + if (err == 0) + *data = le16_to_cpu(data_le); + return err; +} diff --git a/drivers/net/wireless/infineon/inffmac/fwil.h b/drivers/net/wireless/infineon/inffmac/fwil.h new file mode 100644 index 000000000000..21508cbfc6d0 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/fwil.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2012 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_FWIL_H +#define INFF_FWIL_H + +#include "debug.h" + +/******************************************************************************* + * Dongle command codes that are interpreted by firmware + ******************************************************************************/ +#define INFF_C_GET_VERSION 1 +#define INFF_C_UP 2 +#define INFF_C_DOWN 3 +#define INFF_C_SET_PROMISC 10 +#define INFF_C_GET_RATE 12 +#define INFF_C_GET_INFRA 19 +#define INFF_C_SET_INFRA 20 +#define INFF_C_GET_AUTH 21 +#define INFF_C_SET_AUTH 22 +#define INFF_C_GET_BSSID 23 +#define INFF_C_GET_SSID 25 +#define INFF_C_SET_SSID 26 +#define INFF_C_TERMINATED 28 +#define INFF_C_GET_CHANNEL 29 +#define INFF_C_SET_CHANNEL 30 +#define INFF_C_GET_SRL 31 +#define INFF_C_SET_SRL 32 +#define INFF_C_GET_LRL 33 +#define INFF_C_SET_LRL 34 +#define INFF_C_GET_RADIO 37 +#define INFF_C_SET_RADIO 38 +#define INFF_C_GET_PHYTYPE 39 +#define INFF_C_SET_KEY 45 +#define INFF_C_GET_REGULATORY 46 +#define INFF_C_SET_REGULATORY 47 +#define INFF_C_SET_PASSIVE_SCAN 49 +#define INFF_C_SCAN 50 +#define INFF_C_SCAN_RESULTS 51 +#define INFF_C_DISASSOC 52 +#define INFF_C_REASSOC 53 +#define INFF_C_SET_ROAM_TRIGGER 55 +#define INFF_C_SET_ROAM_DELTA 57 +#define INFF_C_GET_BCNPRD 75 +#define INFF_C_SET_BCNPRD 76 +#define INFF_C_GET_DTIMPRD 77 +#define INFF_C_SET_DTIMPRD 78 +#define INFF_C_SET_COUNTRY 84 +#define INFF_C_GET_PM 85 +#define INFF_C_SET_PM 86 +#define INFF_C_GET_REVINFO 98 +#define INFF_C_GET_MONITOR 107 +#define INFF_C_SET_MONITOR 108 +#define INFF_C_GET_CURR_RATESET 114 +#define INFF_C_GET_AP 117 +#define INFF_C_SET_AP 118 +#define INFF_C_SET_SCB_AUTHORIZE 121 +#define INFF_C_SET_SCB_DEAUTHORIZE 122 +#define INFF_C_GET_RSSI 127 +#define INFF_C_GET_WSEC 133 +#define INFF_C_SET_WSEC 134 +#define INFF_C_GET_PHY_NOISE 135 +#define INFF_C_GET_BSS_INFO 136 +#define INFF_C_GET_GET_PKTCNTS 137 +#define INFF_C_GET_BANDLIST 140 +#define INFF_C_SET_SCB_TIMEOUT 158 +#define INFF_C_GET_ASSOCLIST 159 +#define 
INFF_C_GET_UP 162 +#define INFF_C_GET_PHYLIST 180 +#define INFF_C_SET_SCAN_CHANNEL_TIME 185 +#define INFF_C_SET_SCAN_UNASSOC_TIME 187 +#define INFF_C_SCB_DEAUTHENTICATE_FOR_REASON 201 +#define INFF_C_SET_ASSOC_PREFER 205 +#define INFF_C_GET_VALID_CHANNELS 217 +#define INFF_C_GET_FAKEFRAG 218 +#define INFF_C_SET_FAKEFRAG 219 +#define INFF_C_GET_KEY_PRIMARY 235 +#define INFF_C_SET_KEY_PRIMARY 236 +#define INFF_C_SET_SCAN_PASSIVE_TIME 258 +#define INFF_C_GET_VAR 262 +#define INFF_C_SET_VAR 263 +#define INFF_C_SET_WSEC_PMK 268 + +#define INFF_FW_BADARG 2 +#define INFF_FW_UNSUPPORTED 23 + +s32 inff_fil_cmd_data_set(struct inff_if *ifp, u32 cmd, void *data, u32 len); +s32 inff_fil_cmd_data_get(struct inff_if *ifp, u32 cmd, void *data, u32 len); +s32 inff_fil_cmd_int_set(struct inff_if *ifp, u32 cmd, u32 data); +s32 inff_fil_cmd_int_get(struct inff_if *ifp, u32 cmd, u32 *data); + +s32 inff_fil_iovar_data_set(struct inff_if *ifp, const char *name, const void *data, + u32 len); +s32 inff_fil_iovar_data_get(struct inff_if *ifp, const char *name, void *data, + u32 len); +s32 inff_fil_iovar_int_set(struct inff_if *ifp, const char *name, u32 data); +s32 inff_fil_iovar_int_get(struct inff_if *ifp, const char *name, u32 *data); + +s32 inff_fil_bsscfg_data_set(struct inff_if *ifp, const char *name, void *data, + u32 len); +s32 inff_fil_bsscfg_data_get(struct inff_if *ifp, const char *name, void *data, + u32 len); +s32 inff_fil_bsscfg_int_set(struct inff_if *ifp, const char *name, u32 data); +s32 inff_fil_bsscfg_int_get(struct inff_if *ifp, const char *name, u32 *data); +s32 inff_fil_xtlv_data_set(struct inff_if *ifp, const char *name, u16 id, + void *data, u32 len); +s32 inff_fil_xtlv_data_get(struct inff_if *ifp, const char *name, u16 id, + void *data, u32 len); +s32 inff_fil_xtlv_int_set(struct inff_if *ifp, const char *name, u16 id, u32 data); +s32 inff_fil_xtlv_int_get(struct inff_if *ifp, const char *name, u16 id, u32 *data); +s32 inff_fil_xtlv_int8_get(struct inff_if *ifp, 
const char *name, u16 id, u8 *data); +s32 inff_fil_xtlv_int16_get(struct inff_if *ifp, const char *name, u16 id, u16 *data); + +#endif /* INFF_FWIL_H */ -- 2.25.1 Driver definitions of the data structures expected by the Device firmware when the driver sends data as part of the control commands to the device. Signed-off-by: Gokul Sivakumar --- .../wireless/infineon/inffmac/fwil_types.h | 1311 +++++++++++++++++ 1 file changed, 1311 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/fwil_types.h diff --git a/drivers/net/wireless/infineon/inffmac/fwil_types.h b/drivers/net/wireless/infineon/inffmac/fwil_types.h new file mode 100644 index 000000000000..335bc86334e6 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/fwil_types.h @@ -0,0 +1,1311 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2012 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_FWIL_TYPES_H +#define INFF_FWIL_TYPES_H + +#include + +#define INFF_FIL_ACTION_FRAME_SIZE 1800 + +#define INFF_AF_PARAM_V2_FW_MAJOR 13 +#define INFF_AF_PARAM_V2_FW_MINOR 2 + +#define INFF_AUTH_STATUS_V2_FW_MAJOR 13 +#define INFF_AUTH_STATUS_V2_FW_MINOR 3 + +#define INFF_BSS_INFO_VERSION 109 /* curr ver of inff_bss_info_le struct */ +#define INFF_BSS_RSSI_ON_CHANNEL 0x0004 + +#define INFF_STA_INF 0x00000001 /* Running a Infineon driver */ +#define INFF_STA_WME 0x00000002 /* WMM association */ +#define INFF_STA_NONERP 0x00000004 /* No ERP */ +#define INFF_STA_AUTHE 0x00000008 /* Authenticated */ +#define INFF_STA_ASSOC 0x00000010 /* Associated */ +#define INFF_STA_AUTHO 0x00000020 /* Authorized */ +#define INFF_STA_WDS 0x00000040 /* Wireless Distribution System */ +#define INFF_STA_WDS_LINKUP 0x00000080 /* WDS traffic/probes flowing properly */ +#define INFF_STA_PS 0x00000100 /* STA is in power save mode from AP's viewpoint */ +#define INFF_STA_APSD_BE 0x00000200 /* APSD 
delv/trigger for AC_BE is default enabled */ +#define INFF_STA_APSD_BK 0x00000400 /* APSD delv/trigger for AC_BK is default enabled */ +#define INFF_STA_APSD_VI 0x00000800 /* APSD delv/trigger for AC_VI is default enabled */ +#define INFF_STA_APSD_VO 0x00001000 /* APSD delv/trigger for AC_VO is default enabled */ +#define INFF_STA_N_CAP 0x00002000 /* STA 802.11n capable */ +#define INFF_STA_SCBSTATS 0x00004000 /* Per STA debug stats */ +#define INFF_STA_AMPDU_CAP 0x00008000 /* STA AMPDU capable */ +#define INFF_STA_AMSDU_CAP 0x00010000 /* STA AMSDU capable */ +#define INFF_STA_MIMO_PS 0x00020000 /* mimo ps mode is enabled */ +#define INFF_STA_MIMO_RTS 0x00040000 /* send rts in mimo ps mode */ +#define INFF_STA_RIFS_CAP 0x00080000 /* rifs enabled */ +#define INFF_STA_VHT_CAP 0x00100000 /* STA VHT(11ac) capable */ +#define INFF_STA_WPS 0x00200000 /* WPS state */ +#define INFF_STA_DWDS_CAP 0x01000000 /* DWDS CAP */ +#define INFF_STA_DWDS 0x02000000 /* DWDS active */ + +/* size of inff_scan_params not including variable length array */ +#define INFF_SCAN_PARAMS_FIXED_SIZE 64 +#define INFF_SCAN_PARAMS_V2_FIXED_SIZE 72 + +/* version of inff_scan_params structure */ +#define INFF_SCAN_PARAMS_VERSION_V2 2 + +/* masks for channel and ssid count */ +#define INFF_SCAN_PARAMS_COUNT_MASK 0x0000ffff +#define INFF_SCAN_PARAMS_NSSID_SHIFT 16 + +/* scan type definitions */ +#define INFF_SCANTYPE_DEFAULT 0xFF +#define INFF_SCANTYPE_ACTIVE 0 +#define INFF_SCANTYPE_PASSIVE 1 + +#define INFF_WSEC_MAX_PSK_LEN 32 +#define INFF_WSEC_PMK_LEN_SUITEB_192 48 +#define INFF_WSEC_MAX_PMK_LEN 64 /* SUITE-B-192's PMK is 48 bytes */ +#define INFF_WSEC_PASSPHRASE BIT(0) + +#define INFF_WSEC_MAX_SAE_PASSWORD_LEN 128 + +/* primary (ie tx) key */ +#define INFF_PRIMARY_KEY BIT(1) +#define DOT11_BSSTYPE_ANY 2 +#define INFF_ESCAN_REQ_VERSION 1 +#define INFF_ESCAN_REQ_VERSION_V2 2 + +#define INFF_MAXRATES_IN_SET 16 /* max # of rates in rateset */ + +/* OBSS Coex Auto/On/Off */ +#define INFF_OBSS_COEX_AUTO 
(-1) +#define INFF_OBSS_COEX_OFF 0 +#define INFF_OBSS_COEX_ON 1 + +/* WOWL bits */ +/* Wakeup on Magic packet: */ +#define INFF_WOWL_MAGIC BIT(0) +/* Wakeup on Netpattern */ +#define INFF_WOWL_NET BIT(1) +/* Wakeup on loss-of-link due to Disassoc/Deauth: */ +#define INFF_WOWL_DIS BIT(2) +/* Wakeup on retrograde TSF: */ +#define INFF_WOWL_RETR BIT(3) +/* Wakeup on loss of beacon: */ +#define INFF_WOWL_BCN BIT(4) +/* Wakeup after test: */ +#define INFF_WOWL_TST BIT(5) +/* Wakeup after PTK refresh: */ +#define INFF_WOWL_M1 BIT(6) +/* Wakeup after receipt of EAP-Identity Req: */ +#define INFF_WOWL_EAPID BIT(7) +/* Wakeind via PME(0) or GPIO(1): */ +#define INFF_WOWL_PME_GPIO BIT(8) +/* need tkip phase 1 key to be updated by the driver: */ +#define INFF_WOWL_NEEDTKIP1 BIT(9) +/* enable wakeup if GTK fails: */ +#define INFF_WOWL_GTK_FAILURE BIT(10) +/* support extended magic packets: */ +#define INFF_WOWL_EXTMAGPAT BIT(11) +/* support ARP/NS/keepalive offloading: */ +#define INFF_WOWL_ARPOFFLOAD BIT(12) +/* read protocol version for EAPOL frames: */ +#define INFF_WOWL_WPA2 BIT(13) +/* If the bit is set, use key rotaton: */ +#define INFF_WOWL_KEYROT BIT(14) +/* If the bit is set, frm received was bcast frame: */ +#define INFF_WOWL_BCAST BIT(15) +/* If the bit is set, scan offload is enabled: */ +#define INFF_WOWL_SCANOL BIT(16) +/* Wakeup on tcpkeep alive timeout: */ +#define INFF_WOWL_TCPKEEP_TIME BIT(17) +/* Wakeup on mDNS Conflict Resolution: */ +#define INFF_WOWL_MDNS_CONFLICT BIT(18) +/* Wakeup on mDNS Service Connect: */ +#define INFF_WOWL_MDNS_SERVICE BIT(19) +/* tcp keepalive got data: */ +#define INFF_WOWL_TCPKEEP_DATA BIT(20) +/* Firmware died in wowl mode: */ +#define INFF_WOWL_FW_HALT BIT(21) +/* Enable detection of radio button changes: */ +#define INFF_WOWL_ENAB_HWRADIO BIT(22) +/* Offloads detected MIC failure(s): */ +#define INFF_WOWL_MIC_FAIL BIT(23) +/* Wakeup in Unassociated state (Net/Magic Pattern): */ +#define INFF_WOWL_UNASSOC BIT(24) +/* Wakeup if 
received matched secured pattern: */ +#define INFF_WOWL_SECURE BIT(25) +/* Wakeup on finding preferred network */ +#define INFF_WOWL_PFN_FOUND BIT(27) +/* Wakeup on receiving pairwise key EAP packets: */ +#define WIPHY_WOWL_EAP_PK BIT(28) +/* Link Down indication in WoWL mode: */ +#define INFF_WOWL_LINKDOWN BIT(31) + +#define INFF_WOWL_MAXPATTERNS 16 +#define INFF_WOWL_MAXPATTERNSIZE 128 + +/* IPV4 address length */ +#define INFF_IPV4_ADDR_LEN 4 +/* IPV6 address length */ +#define INFF_IPV6_ADDR_LEN 16 + +enum { + INFF_UNICAST_FILTER_NUM = 0, + INFF_BROADCAST_FILTER_NUM, + INFF_MULTICAST4_FILTER_NUM, + INFF_MULTICAST6_FILTER_NUM, + INFF_MDNS_FILTER_NUM, + INFF_ARP_FILTER_NUM, + INFF_BROADCAST_ARP_FILTER_NUM, + MAX_PKT_FILTER_COUNT +}; + +#define MAX_PKTFILTER_PATTERN_SIZE 16 +#define MAX_PKTFILTER_PATTERN_FILL_SIZE (MAX_PKTFILTER_PATTERN_SIZE * 2) + +#define INFF_COUNTRY_BUF_SZ 4 +#define INFF_ANT_MAX 4 + +#define INFF_MAX_ASSOCLIST 128 + +#define INFF_TXBF_SU_BFE_CAP BIT(0) +#define INFF_TXBF_MU_BFE_CAP BIT(1) +#define INFF_TXBF_SU_BFR_CAP BIT(0) +#define INFF_TXBF_MU_BFR_CAP BIT(1) + +#define INFF_MAXPMKID 16 /* max # PMKID cache entries */ +#define INFF_NUMCHANNELS 64 + +#define INFF_PFN_MACADDR_CFG_VER 1 +#define INFF_PFN_MAC_OUI_ONLY BIT(0) +#define INFF_PFN_SET_MAC_UNASSOC BIT(1) + +#define INFF_MCSSET_LEN 16 + +#define INFF_RSN_KCK_LENGTH 16 +#define INFF_RSN_KEK_LENGTH 16 +#define INFF_RSN_REPLAY_LEN 8 + +#define INFF_MFP_NONE 0 +#define INFF_MFP_CAPABLE 1 +#define INFF_MFP_REQUIRED 2 + +#define INFF_VHT_CAP_MCS_MAP_NSS_MAX 8 + +#define INFF_HE_CAP_MCS_MAP_NSS_MAX 8 + +#define INFF_PMKSA_VER_2 2 +#define INFF_PMKSA_VER_3 3 +#define INFF_PMKSA_NO_EXPIRY 0xffffffff + +#define INFF_EXTAUTH_START 1 +#define INFF_EXTAUTH_ABORT 2 +#define INFF_EXTAUTH_FAIL 3 +#define INFF_EXTAUTH_SUCCESS 4 + +/* mchan configuration (ap timeslot : sta timeslot)*/ +#define INFF_MCHAN_CONF_DEFAULT 0 /* mchan_algo=2 (25ms:25ms) */ +#define INFF_MCHAN_CONF_VEDIO 1 /* mchan_algo=5 
(29ms:21ms) */ +#define INFF_MCHAN_CONF_AUDIO 2 /* mchan_algo=1, mchan_bw=32 (68ms:32ms) */ +/* mchan algo in dongle */ +#define INFF_MCHAN_DEFAULT_ALGO 0 +#define INFF_MCHAN_BANDWIDTH_ALGO 1 +#define INFF_MCHAN_SI_ALGO 2 +#define INFF_MCHAN_DYNAMIC_BW_ALGO 3 +#define INFF_MCHAN_ALTERNATE_SWITCHING 4 +#define INFF_MCHAN_ASYMMETRIC_SI_ALGO 5 +#define INFF_MCHAN_BANDWIDTH_VAL 32 + +/* MAX_CHUNK_LEN is the maximum length for data passing to firmware in each + * ioctl. It is relatively small because firmware has small maximum size input + * playload restriction for ioctls. + */ +#define MAX_CHUNK_LEN 1400 + +#define DLOAD_HANDLER_VER 1 /* Downloader version */ +#define DLOAD_FLAG_VER_MASK 0xf000 /* Downloader version mask */ +#define DLOAD_FLAG_VER_SHIFT 12 /* Downloader version shift */ + +#define DL_BEGIN 0x0002 +#define DL_END 0x0004 + +#define DL_TYPE_CLM 2 + +#define MAX_RSSI_LEVELS 8 +#define WL_RSSI_EVENT_VERSION_OLD 0 +#define WL_RSSI_EVENT_VERSION_NEW 1 + +/* Offloads profile configuration version */ +#define INFF_OFFLOAD_CFG_VER_1 1 + +extern unsigned int inff_offload_prof; +extern unsigned int inff_offload_feat; + +/* join preference types for join_pref iovar */ +enum inff_join_pref_types { + INFF_JOIN_PREF_RSSI = 1, + INFF_JOIN_PREF_WPA, + INFF_JOIN_PREF_BAND, + INFF_JOIN_PREF_RSSI_DELTA, +}; + +enum inff_fil_p2p_if_types { + INFF_FIL_P2P_IF_CLIENT, + INFF_FIL_P2P_IF_GO, + INFF_FIL_P2P_IF_DYNBCN_GO, + INFF_FIL_P2P_IF_DEV, +}; + +enum inff_wowl_pattern_type { + INFF_WOWL_PATTERN_TYPE_BITMAP = 0, + INFF_WOWL_PATTERN_TYPE_ARP, + INFF_WOWL_PATTERN_TYPE_NA +}; + +struct inff_fil_p2p_if_le { + u8 addr[ETH_ALEN]; + __le16 type; + __le16 chspec; +}; + +struct inff_fil_chan_info_le { + __le32 hw_channel; + __le32 target_channel; + __le32 scan_channel; +}; + +struct inff_fil_action_frame_le { + u8 da[ETH_ALEN]; + __le16 len; + __le32 packet_id; + u8 data[INFF_FIL_ACTION_FRAME_SIZE]; +}; + +struct inff_fil_af_params_le { + __le32 channel; + __le32 dwell_time; + u8 
bssid[ETH_ALEN]; + u8 pad[2]; + struct inff_fil_action_frame_le action_frame; +}; + +struct inff_fil_af_params_v2_le { + __le16 version; + __le16 length; + __le32 channel; + __le32 dwell_time; + u8 bssid[ETH_ALEN]; + u8 band; + u8 pad[1]; + struct inff_fil_action_frame_le action_frame; +}; + +struct inff_fil_bss_enable_le { + __le32 bsscfgidx; + __le32 enable; +}; + +struct inff_fil_bwcap_le { + __le32 band; + __le32 bw_cap; +}; + +/** + * struct tdls_iovar - common structure for tdls iovars. + * + * @ea: ether address of peer station. + * @mode: mode value depending on specific tdls iovar. + * @chanspec: channel specification. + * @pad: unused (for future use). + */ +struct inff_tdls_iovar_le { + u8 ea[ETH_ALEN]; /* Station address */ + u8 mode; /* mode: depends on iovar */ + __le16 chanspec; + __le32 pad; /* future */ +}; + +enum inff_tdls_manual_ep_ops { + INFF_TDLS_MANUAL_EP_CREATE = 1, + INFF_TDLS_MANUAL_EP_DELETE = 3, + INFF_TDLS_MANUAL_EP_DISCOVERY = 6 +}; + +/* Pattern matching filter. Specifies an offset within received packets to + * start matching, the pattern to match, the size of the pattern, and a bitmask + * that indicates which bits within the pattern should be matched. + */ +struct inff_pkt_filter_pattern_le { + /* + * Offset within received packet to start pattern matching. + * Offset '0' is the first byte of the ethernet header. + */ + __le32 offset; + /* Size of the pattern. Bitmask must be the same size.*/ + __le32 size_bytes; + /* + * Variable length mask and pattern data. mask starts at offset 0. + * Pattern immediately follows mask. + */ + u8 mask_and_pattern[MAX_PKTFILTER_PATTERN_FILL_SIZE]; +}; + +/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */ +struct inff_pkt_filter_le { + __le32 id; /* Unique filter id, specified by app. */ + __le32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). 
*/ + __le32 negate_match; /* Negate the result of filter matches */ + union { /* Filter definitions */ + struct inff_pkt_filter_pattern_le pattern; /* Filter pattern */ + } u; +}; + +/* IOVAR "pkt_filter_enable" parameter. */ +struct inff_pkt_filter_enable_le { + __le32 id; /* Unique filter id */ + __le32 enable; /* Enable/disable bool */ +}; + +/* BSS info structure + * Applications MUST CHECK ie_offset field and length field to access IEs and + * next bss_info structure in a vector (in struct inff_scan_results) + */ +struct inff_bss_info_le { + __le32 version; /* version field */ + __le32 length; /* byte length of data in this record, + * starting at version and including IEs + */ + u8 BSSID[ETH_ALEN]; + __le16 beacon_period; /* units are Kusec */ + __le16 capability; /* Capability information */ + u8 SSID_len; + u8 SSID[32]; + struct { + __le32 count; /* # rates in this set */ + u8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */ + } rateset; /* supported rates */ + __le16 chanspec; /* chanspec for bss */ + __le16 atim_window; /* units are Kusec */ + u8 dtim_period; /* DTIM period */ + __le16 RSSI; /* receive signal strength (in dBm) */ + s8 phy_noise; /* noise (in dBm) */ + + u8 n_cap; /* BSS is 802.11N Capable */ + /* 802.11N BSS Capabilities (based on HT_CAP_*): */ + __le32 nbss_cap; + u8 ctl_ch; /* 802.11N BSS control channel number */ + __le32 reserved32[1]; /* Reserved for expansion of BSS properties */ + u8 flags; /* flags */ + u8 reserved[3]; /* Reserved for expansion of BSS properties */ + u8 basic_mcs[INFF_MCSSET_LEN]; /* 802.11N BSS required MCS set */ + + __le16 ie_offset; /* offset at which IEs start, from beginning */ + __le32 ie_length; /* byte length of Information Elements */ + __le16 SNR; /* average SNR of during frame reception */ + /* Add new fields here */ + /* variable length Information Elements */ +}; + +struct inff_rateset_le { + /* # rates in this set */ + __le32 count; + /* rates in 500kbps units w/hi bit set if basic */ + 
u8 rates[INFF_MAXRATES_IN_SET]; +}; + +struct inff_ssid_le { + __le32 SSID_len; + unsigned char SSID[IEEE80211_MAX_SSID_LEN]; +}; + +struct inff_scan_params_le { + struct inff_ssid_le ssid_le; /* default: {0, ""} */ + u8 bssid[ETH_ALEN]; /* default: bcast */ + s8 bss_type; /* default: any, + * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT + */ + u8 scan_type; /* flags, 0 use default */ + __le32 nprobes; /* -1 use default, number of probes per channel */ + __le32 active_time; /* -1 use default, dwell time per channel for + * active scanning + */ + __le32 passive_time; /* -1 use default, dwell time per channel + * for passive scanning + */ + __le32 home_time; /* -1 use default, dwell time for the + * home channel between channel scans + */ + __le32 channel_num; /* count of channels and ssids that follow + * + * low half is count of channels in + * channel_list, 0 means default (use all + * available channels) + * + * high half is entries in struct inff_ssid + * array that follows channel_list, aligned for + * s32 (4 bytes) meaning an odd channel count + * implies a 2-byte pad between end of + * channel_list and first ssid + * + * if ssid count is zero, single ssid in the + * fixed parameter portion is assumed, otherwise + * ssid in the fixed portion is ignored + */ + union { + __le16 padding; /* Reserve space for at least 1 entry for abort + * which uses an on stack inff_scan_params_le + */ + DECLARE_FLEX_ARRAY(__le16, channel_list); /* chanspecs */ + }; +}; + +struct inff_scan_params_v2_le { + __le16 version; /* structure version */ + __le16 length; /* structure length */ + struct inff_ssid_le ssid_le; /* default: {0, ""} */ + u8 bssid[ETH_ALEN]; /* default: bcast */ + s8 bss_type; /* default: any, + * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT + */ + u8 pad; + __le32 scan_type; /* flags, 0 use default */ + __le32 nprobes; /* -1 use default, number of probes per channel */ + __le32 active_time; /* -1 use default, dwell time per channel for + * active scanning + */ 
+ __le32 passive_time; /* -1 use default, dwell time per channel + * for passive scanning + */ + __le32 home_time; /* -1 use default, dwell time for the + * home channel between channel scans + */ + __le32 channel_num; /* count of channels and ssids that follow + * + * low half is count of channels in + * channel_list, 0 means default (use all + * available channels) + * + * high half is entries in struct inff_ssid + * array that follows channel_list, aligned for + * s32 (4 bytes) meaning an odd channel count + * implies a 2-byte pad between end of + * channel_list and first ssid + * + * if ssid count is zero, single ssid in the + * fixed parameter portion is assumed, otherwise + * ssid in the fixed portion is ignored + */ + union { + __le16 padding; /* Reserve space for at least 1 entry for abort + * which uses an on stack inff_scan_params_v2_le + */ + DECLARE_FLEX_ARRAY(__le16, channel_list); /* chanspecs */ + }; +}; + +struct inff_scan_results { + u32 buflen; + u32 version; + u32 count; + struct inff_bss_info_le bss_info_le[]; +}; + +struct inff_escan_params_le { + __le32 version; + __le16 action; + __le16 sync_id; + union { + struct inff_scan_params_le params_le; + struct inff_scan_params_v2_le params_v2_le; + }; +}; + +struct inff_escan_result_le { + __le32 buflen; + __le32 version; + __le16 sync_id; + __le16 bss_count; + struct inff_bss_info_le bss_info_le; +}; + +#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(struct inff_escan_result_le) - \ + sizeof(struct inff_bss_info_le)) + +/* used for association with a specific BSSID and chanspec list */ +struct inff_assoc_params_le { + /* 00:00:00:00:00:00: broadcast scan */ + u8 bssid[ETH_ALEN]; + /* 0: all available channels, otherwise count of chanspecs in + * chanspec_list + */ + __le32 chanspec_num; + /* + * list of chanspecs. Currently, driver only uses 1 chanspec here. + * TODO: Conver this into a flexible array to support multiple + * chanspecs. 
+ */ + __le16 chanspec_list; +}; + +/** + * struct inff_join_pref_params - parameters for preferred join selection. + * + * @type: preference type (see enum inff_join_pref_types). + * @len: length of bytes following (currently always 2). + * @rssi_gain: signal gain for selection (only when @type is RSSI_DELTA). + * @band: band to which selection preference applies. + * This is used if @type is BAND or RSSI_DELTA. + */ +struct inff_join_pref_params { + u8 type; + u8 len; + u8 rssi_gain; + u8 band; +}; + +/* used for join with or without a specific bssid and channel list */ +struct inff_join_params { + struct inff_ssid_le ssid_le; + struct inff_assoc_params_le params_le; +}; + +/* scan params for extended join */ +struct inff_join_scan_params_le { + u8 scan_type; /* 0 use default, active or passive scan */ + __le32 nprobes; /* -1 use default, nr of probes per channel */ + __le32 active_time; /* -1 use default, dwell time per channel for + * active scanning + */ + __le32 passive_time; /* -1 use default, dwell time per channel + * for passive scanning + */ + __le32 home_time; /* -1 use default, dwell time for the home + * channel between channel scans + */ +}; + +/* extended join params */ +struct inff_ext_join_params_le { + struct inff_ssid_le ssid_le; /* {0, ""}: wildcard scan */ + struct inff_join_scan_params_le scan_le; + struct inff_assoc_params_le assoc_le; +}; + +struct inff_wsec_key { + u32 index; /* key index */ + u32 len; /* key length */ + u8 data[WLAN_MAX_KEY_LEN]; /* key data */ + u32 pad_1[18]; + u32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */ + u32 flags; /* misc flags */ + u32 pad_2[3]; + u32 iv_initialized; /* has IV been initialized already? 
*/ + u32 pad_3; + /* Rx IV */ + struct { + u32 hi; /* upper 32 bits of IV */ + u16 lo; /* lower 16 bits of IV */ + } rxiv; + u32 pad_4[2]; + u8 ea[ETH_ALEN]; /* per station */ +}; + +/* + * dongle requires same struct as above but with fields in little endian order + */ +struct inff_wsec_key_le { + __le32 index; /* key index */ + __le32 len; /* key length */ + u8 data[WLAN_MAX_KEY_LEN]; /* key data */ + __le32 pad_1[18]; + __le32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */ + __le32 flags; /* misc flags */ + __le32 pad_2[3]; + __le32 iv_initialized; /* has IV been initialized already? */ + __le32 pad_3; + /* Rx IV */ + struct { + __le32 hi; /* upper 32 bits of IV */ + __le16 lo; /* lower 16 bits of IV */ + } rxiv; + __le32 pad_4[2]; + u8 ea[ETH_ALEN]; /* per station */ +}; + +/** + * struct inff_wsec_pmk_le - firmware pmk material. + * + * @key_len: number of octets in key material. + * @flags: key handling qualifiers. + * @key: PMK key material. + */ +struct inff_wsec_pmk_le { + __le16 key_len; + __le16 flags; + u8 key[INFF_WSEC_MAX_PMK_LEN]; +}; + +/** + * struct inff_wsec_sae_pwd_le - firmware SAE password material. + * + * @key_len: number of octets in key materials. + * @key: SAE password material. 
+ */ +struct inff_wsec_sae_pwd_le { + __le16 key_len; + u8 key[INFF_WSEC_MAX_SAE_PASSWORD_LEN]; +}; + +/** + * struct inff_auth_req_status_le - external auth request and status update + * + * @flags: flags for external auth status + * @peer_mac: peer MAC address + * @ssid_len: length of ssid + * @ssid: ssid characters + */ +struct inff_auth_req_status_le { + __le16 flags; + u8 peer_mac[ETH_ALEN]; + __le32 ssid_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 pmkid[WLAN_PMKID_LEN]; +}; + +struct inff_auth_req_status_info_le_v2 { + __le16 version; + __le16 len; + __le16 flags; + u8 peer_mac[ETH_ALEN];/* peer mac address */ + __le32 ssid_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 pmkid[WLAN_PMKID_LEN]; + struct inff_bss_info_le bss_info_le[]; +}; + +/** + * struct inff_mf_params_le - management frame parameters for mgmt_frame iovar + * + * @version: version of the iovar + * @dwell_time: dwell duration in ms + * @len: length of frame data + * @frame_control: frame control + * @channel: channel + * @da: peer MAC address + * @bssid: BSS network identifier + * @packet_id: packet identifier + * @data: frame data + */ +struct inff_mf_params_le { + __le32 version; + __le32 dwell_time; + __le16 len; + __le16 frame_control; + __le16 channel; + u8 da[ETH_ALEN]; + u8 bssid[ETH_ALEN]; + __le32 packet_id; + u8 data[] __counted_by_le(len); +}; + +/* Used to get specific STA parameters */ +struct inff_scb_val_le { + __le32 val; + u8 ea[ETH_ALEN]; +}; + +/* channel encoding */ +struct inff_channel_info_le { + __le32 hw_channel; + __le32 target_channel; + __le32 scan_channel; +}; + +struct inff_sta_info_le { + __le16 ver; /* version of this struct */ + __le16 len; /* length in bytes of this structure */ + __le16 cap; /* sta's advertised capabilities */ + __le32 flags; /* flags defined below */ + __le32 idle; /* time since data pkt rx'd from sta */ + u8 ea[ETH_ALEN]; /* Station address */ + __le32 count; /* # rates in this set */ + u8 rates[INFF_MAXRATES_IN_SET]; /* rates in 500kbps 
units */ + /* w/hi bit set if basic */ + __le32 in; /* seconds elapsed since associated */ + __le32 listen_interval_inms; /* Min Listen interval in ms for STA */ + + /* Fields valid for ver >= 3 */ + __le32 tx_pkts; /* # of packets transmitted */ + __le32 tx_failures; /* # of packets failed */ + __le32 rx_ucast_pkts; /* # of unicast packets received */ + __le32 rx_mcast_pkts; /* # of multicast packets received */ + __le32 tx_rate; /* Rate of last successful tx frame */ + __le32 rx_rate; /* Rate of last successful rx frame */ + __le32 rx_decrypt_succeeds; /* # of packet decrypted successfully */ + __le32 rx_decrypt_failures; /* # of packet decrypted failed */ + + /* Fields valid for ver >= 4 */ + __le32 tx_tot_pkts; /* # of tx pkts (ucast + mcast) */ + __le32 rx_tot_pkts; /* # of data packets recvd (uni + mcast) */ + __le32 tx_mcast_pkts; /* # of mcast pkts txed */ + __le64 tx_tot_bytes; /* data bytes txed (ucast + mcast) */ + __le64 rx_tot_bytes; /* data bytes recvd (ucast + mcast) */ + __le64 tx_ucast_bytes; /* data bytes txed (ucast) */ + __le64 tx_mcast_bytes; /* # data bytes txed (mcast) */ + __le64 rx_ucast_bytes; /* data bytes recvd (ucast) */ + __le64 rx_mcast_bytes; /* data bytes recvd (mcast) */ + s8 rssi[INFF_ANT_MAX]; /* per antenna rssi */ + s8 nf[INFF_ANT_MAX]; /* per antenna noise floor */ + __le16 aid; /* association ID */ + __le16 ht_capabilities; /* advertised ht caps */ + __le16 vht_flags; /* converted vht flags */ + __le32 tx_pkts_retry_cnt; /* # of frames where a retry was + * exhausted. + */ + __le32 tx_pkts_retry_exhausted; /* # of user frames where a retry + * was exhausted + */ + s8 rx_lastpkt_rssi[INFF_ANT_MAX]; /* Per antenna RSSI of last + * received data frame. + */ + /* TX WLAN retry/failure statistics: + * Separated for host requested frames and locally generated frames. + * Include unicast frame only where the retries/failures can be counted. 
+ */ + __le32 tx_pkts_total; /* # user frames sent successfully */ + __le32 tx_pkts_retries; /* # user frames retries */ + __le32 tx_pkts_fw_total; /* # FW generated sent successfully */ + __le32 tx_pkts_fw_retries; /* # retries for FW generated frames */ + __le32 tx_pkts_fw_retry_exhausted; /* # FW generated where a retry + * was exhausted + */ + __le32 rx_pkts_retried; /* # rx with retry bit set */ + __le32 tx_rate_fallback; /* lowest fallback TX rate */ + + union { + struct { + struct { + /* # rates in this set */ + __le32 count; + /* rates in 500kbps units w/hi bit set if basic */ + u8 rates[INFF_MAXRATES_IN_SET]; + /* supported mcs index bit map */ + u8 mcs[INFF_MCSSET_LEN]; + /* supported mcs index bit map per nss */ + __le16 vht_mcs[INFF_VHT_CAP_MCS_MAP_NSS_MAX]; + } rateset_adv; + } v5; + + struct { + __le32 rx_dur_total; /* total user RX duration (estimated) */ + __le16 chanspec; /** chanspec this sta is on */ + __le16 pad_1; + struct { + __le16 version; /* version */ + __le16 len; /* length */ + /* # rates in this set */ + __le32 count; + /* rates in 500kbps units w/hi bit set if basic */ + u8 rates[INFF_MAXRATES_IN_SET]; + /* supported mcs index bit map */ + u8 mcs[INFF_MCSSET_LEN]; + /* supported mcs index bit map per nss */ + __le16 vht_mcs[INFF_VHT_CAP_MCS_MAP_NSS_MAX]; + /* supported he mcs index bit map per nss */ + __le16 he_mcs[INFF_HE_CAP_MCS_MAP_NSS_MAX]; + } rateset_adv; /* rateset along with mcs index bitmap */ + __le16 wpauth; /* authentication type */ + u8 algo; /* crypto algorithm */ + u8 pad_2; + __le32 tx_rspec; /* Rate of last successful tx frame */ + __le32 rx_rspec; /* Rate of last successful rx frame */ + __le32 wnm_cap; /* wnm capabilities */ + } v7; + }; +}; + +struct inff_chanspec_list { + __le32 count; /* # of entries */ + __le32 element[]; /* variable length uint32 list */ +}; + +/* + * WLC_E_PROBRESP_MSG + * WLC_E_P2P_PROBREQ_MSG + * WLC_E_ACTION_FRAME_RX + */ +struct inff_rx_mgmt_data { + __be16 version; + __be16 chanspec; + 
__be32 rssi; + __be32 mactime; + __be32 rate; +}; + +/** + * struct inff_fil_wowl_pattern_le - wowl pattern configuration struct. + * + * @cmd: "add", "del" or "clr". + * @masksize: Size of the mask in #of bytes + * @offset: Pattern byte offset in packet + * @patternoffset: Offset of start of pattern. Starting from field masksize. + * @patternsize: Size of the pattern itself in #of bytes + * @id: id + * @reasonsize: Size of the wakeup reason code + * @type: Type of pattern (enum inff_wowl_pattern_type) + */ +struct inff_fil_wowl_pattern_le { + u8 cmd[4]; + __le32 masksize; + __le32 offset; + __le32 patternoffset; + __le32 patternsize; + __le32 id; + __le32 reasonsize; + __le32 type; + /* u8 mask[] - Mask follows the structure above */ + /* u8 pattern[] - Pattern follows the mask is at 'patternoffset' */ +}; + +struct inff_mbss_ssid_le { + __le32 bsscfgidx; + __le32 SSID_len; + unsigned char SSID[32]; +}; + +/** + * struct inff_fil_country_le - country configuration structure. + * + * @country_abbrev: null-terminated country code used in the country IE. + * @rev: revision specifier for ccode. on set, -1 indicates unspecified. + * @ccode: null-terminated built-in country code. + */ +struct inff_fil_country_le { + char country_abbrev[INFF_COUNTRY_BUF_SZ]; + __le32 rev; + char ccode[INFF_COUNTRY_BUF_SZ]; +}; + +/** + * struct inff_rev_info_le - device revision info. + * + * @vendorid: PCI vendor id. + * @deviceid: device id of chip. + * @radiorev: radio revision. + * @chiprev: chip revision. + * @corerev: core revision. + * @boardid: board identifier (usu. PCI sub-device id). + * @boardvendor: board vendor (usu. PCI sub-vendor id). + * @boardrev: board revision. + * @driverrev: driver version. + * @ucoderev: microcode version. + * @bus: bus type. + * @chipnum: chip number. + * @phytype: phy type. + * @phyrev: phy revision. + * @anarev: anacore rev. + * @chippkg: chip package info. + * @nvramrev: nvram revision number. 
+ */ +struct inff_rev_info_le { + __le32 vendorid; + __le32 deviceid; + __le32 radiorev; + __le32 chiprev; + __le32 corerev; + __le32 boardid; + __le32 boardvendor; + __le32 boardrev; + __le32 driverrev; + __le32 ucoderev; + __le32 bus; + __le32 chipnum; + __le32 phytype; + __le32 phyrev; + __le32 anarev; + __le32 chippkg; + __le32 nvramrev; +}; + +/** + * struct inff_wlc_version_le - firmware revision info. + * + * @version: structure version. + * @length: structure length. + * @epi_ver_major: EPI major version + * @epi_ver_minor: EPI minor version + * @epi_ver_rc: EPI rc version + * @epi_ver_incr: EPI increment version + * @wlc_ver_major: WLC major version + * @wlc_ver_minor: WLC minor version + */ +struct inff_wlc_version_le { + __le16 version; + __le16 length; + + __le16 epi_ver_major; + __le16 epi_ver_minor; + __le16 epi_ver_rc; + __le16 epi_ver_incr; + + __le16 wlc_ver_major; + __le16 wlc_ver_minor; +}; + +/** + * struct inff_assoclist_le - request assoc list. + * + * @count: indicates number of stations. + * @mac: MAC addresses of stations. + */ +struct inff_assoclist_le { + __le32 count; + u8 mac[INFF_MAX_ASSOCLIST][ETH_ALEN]; +}; + +/** + * struct inff_rssi_be - RSSI threshold event format + * + * @rssi: receive signal strength (in dBm) + * @snr: signal-noise ratio + * @noise: noise (in dBm) + */ +struct inff_rssi_be { + __be32 rssi; + __be32 snr; + __be32 noise; +}; + +#define INFF_MAX_RSSI_LEVELS 8 + +/** + * struct inff_rssi_event_le - rssi_event IOVAR format + * + * @rate_limit_msec: RSSI event rate limit + * @rssi_level_num: number of supplied RSSI levels + * @rssi_levels: RSSI levels in ascending order + */ +struct inff_rssi_event_le { + __le32 rate_limit_msec; + s8 rssi_level_num; + s8 rssi_levels[INFF_MAX_RSSI_LEVELS]; +}; + +/** + * struct inff_wowl_wakeind_le - Wakeup indicators + * Note: note both fields contain same information. + * + * @pci_wakeind: Whether PCI PMECSR PMEStatus bit was set. 
+ * @ucode_wakeind: What wakeup-event indication was set by ucode + */ +struct inff_wowl_wakeind_le { + __le32 pci_wakeind; + __le32 ucode_wakeind; +}; + +/** + * struct inff_pmksa - PMK Security Association + * + * @bssid: The AP's BSSID. + * @pmkid: the PMK material itself. + */ +struct inff_pmksa { + u8 bssid[ETH_ALEN]; + u8 pmkid[WLAN_PMKID_LEN]; +}; + +/** + * struct inff_pmk_list_le - List of pmksa's. + * + * @npmk: Number of pmksa's. + * @pmk: PMK SA information. + */ +struct inff_pmk_list_le { + __le32 npmk; + struct inff_pmksa pmk[INFF_MAXPMKID]; +}; + +/** + * struct inff_pno_param_le - PNO scan configuration parameters + * + * @version: PNO parameters version. + * @scan_freq: scan frequency. + * @lost_network_timeout: #sec. to declare discovered network as lost. + * @flags: Bit field to control features of PFN such as sort criteria auto + * enable switch and background scan. + * @rssi_margin: Margin to avoid jitter for choosing a PFN based on RSSI sort + * criteria. + * @bestn: number of best networks in each scan. + * @mscan: number of scans recorded. + * @repeat: minimum number of scan intervals before scan frequency changes + * in adaptive scan. + * @exp: exponent of 2 for maximum scan interval. + * @slow_freq: slow scan period. + */ +struct inff_pno_param_le { + __le32 version; + __le32 scan_freq; + __le32 lost_network_timeout; + __le16 flags; + __le16 rssi_margin; + u8 bestn; + u8 mscan; + u8 repeat; + u8 exp; + __le32 slow_freq; +}; + +/** + * struct inff_pno_config_le - PNO channel configuration. + * + * @reporttype: determines what is reported. + * @channel_num: number of channels specified in @channel_list. + * @channel_list: channels to use in PNO scan. + * @flags: reserved. + */ +struct inff_pno_config_le { + __le32 reporttype; + __le32 channel_num; + __le16 channel_list[INFF_NUMCHANNELS]; + __le32 flags; +}; + +/** + * struct inff_pno_net_param_le - scan parameters per preferred network. + * + * @ssid: ssid name and its length. 
+ * @flags: bit2: hidden. + * @infra: BSS vs IBSS. + * @auth: Open vs Closed. + * @wpa_auth: WPA type. + * @wsec: wsec value. + */ +struct inff_pno_net_param_le { + struct inff_ssid_le ssid; + __le32 flags; + __le32 infra; + __le32 auth; + __le32 wpa_auth; + __le32 wsec; +}; + +/** + * struct inff_pno_net_info_le - information per found network. + * + * @bssid: BSS network identifier. + * @channel: channel number only. + * @SSID_len: length of ssid. + * @SSID: ssid characters. + * @RSSI: receive signal strength (in dBm). + * @timestamp: age in seconds. + */ +struct inff_pno_net_info_le { + u8 bssid[ETH_ALEN]; + u8 channel; + u8 SSID_len; + u8 SSID[32]; + __le16 RSSI; + __le16 timestamp; +}; + +/** + * struct inff_pno_scanresults_le - result returned in PNO NET FOUND event. + * + * @version: PNO version identifier. + * @status: indicates completion status of PNO scan. + * @count: amount of inff_pno_net_info_le entries appended. + */ +struct inff_pno_scanresults_le { + __le32 version; + __le32 status; + __le32 count; +}; + +struct inff_pno_scanresults_v2_le { + __le32 version; + __le32 status; + __le32 count; + __le32 scan_ch_bucket; +}; + +/** + * struct inff_pno_macaddr_le - to configure PNO macaddr randomization. + * + * @version: PNO version identifier. + * @flags: Flags defining how mac addrss should be used. + * @mac: MAC address. + */ +struct inff_pno_macaddr_le { + u8 version; + u8 flags; + u8 mac[ETH_ALEN]; +}; + +/** + * struct inff_dload_data_le - data passing to firmware for downloading + * @flag: flags related to download data. + * @dload_type: type of download data. + * @len: length in bytes of download data. + * @crc: crc of download data. + * @data: download data. + */ +struct inff_dload_data_le { + __le16 flag; + __le16 dload_type; + __le32 len; + __le32 crc; + u8 data[]; +}; + +/** + * struct inff_pno_bssid_le - bssid configuration for PNO scan. + * + * @bssid: BSS network identifier. + * @flags: flags for this BSSID. 
+ */ +struct inff_pno_bssid_le { + u8 bssid[ETH_ALEN]; + __le16 flags; +}; + +/** + * struct inff_pktcnt_le - packet counters. + * + * @rx_good_pkt: packets (MSDUs & MMPDUs) received from this station + * @rx_bad_pkt: failed rx packets + * @tx_good_pkt: packets (MSDUs & MMPDUs) transmitted to this station + * @tx_bad_pkt: failed tx packets + * @rx_ocast_good_pkt: unicast packets destined for others + */ +struct inff_pktcnt_le { + __le32 rx_good_pkt; + __le32 rx_bad_pkt; + __le32 tx_good_pkt; + __le32 tx_bad_pkt; + __le32 rx_ocast_good_pkt; +}; + +/** + * struct inff_gtk_keyinfo_le - GTK rekey data + * + * @kck: key confirmation key. + * @kek: key encryption key. + * @replay_counter: replay counter. + */ +struct inff_gtk_keyinfo_le { + u8 kck[INFF_RSN_KCK_LENGTH]; + u8 kek[INFF_RSN_KEK_LENGTH]; + u8 replay_counter[INFF_RSN_REPLAY_LEN]; +}; + +#define INFF_PNO_REPORT_NO_BATCH BIT(2) + +/** + * struct inff_gscan_bucket_config - configuration data for channel bucket. + * + * @bucket_end_index: last channel index in @channel_list in + * @struct inff_pno_config_le. + * @bucket_freq_multiple: scan interval expressed in N * @scan_freq. + * @flag: channel bucket report flags. + * @reserved: for future use. + * @repeat: number of scan at interval for exponential scan. + * @max_freq_multiple: maximum scan interval for exponential scan. + */ +struct inff_gscan_bucket_config { + u8 bucket_end_index; + u8 bucket_freq_multiple; + u8 flag; + u8 reserved; + __le16 repeat; + __le16 max_freq_multiple; +}; + +/* version supported which must match firmware */ +#define INFF_GSCAN_CFG_VERSION 2 + +/** + * enum inff_gscan_cfg_flags - bit values for gscan flags. + * + * @INFF_GSCAN_CFG_FLAGS_ALL_RESULTS: send probe responses/beacons to host. + * @INFF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN: all buckets will be included in + * first scan cycle. + * @INFF_GSCAN_CFG_FLAGS_CHANGE_ONLY: indicates only flags member is changed. 
+ */ +enum inff_gscan_cfg_flags { + INFF_GSCAN_CFG_FLAGS_ALL_RESULTS = BIT(0), + INFF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN = BIT(3), + INFF_GSCAN_CFG_FLAGS_CHANGE_ONLY = BIT(7), +}; + +/** + * struct inff_gscan_config - configuration data for gscan. + * + * @version: version of the api to match firmware. + * @flags: flags according %enum inff_gscan_cfg_flags. + * @buffer_threshold: percentage threshold of buffer to generate an event. + * @swc_nbssid_threshold: number of BSSIDs with significant change that + * will generate an event. + * @swc_rssi_window_size: size of rssi cache buffer (max=8). + * @count_of_channel_buckets: number of array members in @bucket. + * @retry_threshold: !unknown! + * @lost_ap_window: !unknown! + * @bucket: array of channel buckets. + */ +struct inff_gscan_config { + __le16 version; + u8 flags; + u8 buffer_threshold; + u8 swc_nbssid_threshold; + u8 swc_rssi_window_size; + u8 count_of_channel_buckets; + u8 retry_threshold; + __le16 lost_ap_window; + struct inff_gscan_bucket_config bucket[] __counted_by(count_of_channel_buckets); +}; + +/** + * struct inff_mkeep_alive_pkt_le - configuration data for keep-alive frame. + * + * @version: version for mkeep_alive + * @length: length of fixed parameters in the structure. + * @period_msec: keep-alive period in milliseconds. + * @len_bytes: size of the data. + * @keep_alive_id: ID (0 - 3). + * @data: keep-alive frame data. + */ +struct inff_mkeep_alive_pkt_le { + __le16 version; + __le16 length; + __le32 period_msec; + __le16 len_bytes; + u8 keep_alive_id; + u8 data[]; +} __packed; + +/* INFF_E_RSSI event data */ +struct wl_event_data_rssi { + s32 rssi; + s32 snr; + s32 noise; +}; + +/** RSSI event notification configuration. 
*/ +struct wl_rssi_event { + u32 rate_limit_msec; + u8 num_rssi_levels; + s8 rssi_levels[MAX_RSSI_LEVELS]; + u8 version; + s8 pad[2]; +}; + +struct ipv4_addr { + u8 addr[INFF_IPV4_ADDR_LEN]; +}; + +struct ipv6_addr { + u8 addr[INFF_IPV6_ADDR_LEN]; +}; + +#endif /* INFF_FWIL_TYPES_H */ -- 2.25.1 Driver implementation for registering and handling various types of asynchronous events generated by the Device firmware. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/fweh.c | 1552 ++++++++++++++++++ drivers/net/wireless/infineon/inffmac/fweh.h | 402 +++++ 2 files changed, 1954 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/fweh.c create mode 100644 drivers/net/wireless/infineon/inffmac/fweh.h diff --git a/drivers/net/wireless/infineon/inffmac/fweh.c b/drivers/net/wireless/infineon/inffmac/fweh.c new file mode 100644 index 000000000000..e8a5ae27a12c --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/fweh.c @@ -0,0 +1,1552 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2012 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ +#include + +#include "chanspec.h" +#include "utils.h" +#include "cfg80211.h" +#include "core.h" +#include "debug.h" +#include "tracepoint.h" +#include "fweh.h" +#include "fwil.h" +#include "proto.h" +#include "common.h" +#include "offload.h" +#include "pmsr.h" +#include "twt.h" +#include "logger.h" + +/** + * struct inff_fweh_queue_item - event item on event queue. + * + * @q: list element for queuing. + * @code: event code. + * @ifidx: interface index related to this event. + * @ifaddr: ethernet address for interface. + * @emsg: common parameters of the firmware event message. + * @datalen: length of the data array + * @data: event specific data part of the firmware event. 
+ */ +struct inff_fweh_queue_item { + struct list_head q; + enum inff_fweh_event_code code; + u8 ifidx; + u8 ifaddr[ETH_ALEN]; + struct inff_event_msg_be emsg; + u32 datalen; + u8 data[] __counted_by(datalen); +}; + +/* + * struct inff_fweh_event_name - code, name mapping entry. + */ +struct inff_fweh_event_name { + enum inff_fweh_event_code code; + const char *name; +}; + +#ifdef DEBUG +/* array for mapping code to event name */ +static struct inff_fweh_event_name fweh_event_names[] = { + {INFF_E_SET_SSID, "SET_SSID"}, + {INFF_E_JOIN, "JOIN"}, + {INFF_E_START, "START"}, + {INFF_E_AUTH, "AUTH"}, + {INFF_E_AUTH_IND, "AUTH_IND"}, + {INFF_E_DEAUTH, "DEAUTH"}, + {INFF_E_DEAUTH_IND, "DEAUTH_IND"}, + {INFF_E_ASSOC, "ASSOC"}, + {INFF_E_ASSOC_IND, "ASSOC_IND"}, + {INFF_E_REASSOC, "REASSOC"}, + {INFF_E_REASSOC_IND, "REASSOC_IND"}, + {INFF_E_DISASSOC, "DISASSOC"}, + {INFF_E_DISASSOC_IND, "DISASSOC_IND"}, + {INFF_E_QUIET_START, "QUIET_START"}, + {INFF_E_QUIET_END, "QUIET_END"}, + {INFF_E_BEACON_RX, "BEACON_RX"}, + {INFF_E_LINK, "LINK"}, + {INFF_E_MIC_ERROR, "MIC_ERROR"}, + {INFF_E_NDIS_LINK, "NDIS_LINK"}, + {INFF_E_ROAM, "ROAM"}, + {INFF_E_TXFAIL, "TXFAIL"}, + {INFF_E_PMKID_CACHE, "PMKID_CACHE"}, + {INFF_E_RETROGRADE_TSF, "RETROGRADE_TSF"}, + {INFF_E_PRUNE, "PRUNE"}, + {INFF_E_AUTOAUTH, "AUTOAUTH"}, + {INFF_E_EAPOL_MSG, "EAPOL_MSG"}, + {INFF_E_SCAN_COMPLETE, "SCAN_COMPLETE"}, + {INFF_E_ADDTS_IND, "ADDTS_IND"}, + {INFF_E_DELTS_IND, "DELTS_IND"}, + {INFF_E_BCNSENT_IND, "BCNSENT_IND"}, + {INFF_E_BCNRX_MSG, "BCNRX_MSG"}, + {INFF_E_BCNLOST_MSG, "BCNLOST_MSG"}, + {INFF_E_ROAM_PREP, "ROAM_PREP"}, + {INFF_E_PFN_NET_FOUND, "PFN_NET_FOUND"}, + {INFF_E_PFN_NET_LOST, "PFN_NET_LOST"}, + {INFF_E_RESET_COMPLETE, "RESET_COMPLETE"}, + {INFF_E_JOIN_START, "JOIN_START"}, + {INFF_E_ROAM_START, "ROAM_START"}, + {INFF_E_ASSOC_START, "ASSOC_START"}, + {INFF_E_IBSS_ASSOC, "IBSS_ASSOC"}, + {INFF_E_RADIO, "RADIO"}, + {INFF_E_PSM_WATCHDOG, "PSM_WATCHDOG"}, + {INFF_E_PROBREQ_MSG, "PROBREQ_MSG"}, + 
{INFF_E_SCAN_CONFIRM_IND, "SCAN_CONFIRM_IND"}, + {INFF_E_PSK_SUP, "PSK_SUP"}, + {INFF_E_COUNTRY_CODE_CHANGED, "COUNTRY_CODE_CHANGED"}, + {INFF_E_EXCEEDED_MEDIUM_TIME, "EXCEEDED_MEDIUM_TIME"}, + {INFF_E_ICV_ERROR, "ICV_ERROR"}, + {INFF_E_UNICAST_DECODE_ERROR, "UNICAST_DECODE_ERROR"}, + {INFF_E_MULTICAST_DECODE_ERROR, "MULTICAST_DECODE_ERROR"}, + {INFF_E_TRACE, "TRACE"}, + {INFF_E_IF, "IF"}, + {INFF_E_P2P_DISC_LISTEN_COMPLETE, "P2P_DISC_LISTEN_COMPLETE"}, + {INFF_E_RSSI, "RSSI"}, + {INFF_E_EXTLOG_MSG, "EXTLOG_MSG"}, + {INFF_E_ACTION_FRAME, "ACTION_FRAME"}, + {INFF_E_ACTION_FRAME_COMPLETE, "ACTION_FRAME_COMPLETE"}, + {INFF_E_PRE_ASSOC_IND, "PRE_ASSOC_IND"}, + {INFF_E_PRE_REASSOC_IND, "PRE_REASSOC_IND"}, + {INFF_E_CHANNEL_ADOPTED, "CHANNEL_ADOPTED"}, + {INFF_E_AP_STARTED, "AP_STARTED"}, + {INFF_E_DFS_AP_STOP, "DFS_AP_STOP"}, + {INFF_E_DFS_AP_RESUME, "DFS_AP_RESUME"}, + {INFF_E_ESCAN_RESULT, "ESCAN_RESULT"}, + {INFF_E_ACTION_FRAME_OFF_CHAN_COMPLETE, "ACTION_FRAME_OFF_CHAN_COMPLETE"}, + {INFF_E_PROBERESP_MSG, "PROBERESP_MSG"}, + {INFF_E_P2P_PROBEREQ_MSG, "P2P_PROBEREQ_MSG"}, + {INFF_E_DCS_REQUEST, "DCS_REQUEST"}, + {INFF_E_FIFO_CREDIT_MAP, "FIFO_CREDIT_MAP"}, + {INFF_E_ACTION_FRAME_RX, "ACTION_FRAME_RX"}, + {INFF_E_SA_COMPLETE_IND, "SA_COMPLETE_IND"}, + {INFF_E_ASSOC_REQ_IE, "ASSOC_REQ_IE"}, + {INFF_E_ASSOC_RESP_IE, "ASSOC_RESP_IE"}, + {INFF_E_TDLS_PEER_EVENT, "TDLS_PEER_EVENT"}, + {INFF_E_PROXD, "PROXD"}, + {INFF_E_BCMC_CREDIT_SUPPORT, "BCMC_CREDIT_SUPPORT"}, + {INFF_E_ULP, "ULP"}, + {INFF_E_TWT_SETUP, "TWT_SETUP"}, + {INFF_E_EXT_AUTH_REQ, "EXT_AUTH_REQ"}, + {INFF_E_EXT_AUTH_FRAME_RX, "EXT_AUTH_FRAME_RX"}, + {INFF_E_MGMT_FRAME_TXSTATUS, "MGMT_FRAME_TXSTATUS"}, + {INFF_E_MGMT_FRAME_OFF_CHAN_COMPLETE, "MGMT_FRAME_OFF_CHAN_COMPLETE"}, + {INFF_E_TWT_TEARDOWN, "TWT_TEARDOWN"}, + {INFF_E_EXT_ASSOC_FRAME_RX, "EXT_ASSOC_FRAME_RX"}, + {INFF_E_WLAN_SENSE_ENABLED, "WLAN_SENSE_ENABLED"}, + {INFF_E_WLAN_SENSE_DATA, "WLAN_SENSE_DATA"}, + {INFF_E_WLAN_SENSE_DISABLED, 
"WLAN_SENSE_DISABLED"}, + {INFF_E_ICMP_ECHO_REQ, "ICMP_ECHO_REQ"} +}; + +/** + * inff_fweh_event_name() - returns name for given event code. + * + * @code: code to lookup. + */ +const char *inff_fweh_event_name(enum inff_fweh_event_code code) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(fweh_event_names); i++) { + if (fweh_event_names[i].code == code) + return fweh_event_names[i].name; + } + return "unknown"; +} +#else +const char *inff_fweh_event_name(enum inff_fweh_event_code code) +{ + return "nodebug"; +} +#endif + +/** + * inff_fweh_queue_event() - create and queue event. + * + * @fweh: firmware event handling info. + * @event: event queue entry. + */ +static void inff_fweh_queue_event(struct inff_fweh_info *fweh, + struct inff_fweh_queue_item *event) +{ + ulong flags; + + spin_lock_irqsave(&fweh->evt_q_lock, flags); + list_add_tail(&event->q, &fweh->event_q); + spin_unlock_irqrestore(&fweh->evt_q_lock, flags); + schedule_work(&fweh->event_work); +} + +static int inff_fweh_call_event_handler(struct inff_pub *drvr, + struct inff_if *ifp, + enum inff_fweh_event_code code, + struct inff_event_msg *emsg, + void *data) +{ + struct inff_fweh_info *fweh; + int err = -EINVAL; + + if (ifp) { + fweh = &ifp->drvr->fweh; + + /* handle the event if valid interface and handler */ + if (fweh->evt_handler[code]) + err = fweh->evt_handler[code](ifp, emsg, data); + else + iphy_err(drvr, "unhandled event %d ignored\n", code); + } else { + iphy_err(drvr, "no interface object\n"); + } + return err; +} + +/** + * inff_fweh_handle_if_event() - handle IF event. + * + * @drvr: driver information object. + * @emsg: event message object. + * @data: event object. 
+ */ +static void inff_fweh_handle_if_event(struct inff_pub *drvr, + struct inff_event_msg *emsg, + void *data) +{ + struct inff_if_event *ifevent = data; + struct inff_if *ifp; + bool is_p2pdev = false, is_wlan_sensedev = false; + + inff_dbg(EVENT, "action: %u ifidx: %u bsscfgidx: %u flags: %u role: %u\n", + ifevent->action, ifevent->ifidx, ifevent->bsscfgidx, + ifevent->flags, ifevent->role); + + /* The P2P Device interface event must not be ignored contrary to what + * firmware tells us. Older firmware uses p2p noif, with sta role. + * This should be accepted when p2pdev_setup is ongoing. TDLS setup will + * use the same ifevent and should be ignored. + */ + is_p2pdev = ((ifevent->flags & INFF_E_IF_FLAG_NOIF) && + (ifevent->role == INFF_E_IF_ROLE_P2P_CLIENT || + ((ifevent->role == INFF_E_IF_ROLE_STA) && + (drvr->fweh.p2pdev_setup_ongoing)))); + + is_wlan_sensedev = ((ifevent->role == INFF_E_IF_ROLE_WLAN_SENSE) && + (drvr->fweh.wlan_sensedev_setup_ongoing)); + + if (!is_p2pdev && !is_wlan_sensedev && + (ifevent->flags & INFF_E_IF_FLAG_NOIF)) { + inff_dbg(EVENT, "event can be ignored\n"); + return; + } + + if (ifevent->ifidx >= INFF_MAX_IFS) { + iphy_err(drvr, "invalid interface index: %u\n", ifevent->ifidx); + return; + } + + ifp = drvr->iflist[ifevent->bsscfgidx]; + + if (ifevent->action == INFF_E_IF_ADD) { + inff_dbg(EVENT, "adding %s (%pM)\n", emsg->ifname, + emsg->addr); + ifp = inff_add_if(drvr, ifevent->bsscfgidx, ifevent->ifidx, + emsg->ifname, emsg->addr, is_p2pdev, is_wlan_sensedev); + if (IS_ERR(ifp)) + return; + if (!is_p2pdev && !is_wlan_sensedev) + inff_proto_add_if(drvr, ifp); + if (!drvr->fweh.evt_handler[INFF_E_IF]) + if (inff_net_attach(ifp, false) < 0) + return; + } + + if (ifp && ifevent->action == INFF_E_IF_CHANGE) + inff_proto_reset_if(drvr, ifp); + + inff_fweh_call_event_handler(drvr, ifp, emsg->event_code, emsg, + data); + + if (ifp && ifevent->action == INFF_E_IF_DEL) { + bool armed = inff_cfg80211_vif_event_armed(drvr->config); + + /* 
Default handling in case no-one waits for this event */ + if (!armed) + inff_remove_interface(ifp, false); + } +} + +/** + * inff_fweh_dequeue_event() - get event from the queue. + * + * @fweh: firmware event handling info. + */ +static struct inff_fweh_queue_item * +inff_fweh_dequeue_event(struct inff_fweh_info *fweh) +{ + struct inff_fweh_queue_item *event = NULL; + ulong flags; + + spin_lock_irqsave(&fweh->evt_q_lock, flags); + if (!list_empty(&fweh->event_q)) { + event = list_first_entry(&fweh->event_q, + struct inff_fweh_queue_item, q); + list_del(&event->q); + } + spin_unlock_irqrestore(&fweh->evt_q_lock, flags); + + return event; +} + +/** + * inff_fweh_event_worker() - firmware event worker. + * + * @work: worker object. + */ +static void inff_fweh_event_worker(struct work_struct *work) +{ + struct inff_pub *drvr; + struct inff_if *ifp; + struct inff_fweh_info *fweh; + struct inff_fweh_queue_item *event; + int err = 0; + struct inff_event_msg_be *emsg_be; + struct inff_event_msg emsg; + + fweh = container_of(work, struct inff_fweh_info, event_work); + drvr = container_of(fweh, struct inff_pub, fweh); + + while ((event = inff_fweh_dequeue_event(fweh))) { + inff_dbg(EVENT, "event %s (%u) ifidx %u bsscfg %u addr %pM\n", + inff_fweh_event_name(event->code), event->code, + event->emsg.ifidx, event->emsg.bsscfgidx, + event->emsg.addr); + if (event->emsg.bsscfgidx >= INFF_MAX_IFS) { + iphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx); + goto event_free; + } + + /* convert event message */ + emsg_be = &event->emsg; + emsg.version = be16_to_cpu(emsg_be->version); + emsg.flags = be16_to_cpu(emsg_be->flags); + emsg.event_code = event->code; + emsg.status = be32_to_cpu(emsg_be->status); + emsg.reason = be32_to_cpu(emsg_be->reason); + emsg.auth_type = be32_to_cpu(emsg_be->auth_type); + emsg.datalen = be32_to_cpu(emsg_be->datalen); + memcpy(emsg.addr, emsg_be->addr, ETH_ALEN); + memcpy(emsg.ifname, emsg_be->ifname, sizeof(emsg.ifname)); + emsg.ifidx = 
emsg_be->ifidx; + emsg.bsscfgidx = emsg_be->bsscfgidx; + + inff_logring_fill(drvr, INFF_LOGRING_FW_EVENT, (u8 *)&emsg, + sizeof(emsg)); + + inff_dbg(EVENT, " version %u flags %u status %u reason %u\n", + emsg.version, emsg.flags, emsg.status, emsg.reason); + inff_dbg_hex_dump(INFF_EVENT_ON(), event->data, + min_t(u32, emsg.datalen, 64), + "event payload, len=%d\n", emsg.datalen); + + /* special handling of interface event */ + if (event->code == INFF_E_IF) { + inff_fweh_handle_if_event(drvr, &emsg, event->data); + goto event_free; + } + + if (event->code == INFF_E_TDLS_PEER_EVENT) + ifp = drvr->iflist[0]; + else + ifp = drvr->iflist[emsg.bsscfgidx]; + err = inff_fweh_call_event_handler(drvr, ifp, event->code, + &emsg, event->data); + if (err) { + iphy_err(drvr, "event handler failed (%d)\n", + event->code); + err = 0; + } +event_free: + kfree(event); + } +} + +/** + * inff_fweh_p2pdev_setup() - P2P device setup ongoing (or not). + * + * @ifp: ifp on which setup is taking place or finished. + * @ongoing: p2p device setup in progress (or not). + */ +void inff_fweh_p2pdev_setup(struct inff_if *ifp, bool ongoing) +{ + ifp->drvr->fweh.p2pdev_setup_ongoing = ongoing; +} + +/** + * inff_fweh_wlan_sensedev_setup() - WLAN Sense device setup ongoing (or not). + * + * @ifp: ifp on which setup is taking place or finished. + * @ongoing: WLAN sense device setup in progress (or not). 
+ */ +void inff_fweh_wlan_sensedev_setup(struct inff_if *ifp, bool ongoing) +{ + ifp->drvr->fweh.wlan_sensedev_setup_ongoing = ongoing; +} + +static void inff_fweh_logring_event_print(u8 *data, u32 size) +{ + struct inff_logring_fw_event_item *evt_item = + (struct inff_logring_fw_event_item *)data; + struct inff_event_msg *emsg; + + if (size != sizeof(*evt_item)) + return; + + emsg = &evt_item->emsg; + + inff_dbg(INFO, "%llu: event %s (%u) ifidx %u bsscfg %u\n", + evt_item->timestamp, inff_fweh_event_name(emsg->event_code), + emsg->event_code, emsg->ifidx, emsg->bsscfgidx); +} + +/** + * inff_fweh_attach() - initialize firmware event handling. + * + * @drvr: driver information object. + */ +void inff_fweh_attach(struct inff_pub *drvr) +{ + struct inff_fweh_info *fweh = &drvr->fweh; + s32 ret; + + INIT_WORK(&fweh->event_work, inff_fweh_event_worker); + spin_lock_init(&fweh->evt_q_lock); + INIT_LIST_HEAD(&fweh->event_q); + + ret = inff_logring_init(drvr, INFF_LOGRING_FW_EVENT, + sizeof(struct inff_logring_fw_event_item), + inff_fweh_logring_event_print); + if (ret) + inff_err("Logger: FW_EVENT logring initialization failed ret=%d\n", ret); +} + +/** + * inff_fweh_detach() - cleanup firmware event handling. + * + * @drvr: driver information object. + */ +void inff_fweh_detach(struct inff_pub *drvr) +{ + struct inff_fweh_info *fweh = &drvr->fweh; + + inff_logring_deinit(drvr, INFF_LOGRING_FW_EVENT); + + /* cancel the worker if initialized */ + if (fweh->event_work.func) { + cancel_work_sync(&fweh->event_work); + WARN_ON(!list_empty(&fweh->event_q)); + memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler)); + } +} + +/** + * inff_fweh_register() - register handler for given event code. + * + * @drvr: driver information object. + * @code: event code. + * @handler: handler for the given event code. 
+ */ +int inff_fweh_register(struct inff_pub *drvr, enum inff_fweh_event_code code, + inff_fweh_handler_t handler) +{ + if (drvr->fweh.evt_handler[code]) { + iphy_err(drvr, "event code %d already registered\n", code); + return -ENOSPC; + } + drvr->fweh.evt_handler[code] = handler; + inff_dbg(TRACE, "event handler registered for %s\n", + inff_fweh_event_name(code)); + return 0; +} + +/** + * inff_fweh_unregister() - remove handler for given code. + * + * @drvr: driver information object. + * @code: event code. + */ +void inff_fweh_unregister(struct inff_pub *drvr, + enum inff_fweh_event_code code) +{ + inff_dbg(TRACE, "event handler cleared for %s\n", + inff_fweh_event_name(code)); + drvr->fweh.evt_handler[code] = NULL; +} + +/** + * inff_fweh_activate_events() - enables firmware events registered. + * + * @ifp: primary interface object. + */ +int inff_fweh_activate_events(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + int i, err; + struct eventmsgs_ext *eventmask_msg; + u32 msglen; + + msglen = EVENTMSGS_EXT_STRUCT_SIZE + INFF_EVENTING_MASK_LEN; + eventmask_msg = kzalloc(msglen, GFP_KERNEL); + if (!eventmask_msg) + return -ENOMEM; + + for (i = 0; i < INFF_E_LAST; i++) { + if (ifp->drvr->fweh.evt_handler[i]) { + inff_dbg(EVENT, "enable event %s\n", + inff_fweh_event_name(i)); + setbit(eventmask_msg->mask, i); + } + } + + /* want to handle IF event as well */ + inff_dbg(EVENT, "enable event IF\n"); + setbit(eventmask_msg->mask, INFF_E_IF); + + eventmask_msg->ver = EVENTMSGS_VER; + eventmask_msg->command = EVENTMSGS_SET_MASK; + eventmask_msg->len = INFF_EVENTING_MASK_LEN; + + err = inff_fil_iovar_data_set(ifp, "event_msgs_ext", eventmask_msg, + msglen); + if (!err) + goto end; + + err = inff_fil_iovar_data_set(ifp, "event_msgs", eventmask_msg->mask, + INFF_EVENTING_MASK_LEN); + if (err) + iphy_err(drvr, "Set event_msgs error (%d)\n", err); + +end: + kfree(eventmask_msg); + return err; +} + +/** + * inff_fweh_process_event() - process skb as firmware 
event. + * + * @drvr: driver information object. + * @event_packet: event packet to process. + * @packet_len: length of the packet + * @gfp: memory allocation flags. + * + * If the packet buffer contains a firmware event message it will + * dispatch the event to a registered handler (using worker). + */ +void inff_fweh_process_event(struct inff_pub *drvr, + struct inff_event *event_packet, + u32 packet_len, gfp_t gfp) +{ + enum inff_fweh_event_code code; + struct inff_fweh_info *fweh = &drvr->fweh; + struct inff_fweh_queue_item *event; + void *data; + u32 datalen; + + /* get event info */ + code = get_unaligned_be32(&event_packet->msg.event_type); + datalen = get_unaligned_be32(&event_packet->msg.datalen); + data = &event_packet[1]; + + if (code >= INFF_E_LAST) + return; + + if (code != INFF_E_IF && !fweh->evt_handler[code]) + return; + + if (datalen > INFF_DCMD_MAXLEN || + datalen + sizeof(*event_packet) > packet_len) + return; + + event = kzalloc(struct_size(event, data, datalen), gfp); + if (!event) + return; + + event->datalen = datalen; + event->code = code; + event->ifidx = event_packet->msg.ifidx; + + /* use memcpy to get aligned event message */ + memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg)); + memcpy(event->data, data, datalen); + memcpy(event->ifaddr, event_packet->eth.h_dest, ETH_ALEN); + + inff_fweh_queue_event(fweh, event); +} + +void inff_fweh_process_skb(struct inff_pub *drvr, struct sk_buff *skb, u16 stype, + gfp_t gfp) +{ + struct inff_event *event_packet; + u16 subtype, usr_stype; + + /* only process events when protocol matches */ + if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL)) + return; + + if ((skb->len + ETH_HLEN) < sizeof(*event_packet)) + return; + + event_packet = (struct inff_event *)skb_mac_header(skb); + + /* check subtype if needed */ + if (unlikely(stype)) { + subtype = get_unaligned_be16(&event_packet->hdr.subtype); + if (subtype != stype) + return; + } + + if (memcmp("\x00\x10\x18", &event_packet->hdr.oui[0], + 
sizeof(event_packet->hdr.oui))) + return; + + /* final match on usr_subtype */ + usr_stype = get_unaligned_be16(&event_packet->hdr.usr_subtype); + if (usr_stype != INFILCP_INF_SUBTYPE_EVENT) + return; + + inff_fweh_process_event(drvr, event_packet, skb->len + ETH_HLEN, gfp); +} + +/* PFN result doesn't have all the info which are required by the supplicant + * (For e.g IEs) Do a target Escan so that sched scan results are reported + * via wl_inform_single_bss in the required format. Escan does require the + * scan request in the form of cfg80211_scan_request. For timebeing, create + * cfg80211_scan_request one out of the received PNO event. + */ +s32 +inff_notify_sched_scan_results(struct inff_if *ifp, + const struct inff_event_msg *e, void *data) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_cfg80211_info *cfg = drvr->config; + struct inff_pno_net_info_le *netinfo, *netinfo_start; + struct cfg80211_scan_request *request = NULL; + struct wiphy *wiphy = cfg_to_wiphy(cfg); + int i, err = 0; + struct inff_pno_scanresults_le *pfn_result; + u32 bucket_map; + u32 result_count; + u32 status; + u32 datalen; + + inff_dbg(SCAN, "Enter\n"); + + if (e->datalen < (sizeof(*pfn_result) + sizeof(*netinfo))) { + inff_dbg(SCAN, "Event data too small. Ignore\n"); + return 0; + } + + if (e->event_code == INFF_E_PFN_NET_LOST) { + inff_dbg(SCAN, "PFN NET LOST event. Do Nothing\n"); + return 0; + } + + pfn_result = (struct inff_pno_scanresults_le *)data; + result_count = le32_to_cpu(pfn_result->count); + status = le32_to_cpu(pfn_result->status); + + /* PFN event is limited to fit 512 bytes so we may get + * multiple NET_FOUND events. For now place a warning here. + */ + WARN_ON(status != INFF_PNO_SCAN_COMPLETE); + inff_dbg(SCAN, "PFN NET FOUND event. count: %d\n", result_count); + if (!result_count) { + iphy_err(drvr, "FALSE PNO Event. 
(pfn_count == 0)\n"); + goto out_err; + } + + netinfo_start = inff_get_netinfo_array(pfn_result); + /* To make sure e->datalen is big enough */ + if (e->datalen >= ((void *)netinfo_start - (void *)pfn_result)) { + u32 cnt_sanity = ~0; + + datalen = e->datalen - ((void *)netinfo_start - (void *)pfn_result); + if (datalen < result_count * sizeof(*netinfo) || + (result_count > cnt_sanity / sizeof(*netinfo))) { + inff_err("insufficient event data\n"); + goto out_err; + } + } else { + inff_err("insufficient event data\n"); + goto out_err; + } + + request = inff_alloc_internal_escan_request(wiphy, + result_count); + if (!request) { + err = -ENOMEM; + goto out_err; + } + + bucket_map = 0; + for (i = 0; i < result_count; i++) { + netinfo = &netinfo_start[i]; + + if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN) + netinfo->SSID_len = IEEE80211_MAX_SSID_LEN; + inff_dbg(SCAN, "SSID:%.32s Channel:%d\n", + netinfo->SSID, netinfo->channel); + bucket_map |= inff_pno_get_bucket_map(cfg->pno, netinfo); + err = inff_internal_escan_add_info(request, + netinfo->SSID, + netinfo->SSID_len, + netinfo->channel); + if (err) + goto out_err; + } + + if (!bucket_map) + goto free_req; + + err = inff_start_internal_escan(ifp, bucket_map, request); + if (!err) + goto free_req; + +out_err: + cfg80211_sched_scan_stopped(wiphy, 0); +free_req: + kfree(request); + return err; +} + +static s32 +inff_notify_connect_status_ap(struct inff_cfg80211_info *cfg, + struct net_device *ndev, + const struct inff_event_msg *e, void *data) +{ + struct inff_pub *drvr = cfg->pub; + static int generation; + u32 event = e->event_code; + u32 reason = e->reason; + struct station_info *sinfo; + + inff_dbg(CONN, "event %s (%u), reason %d\n", + inff_fweh_event_name(event), event, reason); + if (event == INFF_E_LINK && reason == INFF_E_REASON_LINK_BSSCFG_DIS && + ndev != cfg_to_ndev(cfg)) { + inff_dbg(CONN, "AP mode link down\n"); + complete(&cfg->vif_disabled); + return 0; + } + + if ((event == INFF_E_ASSOC_IND || event == 
INFF_E_REASSOC_IND) && + reason == INFF_E_STATUS_SUCCESS) { + if (!data) { + iphy_err(drvr, "No IEs present in ASSOC/REASSOC_IND\n"); + return -EINVAL; + } + + sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL); + if (!sinfo) + return -ENOMEM; + + sinfo->assoc_req_ies = data; + sinfo->assoc_req_ies_len = e->datalen; + generation++; + sinfo->generation = generation; + cfg80211_new_sta(ndev, e->addr, sinfo, GFP_KERNEL); + + kfree(sinfo); + } else if ((event == INFF_E_DISASSOC_IND) || + (event == INFF_E_DEAUTH_IND) || + (event == INFF_E_DEAUTH)) { + cfg80211_del_sta(ndev, e->addr, GFP_KERNEL); + } + return 0; +} + +s32 +inff_notify_roaming_status(struct inff_if *ifp, + const struct inff_event_msg *e, void *data) +{ + struct inff_cfg80211_info *cfg = ifp->drvr->config; + u32 event = e->event_code; + u32 status = e->status; + + if (event == INFF_E_ROAM && status == INFF_E_STATUS_SUCCESS) { + if (test_bit(INFF_VIF_STATUS_CONNECTED, + &ifp->vif->sme_state)) { + if (ifp->vif->profile.is_ft) + inff_bss_roaming_done(cfg, ifp->ndev, e); + } else { + inff_bss_connect_done(cfg, ifp->ndev, e, true); + inff_net_setcarrier(ifp, true); + } + } + + return 0; +} + +s32 +inff_notify_mic_status(struct inff_if *ifp, + const struct inff_event_msg *e, void *data) +{ + u16 flags = e->flags; + enum nl80211_key_type key_type; + + if (flags & INFF_EVENT_MSG_GROUP) + key_type = NL80211_KEYTYPE_GROUP; + else + key_type = NL80211_KEYTYPE_PAIRWISE; + + cfg80211_michael_mic_failure(ifp->ndev, (u8 *)&e->addr, key_type, -1, + NULL, GFP_KERNEL); + + return 0; +} + +s32 inff_notify_rssi(struct inff_if *ifp, const struct inff_event_msg *e, void *data) +{ + struct inff_cfg80211_vif *vif = ifp->vif; + struct inff_rssi_be *info = data; + s32 rssi, snr = 0, noise = 0; + s32 low, high, last; + + if (e->datalen >= sizeof(*info)) { + rssi = be32_to_cpu(info->rssi); + snr = be32_to_cpu(info->snr); + noise = be32_to_cpu(info->noise); + } else if (e->datalen >= sizeof(rssi)) { + rssi = be32_to_cpu(*(__be32 *)data); + } 
else { + inff_err("insufficient RSSI event data\n"); + return 0; + } + + low = vif->cqm_rssi_low; + high = vif->cqm_rssi_high; + last = vif->cqm_rssi_last; + + inff_dbg(TRACE, "rssi=%d snr=%d noise=%d low=%d high=%d last=%d\n", + rssi, snr, noise, low, high, last); + + vif->cqm_rssi_last = rssi; + + if (rssi <= low || rssi == 0) { + inff_dbg(INFO, "LOW rssi=%d\n", rssi); + cfg80211_cqm_rssi_notify(ifp->ndev, + NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW, + rssi, GFP_KERNEL); + } else if (rssi > high) { + inff_dbg(INFO, "HIGH rssi=%d\n", rssi); + cfg80211_cqm_rssi_notify(ifp->ndev, + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH, + rssi, GFP_KERNEL); + } + + return 0; +} + +s32 inff_notify_vif_event(struct inff_if *ifp, const struct inff_event_msg *e, void *data) +{ + struct inff_cfg80211_info *cfg = ifp->drvr->config; + struct inff_if_event *ifevent = (struct inff_if_event *)data; + struct inff_cfg80211_vif_event *event = &cfg->vif_event; + struct inff_cfg80211_vif *vif; + enum nl80211_iftype iftype = NL80211_IFTYPE_UNSPECIFIED; + bool vif_pend = false; + int err; + + inff_dbg(TRACE, "Enter: action %u ifidx %u bsscfgidx %u flags %u role %u\n", + ifevent->action, ifevent->ifidx, ifevent->bsscfgidx, ifevent->flags, + ifevent->role); + + spin_lock(&event->vif_event_lock); + + event->action = ifevent->action; + vif = event->vif; + + switch (ifevent->action) { + case INFF_E_IF_ADD: + /* waiting process may have timed out */ + if (!vif) { + /* handle IF_ADD event from firmware */ + spin_unlock(&event->vif_event_lock); + vif_pend = true; + if (ifevent->role == WL_INTERFACE_CREATE_STA) + iftype = NL80211_IFTYPE_STATION; + else if (ifevent->role == WL_INTERFACE_CREATE_AP) + iftype = NL80211_IFTYPE_AP; + else + vif_pend = false; + + if (vif_pend) { + vif = inff_alloc_vif(cfg, iftype); + if (IS_ERR(vif)) { + inff_err("Role:%d failed to alloc vif\n", + ifevent->role); + return PTR_ERR(vif); + } + } else { + inff_err("Invalid Role:%d\n", ifevent->role); + return -EBADF; + } + } + + ifp->vif = 
vif; + vif->ifp = ifp; + if (ifp->ndev) { + vif->wdev.netdev = ifp->ndev; + ifp->ndev->ieee80211_ptr = &vif->wdev; + SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy)); + } + + if (vif_pend) { + err = inff_net_attach(ifp, false); + if (err) { + inff_err("netdevice register failed with err:%d\n", + err); + inff_free_vif(vif); + free_netdev(ifp->ndev); + } + return err; + } + + spin_unlock(&event->vif_event_lock); + wake_up(&event->vif_wq); + return 0; + + case INFF_E_IF_DEL: + spin_unlock(&event->vif_event_lock); + /* event may not be upon user request */ + if (inff_cfg80211_vif_event_armed(cfg)) + wake_up(&event->vif_wq); + return 0; + + case INFF_E_IF_CHANGE: + spin_unlock(&event->vif_event_lock); + wake_up(&event->vif_wq); + return 0; + + default: + spin_unlock(&event->vif_event_lock); + break; + } + return -EINVAL; +} + +s32 +inff_notify_ext_auth_request(struct inff_if *ifp, + const struct inff_event_msg *e, void *data) +{ + struct inff_pub *drvr = ifp->drvr; + struct cfg80211_external_auth_params params; + struct inff_auth_req_status_le *auth_req = + (struct inff_auth_req_status_le *)data; + s32 err = 0; + struct inff_auth_req_status_info_le_v2 *auth_req_v2 = NULL; + struct inff_bss_info_le *bi = NULL; + struct inff_cfg80211_info *cfg = drvr->config; + + inff_dbg(INFO, "Enter: event %s (%d) received\n", + inff_fweh_event_name(e->event_code), e->event_code); + + if (drvr->wlc_ver.wlc_ver_major > INFF_AUTH_STATUS_V2_FW_MAJOR || + (drvr->wlc_ver.wlc_ver_major == INFF_AUTH_STATUS_V2_FW_MAJOR && + drvr->wlc_ver.wlc_ver_minor >= INFF_AUTH_STATUS_V2_FW_MINOR)) { + auth_req_v2 = (struct inff_auth_req_status_info_le_v2 *)data; + if (e->datalen < sizeof(*auth_req_v2)) { + inff_err("Ext auth req event data too small. Ignoring event\n"); + return -EINVAL; + } + /* Inform bss info to cfg80211 layer as during roaming + * Supplicant might not have scan results,if scan results + * are not found the SAE auth uses HNP by default and + * Target AP will reject the connection. 
+ */ + if (e->datalen > sizeof(*auth_req_v2)) { + bi = (struct inff_bss_info_le *)&auth_req_v2->bss_info_le; + if (bi) { + err = inff_inform_single_bss(cfg, bi); + if (err) { + inff_err("failed to update bss info, err=%d\n", err); + return err; + } + } else { + inff_err("External Auth request bss info is null\n"); + return -EINVAL; + } + } + /* 10 ms delay to update results in cfg80211 */ + inff_delay(10); + memset(¶ms, 0, sizeof(params)); + params.action = NL80211_EXTERNAL_AUTH_START; + params.key_mgmt_suite = ntohl(WLAN_AKM_SUITE_SAE); + params.status = WLAN_STATUS_SUCCESS; + params.ssid.ssid_len = min_t(u32, IEEE80211_MAX_SSID_LEN, auth_req_v2->ssid_len); + memcpy(params.ssid.ssid, auth_req_v2->ssid, params.ssid.ssid_len); + memcpy(params.bssid, auth_req_v2->peer_mac, ETH_ALEN); + } else { + if (e->datalen < sizeof(*auth_req)) { + iphy_err(drvr, "Event %s (%d) data too small. Ignore\n", + inff_fweh_event_name(e->event_code), e->event_code); + return -EINVAL; + } + + memset(¶ms, 0, sizeof(params)); + params.action = NL80211_EXTERNAL_AUTH_START; + params.key_mgmt_suite = ntohl(WLAN_AKM_SUITE_SAE); + params.status = WLAN_STATUS_SUCCESS; + params.ssid.ssid_len = min_t(u32, 32, le32_to_cpu(auth_req->ssid_len)); + memcpy(params.ssid.ssid, auth_req->ssid, params.ssid.ssid_len); + memcpy(params.bssid, auth_req->peer_mac, ETH_ALEN); + } + err = cfg80211_external_auth_request(ifp->ndev, ¶ms, GFP_ATOMIC); + if (err) + iphy_err(drvr, "Ext Auth request to supplicant failed (%d)\n", + err); + + return err; +} + +s32 +inff_notify_auth_frame_rx(struct inff_if *ifp, + const struct inff_event_msg *e, void *data) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_cfg80211_info *cfg = drvr->config; + struct wireless_dev *wdev; + u32 mgmt_frame_len = e->datalen - sizeof(struct inff_rx_mgmt_data); + struct inff_rx_mgmt_data *rxframe = (struct inff_rx_mgmt_data *)data; + u8 *frame = (u8 *)(rxframe + 1); + struct inff_chan ch; + struct ieee80211_mgmt *mgmt_frame; + s32 freq; + + 
inff_dbg(INFO, "Enter: event %s (%d) received\n", + inff_fweh_event_name(e->event_code), e->event_code); + + if (e->datalen < sizeof(*rxframe)) { + iphy_err(drvr, "Event %s (%d) data too small. Ignore\n", + inff_fweh_event_name(e->event_code), e->event_code); + return -EINVAL; + } + + wdev = &ifp->vif->wdev; + WARN_ON(!wdev); + + ch.chspec = be16_to_cpu(rxframe->chanspec); + cfg->d11inf.decchspec(&ch); + + mgmt_frame = kzalloc(mgmt_frame_len, GFP_KERNEL); + if (!mgmt_frame) + return -ENOMEM; + + mgmt_frame->frame_control = cpu_to_le16(IEEE80211_STYPE_AUTH); + memcpy(mgmt_frame->da, ifp->mac_addr, ETH_ALEN); + memcpy(mgmt_frame->sa, e->addr, ETH_ALEN); + inff_fil_cmd_data_get(ifp, INFF_C_GET_BSSID, mgmt_frame->bssid, + ETH_ALEN); + frame += offsetof(struct ieee80211_mgmt, u); + unsafe_memcpy(&mgmt_frame->u, frame, + mgmt_frame_len - offsetof(struct ieee80211_mgmt, u), + /* alloc enough buf*/); + + freq = ieee80211_channel_to_frequency(ch.control_ch_num, + inff_d11_chan_band_to_nl80211(ch.band)); + + cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, + NL80211_RXMGMT_FLAG_EXTERNAL_AUTH); + kfree(mgmt_frame); + return 0; +} + +s32 +inff_notify_mgmt_tx_status(struct inff_if *ifp, + const struct inff_event_msg *e, void *data) +{ + struct inff_cfg80211_vif *vif = ifp->vif; + u32 *packet_id = (u32 *)data; + + inff_dbg(INFO, "Enter: event %s (%d), status=%d\n", + inff_fweh_event_name(e->event_code), e->event_code, + e->status); + + if (!test_bit(INFF_MGMT_TX_SEND_FRAME, &vif->mgmt_tx_status) || + (*packet_id != vif->mgmt_tx_id)) + return 0; + + if (e->event_code == INFF_E_MGMT_FRAME_TXSTATUS) { + if (e->status == INFF_E_STATUS_SUCCESS) + set_bit(INFF_MGMT_TX_ACK, &vif->mgmt_tx_status); + else + set_bit(INFF_MGMT_TX_NOACK, &vif->mgmt_tx_status); + } else { + set_bit(INFF_MGMT_TX_OFF_CHAN_COMPLETED, &vif->mgmt_tx_status); + } + + complete(&vif->mgmt_tx); + return 0; +} + +s32 +inff_notify_rssi_change_ind(struct inff_if *ifp, + const struct inff_event_msg *e, 
void *data) +{ + struct inff_cfg80211_info *cfg = ifp->drvr->config; + struct wl_event_data_rssi *value = (struct wl_event_data_rssi *)data; + s32 rssi = 0; + + inff_dbg(INFO, "Enter: event %s (%d), status=%d\n", + inff_fweh_event_name(e->event_code), e->event_code, + e->status); + + if (!cfg->cqm_info.enable) + return 0; + + rssi = ntohl(value->rssi); + inff_dbg(TRACE, "rssi: %d, threshold: %d, send event(%s)\n", + rssi, cfg->cqm_info.rssi_threshold, + rssi > cfg->cqm_info.rssi_threshold ? "HIGH" : "LOW"); + cfg80211_cqm_rssi_notify(cfg_to_ndev(cfg), + (rssi > cfg->cqm_info.rssi_threshold ? + NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH : + NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW), + rssi, GFP_KERNEL); + + return 0; +} + +s32 +inff_notify_beacon_loss(struct inff_if *ifp, + const struct inff_event_msg *e, void *data) +{ + struct inff_cfg80211_info *cfg = ifp->drvr->config; + struct inff_cfg80211_profile *profile = &ifp->vif->profile; + struct cfg80211_bss *bss; + struct net_device *ndev = ifp->ndev; + + inff_dbg(INFO, "Enter: event %s (%d), status=%d\n", + inff_fweh_event_name(e->event_code), e->event_code, + e->status); + + switch (ifp->drvr->settings->roamoff) { + case INFF_ROAMOFF_EN_BCNLOST_MSG: + /* On beacon loss event, Supplicant triggers new scan request + * with NL80211_SCAN_FLAG_FLUSH Flag set, but lost AP bss entry + * still remained as it is held by cfg as associated. Unlinking this + * current BSS from cfg cached bss list on beacon loss event here, + * would allow supplicant to receive new scanned entries + * without current bss and select new bss to trigger roam. 
+		 */
+		bss = cfg80211_get_bss(cfg->wiphy, NULL, profile->bssid, 0, 0,
+				       IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+		if (bss) {
+			cfg80211_unlink_bss(cfg->wiphy, bss);
+			cfg80211_put_bss(cfg->wiphy, bss);
+		}
+
+		cfg80211_cqm_beacon_loss_notify(cfg_to_ndev(cfg), GFP_KERNEL);
+		break;
+	case INFF_ROAMOFF_EN_DISCONNECT_EVT:
+		inff_link_down(ifp->vif,
+			       WLAN_REASON_UNSPECIFIED,
+			       true);
+		inff_init_prof(ndev_to_prof(ndev));
+		if (ndev != cfg_to_ndev(cfg))
+			complete(&cfg->vif_disabled);
+		inff_net_setcarrier(ifp, false);
+		break;
+
+	case INFF_ROAMOFF_DISABLE:
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * inff_notify_ext_assoc_frame_rx() - forward (re)assoc request IEs to
+ * cfg80211 as an OWE info update.
+ *
+ * @ifp: interface the frame was received on.
+ * @e: firmware event message.
+ * @data: event payload (struct inff_rx_mgmt_data followed by the frame).
+ *
+ * Return: 0 on success, negative errno on malformed event or OOM.
+ */
+s32
+inff_notify_ext_assoc_frame_rx(struct inff_if *ifp,
+			       const struct inff_event_msg *e, void *data)
+{
+	struct inff_pub *drvr = ifp->drvr;
+	struct net_device *ndev = ifp->ndev;
+	u32 mgmt_frame_len = e->datalen - sizeof(struct inff_rx_mgmt_data);
+	struct inff_rx_mgmt_data *rxframe = (struct inff_rx_mgmt_data *)data;
+	u8 *frame = (u8 *)(rxframe + 1);
+	struct cfg80211_update_owe_info owe_info;
+	struct ieee80211_mgmt *mgmt_frame = (struct ieee80211_mgmt *)frame;
+
+	inff_dbg(INFO, "Enter: event %s (%d) received\n",
+		 inff_fweh_event_name(e->event_code), e->event_code);
+
+	if (e->datalen < sizeof(*rxframe)) {
+		iphy_err(drvr, "Event %s (%d) data too small. Ignore\n",
+			 inff_fweh_event_name(e->event_code), e->event_code);
+		return -EINVAL;
+	}
+
+	memset(&owe_info, 0, sizeof(struct cfg80211_update_owe_info));
+	owe_info.ie_len = mgmt_frame_len - offsetof(struct ieee80211_mgmt, u);
+	memcpy(owe_info.peer, e->addr, ETH_ALEN);
+	owe_info.ie = kzalloc(owe_info.ie_len, GFP_KERNEL);
+	owe_info.assoc_link_id = -1;
+
+	if (!owe_info.ie)
+		return -ENOMEM;
+
+	if (ieee80211_is_assoc_req(mgmt_frame->frame_control)) {
+		frame += offsetof(struct ieee80211_mgmt, u.assoc_req.variable);
+	} else if (ieee80211_is_reassoc_req(mgmt_frame->frame_control)) {
+		frame += offsetof(struct ieee80211_mgmt, u.reassoc_req.variable);
+	} else {
+		iphy_err(drvr, "unexpected FC:0x%x\n", mgmt_frame->frame_control);
+		/* fix: do not leak the IE buffer on the error path */
+		kfree(owe_info.ie);
+		return -EINVAL;
+	}
+
+	/* The IEs start at u.(re)assoc_req.variable, past the fixed fields,
+	 * but ie_len above was computed from offsetof(..., u). Shrink it to
+	 * the data actually present so the copy below cannot read past the
+	 * end of the event payload (and the frame is long enough at all).
+	 */
+	if (mgmt_frame_len < (u32)(frame - (u8 *)mgmt_frame)) {
+		kfree(owe_info.ie);
+		return -EINVAL;
+	}
+	owe_info.ie_len = mgmt_frame_len - (u32)(frame - (u8 *)mgmt_frame);
+
+	memcpy((u8 *)owe_info.ie, frame, owe_info.ie_len);
+	cfg80211_update_owe_info_event(ndev, &owe_info, GFP_KERNEL);
+
+	kfree(owe_info.ie);
+	return 0;
+}
+
+/**
+ * inff_notify_assoc_req_ie() - cache association request IEs reported by
+ * firmware for the ongoing or established connection.
+ *
+ * @ifp: interface the event belongs to.
+ * @e: firmware event message.
+ * @data: raw assoc request IE blob of length e->datalen.
+ *
+ * On kmemdup failure the cached length is reset to 0. Always returns 0.
+ */
+s32
+inff_notify_assoc_req_ie(struct inff_if *ifp, const struct inff_event_msg *e, void *data)
+{
+	struct inff_cfg80211_info *cfg = ifp->drvr->config;
+	struct inff_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
+	u32 event = e->event_code;
+	u32 status = e->status;
+	u32 req_len = e->datalen;
+
+	inff_dbg(INFO, "Enter: event %s (%d), status=%d\n",
+		 inff_fweh_event_name(e->event_code), e->event_code,
+		 e->status);
+
+	if (event == INFF_E_ASSOC_REQ_IE && status == INFF_E_STATUS_SUCCESS) {
+		if (test_bit(INFF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state) ||
+		    test_bit(INFF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) {
+			inff_clear_assoc_req_ie(cfg);
+
+			conn_info->req_ie_len = req_len;
+			conn_info->req_ie = kmemdup(data, conn_info->req_ie_len, GFP_KERNEL);
+			if (!conn_info->req_ie)
+				conn_info->req_ie_len = 0;
+			inff_dbg(CONN, "req ie (%p) req len (%d)\n", conn_info->req_ie,
+				 conn_info->req_ie_len);
+		} else {
+			inff_err("No (Re)connection is in progress, Ignore event\n");
+		}
+	} else {
+		inff_err("Failed to handle the event\n");
+	}
+
+	return 0;
+}
+
+s32 +inff_notify_assoc_resp_ie(struct inff_if *ifp, + const struct inff_event_msg *e, void *data) +{ + struct inff_cfg80211_info *cfg = ifp->drvr->config; + struct inff_cfg80211_connect_info *conn_info = cfg_to_conn(cfg); + struct inff_cfg80211_edcf_acparam edcf_acparam_info[EDCF_AC_COUNT]; + u32 event = e->event_code; + u32 status = e->status; + u32 resp_len = e->datalen; + s32 err = 0; + + inff_dbg(INFO, "Enter: event %s (%d), status=%d\n", + inff_fweh_event_name(e->event_code), e->event_code, + e->status); + + if (event == INFF_E_ASSOC_RESP_IE && status == INFF_E_STATUS_SUCCESS) { + if (test_bit(INFF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state) || + test_bit(INFF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) { + inff_clear_assoc_resp_ie(cfg); + + conn_info->resp_ie_len = resp_len; + conn_info->resp_ie = kmemdup(data, conn_info->resp_ie_len, GFP_KERNEL); + if (!conn_info->resp_ie) + conn_info->resp_ie_len = 0; + + inff_dbg(CONN, "resp ie (%p) resp len (%d)\n", conn_info->resp_ie, + conn_info->resp_ie_len); + err = inff_fil_iovar_data_get(ifp, "wme_ac_sta", + edcf_acparam_info, + sizeof(edcf_acparam_info)); + if (err) { + inff_err("could not get wme_ac_sta (%d)\n", err); + inff_clear_assoc_resp_ie(cfg); + } else { + inff_wifi_prioritize_acparams(edcf_acparam_info, + cfg->ac_priority); + } + } else { + inff_err("No (Re)connection is in progress, Ignore event\n"); + } + } else { + inff_err("Failed to handle the event\n"); + } + + return 0; +} + +static u16 inff_map_fw_linkdown_reason(const struct inff_event_msg *e) +{ + u16 reason; + + switch (e->event_code) { + case INFF_E_DEAUTH: + case INFF_E_DEAUTH_IND: + case INFF_E_DISASSOC_IND: + reason = e->reason; + break; + case INFF_E_LINK: + default: + reason = 0; + break; + } + return reason; +} + +s32 +inff_notify_connect_status(struct inff_if *ifp, + const struct inff_event_msg *e, void *data) +{ + struct inff_cfg80211_info *cfg = ifp->drvr->config; + struct net_device *ndev = ifp->ndev; + struct inff_cfg80211_profile 
*profile = &ifp->vif->profile; + struct ieee80211_channel *chan; + s32 err = 0; + + if (e->event_code == INFF_E_DEAUTH || + e->event_code == INFF_E_DEAUTH_IND || + e->event_code == INFF_E_DISASSOC_IND || + (e->event_code == INFF_E_LINK && !e->flags)) { + inff_proto_delete_peer(ifp->drvr, ifp->ifidx, (u8 *)e->addr); + } + + if (inff_is_apmode(ifp->vif)) { + if (e->event_code == INFF_E_ASSOC_IND || + e->event_code == INFF_E_REASSOC_IND) { + inff_findadd_sta(ifp, e->addr); + } else if ((e->event_code == INFF_E_DISASSOC_IND) || + (e->event_code == INFF_E_DEAUTH_IND) || + (e->event_code == INFF_E_DEAUTH)) { + inff_del_sta(ifp, e->addr); + } + err = inff_notify_connect_status_ap(cfg, ndev, e, data); + inff_mchan_config(cfg); + } else if (inff_is_linkup(ifp->vif, e)) { + inff_dbg(CONN, "Linkup\n"); + if (inff_is_ibssmode(ifp->vif)) { + inff_inform_ibss(cfg, ndev, e->addr); + chan = ieee80211_get_channel(cfg->wiphy, cfg->channel); + memcpy(profile->bssid, e->addr, ETH_ALEN); + cfg80211_ibss_joined(ndev, e->addr, chan, GFP_KERNEL); + clear_bit(INFF_VIF_STATUS_CONNECTING, + &ifp->vif->sme_state); + set_bit(INFF_VIF_STATUS_CONNECTED, + &ifp->vif->sme_state); + } else { + if (e->event_code == INFF_E_LINK && !profile->is_ft && + test_bit(INFF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state)) { + inff_bss_roaming_done(cfg, ndev, e); + } else { + inff_bss_connect_done(cfg, ndev, e, true); + } + } + inff_net_setcarrier(ifp, true); + inff_mchan_config(cfg); + } else if (inff_is_linkdown(ifp->vif, e)) { + inff_dbg(CONN, "Linkdown\n"); + if (!inff_is_ibssmode(ifp->vif) && + (test_bit(INFF_VIF_STATUS_CONNECTED, + &ifp->vif->sme_state) || + test_bit(INFF_VIF_STATUS_CONNECTING, + &ifp->vif->sme_state))) { + if (test_bit(INFF_VIF_STATUS_CONNECTED, + &ifp->vif->sme_state) && + memcmp(profile->bssid, e->addr, ETH_ALEN)) + return err; + + inff_bss_connect_done(cfg, ndev, e, false); + inff_link_down(ifp->vif, + inff_map_fw_linkdown_reason(e), + e->event_code & + (INFF_E_DEAUTH_IND | + 
INFF_E_DISASSOC_IND) + ? false : true); + inff_init_prof(ndev_to_prof(ndev)); + if (ndev != cfg_to_ndev(cfg)) + complete(&cfg->vif_disabled); + inff_net_setcarrier(ifp, false); + } + inff_proto_cleanup_if(ifp->drvr, ifp); + } else if (inff_is_nonetwork(cfg, e)) { + if (inff_is_ibssmode(ifp->vif)) + clear_bit(INFF_VIF_STATUS_CONNECTING, + &ifp->vif->sme_state); + else + inff_bss_connect_done(cfg, ndev, e, false); + } + + return err; +} + +s32 +inff_notify_csa_completion_ind(struct inff_if *ifp, + const struct inff_event_msg *e, void *data) +{ + struct inff_cfg80211_info *cfg; + struct net_device *ndev; + struct wiphy *wiphy; + struct cfg80211_chan_def chandef; + struct wireless_dev *wdev; + + int error = 0; + + inff_dbg(TRACE, "Enter\n"); + + if (unlikely(e->status)) { + inff_err("status:0x%x\n", e->status); + return -EINVAL; + } + + if (!ifp) + return -EINVAL; + else if (!ifp->drvr) + return -EINVAL; + + cfg = ifp->drvr->config; + ndev = ifp->ndev; + wiphy = cfg_to_wiphy(cfg); + + if (!cfg || !ndev || !wiphy) + return -EINVAL; + + wdev = ndev->ieee80211_ptr; + if (!wdev) + return -EINVAL; + + if (!wdev->u.ibss.current_bss) { + inff_err("Got csa_completion event while no connection\n"); + return -EINVAL; + } + + memset(&chandef, 0, sizeof(chandef)); + /* Reuse cfg80211 call to get chandef */ + error = inff_cfg80211_get_channel(wiphy, wdev, 0, &chandef); + if (unlikely(error)) { + inff_err("Get chandef error: %d\n", error); + return -EINVAL; + } + + /* Send channel switch notification only for STA mode */ + if (wdev->iftype == NL80211_IFTYPE_STATION) { + cfg80211_ch_switch_notify(ndev, &chandef, 0); + inff_dbg(TRACE, "CSA sent upstream\n"); + } + + return 0; +} + +void inff_register_event_handlers(struct inff_cfg80211_info *cfg) +{ + struct inff_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + struct wl_rssi_event rssi_event = {}; + int err = 0; + + /* get supported version from firmware side */ + err = inff_fil_iovar_data_get(ifp, "rssi_event", &rssi_event, + 
sizeof(rssi_event)); + if (err) + inff_err("fail to get supported rssi_event version, err=%d\n", err); + + inff_fweh_register(cfg->pub, INFF_E_LINK, + inff_notify_connect_status); + inff_fweh_register(cfg->pub, INFF_E_DEAUTH_IND, + inff_notify_connect_status); + inff_fweh_register(cfg->pub, INFF_E_DEAUTH, + inff_notify_connect_status); + inff_fweh_register(cfg->pub, INFF_E_DISASSOC_IND, + inff_notify_connect_status); + inff_fweh_register(cfg->pub, INFF_E_ASSOC_IND, + inff_notify_connect_status); + inff_fweh_register(cfg->pub, INFF_E_REASSOC_IND, + inff_notify_connect_status); + inff_fweh_register(cfg->pub, INFF_E_ROAM, + inff_notify_roaming_status); + inff_fweh_register(cfg->pub, INFF_E_MIC_ERROR, + inff_notify_mic_status); + inff_fweh_register(cfg->pub, INFF_E_SET_SSID, + inff_notify_connect_status); + inff_fweh_register(cfg->pub, INFF_E_PFN_NET_FOUND, + inff_notify_sched_scan_results); + inff_fweh_register(cfg->pub, INFF_E_SA_COMPLETE_IND, + inff_notify_csa_completion_ind); + inff_fweh_register(cfg->pub, INFF_E_IF, + inff_notify_vif_event); + inff_fweh_register(cfg->pub, INFF_E_P2P_PROBEREQ_MSG, + inff_p2p_notify_rx_mgmt_p2p_probereq); + inff_fweh_register(cfg->pub, INFF_E_P2P_DISC_LISTEN_COMPLETE, + inff_p2p_notify_listen_complete); + inff_fweh_register(cfg->pub, INFF_E_ACTION_FRAME_RX, + inff_p2p_notify_action_frame_rx); + inff_fweh_register(cfg->pub, INFF_E_ACTION_FRAME_COMPLETE, + inff_p2p_notify_action_tx_complete); + inff_fweh_register(cfg->pub, INFF_E_ACTION_FRAME_OFF_CHAN_COMPLETE, + inff_p2p_notify_action_tx_complete); + inff_fweh_register(cfg->pub, INFF_E_PSK_SUP, + inff_notify_connect_status); + if (rssi_event.version == WL_RSSI_EVENT_VERSION_NEW) + inff_fweh_register(cfg->pub, INFF_E_RSSI, + inff_notify_rssi_change_ind); + else if (rssi_event.version == WL_RSSI_EVENT_VERSION_OLD) + inff_fweh_register(cfg->pub, INFF_E_RSSI, + inff_notify_rssi); + inff_fweh_register(cfg->pub, INFF_E_EXT_AUTH_REQ, + inff_notify_ext_auth_request); + 
inff_fweh_register(cfg->pub, INFF_E_EXT_AUTH_FRAME_RX, + inff_notify_auth_frame_rx); + inff_fweh_register(cfg->pub, INFF_E_MGMT_FRAME_TXSTATUS, + inff_notify_mgmt_tx_status); + inff_fweh_register(cfg->pub, INFF_E_MGMT_FRAME_OFF_CHAN_COMPLETE, + inff_notify_mgmt_tx_status); + inff_fweh_register(cfg->pub, INFF_E_BCNLOST_MSG, + inff_notify_beacon_loss); + + if (inff_feat_is_enabled(ifp, INFF_FEAT_TWT)) { + inff_fweh_register(cfg->pub, INFF_E_TWT_SETUP, + inff_notify_twt_event); + inff_fweh_register(cfg->pub, INFF_E_TWT_TEARDOWN, + inff_notify_twt_event); + } + inff_fweh_register(cfg->pub, INFF_E_EXT_ASSOC_FRAME_RX, + inff_notify_ext_assoc_frame_rx); + inff_fweh_register(cfg->pub, INFF_E_ASSOC_REQ_IE, + inff_notify_assoc_req_ie); + inff_fweh_register(cfg->pub, INFF_E_ASSOC_RESP_IE, + inff_notify_assoc_resp_ie); + if (inff_feat_is_enabled(ifp, INFF_FEAT_FTM)) + inff_fweh_register(cfg->pub, INFF_E_PROXD, + inff_notify_ftm_evt); + + if (inff_feat_is_enabled(ifp, INFF_FEAT_WLAN_SENSE)) { + inff_fweh_register(cfg->pub, INFF_E_WLAN_SENSE_ENABLED, + inff_notify_wlan_sense_event); + inff_fweh_register(cfg->pub, INFF_E_WLAN_SENSE_DATA, + inff_notify_wlan_sense_event); + inff_fweh_register(cfg->pub, INFF_E_WLAN_SENSE_DISABLED, + inff_notify_wlan_sense_event); + } + inff_fweh_register(cfg->pub, INFF_E_ICMP_ECHO_REQ, + inff_notify_icmp_echo_req_event); +} diff --git a/drivers/net/wireless/infineon/inffmac/fweh.h b/drivers/net/wireless/infineon/inffmac/fweh.h new file mode 100644 index 000000000000..fcbc00ba145d --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/fweh.h @@ -0,0 +1,402 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2012 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */
+
+#ifndef INFF_FWEH_H
+#define INFF_FWEH_H
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/if.h>
+/* NOTE(review): the four include targets above were garbled in this copy of
+ * the patch (angle-bracket contents stripped); reconstructed from symbol
+ * usage in this header (__be16/__be32, struct sk_buff, ETH_ALEN, IFNAMSIZ).
+ * Verify against the original submission.
+ */
+
+/* forward declarations */
+struct inff_pub;
+struct inff_if;
+struct inff_cfg80211_info;
+
+/* list of firmware events */
+/* firmware event codes sent by the dongle */
+enum inff_fweh_event_code {
+ INFF_E_SET_SSID = 0,
+ INFF_E_JOIN = 1,
+ INFF_E_START = 2,
+ INFF_E_AUTH = 3,
+ INFF_E_AUTH_IND = 4,
+ INFF_E_DEAUTH = 5,
+ INFF_E_DEAUTH_IND = 6,
+ INFF_E_ASSOC = 7,
+ INFF_E_ASSOC_IND = 8,
+ INFF_E_REASSOC = 9,
+ INFF_E_REASSOC_IND = 10,
+ INFF_E_DISASSOC = 11,
+ INFF_E_DISASSOC_IND = 12,
+ INFF_E_QUIET_START = 13,
+ INFF_E_QUIET_END = 14,
+ INFF_E_BEACON_RX = 15,
+ INFF_E_LINK = 16,
+ INFF_E_MIC_ERROR = 17,
+ INFF_E_NDIS_LINK = 18,
+ INFF_E_ROAM = 19,
+ INFF_E_TXFAIL = 20,
+ INFF_E_PMKID_CACHE = 21,
+ INFF_E_RETROGRADE_TSF = 22,
+ INFF_E_PRUNE = 23,
+ INFF_E_AUTOAUTH = 24,
+ INFF_E_EAPOL_MSG = 25,
+ INFF_E_SCAN_COMPLETE = 26,
+ INFF_E_ADDTS_IND = 27,
+ INFF_E_DELTS_IND = 28,
+ INFF_E_BCNSENT_IND = 29,
+ INFF_E_BCNRX_MSG = 30,
+ INFF_E_BCNLOST_MSG = 31,
+ INFF_E_ROAM_PREP = 32,
+ INFF_E_PFN_NET_FOUND = 33,
+ INFF_E_PFN_NET_LOST = 34,
+ INFF_E_RESET_COMPLETE = 35,
+ INFF_E_JOIN_START = 36,
+ INFF_E_ROAM_START = 37,
+ INFF_E_ASSOC_START = 38,
+ INFF_E_IBSS_ASSOC = 39,
+ INFF_E_RADIO = 40,
+ INFF_E_PSM_WATCHDOG = 41,
+ INFF_E_PROBREQ_MSG = 44,
+ INFF_E_SCAN_CONFIRM_IND = 45,
+ INFF_E_PSK_SUP = 46,
+ INFF_E_COUNTRY_CODE_CHANGED = 47,
+ INFF_E_EXCEEDED_MEDIUM_TIME = 48,
+ INFF_E_ICV_ERROR = 49,
+ INFF_E_UNICAST_DECODE_ERROR = 50,
+ INFF_E_MULTICAST_DECODE_ERROR = 51,
+ INFF_E_TRACE = 52,
+ INFF_E_IF = 54,
+ INFF_E_P2P_DISC_LISTEN_COMPLETE = 55,
+ INFF_E_RSSI = 56,
+ INFF_E_EXTLOG_MSG = 58,
+ INFF_E_ACTION_FRAME = 59,
+ INFF_E_ACTION_FRAME_COMPLETE = 60,
+ INFF_E_PRE_ASSOC_IND = 61,
+ INFF_E_PRE_REASSOC_IND = 62,
+ INFF_E_CHANNEL_ADOPTED = 63,
+ INFF_E_AP_STARTED = 64,
+ INFF_E_DFS_AP_STOP = 65,
+ INFF_E_DFS_AP_RESUME = 66,
+ INFF_E_ESCAN_RESULT = 69,
+ 
INFF_E_ACTION_FRAME_OFF_CHAN_COMPLETE = 70, + INFF_E_PROBERESP_MSG = 71, + INFF_E_P2P_PROBEREQ_MSG = 72, + INFF_E_DCS_REQUEST = 73, + INFF_E_FIFO_CREDIT_MAP = 74, + INFF_E_ACTION_FRAME_RX = 75, + INFF_E_SA_COMPLETE_IND = 80, + INFF_E_ASSOC_REQ_IE = 87, + INFF_E_ASSOC_RESP_IE = 88, + INFF_E_TDLS_PEER_EVENT = 92, + INFF_E_PROXD = 109, + INFF_E_BCMC_CREDIT_SUPPORT = 127, + INFF_E_ULP = 146, + INFF_E_TWT_SETUP = 157, + INFF_E_EXT_AUTH_REQ = 187, + INFF_E_EXT_AUTH_FRAME_RX = 188, + INFF_E_MGMT_FRAME_TXSTATUS = 189, + INFF_E_MGMT_FRAME_OFF_CHAN_COMPLETE = 190, + INFF_E_TWT_TEARDOWN = 195, + INFF_E_EXT_ASSOC_FRAME_RX = 196, + INFF_E_WLAN_SENSE_ENABLED = 198, + INFF_E_WLAN_SENSE_DATA = 199, + INFF_E_WLAN_SENSE_DISABLED = 200, + INFF_E_ICMP_ECHO_REQ = 202, + /* this determines event mask length which must match + * minimum length check in device firmware so it is + * hard-coded here. + */ + INFF_E_LAST = 203 +}; + +#define INFF_EVENTING_MASK_LEN DIV_ROUND_UP(INFF_E_LAST, 8) + +/* flags field values in struct inff_event_msg */ +#define INFF_EVENT_MSG_LINK 0x01 +#define INFF_EVENT_MSG_FLUSHTXQ 0x02 +#define INFF_EVENT_MSG_GROUP 0x04 + +/* status field values in struct inff_event_msg */ +#define INFF_E_STATUS_SUCCESS 0 +#define INFF_E_STATUS_FAIL 1 +#define INFF_E_STATUS_TIMEOUT 2 +#define INFF_E_STATUS_NO_NETWORKS 3 +#define INFF_E_STATUS_ABORT 4 +#define INFF_E_STATUS_NO_ACK 5 +#define INFF_E_STATUS_UNSOLICITED 6 +#define INFF_E_STATUS_ATTEMPT 7 +#define INFF_E_STATUS_PARTIAL 8 +#define INFF_E_STATUS_NEWSCAN 9 +#define INFF_E_STATUS_NEWASSOC 10 +#define INFF_E_STATUS_11HQUIET 11 +#define INFF_E_STATUS_SUPPRESS 12 +#define INFF_E_STATUS_NOCHANS 13 +#define INFF_E_STATUS_CS_ABORT 15 +#define INFF_E_STATUS_ERROR 16 + +/* status field values for PSK_SUP event */ +#define INFF_E_STATUS_FWSUP_WAIT_M1 4 +#define INFF_E_STATUS_FWSUP_PREP_M2 5 +#define INFF_E_STATUS_FWSUP_COMPLETED 6 +#define INFF_E_STATUS_FWSUP_TIMEOUT 7 +#define INFF_E_STATUS_FWSUP_WAIT_M3 8 +#define 
INFF_E_STATUS_FWSUP_PREP_M4 9 +#define INFF_E_STATUS_FWSUP_WAIT_G1 10 +#define INFF_E_STATUS_FWSUP_PREP_G2 11 + +/* reason field values in struct inff_event_msg */ +#define INFF_E_REASON_INITIAL_ASSOC 0 +#define INFF_E_REASON_LOW_RSSI 1 +#define INFF_E_REASON_DEAUTH 2 +#define INFF_E_REASON_DISASSOC 3 +#define INFF_E_REASON_BCNS_LOST 4 +#define INFF_E_REASON_MINTXRATE 9 +#define INFF_E_REASON_TXFAIL 10 + +#define INFF_E_REASON_LINK_BSSCFG_DIS 4 +#define INFF_E_REASON_FAST_ROAM_FAILED 5 +#define INFF_E_REASON_DIRECTED_ROAM 6 +#define INFF_E_REASON_TSPEC_REJECTED 7 +#define INFF_E_REASON_BETTER_AP 8 + +#define INFF_E_REASON_TDLS_PEER_DISCOVERED 0 +#define INFF_E_REASON_TDLS_PEER_CONNECTED 1 +#define INFF_E_REASON_TDLS_PEER_DISCONNECTED 2 + +/* reason field values for PSK_SUP event */ +#define INFF_E_REASON_FWSUP_OTHER 0 +#define INFF_E_REASON_FWSUP_DECRYPT_KEY_DATA 1 +#define INFF_E_REASON_FWSUP_BAD_UCAST_WEP128 2 +#define INFF_E_REASON_FWSUP_BAD_UCAST_WEP40 3 +#define INFF_E_REASON_FWSUP_UNSUP_KEY_LEN 4 +#define INFF_E_REASON_FWSUP_PW_KEY_CIPHER 5 +#define INFF_E_REASON_FWSUP_MSG3_TOO_MANY_IE 6 +#define INFF_E_REASON_FWSUP_MSG3_IE_MISMATCH 7 +#define INFF_E_REASON_FWSUP_NO_INSTALL_FLAG 8 +#define INFF_E_REASON_FWSUP_MSG3_NO_GTK 9 +#define INFF_E_REASON_FWSUP_GRP_KEY_CIPHER 10 +#define INFF_E_REASON_FWSUP_GRP_MSG1_NO_GTK 11 +#define INFF_E_REASON_FWSUP_GTK_DECRYPT_FAIL 12 +#define INFF_E_REASON_FWSUP_SEND_FAIL 13 +#define INFF_E_REASON_FWSUP_DEAUTH 14 +#define INFF_E_REASON_FWSUP_WPA_PSK_TMO 15 +#define INFF_E_REASON_FWSUP_WPA_PSK_M1_TMO 16 +#define INFF_E_REASON_FWSUP_WPA_PSK_M3_TMO 17 + +/* action field values for inff_ifevent */ +#define INFF_E_IF_ADD 1 +#define INFF_E_IF_DEL 2 +#define INFF_E_IF_CHANGE 3 + +/* flag field values for inff_ifevent */ +#define INFF_E_IF_FLAG_NOIF 1 + +/* role field values for inff_ifevent */ +#define INFF_E_IF_ROLE_STA 0 +#define INFF_E_IF_ROLE_AP 1 +#define INFF_E_IF_ROLE_WDS 2 +#define INFF_E_IF_ROLE_P2P_GO 3 +#define 
INFF_E_IF_ROLE_P2P_CLIENT 4 +#define INFF_E_IF_ROLE_WLAN_SENSE 10 + +/** + * definitions for event packet validation. + */ +#define INFILCP_INF_SUBTYPE_EVENT 1 +#define INFILCP_SUBTYPE_VENDOR_LONG 32769 + +/** + * struct inff_ethhdr - Infineon specific ether header. + * + * @subtype: subtype for this packet. + * @length: TODO: length of appended data. + * @version: version indication. + * @oui: OUI of this packet. + * @usr_subtype: subtype for this OUI. + */ +struct inff_ethhdr { + __be16 subtype; + __be16 length; + u8 version; + u8 oui[3]; + __be16 usr_subtype; +} __packed; + +struct inff_event_msg_be { + __be16 version; + __be16 flags; + __be32 event_type; + __be32 status; + __be32 reason; + __be32 auth_type; + __be32 datalen; + u8 addr[ETH_ALEN]; + char ifname[IFNAMSIZ]; + u8 ifidx; + u8 bsscfgidx; +} __packed; + +/** + * struct inff_event - contents of Infineon event packet. + * + * @eth: standard ether header. + * @hdr: Infineon specific ether header. + * @msg: common part of the actual event message. + */ +struct inff_event { + struct ethhdr eth; + struct inff_ethhdr hdr; + struct inff_event_msg_be msg; +} __packed; + +/** + * struct inff_event_msg - firmware event message. + * + * @version: version information. + * @flags: event flags. + * @event_code: firmware event code. + * @status: status information. + * @reason: reason code. + * @auth_type: authentication type. + * @datalen: length of event data buffer. + * @addr: ether address. + * @ifname: interface name. + * @ifidx: interface index. + * @bsscfgidx: bsscfg index. 
+ */ +struct inff_event_msg { + u16 version; + u16 flags; + u32 event_code; + u32 status; + u32 reason; + s32 auth_type; + u32 datalen; + u8 addr[ETH_ALEN]; + char ifname[IFNAMSIZ]; + u8 ifidx; + u8 bsscfgidx; +}; + +struct inff_if_event { + u8 ifidx; + u8 action; + u8 flags; + u8 bsscfgidx; + u8 role; +}; + +enum event_msgs_ext_command { + EVENTMSGS_NONE = 0, + EVENTMSGS_SET_BIT = 1, + EVENTMSGS_RESET_BIT = 2, + EVENTMSGS_SET_MASK = 3 +}; + +#define EVENTMSGS_VER 1 +#define EVENTMSGS_EXT_STRUCT_SIZE offsetof(struct eventmsgs_ext, mask[0]) + +/* len- for SET it would be mask size from the application to the firmware */ +/* for GET it would be actual firmware mask size */ +/* maxgetsize - is only used for GET. indicate max mask size that the */ +/* application can read from the firmware */ +struct eventmsgs_ext { + u8 ver; + u8 command; + u8 len; + u8 maxgetsize; + u8 mask[]; +}; + +typedef int (*inff_fweh_handler_t)(struct inff_if *ifp, + const struct inff_event_msg *evtmsg, + void *data); + +/** + * struct inff_fweh_info - firmware event handling information. + * + * @p2pdev_setup_ongoing: P2P device creation in progress. + * @wlan_sensedev_setup_ongoing: WLAN Sense device creation in progress. + * @event_work: event worker. + * @evt_q_lock: lock for event queue protection. + * @event_q: event queue. + * @evt_handler: registered event handlers. 
+ */ +struct inff_fweh_info { + bool p2pdev_setup_ongoing; + bool wlan_sensedev_setup_ongoing; + struct work_struct event_work; + spinlock_t evt_q_lock; /* lock for event queue protection */ + struct list_head event_q; + + int (*evt_handler[INFF_E_LAST])(struct inff_if *ifp, + const struct inff_event_msg *evtmsg, + void *data); +}; + +const char *inff_fweh_event_name(enum inff_fweh_event_code code); + +void inff_fweh_attach(struct inff_pub *drvr); +void inff_fweh_detach(struct inff_pub *drvr); +int inff_fweh_register(struct inff_pub *drvr, enum inff_fweh_event_code code, + int (*handler)(struct inff_if *ifp, + const struct inff_event_msg *evtmsg, + void *data)); +void inff_fweh_unregister(struct inff_pub *drvr, + enum inff_fweh_event_code code); +int inff_fweh_activate_events(struct inff_if *ifp); +void inff_fweh_process_event(struct inff_pub *drvr, + struct inff_event *event_packet, + u32 packet_len, gfp_t gfp); +void inff_fweh_process_skb(struct inff_pub *drvr, struct sk_buff *skb, u16 stype, + gfp_t gfp); +void inff_fweh_p2pdev_setup(struct inff_if *ifp, bool ongoing); +void inff_fweh_wlan_sensedev_setup(struct inff_if *ifp, bool ongoing); + +s32 inff_notify_connect_status(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 +inff_notify_sched_scan_results(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 inff_notify_roaming_status(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 +inff_notify_assoc_resp_ie(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 inff_notify_assoc_req_ie(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 +inff_notify_ext_assoc_frame_rx(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 inff_notify_beacon_loss(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 inff_notify_rssi_change_ind(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 
inff_notify_mgmt_tx_status(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 inff_notify_auth_frame_rx(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 inff_notify_ext_auth_request(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 inff_notify_vif_event(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 inff_notify_rssi(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 inff_notify_mic_status(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 inff_notify_roaming_status(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +s32 +inff_notify_csa_completion_ind(struct inff_if *ifp, const struct inff_event_msg *e, void *data); + +void inff_register_event_handlers(struct inff_cfg80211_info *cfg); + +#endif /* INFF_FWEH_H */ -- 2.25.1 Implementation of the firmware signalling used for flow control during Data transmission between Host Driver and Device firmware via the SDIO bus. Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/fwsignal.c | 2739 +++++++++++++++++ .../net/wireless/infineon/inffmac/fwsignal.h | 53 + 2 files changed, 2792 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/fwsignal.c create mode 100644 drivers/net/wireless/infineon/inffmac/fwsignal.h diff --git a/drivers/net/wireless/infineon/inffmac/fwsignal.c b/drivers/net/wireless/infineon/inffmac/fwsignal.c new file mode 100644 index 000000000000..985382576c34 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/fwsignal.c @@ -0,0 +1,2739 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/if_ether.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <net/cfg80211.h>
+/* NOTE(review): the ten system include targets above were garbled in this
+ * copy of the patch (angle-bracket contents stripped); reconstructed from
+ * symbol usage in this file (spinlock_t, struct work_struct, struct sk_buff,
+ * ETH_ALEN, HZ). Verify against the original submission.
+ */
+
+#include "utils.h"
+#include "chanspec.h"
+#include "core.h"
+#include "debug.h"
+#include "bus.h"
+#include "fwil.h"
+#include "fwil_types.h"
+#include "fweh.h"
+#include "fwsignal.h"
+#include "p2p.h"
+#include "cfg80211.h"
+#include "proto.h"
+#include "bcdc.h"
+#include "common.h"
+
+/**
+ * DOC: Firmware Signalling
+ *
+ * Firmware can send signals to host and vice versa, which are passed in the
+ * data packets using TLV based header. This signalling layer is on top of the
+ * BDC bus protocol layer.
+ */
+/*
+ * enum inff_fws_tlv_type - definition of tlv identifiers.
+ */
+enum inff_fws_tlv_type {
+ INFF_FWS_TYPE_MAC_OPEN = 1,
+ INFF_FWS_TYPE_MAC_CLOSE = 2,
+ INFF_FWS_TYPE_MAC_REQUEST_CREDIT = 3,
+ INFF_FWS_TYPE_TXSTATUS = 4,
+ INFF_FWS_TYPE_PKTTAG = 5,
+ INFF_FWS_TYPE_MACDESC_ADD = 6,
+ INFF_FWS_TYPE_MACDESC_DEL = 7,
+ INFF_FWS_TYPE_RSSI = 8,
+ INFF_FWS_TYPE_INTERFACE_OPEN = 9,
+ INFF_FWS_TYPE_INTERFACE_CLOSE = 10,
+ INFF_FWS_TYPE_FIFO_CREDITBACK = 11,
+ INFF_FWS_TYPE_PENDING_TRAFFIC_BMP = 12,
+ INFF_FWS_TYPE_MAC_REQUEST_PACKET = 13,
+ INFF_FWS_TYPE_HOST_REORDER_RXPKTS = 14,
+ INFF_FWS_TYPE_TRANS_ID = 18,
+ INFF_FWS_TYPE_COMP_TXSTATUS = 19,
+ INFF_FWS_TYPE_FILLER = 255,
+ INFF_FWS_TYPE_INVALID
+};
+
+#define INFF_FWS_TYPE_FIFO_CREDITBACK_V2_LEN 12
+#define INFF_FIFO_CREDITBACK_TX_OFFSET 6
+
+/*
+ * enum inff_fws_tlv_len - definition of tlv lengths.
+ */ +enum inff_fws_tlv_len { + INFF_FWS_TYPE_MAC_OPEN_LEN = 1, + INFF_FWS_TYPE_MAC_CLOSE_LEN = 1, + INFF_FWS_TYPE_MAC_REQUEST_CREDIT_LEN = 2, + INFF_FWS_TYPE_TXSTATUS_LEN = 4, + INFF_FWS_TYPE_PKTTAG_LEN = 4, + INFF_FWS_TYPE_MACDESC_ADD_LEN = 8, + INFF_FWS_TYPE_MACDESC_DEL_LEN = 8, + INFF_FWS_TYPE_RSSI_LEN = 1, + INFF_FWS_TYPE_INTERFACE_OPEN_LEN = 1, + INFF_FWS_TYPE_INTERFACE_CLOSE_LEN = 1, + INFF_FWS_TYPE_FIFO_CREDITBACK_LEN = 6, + INFF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN = 2, + INFF_FWS_TYPE_MAC_REQUEST_PACKET_LEN = 3, + INFF_FWS_TYPE_HOST_REORDER_RXPKTS_LEN = 10, + INFF_FWS_TYPE_TRANS_ID_LEN = 6, + INFF_FWS_TYPE_COMP_TXSTATUS_LEN = 1, + INFF_FWS_TYPE_FILLER_LEN = 0 +}; + +/* AMPDU rx reordering definitions */ +#define INFF_RXREORDER_FLOWID_OFFSET 0 +#define INFF_RXREORDER_MAXIDX_OFFSET 2 +#define INFF_RXREORDER_FLAGS_OFFSET 4 +#define INFF_RXREORDER_CURIDX_OFFSET 6 +#define INFF_RXREORDER_EXPIDX_OFFSET 8 + +#define INFF_RXREORDER_DEL_FLOW 0x01 +#define INFF_RXREORDER_FLUSH_ALL 0x02 +#define INFF_RXREORDER_CURIDX_VALID 0x04 +#define INFF_RXREORDER_EXPIDX_VALID 0x08 +#define INFF_RXREORDER_NEW_HOLE 0x10 + +#ifdef DEBUG +/* + * inff_fws_tlv_names - array of tlv names. 
+ */ +static struct { + enum inff_fws_tlv_type id; + const char *name; +} inff_fws_tlv_names[] = { + {1, "MAC_OPEN"}, + {2, "MAC_CLOSE"}, + {3, "MAC_REQUEST_CREDIT"}, + {4, "TXSTATUS"}, + {5, "PKTTAG"}, + {6, "MACDESC_ADD"}, + {7, "MACDESC_DEL"}, + {8, "RSSI"}, + {9, "INTERFACE_OPEN"}, + {10, "INTERFACE_CLOSE"}, + {11, "FIFO_CREDITBACK"}, + {12, "PENDING_TRAFFIC_BMP"}, + {13, "MAC_REQUEST_PACKET"}, + {14, "HOST_REORDER_RXPKTS"}, + {18, "TRANS_ID"}, + {19, "COMP_TXSTATUS"}, + {255, "FILLER"} +}; + +static const char *inff_fws_get_tlv_name(enum inff_fws_tlv_type id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(inff_fws_tlv_names); i++) + if (inff_fws_tlv_names[i].id == id) + return inff_fws_tlv_names[i].name; + + return "INVALID"; +} +#else +static const char *inff_fws_get_tlv_name(enum inff_fws_tlv_type id) +{ + return "NODEBUG"; +} +#endif /* DEBUG */ + +/* + * The PKTTAG tlv has additional bytes when firmware-signalling + * mode has REUSESEQ flag set. + */ +#define INFF_FWS_TYPE_SEQ_LEN 2 + +/* + * flags used to enable tlv signalling from firmware. 
+ */ +#define INFF_FWS_FLAGS_RSSI_SIGNALS 0x0001 +#define INFF_FWS_FLAGS_XONXOFF_SIGNALS 0x0002 +#define INFF_FWS_FLAGS_CREDIT_STATUS_SIGNALS 0x0004 +#define INFF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008 +#define INFF_FWS_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010 +#define INFF_FWS_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020 +#define INFF_FWS_FLAGS_HOST_RXREORDER_ACTIVE 0x0040 + +#define INFF_FWS_MAC_DESC_TABLE_SIZE 32 +#define INFF_FWS_MAC_DESC_ID_INVALID 0xff + +#define INFF_FWS_HOSTIF_FLOWSTATE_OFF 0 +#define INFF_FWS_HOSTIF_FLOWSTATE_ON 1 +#define INFF_FWS_FLOWCONTROL_HIWATER ((256 * 8) - 256) +#define INFF_FWS_FLOWCONTROL_LOWATER 256 +#define INFF_FWS_FLOWCONTROL_SHQUEUE_HIWATER 128 +#define INFF_FWS_FLOWCONTROL_SHQUEUE_LOWATER 64 + +#define INFF_FWS_PSQ_PREC_COUNT ((INFF_FWS_FIFO_COUNT + 1) * 2) +#define INFF_FWS_PSQ_LEN (256 * 8) +#define INFF_FWS_SHQUEUE_PSQ_LEN 256 + +#define INFF_FWS_HTOD_FLAG_PKTFROMHOST 0x01 +#define INFF_FWS_HTOD_FLAG_PKT_REQUESTED 0x02 + +#define INFF_FWS_RET_OK_NOSCHEDULE 0 +#define INFF_FWS_RET_OK_SCHEDULE 1 + +#define INFF_FWS_MODE_REUSESEQ_SHIFT 3 /* seq reuse */ +#define INFF_FWS_MODE_SET_REUSESEQ(x, val) { \ + typeof(x) _x = (x); \ + ((_x) = \ + ((_x) & ~(1 << INFF_FWS_MODE_REUSESEQ_SHIFT)) | \ + (((val) & 1) << INFF_FWS_MODE_REUSESEQ_SHIFT)); \ + } +#define INFF_FWS_MODE_GET_REUSESEQ(x) \ + (((x) >> INFF_FWS_MODE_REUSESEQ_SHIFT) & 1) + +/** + * enum inff_fws_skb_state - indicates processing state of skb. + * + * @INFF_FWS_SKBSTATE_NEW: sk_buff is newly arrived in the driver. + * @INFF_FWS_SKBSTATE_DELAYED: sk_buff had to wait on queue. + * @INFF_FWS_SKBSTATE_SUPPRESSED: sk_buff has been suppressed by firmware. + * @INFF_FWS_SKBSTATE_TIM: allocated for TIM update info. + */ +enum inff_fws_skb_state { + INFF_FWS_SKBSTATE_NEW, + INFF_FWS_SKBSTATE_DELAYED, + INFF_FWS_SKBSTATE_SUPPRESSED, + INFF_FWS_SKBSTATE_TIM +}; + +/** + * struct inff_skbuff_cb - control buffer associated with skbuff. 
+ * + * @bus_flags: 2 bytes reserved for bus specific parameters + * @if_flags: holds interface index and packet related flags. + * @htod: host to device packet identifier (used in PKTTAG tlv). + * @htod_seq: this 16-bit is original seq number for every suppress packet. + * @state: transmit state of the packet. + * @mac: descriptor related to destination for this packet. + * + * This information is stored in control buffer struct sk_buff::cb, which + * provides 48 bytes of storage so this structure should not exceed that. + */ +struct inff_skbuff_cb { + u16 bus_flags; + u16 if_flags; + u32 htod; + u16 htod_seq; + enum inff_fws_skb_state state; + struct inff_fws_mac_descriptor *mac; +}; + +/* + * macro casting skbuff control buffer to struct inff_skbuff_cb. + */ +#define inff_skbcb(skb) ((struct inff_skbuff_cb *)((skb)->cb)) + +/* + * sk_buff control if flags + * + * b[11] - packet sent upon firmware request. + * b[10] - packet only contains signalling data. + * b[9] - packet is a tx packet. + * b[8] - packet used requested credit + * b[7] - interface in AP mode. + * b[3:0] - interface index. 
+ */ +#define INFF_SKB_IF_FLAGS_REQUESTED_MASK 0x0800 +#define INFF_SKB_IF_FLAGS_REQUESTED_SHIFT 11 +#define INFF_SKB_IF_FLAGS_SIGNAL_ONLY_MASK 0x0400 +#define INFF_SKB_IF_FLAGS_SIGNAL_ONLY_SHIFT 10 +#define INFF_SKB_IF_FLAGS_TRANSMIT_MASK 0x0200 +#define INFF_SKB_IF_FLAGS_TRANSMIT_SHIFT 9 +#define INFF_SKB_IF_FLAGS_REQ_CREDIT_MASK 0x0100 +#define INFF_SKB_IF_FLAGS_REQ_CREDIT_SHIFT 8 +#define INFF_SKB_IF_FLAGS_IF_AP_MASK 0x0080 +#define INFF_SKB_IF_FLAGS_IF_AP_SHIFT 7 +#define INFF_SKB_IF_FLAGS_INDEX_MASK 0x000f +#define INFF_SKB_IF_FLAGS_INDEX_SHIFT 0 + +#define inff_skb_if_flags_set_field(skb, field, value) \ + inff_maskset16(&(inff_skbcb(skb)->if_flags), \ + INFF_SKB_IF_FLAGS_ ## field ## _MASK, \ + INFF_SKB_IF_FLAGS_ ## field ## _SHIFT, (value)) +#define inff_skb_if_flags_get_field(skb, field) \ + inff_maskget16(inff_skbcb(skb)->if_flags, \ + INFF_SKB_IF_FLAGS_ ## field ## _MASK, \ + INFF_SKB_IF_FLAGS_ ## field ## _SHIFT) + +/* + * sk_buff control packet identifier + * + * 32-bit packet identifier used in PKTTAG tlv from host to dongle. + * + * - Generated at the host driver + * - Seen as a generic sequence number by firmware except for the flags field. 
 *
 * Generation:	b[31]	 => generation number for this packet [host->fw]
 *			    OR, current generation number [fw->host]
 * Flags:	b[30:27] => command, status flags
 * FIFO-AC:	b[26:24] => AC-FIFO id
 * h-slot:	b[23:8]	 => hanger-slot
 * freerun:	b[7:0]	 => A free running counter
 */
#define INFF_SKB_HTOD_TAG_GENERATION_MASK	0x80000000
#define INFF_SKB_HTOD_TAG_GENERATION_SHIFT	31
#define INFF_SKB_HTOD_TAG_FLAGS_MASK		0x78000000
#define INFF_SKB_HTOD_TAG_FLAGS_SHIFT		27
#define INFF_SKB_HTOD_TAG_FIFO_MASK		0x07000000
#define INFF_SKB_HTOD_TAG_FIFO_SHIFT		24
#define INFF_SKB_HTOD_TAG_HSLOT_MASK		0x00ffff00
#define INFF_SKB_HTOD_TAG_HSLOT_SHIFT		8
#define INFF_SKB_HTOD_TAG_FREERUN_MASK		0x000000ff
#define INFF_SKB_HTOD_TAG_FREERUN_SHIFT		0

/* Set/get a named bitfield of the 32-bit host-to-device tag kept in skb cb */
#define inff_skb_htod_tag_set_field(skb, field, value) \
	inff_maskset32(&(inff_skbcb(skb)->htod), \
		       INFF_SKB_HTOD_TAG_ ## field ## _MASK, \
		       INFF_SKB_HTOD_TAG_ ## field ## _SHIFT, (value))
#define inff_skb_htod_tag_get_field(skb, field) \
	inff_maskget32(inff_skbcb(skb)->htod, \
		       INFF_SKB_HTOD_TAG_ ## field ## _MASK, \
		       INFF_SKB_HTOD_TAG_ ## field ## _SHIFT)

#define INFF_SKB_HTOD_SEQ_FROMFW_MASK		0x2000
#define INFF_SKB_HTOD_SEQ_FROMFW_SHIFT		13
#define INFF_SKB_HTOD_SEQ_FROMDRV_MASK		0x1000
#define INFF_SKB_HTOD_SEQ_FROMDRV_SHIFT		12
#define INFF_SKB_HTOD_SEQ_NR_MASK		0x0fff
#define INFF_SKB_HTOD_SEQ_NR_SHIFT		0

/* Set/get a named bitfield of the 16-bit htod sequence kept in skb cb */
#define inff_skb_htod_seq_set_field(skb, field, value) \
	inff_maskset16(&(inff_skbcb(skb)->htod_seq), \
		       INFF_SKB_HTOD_SEQ_ ## field ## _MASK, \
		       INFF_SKB_HTOD_SEQ_ ## field ## _SHIFT, (value))
#define inff_skb_htod_seq_get_field(skb, field) \
	inff_maskget16(inff_skbcb(skb)->htod_seq, \
		       INFF_SKB_HTOD_SEQ_ ## field ## _MASK, \
		       INFF_SKB_HTOD_SEQ_ ## field ## _SHIFT)

#define INFF_FWS_TXSTAT_GENERATION_MASK		0x80000000
#define INFF_FWS_TXSTAT_GENERATION_SHIFT	31
#define INFF_FWS_TXSTAT_FLAGS_MASK		0x78000000
#define INFF_FWS_TXSTAT_FLAGS_SHIFT		27
#define INFF_FWS_TXSTAT_FIFO_MASK		0x07000000
#define INFF_FWS_TXSTAT_FIFO_SHIFT		24
#define INFF_FWS_TXSTAT_HSLOT_MASK		0x00FFFF00
#define INFF_FWS_TXSTAT_HSLOT_SHIFT		8
#define INFF_FWS_TXSTAT_FREERUN_MASK		0x000000FF
#define INFF_FWS_TXSTAT_FREERUN_SHIFT		0

/* Extract a named bitfield from a 32-bit firmware tx status word */
#define inff_txstatus_get_field(txs, field) \
	inff_maskget32(txs, INFF_FWS_TXSTAT_ ## field ## _MASK, \
		       INFF_FWS_TXSTAT_ ## field ## _SHIFT)

/* How long to defer borrowing in jiffies */
#define INFF_FWS_BORROW_DEFER_PERIOD	(HZ / 10)

/**
 * enum inff_fws_txstatus - txstatus flag values.
 *
 * @INFF_FWS_TXSTATUS_DISCARD:
 *	host is free to discard the packet.
 * @INFF_FWS_TXSTATUS_CORE_SUPPRESS:
 *	802.11 core suppressed the packet.
 * @INFF_FWS_TXSTATUS_FW_PS_SUPPRESS:
 *	firmware suppress the packet as device is already in PS mode.
 * @INFF_FWS_TXSTATUS_FW_TOSSED:
 *	firmware tossed the packet.
 * @INFF_FWS_TXSTATUS_FW_DISCARD_NOACK:
 *	firmware tossed the packet after retries.
 * @INFF_FWS_TXSTATUS_FW_SUPPRESS_ACKED:
 *	firmware wrongly reported suppressed previously, now fixing to acked.
 * @INFF_FWS_TXSTATUS_HOST_TOSSED:
 *	host tossed the packet.
 */
enum inff_fws_txstatus {
	INFF_FWS_TXSTATUS_DISCARD,
	INFF_FWS_TXSTATUS_CORE_SUPPRESS,
	INFF_FWS_TXSTATUS_FW_PS_SUPPRESS,
	INFF_FWS_TXSTATUS_FW_TOSSED,
	INFF_FWS_TXSTATUS_FW_DISCARD_NOACK,
	INFF_FWS_TXSTATUS_FW_SUPPRESS_ACKED,
	INFF_FWS_TXSTATUS_HOST_TOSSED
};

/* Flow-control mode: none, credit implied by txstatus, or explicit credits */
enum inff_fws_fcmode {
	INFF_FWS_FCMODE_NONE,
	INFF_FWS_FCMODE_IMPLIED_CREDIT,
	INFF_FWS_FCMODE_EXPLICIT_CREDIT
};

/* Open/close state of a mac descriptor; 0 is reserved (uninitialized) */
enum inff_fws_mac_desc_state {
	INFF_FWS_STATE_OPEN = 1,
	INFF_FWS_STATE_CLOSE
};

/**
 * struct inff_fws_mac_descriptor - firmware signalling data per node/interface
 *
 * @name: name of the descriptor.
 * @occupied: slot is in use.
 * @mac_handle: handle for mac entry determined by firmware.
 * @interface_id: interface index.
 * @state: current state.
 * @suppressed: mac entry is suppressed.
 * @generation: generation bit.
 * @ac_bitmap: ac queue bitmap.
 * @requested_credit: credits requested by firmware.
 * @requested_packet: packet requested by firmware.
 * @ea: ethernet address.
 * @seq: per-node free-running sequence.
 * @psq: power-save queue.
 * @transit_count: packet in transit to firmware.
 * @suppr_transit_count: suppressed packet in transit to firmware.
 * @send_tim_signal: if set tim signal will be sent.
 * @traffic_pending_bmp: traffic pending bitmap.
 * @traffic_lastreported_bmp: traffic last reported bitmap.
 */
struct inff_fws_mac_descriptor {
	char name[16];
	u8 occupied;
	u8 mac_handle;
	u8 interface_id;
	u8 state;
	bool suppressed;
	u8 generation;
	u8 ac_bitmap;
	u8 requested_credit;
	u8 requested_packet;
	u8 ea[ETH_ALEN];
	u8 seq[INFF_FWS_FIFO_COUNT];
	struct pktq psq;
	int transit_count;
	int suppr_transit_count;
	bool send_tim_signal;
	u8 traffic_pending_bmp;
	u8 traffic_lastreported_bmp;
};

#define INFF_FWS_HANGER_MAXITEMS	3072
#define INFF_BORROW_RATIO		3

/**
 * enum inff_fws_hanger_item_state - state of hanger item.
 *
 * @INFF_FWS_HANGER_ITEM_STATE_FREE: item is free for use.
 * @INFF_FWS_HANGER_ITEM_STATE_INUSE: item is in use.
 * @INFF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED: item was suppressed.
 */
enum inff_fws_hanger_item_state {
	INFF_FWS_HANGER_ITEM_STATE_FREE = 1,
	INFF_FWS_HANGER_ITEM_STATE_INUSE,
	INFF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED
};

/**
 * struct inff_fws_hanger_item - single entry for tx pending packet.
 *
 * @state: entry is either free or occupied.
 * @pkt: packet itself.
 */
struct inff_fws_hanger_item {
	enum inff_fws_hanger_item_state state;
	struct sk_buff *pkt;
};

/**
 * struct inff_fws_hanger - holds packets awaiting firmware txstatus.
 *
 * @pushed: packets pushed to await txstatus.
 * @popped: packets popped upon handling txstatus.
 * @failed_to_push: packets that could not be pushed.
 * @failed_to_pop: packets that could not be popped.
 * @failed_slotfind: packets for which failed to find an entry.
 * @slot_pos: last returned item index for a free entry.
 * @items: array of hanger items.
 */
struct inff_fws_hanger {
	u32 pushed;
	u32 popped;
	u32 failed_to_push;
	u32 failed_to_pop;
	u32 failed_slotfind;
	u32 slot_pos;
	struct inff_fws_hanger_item items[INFF_FWS_HANGER_MAXITEMS];
};

/* All mac descriptors: per-station nodes, per-interface entries, catch-all */
struct inff_fws_macdesc_table {
	struct inff_fws_mac_descriptor nodes[INFF_FWS_MAC_DESC_TABLE_SIZE];
	struct inff_fws_mac_descriptor iface[INFF_MAX_IFS];
	struct inff_fws_mac_descriptor other;
};

/* Counters for firmware-signalling events, errors and packet accounting */
struct inff_fws_stats {
	u32 tlv_parse_failed;
	u32 tlv_invalid_type;
	u32 header_only_pkt;
	u32 header_pulls;
	u32 pkt2bus;
	u32 send_pkts[5];
	u32 requested_sent[5];
	u32 generic_error;
	u32 mac_update_failed;
	u32 mac_ps_update_failed;
	u32 if_update_failed;
	u32 packet_request_failed;
	u32 credit_request_failed;
	u32 rollback_success;
	u32 rollback_failed;
	u32 delayq_full_error;
	u32 supprq_full_error;
	u32 txs_indicate;
	u32 txs_discard;
	u32 txs_supp_core;
	u32 txs_supp_ps;
	u32 txs_tossed;
	u32 txs_host_tossed;
	u32 bus_flow_block;
	u32 fws_flow_block;
	u32 cnt_recv_err;
	u32 cnt_cleanup_if;
};

/*
 * Top-level firmware-signalling state.
 *
 * NOTE(review): @flags holds the saved IRQ flags of the most recent
 * spin_lock_irqsave() call (see inff_fws_lock()); storing them in the
 * shared structure assumes the lock is never taken concurrently from
 * two contexts between lock and unlock — confirm against locking design.
 */
struct inff_fws_info {
	struct inff_pub *drvr;
	spinlock_t spinlock;	/* spinlock for fw signal resource protection */
	ulong flags;
	struct inff_fws_stats stats;
	struct inff_fws_hanger hanger;
	enum inff_fws_fcmode fcmode;
	bool fw_signals;
	bool bcmc_credit_check;
	struct inff_fws_macdesc_table desc;
	struct workqueue_struct *fws_wq;
	struct work_struct fws_dequeue_work;
	u32 fifo_enqpkt[INFF_FWS_FIFO_COUNT];
	int fifo_credit[INFF_FWS_FIFO_COUNT];
	int init_fifo_credit[INFF_FWS_FIFO_COUNT];
	/* credits_borrowed[borrower AC][lender AC] */
	int credits_borrowed[INFF_FWS_FIFO_AC_VO + 1]
			    [INFF_FWS_FIFO_AC_VO + 1];
	int deq_node_pos[INFF_FWS_FIFO_COUNT];
	u32 fifo_credit_map;
	u32 fifo_delay_map;
	unsigned long borrow_defer_timestamp;
	bool bus_flow_blocked;
	bool creditmap_received;
	bool credit_recover;
	bool sdio_recv_error;
	u8 mode;
	bool avoid_queueing;
	int fws_psq_len;
	int fws_psq_hi_water;
	int fws_psq_low_water;
};

/**
 * inff_fws_get_tlv_len() - returns defined length for given tlv id.
 *
 * @fws: firmware-signalling information.
 * @id: identifier of the TLV.
 *
 * Return: the specified length for the given TLV; Otherwise -EINVAL.
 */
static int inff_fws_get_tlv_len(struct inff_fws_info *fws,
				enum inff_fws_tlv_type id)
{
	switch (id) {
	case INFF_FWS_TYPE_MAC_OPEN:
		return INFF_FWS_TYPE_MAC_OPEN_LEN;
	case INFF_FWS_TYPE_MAC_CLOSE:
		return INFF_FWS_TYPE_MAC_CLOSE_LEN;
	case INFF_FWS_TYPE_MAC_REQUEST_CREDIT:
		return INFF_FWS_TYPE_MAC_REQUEST_CREDIT_LEN;
	case INFF_FWS_TYPE_TXSTATUS:
		return INFF_FWS_TYPE_TXSTATUS_LEN;
	case INFF_FWS_TYPE_PKTTAG:
		return INFF_FWS_TYPE_PKTTAG_LEN;
	case INFF_FWS_TYPE_MACDESC_ADD:
		return INFF_FWS_TYPE_MACDESC_ADD_LEN;
	case INFF_FWS_TYPE_MACDESC_DEL:
		return INFF_FWS_TYPE_MACDESC_DEL_LEN;
	case INFF_FWS_TYPE_RSSI:
		return INFF_FWS_TYPE_RSSI_LEN;
	case INFF_FWS_TYPE_INTERFACE_OPEN:
		return INFF_FWS_TYPE_INTERFACE_OPEN_LEN;
	case INFF_FWS_TYPE_INTERFACE_CLOSE:
		return INFF_FWS_TYPE_INTERFACE_CLOSE_LEN;
	case INFF_FWS_TYPE_FIFO_CREDITBACK:
		return INFF_FWS_TYPE_FIFO_CREDITBACK_LEN;
	case INFF_FWS_TYPE_PENDING_TRAFFIC_BMP:
		return INFF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
	case INFF_FWS_TYPE_MAC_REQUEST_PACKET:
		return INFF_FWS_TYPE_MAC_REQUEST_PACKET_LEN;
	case INFF_FWS_TYPE_HOST_REORDER_RXPKTS:
		return INFF_FWS_TYPE_HOST_REORDER_RXPKTS_LEN;
	case INFF_FWS_TYPE_TRANS_ID:
		return INFF_FWS_TYPE_TRANS_ID_LEN;
	case INFF_FWS_TYPE_COMP_TXSTATUS:
		return INFF_FWS_TYPE_COMP_TXSTATUS_LEN;
	case INFF_FWS_TYPE_FILLER:
		return INFF_FWS_TYPE_FILLER_LEN;
	default:
		fws->stats.tlv_invalid_type++;
		break;
	}
	return -EINVAL;
}

/* Acquire the fws spinlock, saving IRQ flags into fws->flags */
static void inff_fws_lock(struct inff_fws_info *fws)
		__acquires(&fws->spinlock)
{
	spin_lock_irqsave(&fws->spinlock, fws->flags);
}

/* Release the fws spinlock, restoring the IRQ flags saved by inff_fws_lock() */
static void inff_fws_unlock(struct inff_fws_info *fws)
		__releases(&fws->spinlock)
{
	spin_unlock_irqrestore(&fws->spinlock, fws->flags);
}

/* Packet match callback: true when the skb's interface index equals *arg */
static bool inff_fws_ifidx_match(struct sk_buff *skb, void *arg)
{
	u32 ifidx = inff_skb_if_flags_get_field(skb, INDEX);
	return ifidx == *(int *)arg;
}

/* Reset the hanger: zero the counters and mark every slot free */
static void inff_fws_hanger_init(struct inff_fws_hanger *hanger)
{
	int i;

	memset(hanger, 0, sizeof(*hanger));
	for (i = 0; i < ARRAY_SIZE(hanger->items); i++)
		hanger->items[i].state = INFF_FWS_HANGER_ITEM_STATE_FREE;
}

/*
 * Find a free hanger slot, searching circularly from the slot after the
 * last one handed out. Returns INFF_FWS_HANGER_MAXITEMS on failure.
 *
 * NOTE(review): the search stops when it wraps back to slot_pos without
 * examining slot_pos itself, so that one slot is never considered even if
 * free — confirm whether this is intentional.
 */
static u32 inff_fws_hanger_get_free_slot(struct inff_fws_hanger *h)
{
	u32 i;

	i = (h->slot_pos + 1) % INFF_FWS_HANGER_MAXITEMS;

	while (i != h->slot_pos) {
		if (h->items[i].state == INFF_FWS_HANGER_ITEM_STATE_FREE) {
			h->slot_pos = i;
			goto done;
		}
		i++;
		if (i == INFF_FWS_HANGER_MAXITEMS)
			i = 0;
	}
	inff_err("all slots occupied\n");
	h->failed_slotfind++;
	i = INFF_FWS_HANGER_MAXITEMS;
done:
	return i;
}

/* Park @pkt in hanger slot @slot_id; fails if the slot is invalid or in use */
static int inff_fws_hanger_pushpkt(struct inff_fws_hanger *h,
				   struct sk_buff *pkt, u32 slot_id)
{
	if (slot_id >= INFF_FWS_HANGER_MAXITEMS)
		return -ENOENT;

	if (h->items[slot_id].state != INFF_FWS_HANGER_ITEM_STATE_FREE) {
		inff_err("slot is not free\n");
		h->failed_to_push++;
		return -EINVAL;
	}

	h->items[slot_id].state = INFF_FWS_HANGER_ITEM_STATE_INUSE;
	h->items[slot_id].pkt = pkt;
	h->pushed++;
	return 0;
}

/*
 * Retrieve the packet parked in slot @slot_id into *@pktout; when
 * @remove_item is true the slot is freed as well.
 */
static inline int inff_fws_hanger_poppkt(struct inff_fws_hanger *h,
					 u32 slot_id, struct sk_buff **pktout,
					 bool remove_item)
{
	if (slot_id >= INFF_FWS_HANGER_MAXITEMS)
		return -ENOENT;

	if (h->items[slot_id].state == INFF_FWS_HANGER_ITEM_STATE_FREE) {
		inff_err("entry not in use\n");
		h->failed_to_pop++;
		return -EINVAL;
	}

	*pktout = h->items[slot_id].pkt;
	if (remove_item) {
		h->items[slot_id].state = INFF_FWS_HANGER_ITEM_STATE_FREE;
		h->items[slot_id].pkt = NULL;
		h->popped++;
	}
	return 0;
}

/*
 * Toggle netif flow-control for interface @if_id based on the psq length:
 * unblock when it drains below the low watermark, block at the high one.
 */
static void
inff_fws_flow_control_check(struct inff_fws_info *fws, struct pktq *pq,
			    u8 if_id)
{
	struct inff_if *ifp = inff_get_ifp(fws->drvr, if_id);

	if (WARN_ON(!ifp))
		return;

	if ((ifp->netif_stop & INFF_NETIF_STOP_REASON_FWS_FC) &&
	    pq->len <= fws->fws_psq_low_water)
		inff_txflowblock_if(ifp,
				    INFF_NETIF_STOP_REASON_FWS_FC, false);
	if (!(ifp->netif_stop & INFF_NETIF_STOP_REASON_FWS_FC) &&
	    pq->len >= fws->fws_psq_hi_water) {
		fws->stats.fws_flow_block++;
		inff_txflowblock_if(ifp, INFF_NETIF_STOP_REASON_FWS_FC, true);
	}
}

/*
 * Drop all packets queued on @q (optionally only those for @ifidx),
 * releasing their hanger slots and re-checking flow control.
 */
static void inff_fws_psq_flush(struct inff_fws_info *fws, struct pktq *q,
			       int ifidx)
{
	struct inff_fws_hanger_item *hi;
	bool (*matchfn)(struct sk_buff *, void *) = NULL;
	struct sk_buff *skb;
	int prec;
	u32 hslot;
	int skbidx;

	if (ifidx != -1)
		matchfn = inff_fws_ifidx_match;
	for (prec = 0; prec < q->num_prec; prec++) {
		skb = inff_pktq_pdeq_match(q, prec, matchfn, &ifidx);
		while (skb) {
			hslot = inff_skb_htod_tag_get_field(skb, HSLOT);
			hi = &fws->hanger.items[hslot];
			WARN_ON(skb != hi->pkt);
			/* NOTE(review): the slot is marked FREE before
			 * inff_fws_hanger_poppkt() is called, so that pop
			 * will fail with "entry not in use" and bump
			 * failed_to_pop; the skb used below is the one
			 * already dequeued. Confirm the pop call is not
			 * intended to run before the state change.
			 */
			hi->state = INFF_FWS_HANGER_ITEM_STATE_FREE;
			inff_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
					       true);
			skbidx = inff_skb_if_flags_get_field(skb, INDEX);
			inff_fws_flow_control_check(fws, q, skbidx);
			inff_pkt_buf_free_skb(skb);
			skb = inff_pktq_pdeq_match(q, prec, matchfn, &ifidx);
		}
	}
}

/* Flag an in-use hanger slot as suppressed (avoids double free on cleanup) */
static int inff_fws_hanger_mark_suppressed(struct inff_fws_hanger *h,
					   u32 slot_id)
{
	if (slot_id >= INFF_FWS_HANGER_MAXITEMS)
		return -ENOENT;

	if (h->items[slot_id].state == INFF_FWS_HANGER_ITEM_STATE_FREE) {
		inff_err("entry not in use\n");
		return -EINVAL;
	}

	h->items[slot_id].state = INFF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED;
	return 0;
}

/*
 * Free all hanger entries matching @fn (or all when @fn is NULL); packets
 * in SUPPRESSED state are not freed here since they live on a psq.
 */
static void inff_fws_hanger_cleanup(struct inff_fws_info *fws,
				    bool (*fn)(struct sk_buff *, void *),
				    int ifidx)
{
	struct inff_fws_hanger *h = &fws->hanger;
	struct sk_buff *skb;
	int i;
	enum inff_fws_hanger_item_state s;

	for (i = 0; i < ARRAY_SIZE(h->items); i++) {
		s = h->items[i].state;
		if (s == INFF_FWS_HANGER_ITEM_STATE_INUSE ||
		    s == INFF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED) {
			skb = h->items[i].pkt;
			if (!fn || fn(skb, &ifidx)) {
				/* suppress packets freed from psq */
				if (s == INFF_FWS_HANGER_ITEM_STATE_INUSE)
					inff_pkt_buf_free_skb(skb);
				h->items[i].state =
					INFF_FWS_HANGER_ITEM_STATE_FREE;
			}
		}
	}
}

/* Build a human-readable name for a descriptor (used in debug output) */
static void inff_fws_macdesc_set_name(struct inff_fws_info *fws,
				      struct inff_fws_mac_descriptor *desc)
{
	if (desc == &fws->desc.other)
		strscpy(desc->name, "MAC-OTHER", sizeof(desc->name));
	else if (desc->mac_handle)
		scnprintf(desc->name, sizeof(desc->name), "MAC-%d:%d",
			  desc->mac_handle, desc->interface_id);
	else
		scnprintf(desc->name, sizeof(desc->name), "MACIF:%d",
			  desc->interface_id);
}

/* Initialize a descriptor to the OPEN state for address @addr on @ifidx */
static void inff_fws_macdesc_init(struct inff_fws_mac_descriptor *desc,
				  u8 *addr, u8 ifidx)
{
	inff_dbg(TRACE,
		 "enter: desc %p ea=%pM, ifidx=%u\n", desc, addr, ifidx);
	desc->occupied = 1;
	desc->state = INFF_FWS_STATE_OPEN;
	desc->requested_credit = 0;
	desc->requested_packet = 0;
	/* depending on use may need ifp->bsscfgidx instead */
	desc->interface_id = ifidx;
	desc->ac_bitmap = 0xff; /* update this when handling APSD */
	if (addr)
		memcpy(&desc->ea[0], addr, ETH_ALEN);
}

/* Re-initialize a descriptor, clearing all transient signalling state */
static void inff_fws_macdesc_reset(struct inff_fws_mac_descriptor *entry)
{
	int i;

	inff_fws_macdesc_init(entry, entry->ea, entry->interface_id);
	entry->mac_handle = 0;
	entry->suppressed = 0;
	entry->transit_count = 0;
	entry->suppr_transit_count = 0;
	entry->generation = 0;

	for (i = 0; i < INFF_FWS_FIFO_COUNT; i++)
		entry->seq[i] = 0;

	entry->send_tim_signal = 0;
	entry->traffic_pending_bmp = 0;
	entry->traffic_lastreported_bmp = 0;
}

/* Mark a descriptor unoccupied/closed and clear outstanding fw requests */
static
void inff_fws_macdesc_deinit(struct inff_fws_mac_descriptor *desc)
{
	inff_dbg(TRACE,
		 "enter: ea=%pM, ifidx=%u\n", desc->ea, desc->interface_id);
	desc->occupied = 0;
	desc->state = INFF_FWS_STATE_CLOSE;
	desc->requested_credit = 0;
	desc->requested_packet = 0;
}

/* Find the occupied node descriptor with ethernet address @ea */
static struct inff_fws_mac_descriptor *
inff_fws_macdesc_lookup(struct inff_fws_info *fws, u8 *ea)
{
	struct inff_fws_mac_descriptor *entry;
	int i;

	if (!ea)
		return ERR_PTR(-EINVAL);

	entry = &fws->desc.nodes[0];
	for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++) {
		if (entry->occupied && !memcmp(entry->ea, ea, ETH_ALEN))
			return entry;
		entry++;
	}

	return ERR_PTR(-ENOENT);
}

/* Pick the descriptor to use for destination @da on interface @ifp */
static struct inff_fws_mac_descriptor*
inff_fws_macdesc_find(struct inff_fws_info *fws, struct inff_if *ifp, u8 *da)
{
	struct inff_fws_mac_descriptor *entry;
	bool multicast;

	multicast = is_multicast_ether_addr(da);

	/* Multicast destination, STA and P2P clients get the interface entry.
	 * STA/GC gets the Mac Entry for TDLS destinations, TDLS destinations
	 * have their own entry.
	 */
	if (multicast && ifp->fws_desc) {
		entry = ifp->fws_desc;
		goto done;
	}

	entry = inff_fws_macdesc_lookup(fws, da);
	if (IS_ERR(entry))
		entry = ifp->fws_desc;

done:
	return entry;
}

/* Decide whether traffic on @fifo for @entry must be held back */
static bool inff_fws_macdesc_closed(struct inff_fws_info *fws,
				    struct inff_fws_mac_descriptor *entry,
				    int fifo)
{
	struct inff_fws_mac_descriptor *if_entry;
	bool closed;

	/* for unique destination entries the related interface
	 * may be closed.
	 */
	if (entry->mac_handle) {
		if_entry = &fws->desc.iface[entry->interface_id];
		if (if_entry->state == INFF_FWS_STATE_CLOSE)
			return true;
	}
	/* an entry is closed when the state is closed and
	 * the firmware did not request anything.
	 */
	closed = entry->state == INFF_FWS_STATE_CLOSE &&
		 !entry->requested_credit && !entry->requested_packet;

	/* Or firmware does not allow traffic for given fifo */
	return closed || !(entry->ac_bitmap & BIT(fifo));
}

/* Flush an entry's psq for @ifidx; keep the entry occupied if queue non-empty */
static void inff_fws_macdesc_cleanup(struct inff_fws_info *fws,
				     struct inff_fws_mac_descriptor *entry,
				     int ifidx)
{
	if (entry->occupied && (ifidx == -1 || ifidx == entry->interface_id)) {
		inff_fws_psq_flush(fws, &entry->psq, ifidx);
		entry->occupied = !!(entry->psq.len);
	}
}

/* Drop matching packets from the bus tx queue and free their hanger slots */
static void inff_fws_bus_txq_cleanup(struct inff_fws_info *fws,
				     bool (*fn)(struct sk_buff *, void *),
				     int ifidx)
{
	struct inff_fws_hanger_item *hi;
	struct pktq *txq;
	struct sk_buff *skb;
	int prec;
	u32 hslot;

	txq = inff_bus_gettxq(fws->drvr->bus_if);
	if (IS_ERR(txq)) {
		inff_dbg(TRACE, "no txq to clean up\n");
		return;
	}

	for (prec = 0; prec < txq->num_prec; prec++) {
		skb = inff_pktq_pdeq_match(txq, prec, fn, &ifidx);
		while (skb) {
			hslot = inff_skb_htod_tag_get_field(skb, HSLOT);
			hi = &fws->hanger.items[hslot];
			WARN_ON(skb != hi->pkt);
			hi->state = INFF_FWS_HANGER_ITEM_STATE_FREE;
			inff_pkt_buf_free_skb(skb);
			skb = inff_pktq_pdeq_match(txq, prec, fn, &ifidx);
		}
	}
}

/* Full cleanup for one interface (@ifidx) or everything (@ifidx == -1) */
static void inff_fws_cleanup(struct inff_fws_info *fws, int ifidx)
{
	int i;
	struct inff_fws_mac_descriptor *table;
	bool (*matchfn)(struct sk_buff *, void *) = NULL;

	if (!fws)
		return;

	if (ifidx != -1)
		matchfn = inff_fws_ifidx_match;

	/* cleanup individual nodes */
	table = &fws->desc.nodes[0];
	for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++)
		inff_fws_macdesc_cleanup(fws, &table[i], ifidx);

	inff_fws_macdesc_cleanup(fws, &fws->desc.other, ifidx);
	inff_fws_bus_txq_cleanup(fws, matchfn, ifidx);
	inff_fws_hanger_cleanup(fws, matchfn, ifidx);
}

/*
 * Prepend the fw-signalling TLV header (pkttag, optional seq, optional TIM
 * bitmap, filler padding to a 4-byte boundary) in front of the skb data.
 * Returns the header length in 4-byte words.
 */
static u8 inff_fws_hdrpush(struct inff_fws_info *fws, struct sk_buff *skb)
{
	struct inff_fws_mac_descriptor *entry = inff_skbcb(skb)->mac;
	u8 *wlh;
	u16 data_offset = 0;
	u8 fillers;
	__le32 pkttag = cpu_to_le32(inff_skbcb(skb)->htod);
	__le16 pktseq = cpu_to_le16(inff_skbcb(skb)->htod_seq);

	inff_dbg(TRACE, "enter: %s, idx=%d hslot=%d htod %X seq %X\n",
		 entry->name, inff_skb_if_flags_get_field(skb, INDEX),
		 (le32_to_cpu(pkttag) >> 8) & 0xffff,
		 inff_skbcb(skb)->htod, inff_skbcb(skb)->htod_seq);
	if (entry->send_tim_signal)
		data_offset += 2 + INFF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
	if (INFF_FWS_MODE_GET_REUSESEQ(fws->mode))
		data_offset += INFF_FWS_TYPE_SEQ_LEN;
	/* +2 is for Type[1] and Len[1] in TLV, plus TIM signal */
	data_offset += 2 + INFF_FWS_TYPE_PKTTAG_LEN;
	fillers = round_up(data_offset, 4) - data_offset;
	data_offset += fillers;

	skb_push(skb, data_offset);
	wlh = skb->data;

	wlh[0] = INFF_FWS_TYPE_PKTTAG;
	wlh[1] = INFF_FWS_TYPE_PKTTAG_LEN;
	memcpy(&wlh[2], &pkttag, sizeof(pkttag));
	if (INFF_FWS_MODE_GET_REUSESEQ(fws->mode)) {
		wlh[1] += INFF_FWS_TYPE_SEQ_LEN;
		memcpy(&wlh[2 + INFF_FWS_TYPE_PKTTAG_LEN], &pktseq,
		       sizeof(pktseq));
	}
	wlh += wlh[1] + 2;

	if (entry->send_tim_signal) {
		entry->send_tim_signal = false;
		wlh[0] = INFF_FWS_TYPE_PENDING_TRAFFIC_BMP;
		wlh[1] = INFF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN;
		wlh[2] = entry->mac_handle;
		wlh[3] = entry->traffic_pending_bmp;
		inff_dbg(TRACE, "adding TIM info: handle %d bmp 0x%X\n",
			 entry->mac_handle, entry->traffic_pending_bmp);
		wlh += INFF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2;
		entry->traffic_lastreported_bmp = entry->traffic_pending_bmp;
	}
	if (fillers)
		memset(wlh, INFF_FWS_TYPE_FILLER, fillers);

	return (u8)(data_offset >> 2);
}

/*
 * Recompute the traffic-pending bit for @fifo and, when the bitmap changed
 * and the entry is closed, optionally push a dummy "TIM" packet so the
 * firmware learns of pending traffic immediately. Returns true if such a
 * packet was sent. Called with the fws lock held; drops and re-takes it
 * around the proto txdata call.
 */
static bool inff_fws_tim_update(struct inff_fws_info *fws,
				struct inff_fws_mac_descriptor *entry,
				int fifo, bool send_immediately)
{
	struct sk_buff *skb;
	struct inff_skbuff_cb *skcb;
	s32 err;
	u32 len;
	u8 data_offset;
	int ifidx;

	/* check delayedQ and suppressQ in one call using bitmap */
	if (inff_pktq_mlen(&entry->psq, 3 << (fifo * 2)) == 0)
		entry->traffic_pending_bmp &= ~NBITVAL(fifo);
	else
		entry->traffic_pending_bmp |= NBITVAL(fifo);

	entry->send_tim_signal = false;
	if (entry->traffic_lastreported_bmp != entry->traffic_pending_bmp)
		entry->send_tim_signal = true;
	if (send_immediately && entry->send_tim_signal &&
	    entry->state == INFF_FWS_STATE_CLOSE) {
		/* create a dummy packet and sent that. The traffic */
		/* bitmap info will automatically be attached to that packet */
		len = INFF_FWS_TYPE_PKTTAG_LEN + 2 +
		      INFF_FWS_TYPE_SEQ_LEN +
		      INFF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN + 2 +
		      4 + fws->drvr->hdrlen;
		skb = inff_pkt_buf_get_skb(len);
		if (!skb)
			return false;
		skb_pull(skb, len);
		skcb = inff_skbcb(skb);
		skcb->mac = entry;
		skcb->state = INFF_FWS_SKBSTATE_TIM;
		skcb->htod = 0;
		skcb->htod_seq = 0;
		data_offset = inff_fws_hdrpush(fws, skb);
		ifidx = inff_skb_if_flags_get_field(skb, INDEX);
		inff_fws_unlock(fws);
		err = inff_proto_txdata(fws->drvr, ifidx, data_offset, skb);
		inff_fws_lock(fws);
		if (err)
			inff_pkt_buf_free_skb(skb);
		return true;
	}
	return false;
}

/* Handle an RSSI TLV from firmware; currently only logged */
static int inff_fws_rssi_indicate(struct inff_fws_info *fws, s8 rssi)
{
	inff_dbg(CTL, "rssi %d\n", rssi);
	return 0;
}

/*
 * Handle MACDESC_ADD/MACDESC_DEL TLVs: create, delete or relocate the
 * node descriptor identified by the firmware-assigned mac handle.
 * @data layout: [0] mac_handle, [1] ifidx, [2..] ethernet address.
 */
static
int inff_fws_macdesc_indicate(struct inff_fws_info *fws, u8 type, u8 *data)
{
	struct inff_fws_mac_descriptor *entry, *existing;
	u8 mac_handle;
	u8 ifidx;
	u8 *addr;

	mac_handle = *data++;
	ifidx = *data++;
	addr = data;

	entry = &fws->desc.nodes[mac_handle & 0x1F];
	if (type == INFF_FWS_TYPE_MACDESC_DEL) {
		if (entry->occupied) {
			inff_dbg(TRACE, "deleting %s mac %pM\n",
				 entry->name, addr);
			inff_fws_lock(fws);
			inff_fws_macdesc_cleanup(fws, entry, -1);
			inff_fws_macdesc_deinit(entry);
			inff_fws_unlock(fws);
		} else {
			fws->stats.mac_update_failed++;
		}
		return 0;
	}

	existing = inff_fws_macdesc_lookup(fws, addr);
	if (IS_ERR(existing)) {
		if (!entry->occupied) {
			inff_fws_lock(fws);
			entry->mac_handle = mac_handle;
			inff_fws_macdesc_init(entry, addr, ifidx);
			inff_fws_macdesc_set_name(fws, entry);
			inff_pktq_init(&entry->psq,
				       INFF_FWS_PSQ_PREC_COUNT,
				       fws->fws_psq_len);
			inff_fws_unlock(fws);
			inff_dbg(TRACE, "add %s mac %pM\n", entry->name, addr);
		} else {
			fws->stats.mac_update_failed++;
		}
	} else {
		if (entry != existing) {
			inff_dbg(TRACE, "copy mac %s\n", existing->name);
			inff_fws_lock(fws);
			/* copy everything up to (not including) the psq */
			memcpy(entry, existing,
			       offsetof(struct inff_fws_mac_descriptor, psq));
			entry->mac_handle = mac_handle;
			inff_fws_macdesc_deinit(existing);
			inff_fws_macdesc_set_name(fws, entry);
			inff_fws_unlock(fws);
			inff_dbg(TRACE, "relocate %s mac %pM\n", entry->name,
				 addr);
		} else {
			inff_dbg(TRACE, "use existing\n");
			WARN_ON(entry->mac_handle != mac_handle);
			/* TODO: what should we do here: continue, reinit, .. */
		}
	}
	return 0;
}

/*
 * Handle MAC_OPEN/MAC_CLOSE TLVs: update a node's open/close state and,
 * on close, refresh the per-AC TIM bitmaps (sending the last immediately).
 */
static int inff_fws_macdesc_state_indicate(struct inff_fws_info *fws,
					   u8 type, u8 *data)
{
	struct inff_fws_mac_descriptor *entry;
	u8 mac_handle;
	int ret;

	mac_handle = data[0];
	entry = &fws->desc.nodes[mac_handle & 0x1F];
	if (!entry->occupied) {
		fws->stats.mac_ps_update_failed++;
		return -ESRCH;
	}
	inff_fws_lock(fws);
	/* a state update should wipe old credits */
	entry->requested_credit = 0;
	entry->requested_packet = 0;
	if (type == INFF_FWS_TYPE_MAC_OPEN) {
		entry->state = INFF_FWS_STATE_OPEN;
		ret = INFF_FWS_RET_OK_SCHEDULE;
	} else {
		entry->state = INFF_FWS_STATE_CLOSE;
		inff_fws_tim_update(fws, entry, INFF_FWS_FIFO_AC_BK, false);
		inff_fws_tim_update(fws, entry, INFF_FWS_FIFO_AC_BE, false);
		inff_fws_tim_update(fws, entry, INFF_FWS_FIFO_AC_VI, false);
		inff_fws_tim_update(fws, entry, INFF_FWS_FIFO_AC_VO, true);
		ret = INFF_FWS_RET_OK_NOSCHEDULE;
	}
	inff_fws_unlock(fws);
	return ret;
}

/* Handle INTERFACE_OPEN/INTERFACE_CLOSE TLVs for interface @data[0] */
static int inff_fws_interface_state_indicate(struct inff_fws_info *fws,
					     u8 type, u8 *data)
{
	struct inff_fws_mac_descriptor *entry;
	u8 ifidx;
	int ret;

	ifidx = data[0];

	if (ifidx >= INFF_MAX_IFS) {
		ret = -ERANGE;
		goto fail;
	}

	entry = &fws->desc.iface[ifidx];
	if (!entry->occupied) {
		ret = -ESRCH;
		goto fail;
	}

	inff_dbg(TRACE, "%s (%d): %s\n", inff_fws_get_tlv_name(type), type,
		 entry->name);
	inff_fws_lock(fws);
	switch (type) {
	case INFF_FWS_TYPE_INTERFACE_OPEN:
		entry->state = INFF_FWS_STATE_OPEN;
		ret = INFF_FWS_RET_OK_SCHEDULE;
		break;
	case INFF_FWS_TYPE_INTERFACE_CLOSE:
		entry->state = INFF_FWS_STATE_CLOSE;
		ret = INFF_FWS_RET_OK_NOSCHEDULE;
		break;
	default:
		ret = -EINVAL;
		inff_fws_unlock(fws);
		goto fail;
	}
	inff_fws_unlock(fws);
	return ret;

fail:
	fws->stats.if_update_failed++;
	return ret;
}

/*
 * Handle MAC_REQUEST_CREDIT/MAC_REQUEST_PACKET TLVs: record the count the
 * firmware requested for a closed node. @data: [0] count, [1] mac handle,
 * [2] ac bitmap.
 */
static int inff_fws_request_indicate(struct inff_fws_info *fws, u8 type,
				     u8 *data)
{
	struct inff_fws_mac_descriptor *entry;

	entry = &fws->desc.nodes[data[1] & 0x1F];
	if (!entry->occupied) {
		if (type == INFF_FWS_TYPE_MAC_REQUEST_CREDIT)
			fws->stats.credit_request_failed++;
		else
			fws->stats.packet_request_failed++;
		return -ESRCH;
	}

	inff_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n",
		 inff_fws_get_tlv_name(type), type, entry->name,
		 data[0], data[2]);
	inff_fws_lock(fws);
	if (type == INFF_FWS_TYPE_MAC_REQUEST_CREDIT)
		entry->requested_credit = data[0];
	else
		entry->requested_packet = data[0];

	entry->ac_bitmap = data[2];
	inff_fws_unlock(fws);
	return INFF_FWS_RET_OK_SCHEDULE;
}

/* Consume one fw-requested credit/packet for @entry and tag @skb accordingly */
static void
inff_fws_macdesc_use_req_credit(struct inff_fws_mac_descriptor *entry,
				struct sk_buff *skb)
{
	if (entry->requested_credit > 0) {
		entry->requested_credit--;
		inff_skb_if_flags_set_field(skb, REQUESTED, 1);
		inff_skb_if_flags_set_field(skb, REQ_CREDIT, 1);
		if (entry->state != INFF_FWS_STATE_CLOSE)
			inff_err("requested credit set while mac not closed!\n");
	} else if (entry->requested_packet > 0) {
		entry->requested_packet--;
		inff_skb_if_flags_set_field(skb, REQUESTED, 1);
		inff_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
		if (entry->state != INFF_FWS_STATE_CLOSE)
			inff_err("requested packet set while mac not closed!\n");
	} else {
		inff_skb_if_flags_set_field(skb, REQUESTED, 0);
		inff_skb_if_flags_set_field(skb, REQ_CREDIT, 0);
	}
}

/* Give back the requested credit consumed for @skb if its node is closed */
static void inff_fws_macdesc_return_req_credit(struct sk_buff *skb)
{
	struct inff_fws_mac_descriptor *entry = inff_skbcb(skb)->mac;

	if ((inff_skb_if_flags_get_field(skb, REQ_CREDIT)) &&
	    entry->state == INFF_FWS_STATE_CLOSE)
		entry->requested_credit++;
}

/*
 * Return @credits to @fifo, first repaying any credits it borrowed from
 * the AC fifos (highest lender first), then topping up its own pool,
 * capped at the initial credit level.
 */
static void inff_fws_return_credits(struct inff_fws_info *fws,
				    u8 fifo, u8 credits)
{
	int lender_ac;
	int *borrowed;
	int *fifo_credit;

	if (!credits)
		return;

	fws->fifo_credit_map |= 1 << fifo;

	if (fifo >= INFF_FWS_FIFO_AC_BK &&
	    fifo <= INFF_FWS_FIFO_AC_VO)
		for (lender_ac = INFF_FWS_FIFO_AC_VO; lender_ac >= 0;
		     lender_ac--) {
			borrowed = &fws->credits_borrowed[fifo][lender_ac];
			if (*borrowed) {
				fws->fifo_credit_map |= (1 << lender_ac);
				fifo_credit = &fws->fifo_credit[lender_ac];
				if (*borrowed >= credits) {
					*borrowed -= credits;
					*fifo_credit += credits;
					return;
				}

				credits -= *borrowed;
				*fifo_credit += *borrowed;
				*borrowed = 0;
			}
		}

	if (credits)
		fws->fifo_credit[fifo] += credits;

	if (fws->fifo_credit[fifo] > fws->init_fifo_credit[fifo])
		fws->fifo_credit[fifo] = fws->init_fifo_credit[fifo];
}

/* Kick the dequeue worker when delayed traffic can actually be sent */
static void inff_fws_schedule_deq(struct inff_fws_info *fws)
{
	/* only schedule dequeue when there are credits for delayed traffic */
	if ((fws->fifo_credit_map & fws->fifo_delay_map) ||
	    (!inff_fws_fc_active(fws) && fws->fifo_delay_map))
		queue_work(fws->fws_wq, &fws->fws_dequeue_work);
}

/*
 * Queue packet @p on its node's psq for @fifo. Suppressed packets go to the
 * odd precedence and are inserted in FREERUN order (with 8-bit wrap handling)
 * to preserve delivery order; delayed packets simply go to the tail.
 */
static int inff_fws_enq(struct inff_fws_info *fws,
			enum inff_fws_skb_state state, int fifo,
			struct sk_buff *p)
{
	struct inff_pub *drvr = fws->drvr;
	int prec = 2 * fifo;
	u32 *qfull_stat = &fws->stats.delayq_full_error;
	struct inff_fws_mac_descriptor *entry;
	struct pktq *pq;
	struct sk_buff_head *queue;
	struct sk_buff *p_head;
	struct sk_buff *p_tail;
	u32 fr_new;
	u32 fr_compare;

	entry = inff_skbcb(p)->mac;
	if (!entry) {
		iphy_err(drvr, "no mac descriptor found for skb %p\n", p);
		return -ENOENT;
	}

	inff_dbg(DATA, "enter: fifo %d skb %p\n", fifo, p);
	if (state == INFF_FWS_SKBSTATE_SUPPRESSED) {
		prec += 1;
		qfull_stat = &fws->stats.supprq_full_error;

		/* Fix out of order delivery of frames. Dont assume frame */
		/* can be inserted at the end, but look for correct position */
		pq = &entry->psq;
		if (pktq_full(pq) || pktq_pfull(pq, prec)) {
			*qfull_stat += 1;
			return -ENFILE;
		}
		queue = &pq->q[prec].skblist;

		p_head = skb_peek(queue);
		p_tail = skb_peek_tail(queue);
		fr_new = inff_skb_htod_tag_get_field(p, FREERUN);

		while (p_head != p_tail) {
			fr_compare = inff_skb_htod_tag_get_field(p_tail,
								 FREERUN);
			/* be sure to handle wrap of 256 */
			if ((fr_new > fr_compare &&
			     ((fr_new - fr_compare) < 128)) ||
			    (fr_new < fr_compare &&
			     ((fr_compare - fr_new) > 128)))
				break;
			p_tail = skb_queue_prev(queue, p_tail);
		}
		/* Position found. Determine what to do */
		if (!p_tail) {
			/* empty list */
			__skb_queue_tail(queue, p);
		} else {
			fr_compare = inff_skb_htod_tag_get_field(p_tail,
								 FREERUN);
			if ((fr_new > fr_compare &&
			     ((fr_new - fr_compare) < 128)) ||
			    (fr_new < fr_compare &&
			     ((fr_compare - fr_new) > 128))) {
				/* After tail */
				__skb_queue_after(queue, p_tail, p);
			} else {
				/* Before tail */
				__skb_insert(p, p_tail->prev, p_tail, queue);
			}
		}

		/* Complete the counters and statistics */
		pq->len++;
		if (pq->hi_prec < prec)
			pq->hi_prec = (u8)prec;
	} else if (!inff_pktq_penq(&entry->psq, prec, p)) {
		*qfull_stat += 1;
		return -ENFILE;
	}

	/* increment total enqueued packet count */
	fws->fifo_delay_map |= 1 << fifo;
	fws->fifo_enqpkt[fifo]++;

	/* update the sk_buff state */
	inff_skbcb(p)->state = state;

	/*
	 * A packet has been pushed so update traffic
	 * availability bitmap, if applicable
	 */
	inff_fws_tim_update(fws, entry, fifo, true);
	inff_fws_flow_control_check(fws, &entry->psq,
				    inff_skb_if_flags_get_field(p, INDEX));
	return 0;
}

/*
 * Dequeue the next packet for @fifo, round-robining over all descriptors
 * (nodes, interfaces and "other", which are laid out contiguously in
 * fws->desc). Suppressed entries only drain their suppress queue until the
 * in-transit suppressed packets are flushed.
 */
static struct sk_buff *inff_fws_deq(struct inff_fws_info *fws, int fifo)
{
	struct inff_fws_mac_descriptor *table;
	struct inff_fws_mac_descriptor *entry;
	struct sk_buff *p;
	int num_nodes;
	int node_pos;
	int prec_out;
	int pmsk;
	int i;

	table = (struct inff_fws_mac_descriptor *)&fws->desc;
	num_nodes = sizeof(fws->desc) / sizeof(struct inff_fws_mac_descriptor);
	node_pos = fws->deq_node_pos[fifo];

	for (i = 0; i < num_nodes; i++) {
		entry = &table[(node_pos + i) % num_nodes];
		if (!entry->occupied ||
		    inff_fws_macdesc_closed(fws, entry, fifo))
			continue;

		if (entry->suppressed)
			pmsk = 2;
		else
			pmsk = 3;
		p = inff_pktq_mdeq(&entry->psq, pmsk << (fifo * 2), &prec_out);
		if (!p) {
			if (entry->suppressed) {
				if (entry->suppr_transit_count)
					continue;
				entry->suppressed = false;
				p = inff_pktq_mdeq(&entry->psq,
						   1 << (fifo * 2), &prec_out);
			}
		}
		if (!p)
			continue;

		inff_fws_macdesc_use_req_credit(entry, p);

		/* move dequeue position to ensure fair round-robin */
		fws->deq_node_pos[fifo] = (node_pos + i + 1) % num_nodes;
		inff_fws_flow_control_check(fws, &entry->psq,
					    inff_skb_if_flags_get_field(p,
									INDEX));
		/*
		 * A packet has been picked up, update traffic
		 * availability bitmap, if applicable
		 */
		inff_fws_tim_update(fws, entry, fifo, false);

		/*
		 * decrement total enqueued fifo packets and
		 * clear delay bitmap if done.
		 */
		fws->fifo_enqpkt[fifo]--;
		if (fws->fifo_enqpkt[fifo] == 0)
			fws->fifo_delay_map &= ~(1 << fifo);
		goto done;
	}
	p = NULL;
done:
	inff_dbg(DATA, "exit: fifo %d skb %p\n", fifo, p);
	return p;
}

/*
 * Re-queue a packet that firmware reported as suppressed: flag its node
 * suppressed, refresh generation/sequence tags and push it on the suppress
 * queue. On queue-full the packet is popped from the hanger (caller frees);
 * on success its hanger slot is marked suppressed.
 */
static int inff_fws_txstatus_suppressed(struct inff_fws_info *fws, int fifo,
					struct sk_buff *skb,
					u32 genbit, u16 seq)
{
	struct inff_fws_mac_descriptor *entry = inff_skbcb(skb)->mac;
	u32 hslot;
	int ret;

	hslot = inff_skb_htod_tag_get_field(skb, HSLOT);

	/* this packet was suppressed */
	if (!entry->suppressed) {
		entry->suppressed = true;
		entry->suppr_transit_count = entry->transit_count;
		inff_dbg(DATA, "suppress %s: transit %d\n",
			 entry->name, entry->transit_count);
	}

	entry->generation = genbit;

	inff_skb_htod_tag_set_field(skb, GENERATION, genbit);
	inff_skbcb(skb)->htod_seq = seq;
	if (inff_skb_htod_seq_get_field(skb, FROMFW)) {
		inff_skb_htod_seq_set_field(skb, FROMDRV, 1);
		inff_skb_htod_seq_set_field(skb, FROMFW, 0);
	} else {
		inff_skb_htod_seq_set_field(skb, FROMDRV, 0);
	}
	ret = inff_fws_enq(fws, INFF_FWS_SKBSTATE_SUPPRESSED, fifo, skb);

	if (ret != 0) {
		/* suppress q is full drop this packet */
		inff_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true);
	} else {
		/* Mark suppressed to avoid a double free during wlfc cleanup */
		inff_fws_hanger_mark_suppressed(&fws->hanger, hslot);
	}

	return ret;
}

/*
 * Process a (possibly compressed) tx status covering @compcnt consecutive
 * hanger slots starting at @hslot: account stats per @flags, return
 * credits, and either finalize each packet or re-queue it as suppressed.
 */
static int
inff_fws_txs_process(struct inff_fws_info *fws, u8 flags, u32 hslot,
		     u32 genbit, u16 seq, u8 compcnt)
{
	struct inff_pub *drvr = fws->drvr;
	u32 fifo;
	u8 cnt = 0;
	int ret;
	bool remove_from_hanger = true;
	struct sk_buff *skb;
	struct inff_skbuff_cb *skcb;
	struct inff_fws_mac_descriptor *entry = NULL;
	struct inff_if *ifp;

	inff_dbg(DATA, "flags %d\n", flags);

	if (flags == INFF_FWS_TXSTATUS_DISCARD) {
		fws->stats.txs_discard += compcnt;
	} else if (flags == INFF_FWS_TXSTATUS_CORE_SUPPRESS) {
		fws->stats.txs_supp_core += compcnt;
		remove_from_hanger = false;
	} else if (flags == INFF_FWS_TXSTATUS_FW_PS_SUPPRESS) {
		fws->stats.txs_supp_ps += compcnt;
		remove_from_hanger = false;
	} else if (flags == INFF_FWS_TXSTATUS_FW_TOSSED) {
		fws->stats.txs_tossed += compcnt;
	} else if (flags == INFF_FWS_TXSTATUS_FW_DISCARD_NOACK) {
		fws->stats.txs_discard += compcnt;
	} else if (flags == INFF_FWS_TXSTATUS_FW_SUPPRESS_ACKED) {
		fws->stats.txs_discard += compcnt;
	} else if (flags == INFF_FWS_TXSTATUS_HOST_TOSSED) {
		fws->stats.txs_host_tossed += compcnt;
	} else {
		iphy_err(drvr, "unexpected txstatus\n");
	}

	while (cnt < compcnt) {
		ret = inff_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
					     remove_from_hanger);
		if (ret != 0) {
			iphy_err(drvr, "no packet in hanger slot: hslot=%d\n",
				 hslot);
			goto cont;
		}

		skcb = inff_skbcb(skb);
		entry = skcb->mac;
		if (WARN_ON(!entry)) {
			inff_pkt_buf_free_skb(skb);
			goto cont;
		}
		entry->transit_count--;
		if (entry->suppressed && entry->suppr_transit_count)
			entry->suppr_transit_count--;

		inff_dbg(DATA, "%s flags %d htod %X seq %X\n", entry->name,
			 flags, skcb->htod, seq);

		/* pick up the implicit credit from this packet */
		fifo = inff_skb_htod_tag_get_field(skb, FIFO);
		if (fws->fcmode == INFF_FWS_FCMODE_IMPLIED_CREDIT ||
		    (inff_skb_if_flags_get_field(skb, REQ_CREDIT)) ||
		    flags == INFF_FWS_TXSTATUS_HOST_TOSSED) {
			inff_fws_return_credits(fws, fifo, 1);
			inff_fws_schedule_deq(fws);
		}
		inff_fws_macdesc_return_req_credit(skb);

		ret = inff_proto_hdrpull(fws->drvr, false, skb, &ifp);
		if (ret) {
			inff_pkt_buf_free_skb(skb);
			goto cont;
		}
		if (!remove_from_hanger)
			ret = inff_fws_txstatus_suppressed(fws, fifo, skb,
							   genbit, seq);
		if (remove_from_hanger || ret)
			inff_txfinalize(ifp, skb, true);

cont:
		hslot = (hslot + 1) & (INFF_FWS_TXSTAT_HSLOT_MASK >>
				       INFF_FWS_TXSTAT_HSLOT_SHIFT);
		if (INFF_FWS_MODE_GET_REUSESEQ(fws->mode))
			seq = (seq + 1) & INFF_SKB_HTOD_SEQ_NR_MASK;

		cnt++;
	}

	return 0;
}

/*
 * After a bus error, compare the host's credit bookkeeping per FIFO with
 * the firmware's tx/creditback counters carried in @data and return any
 * credits the host lost track of.
 */
static void inff_fws_credit_auto_recover(struct inff_fws_info *fws, u8 *data)
{
	int fifo, i;
	u8 *fw_tx = data + INFF_FIFO_CREDITBACK_TX_OFFSET;
	u8 *fw_back = data;
	int borrowed = 0;
	int loan = 0;
	int missing = 0;
	int host_record_credit;
	int in_fw_credit;

	inff_dbg(SDIO, "Enter: tx %pM back %pM\n", fw_tx, fw_back);
	inff_dbg(SDIO, "Enter: credit [BK]:%d [BE]:%d [VI]:%d [VO]:%d [BCMC]:%d\n",
		 fws->fifo_credit[0], fws->fifo_credit[1], fws->fifo_credit[2],
		 fws->fifo_credit[3], fws->fifo_credit[4]);

	/* must check from highest priority FIFO */
	for (fifo = INFF_FWS_FIFO_COUNT - 1; fifo >= INFF_FWS_FIFO_AC_BK; fifo--) {
		/* if no credit lost, continue to check next FIFO */
		if (fws->init_fifo_credit[fifo] == fws->fifo_credit[fifo])
			continue;

		inff_dbg(SDIO, "FIFO %d init: %d current: %d\n",
			 fifo, fws->init_fifo_credit[fifo], fws->fifo_credit[fifo]);

		if (fifo <= INFF_FWS_FIFO_AC_VO) {
			/* how many credit are borrowed from other FIFO */
			for (i = 0; i <= INFF_FWS_FIFO_AC_VO; i++)
				borrowed += fws->credits_borrowed[fifo][i];

			/* how many credit are lend to other FIFO */
			for (i = 0; i <= INFF_FWS_FIFO_AC_VO; i++)
				loan += fws->credits_borrowed[i][fifo];

			inff_dbg(SDIO, "borrowed: %d loan: %d\n", borrowed, loan);
		}

		/* calculate missed credit */
		host_record_credit = fws->init_fifo_credit[fifo] -
				     fws->fifo_credit[fifo] + borrowed - loan;
		in_fw_credit = fw_tx[fifo] - fw_back[fifo];
		missing = host_record_credit - in_fw_credit;
		inff_dbg(SDIO, "host %d fw %d missing: %d\n",
			 host_record_credit, in_fw_credit, missing);

		if (missing > 0)
			inff_fws_return_credits(fws, fifo, missing);
	}

	inff_dbg(SDIO, "Leave: credit [BK]:%d [BE]:%d [VI]:%d [VO]:%d [BCMC]:%d\n",
		 fws->fifo_credit[0], fws->fifo_credit[1], fws->fifo_credit[2],
		 fws->fifo_credit[3], fws->fifo_credit[4]);
}

/* Note a bus receive error and arm credit recovery for the next creditback */
void inff_fws_recv_err(struct inff_pub *drvr)
{
	struct inff_fws_info *fws = NULL;

	if (!drvr)
		return;

	fws = drvr_to_fws(drvr);

	if (!fws)
		return;

	inff_dbg(SDIO, "Enter\n");

	inff_fws_lock(fws);
	fws->stats.cnt_recv_err++;
	fws->credit_recover = true;
	fws->sdio_recv_error = true;
	inff_fws_unlock(fws);
}

/*
 * Handle a FIFO_CREDITBACK TLV: return the per-FIFO credits in @data and,
 * for the V2 length with recovery armed, run credit auto-recovery.
 * Only meaningful in explicit-credit mode.
 */
static int inff_fws_fifocreditback_indicate(struct inff_fws_info *fws,
					    s16 len, u8 *data)
{
	int i;

	if (fws->fcmode != INFF_FWS_FCMODE_EXPLICIT_CREDIT) {
		inff_dbg(INFO, "ignored\n");
		return INFF_FWS_RET_OK_NOSCHEDULE;
	}

	inff_dbg(DATA, "enter: data %pM\n", data);
	inff_fws_lock(fws);
	for (i = 0; i < INFF_FWS_FIFO_COUNT; i++)
		inff_fws_return_credits(fws, i, data[i]);

	inff_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map,
		 fws->fifo_delay_map);

	/* when bus error happened, try to recover lost credit */
	if (len == INFF_FWS_TYPE_FIFO_CREDITBACK_V2_LEN && fws->credit_recover) {
		inff_err("Trigger credit recover\n");
		inff_fws_credit_auto_recover(fws, data);
		fws->credit_recover = false;
	}
	inff_fws_unlock(fws);
	return INFF_FWS_RET_OK_SCHEDULE;
}

/*
 * Decode a TXSTATUS/COMP_TXSTATUS TLV into flags/hslot/generation (body
 * continues beyond this chunk).
 */
static int inff_fws_txstatus_indicate(struct inff_fws_info *fws, u8 type,
				      u8 *data)
{
	__le32 status_le;
	__le16 seq_le;
	u32 status;
	u32 hslot;
	u32 genbit;
	u8 flags;
	u16 seq;
	u8 compcnt;
	u8 compcnt_offset = INFF_FWS_TYPE_TXSTATUS_LEN;

	memcpy(&status_le, data, sizeof(status_le));
	status = le32_to_cpu(status_le);
	flags = inff_txstatus_get_field(status, FLAGS);
	hslot = inff_txstatus_get_field(status, HSLOT);
	genbit = inff_txstatus_get_field(status, GENERATION);
	if
(INFF_FWS_MODE_GET_REUSESEQ(fws->mode)) { + memcpy(&seq_le, &data[INFF_FWS_TYPE_TXSTATUS_LEN], + sizeof(seq_le)); + seq = le16_to_cpu(seq_le); + compcnt_offset += INFF_FWS_TYPE_SEQ_LEN; + } else { + seq = 0; + } + + if (type == INFF_FWS_TYPE_COMP_TXSTATUS) + compcnt = data[compcnt_offset]; + else + compcnt = 1; + fws->stats.txs_indicate += compcnt; + + inff_fws_lock(fws); + inff_fws_txs_process(fws, flags, hslot, genbit, seq, compcnt); + inff_fws_unlock(fws); + return INFF_FWS_RET_OK_NOSCHEDULE; +} + +static int inff_fws_dbg_seqnum_check(struct inff_fws_info *fws, u8 *data) +{ + __le32 timestamp; + + memcpy(&timestamp, &data[2], sizeof(timestamp)); + inff_dbg(CTL, "received: seq %d, timestamp %d\n", data[1], + le32_to_cpu(timestamp)); + return 0; +} + +static int inff_fws_notify_credit_map(struct inff_if *ifp, + const struct inff_event_msg *e, + void *data) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_fws_info *fws = drvr_to_fws(drvr); + int i; + u8 *credits = data; + + if (e->datalen < INFF_FWS_FIFO_COUNT) { + iphy_err(drvr, "event payload too small (%d)\n", e->datalen); + return -EINVAL; + } + + fws->creditmap_received = true; + + inff_dbg(TRACE, "enter: credits %pM\n", credits); + inff_fws_lock(fws); + for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) { + fws->fifo_credit[i] += credits[i] - fws->init_fifo_credit[i]; + fws->init_fifo_credit[i] = credits[i]; + if (fws->fifo_credit[i] > 0) + fws->fifo_credit_map |= 1 << i; + else + fws->fifo_credit_map &= ~(1 << i); + WARN_ONCE(fws->fifo_credit[i] < 0, + "fifo_credit[%d] is negative(%d)\n", i, + fws->fifo_credit[i]); + } + inff_fws_schedule_deq(fws); + inff_fws_unlock(fws); + return 0; +} + +static int inff_fws_notify_bcmc_credit_support(struct inff_if *ifp, + const struct inff_event_msg *e, + void *data) +{ + struct inff_fws_info *fws = drvr_to_fws(ifp->drvr); + + if (fws) { + inff_fws_lock(fws); + fws->bcmc_credit_check = true; + inff_fws_unlock(fws); + } + return 0; +} + +static void 
inff_rxreorder_get_skb_list(struct inff_ampdu_rx_reorder *rfi, + u8 start, u8 end, + struct sk_buff_head *skb_list) +{ + /* initialize return list */ + __skb_queue_head_init(skb_list); + + if (rfi->pend_pkts == 0) { + inff_dbg(INFO, "no packets in reorder queue\n"); + return; + } + + do { + if (rfi->pktslots[start]) { + __skb_queue_tail(skb_list, rfi->pktslots[start]); + rfi->pktslots[start] = NULL; + } + start++; + if (start > rfi->max_idx) + start = 0; + } while (start != end); + rfi->pend_pkts -= skb_queue_len(skb_list); +} + +void inff_fws_rxreorder(struct inff_if *ifp, struct sk_buff *pkt, bool inirq) +{ + struct inff_pub *drvr = ifp->drvr; + u8 *reorder_data; + u8 flow_id, max_idx, cur_idx, exp_idx, end_idx; + struct inff_ampdu_rx_reorder *rfi; + struct sk_buff_head reorder_list; + struct sk_buff *pnext; + u8 flags; + + reorder_data = ((struct inff_skb_reorder_data *)pkt->cb)->reorder; + flow_id = reorder_data[INFF_RXREORDER_FLOWID_OFFSET]; + flags = reorder_data[INFF_RXREORDER_FLAGS_OFFSET]; + + /* validate flags and flow id */ + if (flags == 0xFF) { + iphy_err(drvr, "invalid flags...so ignore this packet\n"); + inff_netif_rx(ifp, pkt, inirq); + return; + } + + rfi = ifp->drvr->reorder_flows[flow_id]; + if (flags & INFF_RXREORDER_DEL_FLOW) { + inff_dbg(INFO, "flow-%d: delete\n", + flow_id); + + if (!rfi) { + inff_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n", + flow_id); + inff_netif_rx(ifp, pkt, inirq); + return; + } + + inff_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx, + &reorder_list); + /* add the last packet */ + __skb_queue_tail(&reorder_list, pkt); + kfree(rfi); + ifp->drvr->reorder_flows[flow_id] = NULL; + goto netif_rx; + } + /* from here on we need a flow reorder instance */ + if (!rfi) { + max_idx = reorder_data[INFF_RXREORDER_MAXIDX_OFFSET]; + + /* allocate space for flow reorder info */ + inff_dbg(INFO, "flow-%d: start, maxidx %d\n", + flow_id, max_idx); + rfi = kzalloc(struct_size(rfi, pktslots, max_idx + 1), + 
GFP_ATOMIC); + if (!rfi) { + inff_netif_rx(ifp, pkt, inirq); + return; + } + + ifp->drvr->reorder_flows[flow_id] = rfi; + rfi->max_idx = max_idx; + } + if (flags & INFF_RXREORDER_NEW_HOLE) { + if (rfi->pend_pkts) { + inff_rxreorder_get_skb_list(rfi, rfi->exp_idx, + rfi->exp_idx, + &reorder_list); + WARN_ON(rfi->pend_pkts); + } else { + __skb_queue_head_init(&reorder_list); + } + rfi->cur_idx = reorder_data[INFF_RXREORDER_CURIDX_OFFSET]; + rfi->exp_idx = reorder_data[INFF_RXREORDER_EXPIDX_OFFSET]; + rfi->max_idx = reorder_data[INFF_RXREORDER_MAXIDX_OFFSET]; + rfi->pktslots[rfi->cur_idx] = pkt; + rfi->pend_pkts++; + inff_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n", + flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts); + } else if (flags & INFF_RXREORDER_CURIDX_VALID) { + cur_idx = reorder_data[INFF_RXREORDER_CURIDX_OFFSET]; + exp_idx = reorder_data[INFF_RXREORDER_EXPIDX_OFFSET]; + + if (exp_idx == rfi->exp_idx && cur_idx != rfi->exp_idx) { + /* still in the current hole */ + /* enqueue the current on the buffer chain */ + if (rfi->pktslots[cur_idx]) { + inff_dbg(INFO, "HOLE: ERROR buffer pending..free it\n"); + inff_pkt_buf_free_skb(rfi->pktslots[cur_idx]); + rfi->pktslots[cur_idx] = NULL; + rfi->pend_pkts--; + } + rfi->pktslots[cur_idx] = pkt; + rfi->pend_pkts++; + rfi->cur_idx = cur_idx; + inff_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n", + flow_id, cur_idx, exp_idx, rfi->pend_pkts); + + /* can return now as there is no reorder + * list to process. + */ + return; + } + if (rfi->exp_idx == cur_idx) { + if (rfi->pktslots[cur_idx]) { + inff_dbg(INFO, "error buffer pending..free it\n"); + inff_pkt_buf_free_skb(rfi->pktslots[cur_idx]); + rfi->pktslots[cur_idx] = NULL; + rfi->pend_pkts--; + } + rfi->pktslots[cur_idx] = pkt; + rfi->pend_pkts++; + + /* got the expected one. 
flush from current to expected + * and update expected + */ + inff_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n", + flow_id, cur_idx, exp_idx, rfi->pend_pkts); + + rfi->cur_idx = cur_idx; + rfi->exp_idx = exp_idx; + + inff_rxreorder_get_skb_list(rfi, cur_idx, exp_idx, + &reorder_list); + inff_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n", + flow_id, skb_queue_len(&reorder_list), + rfi->pend_pkts); + } else { + u8 end_idx; + + inff_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n", + flow_id, flags, rfi->cur_idx, rfi->exp_idx, + cur_idx, exp_idx); + if (flags & INFF_RXREORDER_FLUSH_ALL) + end_idx = rfi->exp_idx; + else + end_idx = exp_idx; + + /* flush pkts first */ + inff_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx, + &reorder_list); + + if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) { + __skb_queue_tail(&reorder_list, pkt); + } else { + rfi->pktslots[cur_idx] = pkt; + rfi->pend_pkts++; + } + rfi->exp_idx = exp_idx; + rfi->cur_idx = cur_idx; + } + } else { + /* explicitly window move updating the expected index */ + exp_idx = reorder_data[INFF_RXREORDER_EXPIDX_OFFSET]; + + inff_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n", + flow_id, flags, rfi->exp_idx, exp_idx); + if (flags & INFF_RXREORDER_FLUSH_ALL) + end_idx = rfi->exp_idx; + else + end_idx = exp_idx; + + inff_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx, + &reorder_list); + __skb_queue_tail(&reorder_list, pkt); + /* set the new expected idx */ + rfi->exp_idx = exp_idx; + } +netif_rx: + skb_queue_walk_safe(&reorder_list, pkt, pnext) { + __skb_unlink(pkt, &reorder_list); + inff_netif_rx(ifp, pkt, inirq); + } +} + +void inff_fws_hdrpull(struct inff_if *ifp, s16 siglen, struct sk_buff *skb) +{ + struct inff_skb_reorder_data *rd; + struct inff_fws_info *fws = drvr_to_fws(ifp->drvr); + u8 *signal_data; + s16 data_len; + u8 type; + s16 len; + u8 *data; + s32 status; + s32 err; + + inff_dbg(HDRS, "enter: ifidx %d, skblen %u, sig %d\n", + ifp->ifidx, skb->len, 
siglen); + + WARN_ON(siglen > skb->len); + + if (siglen > skb->len) + siglen = skb->len; + + if (!siglen) + return; + /* if flow control disabled, skip to packet data and leave */ + if (!fws || !fws->fw_signals) { + skb_pull(skb, siglen); + return; + } + + fws->stats.header_pulls++; + data_len = siglen; + signal_data = skb->data; + + status = INFF_FWS_RET_OK_NOSCHEDULE; + while (data_len > 0) { + /* extract tlv info */ + type = signal_data[0]; + + /* FILLER type is actually not a TLV, but + * a single byte that can be skipped. + */ + if (type == INFF_FWS_TYPE_FILLER) { + signal_data += 1; + data_len -= 1; + continue; + } + len = signal_data[1]; + data = signal_data + 2; + + inff_dbg(HDRS, "tlv type=%s (%d), len=%d (%d)\n", + inff_fws_get_tlv_name(type), type, len, + inff_fws_get_tlv_len(fws, type)); + + /* abort parsing when length invalid */ + if (data_len < len + 2) + break; + + if (len < inff_fws_get_tlv_len(fws, type)) + break; + + err = INFF_FWS_RET_OK_NOSCHEDULE; + switch (type) { + case INFF_FWS_TYPE_HOST_REORDER_RXPKTS: + rd = (struct inff_skb_reorder_data *)skb->cb; + rd->reorder = data; + break; + case INFF_FWS_TYPE_MACDESC_ADD: + case INFF_FWS_TYPE_MACDESC_DEL: + inff_fws_macdesc_indicate(fws, type, data); + break; + case INFF_FWS_TYPE_MAC_OPEN: + case INFF_FWS_TYPE_MAC_CLOSE: + err = inff_fws_macdesc_state_indicate(fws, type, data); + break; + case INFF_FWS_TYPE_INTERFACE_OPEN: + case INFF_FWS_TYPE_INTERFACE_CLOSE: + err = inff_fws_interface_state_indicate(fws, type, + data); + break; + case INFF_FWS_TYPE_MAC_REQUEST_CREDIT: + case INFF_FWS_TYPE_MAC_REQUEST_PACKET: + err = inff_fws_request_indicate(fws, type, data); + break; + case INFF_FWS_TYPE_TXSTATUS: + case INFF_FWS_TYPE_COMP_TXSTATUS: + inff_fws_txstatus_indicate(fws, type, data); + break; + case INFF_FWS_TYPE_FIFO_CREDITBACK: + err = inff_fws_fifocreditback_indicate(fws, len, data); + break; + case INFF_FWS_TYPE_RSSI: + inff_fws_rssi_indicate(fws, *data); + break; + case INFF_FWS_TYPE_TRANS_ID: + 
inff_fws_dbg_seqnum_check(fws, data); + break; + case INFF_FWS_TYPE_PKTTAG: + case INFF_FWS_TYPE_PENDING_TRAFFIC_BMP: + default: + fws->stats.tlv_invalid_type++; + break; + } + if (err == INFF_FWS_RET_OK_SCHEDULE) + status = INFF_FWS_RET_OK_SCHEDULE; + signal_data += len + 2; + data_len -= len + 2; + } + + if (data_len != 0) + fws->stats.tlv_parse_failed++; + + if (status == INFF_FWS_RET_OK_SCHEDULE) + inff_fws_schedule_deq(fws); + + /* signalling processing result does + * not affect the actual ethernet packet. + */ + skb_pull(skb, siglen); + + /* this may be a signal-only packet + */ + if (skb->len == 0) + fws->stats.header_only_pkt++; +} + +static u8 inff_fws_precommit_skb(struct inff_fws_info *fws, int fifo, + struct sk_buff *p) +{ + struct inff_skbuff_cb *skcb = inff_skbcb(p); + struct inff_fws_mac_descriptor *entry = skcb->mac; + u8 flags; + + if (skcb->state != INFF_FWS_SKBSTATE_SUPPRESSED) + inff_skb_htod_tag_set_field(p, GENERATION, entry->generation); + flags = INFF_FWS_HTOD_FLAG_PKTFROMHOST; + if (inff_skb_if_flags_get_field(p, REQUESTED)) { + /* + * Indicate that this packet is being sent in response to an + * explicit request from the firmware side. 
+ */ + flags |= INFF_FWS_HTOD_FLAG_PKT_REQUESTED; + } + inff_skb_htod_tag_set_field(p, FLAGS, flags); + return inff_fws_hdrpush(fws, p); +} + +static void inff_fws_rollback_toq(struct inff_fws_info *fws, + struct sk_buff *skb, int fifo) +{ + struct inff_pub *drvr = fws->drvr; + struct inff_fws_mac_descriptor *entry; + struct sk_buff *pktout; + int qidx, hslot; + int rc = 0; + + entry = inff_skbcb(skb)->mac; + if (entry->occupied) { + qidx = 2 * fifo; + if (inff_skbcb(skb)->state == INFF_FWS_SKBSTATE_SUPPRESSED) + qidx++; + + pktout = inff_pktq_penq_head(&entry->psq, qidx, skb); + if (!pktout) { + iphy_err(drvr, "%s queue %d full\n", entry->name, qidx); + rc = -ENOSPC; + } + } else { + iphy_err(drvr, "%s entry removed\n", entry->name); + rc = -ENOENT; + } + + if (rc) { + fws->stats.rollback_failed++; + hslot = inff_skb_htod_tag_get_field(skb, HSLOT); + inff_fws_txs_process(fws, INFF_FWS_TXSTATUS_HOST_TOSSED, + hslot, 0, 0, 1); + } else { + fws->stats.rollback_success++; + inff_fws_return_credits(fws, fifo, 1); + inff_fws_macdesc_return_req_credit(skb); + } +} + +static int inff_fws_borrow_credit(struct inff_fws_info *fws, + int highest_lender_ac, int borrower_ac, + bool borrow_all) +{ + int lender_ac, borrow_limit = 0; + + for (lender_ac = 0; lender_ac <= highest_lender_ac; lender_ac++) { + if (!borrow_all) + borrow_limit = + fws->init_fifo_credit[lender_ac] / INFF_BORROW_RATIO; + else + borrow_limit = 0; + + if (fws->fifo_credit[lender_ac] > borrow_limit) { + fws->credits_borrowed[borrower_ac][lender_ac]++; + fws->fifo_credit[lender_ac]--; + if (fws->fifo_credit[lender_ac] == 0) + fws->fifo_credit_map &= ~(1 << lender_ac); + fws->fifo_credit_map |= (1 << borrower_ac); + inff_dbg(DATA, "borrow credit from: %d\n", lender_ac); + return 0; + } + } + fws->fifo_credit_map &= ~(1 << borrower_ac); + return -ENAVAIL; +} + +static int inff_fws_commit_skb(struct inff_fws_info *fws, int fifo, + struct sk_buff *skb) +{ + struct inff_skbuff_cb *skcb = inff_skbcb(skb); + struct 
inff_fws_mac_descriptor *entry; + int rc; + u8 ifidx; + u8 data_offset; + + entry = skcb->mac; + if (IS_ERR(entry)) + return PTR_ERR(entry); + + data_offset = inff_fws_precommit_skb(fws, fifo, skb); + entry->transit_count++; + if (entry->suppressed) + entry->suppr_transit_count++; + ifidx = inff_skb_if_flags_get_field(skb, INDEX); + inff_fws_unlock(fws); + rc = inff_proto_txdata(fws->drvr, ifidx, data_offset, skb); + inff_fws_lock(fws); + inff_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name, + skcb->if_flags, skcb->htod, rc); + if (rc < 0) { + entry->transit_count--; + if (entry->suppressed) + entry->suppr_transit_count--; + (void)inff_proto_hdrpull(fws->drvr, false, skb, NULL); + goto rollback; + } + + fws->stats.pkt2bus++; + fws->stats.send_pkts[fifo]++; + if (inff_skb_if_flags_get_field(skb, REQUESTED)) + fws->stats.requested_sent[fifo]++; + + return rc; + +rollback: + inff_fws_rollback_toq(fws, skb, fifo); + return rc; +} + +static int inff_fws_assign_htod(struct inff_fws_info *fws, struct sk_buff *p, + int fifo) +{ + struct inff_skbuff_cb *skcb = inff_skbcb(p); + int rc, hslot; + + skcb->htod = 0; + skcb->htod_seq = 0; + hslot = inff_fws_hanger_get_free_slot(&fws->hanger); + inff_skb_htod_tag_set_field(p, HSLOT, hslot); + inff_skb_htod_tag_set_field(p, FREERUN, skcb->mac->seq[fifo]); + inff_skb_htod_tag_set_field(p, FIFO, fifo); + rc = inff_fws_hanger_pushpkt(&fws->hanger, p, hslot); + if (!rc) + skcb->mac->seq[fifo]++; + else + fws->stats.generic_error++; + return rc; +} + +int inff_fws_process_skb(struct inff_if *ifp, struct sk_buff *skb) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_fws_info *fws = drvr_to_fws(drvr); + struct inff_skbuff_cb *skcb = inff_skbcb(skb); + struct ethhdr *eh = (struct ethhdr *)(skb->data); + int fifo = INFF_FWS_FIFO_BCMC; + bool multicast = is_multicast_ether_addr(eh->h_dest); + int rc = 0; + + inff_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto)); + + /* set control buffer information */ + skcb->if_flags = 0; + 
skcb->state = INFF_FWS_SKBSTATE_NEW; + inff_skb_if_flags_set_field(skb, INDEX, ifp->ifidx); + + /* mapping from 802.1d priority to firmware fifo index */ + if (!multicast) + fifo = inff_map_prio_to_aci(drvr->config, skb->priority); + + inff_fws_lock(fws); + if (fifo != INFF_FWS_FIFO_AC_BE && fifo < INFF_FWS_FIFO_BCMC) + fws->borrow_defer_timestamp = jiffies + + INFF_FWS_BORROW_DEFER_PERIOD; + + skcb->mac = inff_fws_macdesc_find(fws, ifp, eh->h_dest); + inff_dbg(DATA, "%s mac %pM multi %d fifo %d\n", skcb->mac->name, + eh->h_dest, multicast, fifo); + if (!inff_fws_assign_htod(fws, skb, fifo)) { + inff_fws_enq(fws, INFF_FWS_SKBSTATE_DELAYED, fifo, skb); + inff_fws_schedule_deq(fws); + } else { + iphy_err(drvr, "no hanger slot available\n"); + rc = -ENOMEM; + } + inff_fws_unlock(fws); + + return rc; +} + +void inff_fws_reset_interface(struct inff_if *ifp) +{ + struct inff_fws_mac_descriptor *entry = ifp->fws_desc; + + inff_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx); + if (!entry) + return; + + inff_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx); +} + +void inff_fws_add_interface(struct inff_if *ifp) +{ + struct inff_fws_info *fws = drvr_to_fws(ifp->drvr); + struct inff_fws_mac_descriptor *entry; + + if (!ifp->ndev || !inff_fws_queue_skbs(fws)) + return; + + entry = &fws->desc.iface[ifp->ifidx]; + ifp->fws_desc = entry; + inff_fws_macdesc_init(entry, ifp->mac_addr, ifp->ifidx); + inff_fws_macdesc_set_name(fws, entry); + inff_pktq_init(&entry->psq, + INFF_FWS_PSQ_PREC_COUNT, + fws->fws_psq_len); + inff_dbg(TRACE, "added %s\n", entry->name); +} + +void inff_fws_del_interface(struct inff_if *ifp) +{ + struct inff_fws_mac_descriptor *entry = ifp->fws_desc; + struct inff_fws_info *fws = drvr_to_fws(ifp->drvr); + + if (!entry) + return; + + inff_fws_lock(fws); + ifp->fws_desc = NULL; + inff_dbg(TRACE, "deleting %s\n", entry->name); + inff_fws_macdesc_cleanup(fws, &fws->desc.iface[ifp->ifidx], + ifp->ifidx); + inff_fws_macdesc_deinit(entry); + 
inff_fws_cleanup(fws, ifp->ifidx); + inff_fws_unlock(fws); +} + +static void inff_fws_dequeue_worker(struct work_struct *worker) +{ + struct inff_fws_info *fws; + struct inff_pub *drvr; + struct sk_buff *skb; + int fifo; + u32 hslot; + u32 ifidx; + int ret; + u32 highest_lender = 0; + + fws = container_of(worker, struct inff_fws_info, fws_dequeue_work); + drvr = fws->drvr; + + inff_fws_lock(fws); + for (fifo = INFF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked; + fifo--) { + if (!inff_fws_fc_active(fws)) { + while ((skb = inff_fws_deq(fws, fifo)) != NULL) { + hslot = inff_skb_htod_tag_get_field(skb, + HSLOT); + inff_fws_hanger_poppkt(&fws->hanger, hslot, + &skb, true); + ifidx = inff_skb_if_flags_get_field(skb, + INDEX); + /* Use proto layer to send data frame */ + inff_fws_unlock(fws); + ret = inff_proto_txdata(drvr, ifidx, 0, skb); + inff_fws_lock(fws); + if (ret < 0) + inff_txfinalize(inff_get_ifp(drvr, + ifidx), + skb, false); + if (fws->bus_flow_blocked) + break; + } + continue; + } + + while ((fws->fifo_credit[fifo]) || + ((!fws->bcmc_credit_check) && + (fifo == INFF_FWS_FIFO_BCMC))) { + skb = inff_fws_deq(fws, fifo); + if (!skb) + break; + fws->fifo_credit[fifo]--; + if (inff_fws_commit_skb(fws, fifo, skb)) + break; + if (fws->bus_flow_blocked) + break; + } + + if (fifo >= INFF_FWS_FIFO_AC_BK && + fifo <= INFF_FWS_FIFO_AC_VO && + fws->fifo_credit[fifo] == 0 && + !fws->bus_flow_blocked) { + highest_lender = fifo - 1; + + /* Borrow Credit for BK access category from Higer AC queues */ + if (fifo == INFF_FWS_FIFO_AC_BK) + highest_lender = INFF_FWS_FIFO_AC_BE; + + while (inff_fws_borrow_credit(fws, highest_lender, + fifo, true) == 0) { + skb = inff_fws_deq(fws, fifo); + if (!skb) { + inff_fws_return_credits(fws, fifo, 1); + break; + } + if (inff_fws_commit_skb(fws, fifo, skb)) + break; + if (fws->bus_flow_blocked) + break; + } + } + } + inff_fws_unlock(fws); +} + +#ifdef DEBUG +static int inff_debugfs_fws_stats_read(struct seq_file *seq, void *data) +{ + 
struct inff_bus *bus_if = dev_get_drvdata(seq->private); + struct inff_fws_info *fws = drvr_to_fws(bus_if->drvr); + struct inff_fws_stats *fwstats = &fws->stats; + + seq_printf(seq, + "header_pulls: %8u\t" + "header_only_pkt: %8u\n" + "tlv_parse_failed:%8u\t" + "tlv_invalid_type:%8u\n" + "mac_update_fails:%8u\t" + "ps_update_fails: %8u\t" + "if_update_fails: %8u\n" + "pkt2bus: %8u\t" + "generic_error: %8u\n" + "rollback_success:%8u\t" + "rollback_failed: %8u\n" + "delayq_full: %8u\t" + "supprq_full: %8u\n" + "txs_indicate: %8u\t" + "txs_discard: %8u\n" + "txs_suppr_core: %8u\t" + "txs_suppr_ps: %8u\n" + "txs_tossed: %8u\t" + "txs_host_tossed: %8u\n" + "bus_flow_block: %8u\t" + "fws_flow_block: %8u\n", + fwstats->header_pulls, + fwstats->header_only_pkt, + fwstats->tlv_parse_failed, + fwstats->tlv_invalid_type, + fwstats->mac_update_failed, + fwstats->mac_ps_update_failed, + fwstats->if_update_failed, + fwstats->pkt2bus, + fwstats->generic_error, + fwstats->rollback_success, + fwstats->rollback_failed, + fwstats->delayq_full_error, + fwstats->supprq_full_error, + fwstats->txs_indicate, + fwstats->txs_discard, + fwstats->txs_supp_core, + fwstats->txs_supp_ps, + fwstats->txs_tossed, + fwstats->txs_host_tossed, + fwstats->bus_flow_block, + fwstats->fws_flow_block); + + seq_printf(seq, + "receive error: %8u\t" + "cleanup if: %8u\n\n", + fwstats->cnt_recv_err, + fwstats->cnt_cleanup_if); + + seq_printf(seq, + "send_pkts: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n" + "requested_sent: BK:%u BE:%u VO:%u VI:%u BCMC:%u\n\n", + fwstats->send_pkts[0], fwstats->send_pkts[1], + fwstats->send_pkts[2], fwstats->send_pkts[3], + fwstats->send_pkts[4], + fwstats->requested_sent[0], + fwstats->requested_sent[1], + fwstats->requested_sent[2], + fwstats->requested_sent[3], + fwstats->requested_sent[4]); + + return 0; +} +#else +static int inff_debugfs_fws_stats_read(struct seq_file *seq, void *data) +{ + return 0; +} +#endif + +struct inff_fws_info *inff_fws_attach(struct inff_pub *drvr) +{ + 
struct inff_fws_info *fws; + struct inff_if *ifp; + u32 tlv = INFF_FWS_FLAGS_RSSI_SIGNALS; + int rc; + u32 mode; + + fws = kzalloc(sizeof(*fws), GFP_KERNEL); + if (!fws) { + rc = -ENOMEM; + goto fail; + } + + spin_lock_init(&fws->spinlock); + + /* store drvr reference */ + fws->drvr = drvr; + fws->fcmode = drvr->settings->fcmode; + + if (drvr->settings->short_psq) { + fws->fws_psq_len = INFF_FWS_SHQUEUE_PSQ_LEN; + fws->fws_psq_hi_water = INFF_FWS_FLOWCONTROL_SHQUEUE_HIWATER; + fws->fws_psq_low_water = INFF_FWS_FLOWCONTROL_SHQUEUE_LOWATER; + } else { + fws->fws_psq_len = INFF_FWS_PSQ_LEN; + fws->fws_psq_hi_water = INFF_FWS_FLOWCONTROL_HIWATER; + fws->fws_psq_low_water = INFF_FWS_FLOWCONTROL_LOWATER; + } + + if (!drvr->bus_if->always_use_fws_queue && + fws->fcmode == INFF_FWS_FCMODE_NONE) { + fws->avoid_queueing = true; + inff_dbg(INFO, "FWS queueing will be avoided\n"); + return fws; + } + + fws->fws_wq = create_singlethread_workqueue("inff_fws_wq"); + if (!fws->fws_wq) { + iphy_err(drvr, "workqueue creation failed\n"); + rc = -EBADF; + goto fail; + } + INIT_WORK(&fws->fws_dequeue_work, inff_fws_dequeue_worker); + + /* enable firmware signalling if fcmode active */ + if (fws->fcmode != INFF_FWS_FCMODE_NONE) + tlv |= INFF_FWS_FLAGS_XONXOFF_SIGNALS | + INFF_FWS_FLAGS_CREDIT_STATUS_SIGNALS | + INFF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE | + INFF_FWS_FLAGS_HOST_RXREORDER_ACTIVE; + + rc = inff_fweh_register(drvr, INFF_E_FIFO_CREDIT_MAP, + inff_fws_notify_credit_map); + if (rc < 0) { + iphy_err(drvr, "register credit map handler failed\n"); + goto fail; + } + rc = inff_fweh_register(drvr, INFF_E_BCMC_CREDIT_SUPPORT, + inff_fws_notify_bcmc_credit_support); + if (rc < 0) { + iphy_err(drvr, "register bcmc credit handler failed\n"); + inff_fweh_unregister(drvr, INFF_E_FIFO_CREDIT_MAP); + goto fail; + } + + /* Setting the iovar may fail if feature is unsupported + * so leave the rc as is so driver initialization can + * continue. Set mode back to none indicating not enabled. 
+ */ + fws->fw_signals = true; + ifp = inff_get_ifp(drvr, 0); + if (inff_fil_iovar_int_set(ifp, "tlv", tlv)) { + iphy_err(drvr, "failed to set bdcv2 tlv signaling\n"); + fws->fcmode = INFF_FWS_FCMODE_NONE; + fws->fw_signals = false; + } + + if (inff_fil_iovar_int_set(ifp, "ampdu_hostreorder", 1)) + inff_dbg(INFO, "enabling AMPDU host-reorder failed\n"); + + /* Enable seq number reuse, if supported */ + if (inff_fil_iovar_int_get(ifp, "wlfc_mode", &mode) == 0) { + if (INFF_FWS_MODE_GET_REUSESEQ(mode)) { + mode = 0; + INFF_FWS_MODE_SET_REUSESEQ(mode, 1); + if (inff_fil_iovar_int_set(ifp, + "wlfc_mode", mode) == 0) { + INFF_FWS_MODE_SET_REUSESEQ(fws->mode, 1); + } + } + } + + inff_fws_hanger_init(&fws->hanger); + inff_fws_macdesc_init(&fws->desc.other, NULL, 0); + inff_fws_macdesc_set_name(fws, &fws->desc.other); + inff_dbg(INFO, "added %s\n", fws->desc.other.name); + inff_pktq_init(&fws->desc.other.psq, + INFF_FWS_PSQ_PREC_COUNT, + fws->fws_psq_len); + + inff_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n", + fws->fw_signals ? 
"enabled" : "disabled", tlv); + return fws; + +fail: + inff_fws_detach(fws); + return ERR_PTR(rc); +} + +void inff_fws_detach(struct inff_fws_info *fws) +{ + if (!fws) + return; + + if (fws->fws_wq) + destroy_workqueue(fws->fws_wq); + + /* cleanup */ + inff_fws_lock(fws); + inff_fws_cleanup(fws, -1); + inff_fws_unlock(fws); + + /* free top structure */ + kfree(fws); +} + +void inff_fws_debugfs_create(struct inff_pub *drvr) +{ + /* create debugfs file for statistics */ + inff_debugfs_add_entry(drvr, "fws_stats", + inff_debugfs_fws_stats_read); +} + +bool inff_fws_queue_skbs(struct inff_fws_info *fws) +{ + return !fws->avoid_queueing; +} + +bool inff_fws_fc_active(struct inff_fws_info *fws) +{ + if (!fws->creditmap_received) + return false; + + return fws->fcmode != INFF_FWS_FCMODE_NONE; +} + +void inff_fws_bustxcomplete(struct inff_fws_info *fws, struct sk_buff *skb, + bool success) +{ + u32 hslot; + + if (inff_skbcb(skb)->state == INFF_FWS_SKBSTATE_TIM) { + inff_pkt_buf_free_skb(skb); + return; + } + + if (!success) { + inff_fws_lock(fws); + hslot = inff_skb_htod_tag_get_field(skb, HSLOT); + inff_fws_txs_process(fws, INFF_FWS_TXSTATUS_HOST_TOSSED, hslot, + 0, 0, 1); + inff_fws_unlock(fws); + } +} + +void inff_fws_bus_blocked(struct inff_pub *drvr, bool flow_blocked) +{ + struct inff_fws_info *fws = drvr_to_fws(drvr); + struct inff_if *ifp; + int i; + + if (fws->avoid_queueing) { + for (i = 0; i < INFF_MAX_IFS; i++) { + ifp = drvr->iflist[i]; + if (!ifp || !ifp->ndev) + continue; + inff_txflowblock_if(ifp, INFF_NETIF_STOP_REASON_FLOW, + flow_blocked); + } + } else { + fws->bus_flow_blocked = flow_blocked; + if (!flow_blocked) + inff_fws_schedule_deq(fws); + else + fws->stats.bus_flow_block++; + } +} + +void inff_fws_cleanup_interface(struct inff_if *ifp) +{ + struct inff_fws_info *fws = drvr_to_fws(ifp->drvr); + struct inff_fws_mac_descriptor *entry; + struct inff_fws_mac_descriptor *table; + bool (*matchfn)(struct sk_buff *, void *) = inff_fws_ifidx_match; + int 
ifidx = ifp->ifidx; + int i; + + if (!fws->sdio_recv_error) + return; + + inff_dbg(SDIO, "Enter\n"); + + inff_fws_lock(fws); + + fws->stats.cnt_cleanup_if++; + fws->sdio_recv_error = false; + + entry = &fws->desc.iface[ifidx]; + inff_dbg(SDIO, "iface[%d] mac %pM if %d psq len %d\n", + ifidx, entry->ea, entry->interface_id, entry->psq.len); + + /* cleanup interface */ + inff_fws_psq_flush(fws, &entry->psq, ifidx); + inff_fws_macdesc_reset(entry); + + /* cleanup individual nodes */ + table = &fws->desc.nodes[0]; + for (i = 0; i < ARRAY_SIZE(fws->desc.nodes); i++) + inff_fws_macdesc_cleanup(fws, &table[i], ifidx); + + /* cleanup txq and hanger */ + inff_fws_bus_txq_cleanup(fws, matchfn, ifidx); + inff_fws_hanger_cleanup(fws, matchfn, ifidx); + + inff_fws_unlock(fws); +} diff --git a/drivers/net/wireless/infineon/inffmac/fwsignal.h b/drivers/net/wireless/infineon/inffmac/fwsignal.h new file mode 100644 index 000000000000..f0c59623cd09 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/fwsignal.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2012 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_FWSIGNAL_H +#define INFF_FWSIGNAL_H + +/** + * enum inff_fws_fifo - fifo indices used by dongle firmware. + * + * @INFF_FWS_FIFO_FIRST: first fifo, ie. background. + * @INFF_FWS_FIFO_AC_BK: fifo for background traffic. + * @INFF_FWS_FIFO_AC_BE: fifo for best-effort traffic. + * @INFF_FWS_FIFO_AC_VI: fifo for video traffic. + * @INFF_FWS_FIFO_AC_VO: fifo for voice traffic. + * @INFF_FWS_FIFO_BCMC: fifo for broadcast/multicast (AP only). + * @INFF_FWS_FIFO_ATIM: fifo for ATIM (AP only). + * @INFF_FWS_FIFO_COUNT: number of fifos. 
+ */ +enum inff_fws_fifo { + INFF_FWS_FIFO_FIRST, + INFF_FWS_FIFO_AC_BK = INFF_FWS_FIFO_FIRST, + INFF_FWS_FIFO_AC_BE, + INFF_FWS_FIFO_AC_VI, + INFF_FWS_FIFO_AC_VO, + INFF_FWS_FIFO_BCMC, + INFF_FWS_FIFO_ATIM, + INFF_FWS_FIFO_COUNT +}; + +struct inff_fws_info *inff_fws_attach(struct inff_pub *drvr); +void inff_fws_detach(struct inff_fws_info *fws); +void inff_fws_debugfs_create(struct inff_pub *drvr); +bool inff_fws_queue_skbs(struct inff_fws_info *fws); +bool inff_fws_fc_active(struct inff_fws_info *fws); +void inff_fws_hdrpull(struct inff_if *ifp, s16 siglen, struct sk_buff *skb); +int inff_fws_process_skb(struct inff_if *ifp, struct sk_buff *skb); + +void inff_fws_reset_interface(struct inff_if *ifp); +void inff_fws_add_interface(struct inff_if *ifp); +void inff_fws_del_interface(struct inff_if *ifp); +void inff_fws_bustxcomplete(struct inff_fws_info *fws, struct sk_buff *skb, + bool success); +void inff_fws_bus_blocked(struct inff_pub *drvr, bool flow_blocked); +void inff_fws_rxreorder(struct inff_if *ifp, struct sk_buff *skb, bool inirq); +void inff_fws_recv_err(struct inff_pub *drvr); +void inff_fws_cleanup_interface(struct inff_if *ifp); + +#endif /* INFF_FWSIGNAL_H */ -- 2.25.1 Driver implementation for packing the data from the Host Driver to the Device firmware in the eXtended Type-Length-Value (XTLV) Format. 
Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/xtlv.c | 106 +++++++++++++++++++ drivers/net/wireless/infineon/inffmac/xtlv.h | 37 +++++++ 2 files changed, 143 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/xtlv.c create mode 100644 drivers/net/wireless/infineon/inffmac/xtlv.h diff --git a/drivers/net/wireless/infineon/inffmac/xtlv.c b/drivers/net/wireless/infineon/inffmac/xtlv.c new file mode 100644 index 000000000000..05983783d453 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/xtlv.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2019 Broadcom + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include + +#include +#include +#include + +#include "xtlv.h" + +static int inff_xtlv_header_size(u16 opts) +{ + int len = (int)offsetof(struct inff_xtlv, data); + + if (opts & INFF_XTLV_OPTION_IDU8) + --len; + if (opts & INFF_XTLV_OPTION_LENU8) + --len; + + return len; +} + +int inff_xtlv_data_size(int dlen, u16 opts) +{ + int hsz; + + hsz = inff_xtlv_header_size(opts); + if (opts & INFF_XTLV_OPTION_ALIGN32) + return roundup(dlen + hsz, 4); + + return dlen + hsz; +} + +void inff_xtlv_pack_header(struct inff_xtlv *xtlv, u16 id, u16 len, + const u8 *data, u16 opts) +{ + u8 *data_buf; + u16 mask = INFF_XTLV_OPTION_IDU8 | INFF_XTLV_OPTION_LENU8; + + if (!(opts & mask)) { + u8 *idp = (u8 *)xtlv; + u8 *lenp = idp + sizeof(xtlv->id); + + put_unaligned_le16(id, idp); + put_unaligned_le16(len, lenp); + data_buf = lenp + sizeof(u16); + } else if ((opts & mask) == mask) { /* u8 id and u8 len */ + u8 *idp = (u8 *)xtlv; + u8 *lenp = idp + 1; + + *idp = (u8)id; + *lenp = (u8)len; + data_buf = lenp + sizeof(u8); + } else if (opts & INFF_XTLV_OPTION_IDU8) { /* u8 id, u16 len */ + u8 *idp = (u8 *)xtlv; + u8 *lenp = idp + 1; + + *idp = (u8)id; + put_unaligned_le16(len, lenp); + data_buf = lenp + sizeof(u16); + } else if (opts 
& INFF_XTLV_OPTION_LENU8) { /* u16 id, u8 len */ + u8 *idp = (u8 *)xtlv; + u8 *lenp = idp + sizeof(u16); + + put_unaligned_le16(id, idp); + *lenp = (u8)len; + data_buf = lenp + sizeof(u8); + } else { + WARN(true, "Unexpected xtlv option"); + return; + } + + if (opts & INFF_XTLV_OPTION_LENU8) { + WARN_ON(len > 0x00ff); + len &= 0xff; + } + + if (data) + memcpy(data_buf, data, len); +} + +u32 inff_pack_xtlv(u16 id, char *data, u32 len, + char **buf, u16 *buflen) +{ + u32 iolen; + + iolen = inff_xtlv_data_size(len, INFF_XTLV_OPTION_ALIGN32); + + if (iolen > *buflen) { + WARN(true, "xtlv buffer is too short"); + return 0; + } + + inff_xtlv_pack_header((void *)*buf, id, len, data, + INFF_XTLV_OPTION_ALIGN32); + + *buf = *buf + iolen; + *buflen -= iolen; + return iolen; +} diff --git a/drivers/net/wireless/infineon/inffmac/xtlv.h b/drivers/net/wireless/infineon/inffmac/xtlv.h new file mode 100644 index 000000000000..eb7564f608ed --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/xtlv.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2019 Broadcom + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_XTLV_H +#define INFF_XTLV_H + +#include +#include + +/* inf type(id), length, value with w/16 bit id/len. The structure below + * is nominal, and is used to support variable length id and type. See + * xtlv options below. 
+ */ +struct inff_xtlv { + u16 id; + u16 len; + u8 data[]; +}; + +enum inff_xtlv_option { + INFF_XTLV_OPTION_ALIGN32 = BIT(0), + INFF_XTLV_OPTION_IDU8 = BIT(1), + INFF_XTLV_OPTION_LENU8 = BIT(2), +}; + +int inff_xtlv_data_size(int dlen, u16 opts); +void inff_xtlv_pack_header(struct inff_xtlv *xtlv, u16 id, u16 len, + const u8 *data, u16 opts); +u32 inff_pack_xtlv(u16 id, char *data, u32 len, + char **buf, u16 *buflen); + +#endif /* INFF_XTLV_H */ -- 2.25.1 Driver main file to initialize and De-initialize the Kernel module in the kernel. Also maintains some of the common module param registrations. Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/common.c | 765 ++++++++++++++++++ .../net/wireless/infineon/inffmac/common.h | 140 ++++ 2 files changed, 905 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/common.c create mode 100644 drivers/net/wireless/infineon/inffmac/common.h diff --git a/drivers/net/wireless/infineon/inffmac/common.c b/drivers/net/wireless/infineon/inffmac/common.c new file mode 100644 index 000000000000..d4cbb8b5bb12 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/common.c @@ -0,0 +1,765 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "chanspec.h" +#include "utils.h" +#include "core.h" +#include "bus.h" +#include "debug.h" +#include "fwil.h" +#include "fwil_types.h" +#include "tracepoint.h" +#include "common.h" +#include "firmware.h" +#include "dfu.h" +#include "chip.h" +#include "defs.h" +#include "fweh.h" +#include "hw_ids.h" +#include "pcie.h" +#include "sdio.h" +#include "offload.h" + +MODULE_AUTHOR("Infineon Technologies AG"); +MODULE_DESCRIPTION("Infineon 802.11 wireless LAN fullmac driver."); +MODULE_LICENSE("Dual BSD/GPL"); + +#define INFF_DEFAULT_SCAN_CHANNEL_TIME 40 +#define INFF_DEFAULT_SCAN_UNASSOC_TIME 40 + +/* default boost value for RSSI_DELTA in preferred join selection */ +#define INFF_JOIN_PREF_RSSI_BOOST 8 + +#define INFF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */ + +static int inff_sdiod_txglomsz = INFF_DEFAULT_TXGLOM_SIZE; +module_param_named(txglomsz, inff_sdiod_txglomsz, int, 0); +MODULE_PARM_DESC(txglomsz, "Maximum tx packet chain size [SDIO]"); + +/* Debug level configuration. 
See debug.h for bits, sysfs modifiable */ +int inff_msg_level; +module_param_named(debug, inff_msg_level, int, 0600); +MODULE_PARM_DESC(debug, "Level of debug output"); + +static int inff_p2p_enable; +module_param_named(p2pon, inff_p2p_enable, int, 0); +MODULE_PARM_DESC(p2pon, "Enable legacy p2p management functionality"); + +static u8 inff_feature_disable[INFF_MAX_FEATURE_BYTES] = {0}; +static int inff_feature_disable_size; +module_param_array_named(feature_disable, inff_feature_disable, byte, + &inff_feature_disable_size, 0644); +MODULE_PARM_DESC(feature_disable, "Disable features (bitmap)"); + +static char inff_firmware_path[INFF_FW_ALTPATH_LEN]; +module_param_string(alternative_fw_path, inff_firmware_path, + INFF_FW_ALTPATH_LEN, 0400); +MODULE_PARM_DESC(alternative_fw_path, "Alternative firmware path"); + +static int inff_fcmode = 2; +module_param_named(fcmode, inff_fcmode, int, 0); +MODULE_PARM_DESC(fcmode, "Mode of firmware signalled flow control"); + +static int inff_roamoff; +module_param_named(roamoff, inff_roamoff, int, 0400); +MODULE_PARM_DESC(roamoff, + "Do not use fw roaming engine: 0=use fw_roam, 1=fw_roam off & report BCNLOST_MSG, 2=fw_roam off & report DISCONNECTED"); + +static int inff_iapp_enable; +module_param_named(iapp, inff_iapp_enable, int, 0); +MODULE_PARM_DESC(iapp, "Enable partial support for the obsoleted Inter-Access Point Protocol"); + +static int inff_eap_restrict; +module_param_named(eap_restrict, inff_eap_restrict, int, 0400); +MODULE_PARM_DESC(eap_restrict, "Block non-802.1X frames until auth finished"); + +static int inff_max_pm; +module_param_named(max_pm, inff_max_pm, int, 0); +MODULE_PARM_DESC(max_pm, "Use max power management mode by default"); + +int inff_pkt_prio_enable; +module_param_named(pkt_prio, inff_pkt_prio_enable, int, 0); +MODULE_PARM_DESC(pkt_prio, "Support for update the packet priority"); + +#ifdef DEBUG +/* always succeed inff_bus_started() */ +static int inff_ignore_probe_fail; 
+module_param_named(ignore_probe_fail, inff_ignore_probe_fail, int, 0); +MODULE_PARM_DESC(ignore_probe_fail, "always succeed probe for debugging"); +#endif + +static int inff_fw_ap_select; +module_param_named(fw_ap_select, inff_fw_ap_select, int, 0400); +MODULE_PARM_DESC(fw_ap_select, "Allow FW for AP selection"); + +static int inff_disable_6ghz; +module_param_named(disable_6ghz, inff_disable_6ghz, int, 0400); +MODULE_PARM_DESC(disable_6ghz, "Disable 6GHz Operation"); + +static int inff_sdio_in_isr; +module_param_named(sdio_in_isr, inff_sdio_in_isr, int, 0400); +MODULE_PARM_DESC(sdio_in_isr, "Handle SDIO DPC in ISR"); + +static int inff_sdio_rxf_in_kthread; +module_param_named(sdio_rxf_thread, inff_sdio_rxf_in_kthread, int, 0400); +MODULE_PARM_DESC(sdio_rxf_thread, "SDIO RX Frame in Kthread"); + +static int inff_bt_over_sdio; +module_param_named(bt_over_sdio, inff_bt_over_sdio, int, 0); +MODULE_PARM_DESC(bt_over_sdio, "Enable BT over SDIO"); + +static int inff_sdio_idleclk_disable = INFFMAC_AUTO; +module_param_named(sdio_idleclk_disable, inff_sdio_idleclk_disable, int, 0644); +MODULE_PARM_DESC(sdio_idleclk_disable, "Disable SDIO idle clock"); + +static int inff_sdio_bus_idle_time = INFF_DEFAULT_SDIO_IDLE_CONFIG; +module_param_named(bus_idle_time, inff_sdio_bus_idle_time, int, 0644); +MODULE_PARM_DESC(bus_idle_time, "Config SDIO bus idle time"); + +static int inff_short_psq; +module_param_named(short_psq, inff_short_psq, int, 0); +MODULE_PARM_DESC(short_psq, "Use shorter PS Queue"); + +static int inff_tx_cpu = -1; +module_param_named(tx_cpu, inff_tx_cpu, int, 0644); +MODULE_PARM_DESC(tx_cpu, "CPU affinity for TX"); + +static int inff_napi_cpu = -1; +module_param_named(napi_cpu, inff_napi_cpu, int, 0644); +MODULE_PARM_DESC(napi_cpu, "CPU affinity for NAPI"); + +static int inff_napi_enable; +module_param_named(napi_enable, inff_napi_enable, int, 0644); +MODULE_PARM_DESC(napi_enable, "Enable NAPI"); + +static int inff_commonring_depth[5] = {0}; 
+module_param_array_named(commonring_depth, inff_commonring_depth, + int, NULL, 0644); +MODULE_PARM_DESC(commonring_depth, "PCIe commonrings (CTRL SUB, RXPOST SUB, CTRL CMPL, TX CMPL, RX CMPL) depth"); + +static int inff_flowring_depth; +module_param_named(flowring_depth, inff_flowring_depth, int, 0644); +MODULE_PARM_DESC(flowring_depth, "PCIe flowrings TX SUB depth"); + +static int inff_logring_depth = INFF_LOGRING_DEPTH_DEFAULT; +module_param_named(logring_depth, inff_logring_depth, int, 0); +MODULE_PARM_DESC(logring_depth, "Debug log ring depth"); + +static int inff_logger_level = INFF_LOGGER_LEVEL_DEFAULT; +module_param_named(logger_level, inff_logger_level, int, 0); +MODULE_PARM_DESC(logger_level, "Debug logger level"); + +static int inff_reboot_callback(struct notifier_block *this, unsigned long code, void *unused); +static struct notifier_block inff_reboot_notifier = { + .notifier_call = inff_reboot_callback, + .priority = 1, +}; + +void inff_c_set_joinpref_default(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_join_pref_params join_pref_params[2]; + int err; + + /* Setup join_pref to select target by RSSI (boost on 5GHz) */ + join_pref_params[0].type = INFF_JOIN_PREF_RSSI_DELTA; + join_pref_params[0].len = 2; + join_pref_params[0].rssi_gain = INFF_JOIN_PREF_RSSI_BOOST; + join_pref_params[0].band = WLC_BAND_5G; + + join_pref_params[1].type = INFF_JOIN_PREF_RSSI; + join_pref_params[1].len = 2; + join_pref_params[1].rssi_gain = 0; + join_pref_params[1].band = 0; + err = inff_fil_iovar_data_set(ifp, "join_pref", join_pref_params, + sizeof(join_pref_params)); + if (err) + iphy_err(drvr, "Set join_pref error (%d)\n", err); +} + +static int inff_c_download(struct inff_if *ifp, u16 flag, + struct inff_dload_data_le *dload_buf, + u32 len) +{ + s32 err; + + flag |= (DLOAD_HANDLER_VER << DLOAD_FLAG_VER_SHIFT); + dload_buf->flag = cpu_to_le16(flag); + dload_buf->dload_type = cpu_to_le16(DL_TYPE_CLM); + dload_buf->len = cpu_to_le32(len); + 
dload_buf->crc = cpu_to_le32(0); + + err = inff_fil_iovar_data_set(ifp, "clmload", dload_buf, + struct_size(dload_buf, data, len)); + + return err; +} + +static int inff_c_process_clm_blob(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_bus *bus = drvr->bus_if; + struct inff_dload_data_le *chunk_buf; + const struct firmware *clm = NULL; + u32 chunk_len; + u32 datalen; + u32 cumulative_len; + u16 dl_flag = DL_BEGIN; + u32 status; + s32 err; + + inff_dbg(TRACE, "Enter\n"); + + err = inff_bus_get_blob(bus, &clm, INFF_BLOB_CLM); + if (err || !clm) { + inff_info("no clm_blob available (err=%d), device may have limited channels available\n", + err); + return 0; + } + + chunk_buf = kzalloc(struct_size(chunk_buf, data, MAX_CHUNK_LEN), + GFP_KERNEL); + if (!chunk_buf) { + err = -ENOMEM; + goto done; + } + + datalen = clm->size; + cumulative_len = 0; + do { + if (datalen > MAX_CHUNK_LEN) { + chunk_len = MAX_CHUNK_LEN; + } else { + chunk_len = datalen; + dl_flag |= DL_END; + } + memcpy(chunk_buf->data, clm->data + cumulative_len, chunk_len); + + err = inff_c_download(ifp, dl_flag, chunk_buf, chunk_len); + + dl_flag &= ~DL_BEGIN; + + cumulative_len += chunk_len; + datalen -= chunk_len; + } while ((datalen > 0) && (err == 0)); + + if (err) { + iphy_err(drvr, "clmload (%zu byte file) failed (%d)\n", + clm->size, err); + /* Retrieve clmload_status and print */ + err = inff_fil_iovar_int_get(ifp, "clmload_status", &status); + if (err) + iphy_err(drvr, "get clmload_status failed (%d)\n", err); + else + inff_dbg(INFO, "clmload_status=%d\n", status); + err = -EIO; + } + + kfree(chunk_buf); +done: + release_firmware(clm); + return err; +} + +int inff_c_set_cur_etheraddr(struct inff_if *ifp, const u8 *addr) +{ + s32 err; + + err = inff_fil_iovar_data_set(ifp, "cur_etheraddr", addr, ETH_ALEN); + if (err < 0) + iphy_err(ifp->drvr, "Setting cur_etheraddr failed, %d\n", err); + + return err; +} + +/* On some boards there is no eeprom to hold the nvram, in this 
case instead + * a board specific nvram is loaded from /lib/firmware. On most boards the + * macaddr setting in the /lib/firmware nvram file is ignored because the + * wifibt chip has a unique MAC programmed into the chip itself. + * But in some cases the actual MAC from the /lib/firmware nvram file gets + * used, leading to MAC conflicts. + * The MAC addresses in the troublesome nvram files seem to all come from + * the same nvram file template, so we only need to check for 1 known + * address to detect this. + */ +static const u8 inff_default_mac_address[ETH_ALEN] = { + 0x00, 0x90, 0x4c, 0xc5, 0x12, 0x38 +}; + +int inff_c_preinit_dcmds(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + s8 eventmask[INFF_EVENTING_MASK_LEN]; + u8 buf[INFF_DCMD_SMLEN]; + struct inff_bus *bus; + struct inff_rev_info_le revinfo; + struct inff_rev_info *ri; + struct inff_wlc_version_le wlc_ver; + char *clmver; + char *ptr; + s32 err; + struct eventmsgs_ext *eventmask_msg = NULL; + u8 msglen; + + if (is_valid_ether_addr(ifp->mac_addr)) { + /* set mac address */ + err = inff_c_set_cur_etheraddr(ifp, ifp->mac_addr); + if (err < 0) + goto done; + } else { + /* retrieve mac address */ + err = inff_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr, + sizeof(ifp->mac_addr)); + if (err < 0) { + iphy_err(drvr, "Retrieving cur_etheraddr failed, %d\n", err); + goto done; + } + + if (ether_addr_equal_unaligned(ifp->mac_addr, inff_default_mac_address)) { + iphy_err(drvr, "Default MAC is used, replacing with random MAC to avoid conflicts\n"); + eth_random_addr(ifp->mac_addr); + ifp->ndev->addr_assign_type = NET_ADDR_RANDOM; + err = inff_c_set_cur_etheraddr(ifp, ifp->mac_addr); + if (err < 0) + goto done; + } + } + + memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac)); + memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN); + + bus = ifp->drvr->bus_if; + ri = &ifp->drvr->revinfo; + + err = inff_fil_cmd_data_get(ifp, INFF_C_GET_REVINFO, + &revinfo, sizeof(revinfo)); 
+ if (err < 0) { + iphy_err(drvr, "retrieving revision info failed, %d\n", err); + strscpy(ri->chipname, "UNKNOWN", sizeof(ri->chipname)); + } else { + ri->vendorid = le32_to_cpu(revinfo.vendorid); + ri->deviceid = le32_to_cpu(revinfo.deviceid); + ri->radiorev = le32_to_cpu(revinfo.radiorev); + ri->corerev = le32_to_cpu(revinfo.corerev); + ri->boardid = le32_to_cpu(revinfo.boardid); + ri->boardvendor = le32_to_cpu(revinfo.boardvendor); + ri->boardrev = le32_to_cpu(revinfo.boardrev); + ri->driverrev = le32_to_cpu(revinfo.driverrev); + ri->ucoderev = le32_to_cpu(revinfo.ucoderev); + ri->bus = le32_to_cpu(revinfo.bus); + ri->phytype = le32_to_cpu(revinfo.phytype); + ri->phyrev = le32_to_cpu(revinfo.phyrev); + ri->anarev = le32_to_cpu(revinfo.anarev); + ri->chippkg = le32_to_cpu(revinfo.chippkg); + ri->nvramrev = le32_to_cpu(revinfo.nvramrev); + + /* use revinfo if not known yet */ + if (!bus->chip) { + bus->chip = le32_to_cpu(revinfo.chipnum); + bus->chiprev = le32_to_cpu(revinfo.chiprev); + } + } + ri->result = err; + + if (bus->chip) + inff_chip_name(bus->chip, bus->chiprev, + ri->chipname, sizeof(ri->chipname)); + + /* Do any CLM downloading */ + err = inff_c_process_clm_blob(ifp); + if (err < 0) { + iphy_err(drvr, "download CLM blob file failed, %d\n", err); + goto done; + } + + /* query for 'ver' to get version info from firmware */ + memset(buf, 0, sizeof(buf)); + err = inff_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf)); + if (err < 0) { + iphy_err(drvr, "Retrieving version information failed, %d\n", + err); + goto done; + } + buf[sizeof(buf) - 1] = '\0'; + ptr = (char *)buf; + strsep(&ptr, "\n"); + + /* Print fw version info */ + inff_info("Firmware: %s %s\n", ri->chipname, buf); + + /* locate firmware version number for ethtool */ + ptr = strrchr(buf, ' '); + if (!ptr) { + iphy_err(drvr, "Retrieving version number failed"); + goto done; + } + strscpy(ifp->drvr->fwver, ptr + 1, sizeof(ifp->drvr->fwver)); + + /* Get wlc interface version, set to 0 for legacy 
chip + * that is not supporting wlc_ver iovar + */ + err = inff_fil_iovar_data_get(ifp, "wlc_ver", &wlc_ver, sizeof(wlc_ver)); + if (err < 0) { + ifp->drvr->wlc_ver.wlc_ver_major = 0; + ifp->drvr->wlc_ver.wlc_ver_minor = 0; + } else { + ifp->drvr->wlc_ver.wlc_ver_major = le16_to_cpu(wlc_ver.wlc_ver_major); + ifp->drvr->wlc_ver.wlc_ver_minor = le16_to_cpu(wlc_ver.wlc_ver_minor); + } + inff_dbg(TRACE, "wlc interface version, major=%d, minor=%d\n", + ifp->drvr->wlc_ver.wlc_ver_major, + ifp->drvr->wlc_ver.wlc_ver_minor); + + /* Query for 'clmver' to get CLM version info from firmware */ + memset(buf, 0, sizeof(buf)); + err = inff_fil_iovar_data_get(ifp, "clmver", buf, sizeof(buf)); + if (err) { + inff_dbg(TRACE, "retrieving clmver failed, %d\n", err); + } else { + buf[sizeof(buf) - 1] = '\0'; + clmver = (char *)buf; + + /* Replace all newline/linefeed characters with space + * character + */ + strreplace(clmver, '\n', ' '); + + /* store CLM version for adding it to revinfo debugfs file */ + memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver)); + + inff_dbg(INFO, "CLM version = %s\n", clmver); + } + + /* set apsta */ + err = inff_fil_iovar_int_set(ifp, "apsta", 1); + if (err) + inff_info("failed setting apsta, %d\n", err); + + /* set mpc */ + err = inff_fil_iovar_int_set(ifp, "mpc", 1); + if (err) { + iphy_err(drvr, "failed setting mpc\n"); + goto done; + } + + inff_c_set_joinpref_default(ifp); + + /* Setup event_msgs, enable E_IF */ + err = inff_fil_iovar_data_get(ifp, "event_msgs", eventmask, + INFF_EVENTING_MASK_LEN); + if (err) { + iphy_err(drvr, "Get event_msgs error (%d)\n", err); + goto done; + } + setbit(eventmask, INFF_E_IF); + err = inff_fil_iovar_data_set(ifp, "event_msgs", eventmask, + INFF_EVENTING_MASK_LEN); + if (err) { + iphy_err(drvr, "Set event_msgs error (%d)\n", err); + goto done; + } + + /* Enable event_msg_ext specific to 43022 chip */ + if (bus->chip == INF_CC_43022_CHIP_ID) { + /* Program event_msg_ext to support event larger than 128 */ 
+ msglen = (roundup(INFF_E_LAST, NBBY) / NBBY) + + EVENTMSGS_EXT_STRUCT_SIZE; + /* Allocate buffer for eventmask_msg */ + eventmask_msg = kzalloc(msglen, GFP_KERNEL); + if (!eventmask_msg) { + err = -ENOMEM; + goto done; + } + + /* Read the current programmed event_msgs_ext */ + eventmask_msg->ver = EVENTMSGS_VER; + eventmask_msg->len = roundup(INFF_E_LAST, NBBY) / NBBY; + err = inff_fil_iovar_data_get(ifp, "event_msgs_ext", + eventmask_msg, + msglen); + + /* Enable ULP event */ + inff_dbg(EVENT, "enable event ULP\n"); + setbit(eventmask_msg->mask, INFF_E_ULP); + + /* Write updated Event mask */ + eventmask_msg->ver = EVENTMSGS_VER; + eventmask_msg->command = EVENTMSGS_SET_MASK; + eventmask_msg->len = (roundup(INFF_E_LAST, NBBY) / NBBY); + + err = inff_fil_iovar_data_set(ifp, "event_msgs_ext", + eventmask_msg, msglen); + if (err) { + inff_err("Set event_msgs_ext error (%d)\n", err); + kfree(eventmask_msg); + goto done; + } + kfree(eventmask_msg); + } + /* Setup default scan channel time */ + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_SCAN_CHANNEL_TIME, + INFF_DEFAULT_SCAN_CHANNEL_TIME); + if (err) { + iphy_err(drvr, "INFF_C_SET_SCAN_CHANNEL_TIME error (%d)\n", + err); + goto done; + } + + /* Setup default scan unassoc time */ + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_SCAN_UNASSOC_TIME, + INFF_DEFAULT_SCAN_UNASSOC_TIME); + if (err) { + iphy_err(drvr, "INFF_C_SET_SCAN_UNASSOC_TIME error (%d)\n", + err); + goto done; + } + + /* Enable tx beamforming, errors can be ignored (not supported) */ + (void)inff_fil_iovar_int_set(ifp, "txbf", 1); + err = inff_fil_iovar_int_set(ifp, "chanspec", 0x1001); + if (err < 0) { + iphy_err(drvr, "Initial Channel failed %d\n", err); + goto done; + } + /* add unicast packet filter */ + err = inff_pktfilter_add_remove(ifp->ndev, + INFF_UNICAST_FILTER_NUM, true); + if (err == -INFF_FW_UNSUPPORTED) { + /* FW not support can be ignored */ + err = 0; + goto done; + } else if (err) { + iphy_err(drvr, "Add unicast filter error (%d)\n", err); 
+ } + +done: + return err; +} + +#ifndef CONFIG_INF_TRACING +void __inff_err(struct inff_bus *bus, const char *func, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + if (bus) + dev_err(bus->dev, "%s: %pV", func, &vaf); + else + pr_err("%s: %pV", func, &vaf); + + va_end(args); +} +#endif /* CONFIG_INF_TRACING */ + +#if defined(CONFIG_INF_TRACING) || defined(CONFIG_INF_DEBUG) +void __inff_dbg(u32 level, const char *func, const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + va_list args; + + va_start(args, fmt); + vaf.va = &args; + if (inff_msg_level & level) + pr_debug("%s %pV", func, &vaf); + trace_inff_dbg(level, func, &vaf); + va_end(args); +} +#endif /* CONFIG_INF_TRACING || CONFIG_INF_DEBUG */ + +int inff_debugfs_param_read(struct seq_file *s, void *data) +{ + struct inff_bus *bus_if = dev_get_drvdata(s->private); + + seq_printf(s, "%-20s: %s\n", "Name", "Value"); + seq_printf(s, "%-20s: 0x%x\n", "debug", inff_msg_level); + seq_printf(s, "%-20s: %s\n", "alternative_fw_path", inff_firmware_path); + seq_printf(s, "%-20s: %d\n", "p2pon", !!inff_p2p_enable); + seq_printf(s, "%-20s: %d\n", "fcmode", bus_if->drvr->settings->fcmode); + seq_printf(s, "%-20s: %d\n", "roamoff", !!inff_roamoff); + seq_printf(s, "%-20s: %d\n", "iapp", !!inff_iapp_enable); + seq_printf(s, "%-20s: %d\n", "eap_restrict", !!inff_eap_restrict); + seq_printf(s, "%-20s: %d\n", "max_pm", !!inff_max_pm); +#ifdef DEBUG + seq_printf(s, "%-20s: %d\n", "ignore_probe_fail", !!inff_ignore_probe_fail); +#endif + seq_printf(s, "%-20s: %d\n", "fw_ap_select", !!inff_fw_ap_select); + seq_printf(s, "%-20s: %d\n", "disable_6ghz", !!inff_disable_6ghz); + seq_printf(s, "%-20s: %d\n", "sdio_in_isr", !!inff_sdio_in_isr); + seq_printf(s, "%-20s: %d\n", "pkt_prio", !!inff_pkt_prio_enable); + seq_printf(s, "%-20s: %d\n", "sdio_rxf_thread", !!inff_sdio_rxf_in_kthread); + seq_printf(s, "%-20s: %d\n", "offload_prof", 
inff_offload_prof); + seq_printf(s, "%-20s: 0x%x\n", "offload_feat", inff_offload_feat); + seq_printf(s, "%-20s: %d\n", "txglomsz", inff_sdiod_txglomsz); + seq_printf(s, "%-20s: %d\n", "bt_over_sdio", !!inff_bt_over_sdio); + seq_printf(s, "%-20s: %d\n", "bus_idle_time", inff_sdio_bus_idle_time); + seq_printf(s, "%-20s: %d\n", "short_psq", !!inff_short_psq); + seq_printf(s, "%-20s: %d\n", "tx_cpu", bus_if->drvr->settings->tx_cpu); + seq_printf(s, "%-20s: %d\n", "napi_cpu", bus_if->drvr->settings->napi_cpu); + seq_printf(s, "%-20s: %d\n", "napi_enable", bus_if->drvr->settings->napi_enable); + + for (int i = 0; i < 5; i++) + seq_printf(s, "%-20s[%d]: %d\n", "commonring_depth", + i, bus_if->drvr->settings->commonring_depth[i]); + seq_printf(s, "%-20s: %d\n", "flowring_depth", bus_if->drvr->settings->flowring_depth); + seq_printf(s, "%-20s: %d\n", "logring_depth", bus_if->drvr->settings->logring_depth); + seq_printf(s, "%-20s: %d\n", "logger_level", bus_if->drvr->settings->logger_level); + + return 0; +} + +struct inff_mp_device *inff_get_module_param(struct device *dev, + enum inff_bus_type bus_type, + u32 chip, u32 chiprev) +{ + struct inff_mp_device *settings; + int i; + + inff_dbg(INFO, "Enter, bus=%d, chip=%d, rev=%d\n", bus_type, chip, + chiprev); + settings = kzalloc(sizeof(*settings), GFP_ATOMIC); + if (!settings) + return NULL; + + /* start by using the module parameters */ + inff_dbg(INFO, "debug: 0x%x\n", inff_msg_level); + strscpy(settings->firmware_path, inff_firmware_path, + INFF_FW_ALTPATH_LEN); + inff_dbg(INFO, "alternative_fw_path: %s\n", settings->firmware_path); + settings->p2p_enable = !!inff_p2p_enable; + inff_dbg(INFO, "p2pon: %d\n", settings->p2p_enable); + memcpy(settings->feature_disable, + inff_feature_disable, + sizeof(inff_feature_disable)); + inff_dbg(INFO, "feature_disable: "); + for (i = 0; i < INFF_MAX_FEATURE_BYTES; i++) + inff_dbg(INFO, "0x%x ", settings->feature_disable[i]); + inff_dbg(INFO, "\n"); + + settings->fcmode = inff_fcmode; + 
inff_dbg(INFO, "fcmode: %d\n", settings->fcmode); + settings->roamoff = inff_roamoff; + inff_dbg(INFO, "roamoff: %d\n", settings->roamoff); + settings->iapp = !!inff_iapp_enable; + inff_dbg(INFO, "iapp: %d\n", settings->iapp); + settings->eap_restrict = !!inff_eap_restrict; + inff_dbg(INFO, "eap_restrict: %d\n", settings->eap_restrict); + settings->default_pm = !!inff_max_pm ? PM_MAX : PM_FAST; + inff_dbg(INFO, "max_pm: %d\n", !!inff_max_pm); +#ifdef DEBUG + settings->ignore_probe_fail = !!inff_ignore_probe_fail; + inff_dbg(INFO, "ignore_probe_fail: %d\n", settings->ignore_probe_fail); +#endif + settings->fw_ap_select = !!inff_fw_ap_select; + inff_dbg(INFO, "fw_ap_select: %d\n", settings->fw_ap_select); + settings->disable_6ghz = !!inff_disable_6ghz; + inff_dbg(INFO, "disable_6ghz: %d\n", settings->disable_6ghz); + settings->sdio_in_isr = !!inff_sdio_in_isr; + inff_dbg(INFO, "sdio_in_isr: %d\n", settings->sdio_in_isr); + settings->pkt_prio = !!inff_pkt_prio_enable; + inff_dbg(INFO, "pkt_prio: %d\n", settings->pkt_prio); + settings->sdio_rxf_in_kthread_enabled = !!inff_sdio_rxf_in_kthread; + inff_dbg(INFO, "sdio_rxf_thread: %d\n", settings->sdio_rxf_in_kthread_enabled); + + inff_dbg(INFO, "offload_prof: %d\n", inff_offload_prof); + if (inff_offload_prof >= INFF_OFFLOAD_PROF_TYPE_MAX) { + inff_err("Invalid Offload power profile %u, using default profile 1", + inff_offload_prof); + inff_offload_prof = INFF_OFFLOAD_PROF_TYPE_LOW_PWR; + } + settings->offload_prof = inff_offload_prof; + settings->offload_feat = inff_offload_feat; + inff_dbg(INFO, "offload_feat: 0x%x\n", settings->offload_feat); + + settings->bt_over_sdio = !!inff_bt_over_sdio; + inff_dbg(INFO, "bt_over_sdio: %d\n", settings->bt_over_sdio); + settings->short_psq = !!inff_short_psq; + inff_dbg(INFO, "inff_short_psq: %d\n", settings->short_psq); + + settings->idleclk_disable = inff_sdio_idleclk_disable; + inff_dbg(INFO, "idleclk_disable: %d\n", settings->idleclk_disable); + settings->tx_cpu = inff_tx_cpu; + 
inff_dbg(INFO, "tx_cpu: %d\n", settings->tx_cpu); + settings->napi_cpu = inff_napi_cpu; + inff_dbg(INFO, "napi_cpu: %d\n", settings->napi_cpu); + settings->napi_enable = inff_napi_enable; + inff_dbg(INFO, "napi_enable: %d\n", settings->napi_enable); + + for (i = 0; i < 5; i++) { + settings->commonring_depth[i] = inff_commonring_depth[i]; + inff_dbg(INFO, "commonring_depth[%d] : %d\n", + i, settings->commonring_depth[i]); + } + + settings->flowring_depth = inff_flowring_depth; + inff_dbg(INFO, "flowring_depth : %d\n", settings->flowring_depth); + + if (bus_type == INFF_BUSTYPE_SDIO) { + settings->bus.sdio.txglomsz = inff_sdiod_txglomsz; + inff_dbg(INFO, "txglomsz: %d\n", settings->bus.sdio.txglomsz); + } + + settings->sdio_bus_idle_time = inff_sdio_bus_idle_time; + inff_dbg(INFO, "sdio_bus_idle_time: %d\n", settings->sdio_bus_idle_time); + + settings->logring_depth = inff_logring_depth; + inff_dbg(INFO, "logring_depth : %d\n", settings->logring_depth); + + settings->logger_level = inff_logger_level; + inff_dbg(INFO, "logger_level : %d\n", settings->logger_level); + + return settings; +} + +void inff_release_module_param(struct inff_mp_device *module_param) +{ + kfree(module_param); +} + +static int +inff_reboot_callback(struct notifier_block *this, unsigned long code, void *unused) +{ + inff_dbg(INFO, "code = %ld\n", code); + if (code == SYS_RESTART) + inff_core_exit(); + return NOTIFY_DONE; +} + +static int __init inff_module_init(void) +{ + int err; + + /* Continue the initialization by registering the different busses */ + err = inff_core_init(); + if (!err) + register_reboot_notifier(&inff_reboot_notifier); + + return err; +} + +static void __exit inff_module_exit(void) +{ + inff_core_exit(); + unregister_reboot_notifier(&inff_reboot_notifier); +} + +module_init(inff_module_init); +module_exit(inff_module_exit); diff --git a/drivers/net/wireless/infineon/inffmac/common.h b/drivers/net/wireless/infineon/inffmac/common.h new file mode 100644 index 
000000000000..0e679f6394ac --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/common.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2014 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_COMMON_H +#define INFF_COMMON_H + +#include "fwil_types.h" +#include "bus.h" + +#define INFF_FW_ALTPATH_LEN 256 + +#define INFFMAC_DISABLE 0 +#define INFFMAC_ENABLE 1 +#define INFFMAC_AUTO 2 + +#define INFF_DEFAULT_SDIO_IDLE_CONFIG 2 +/* Keeping these macro definition here because these are defined in mmc drivers. + * So for 3rd party mmc, fmac build should not fail due to build error. + */ + +/* SDIO IDLECLOCK Support - reusing pm_caps */ +#ifndef SDIO_IDLECLOCK_DIS +#define SDIO_IDLECLOCK_DIS BIT(2) /* Start SDClock */ +#define SDIO_IDLECLOCK_EN BIT(3) /* Stop SDClock */ +#define SDIO_SDMODE_1BIT BIT(4) /* Set 1-bit Bus mode */ +#define SDIO_SDMODE_4BIT BIT(5) /* Set 4-bit Bus mode */ +#endif /* !SDIO_IDLECLOCK_DIS */ + +struct inff_sdio_platform_data { + int txglomsz; + bool oob_irq_supported; + unsigned int oob_irq_nr; + unsigned long oob_irq_flags; + bool broken_sg_support; + unsigned short sd_head_align; + unsigned short sd_sgentry_align; +}; + +/** + * struct inff_mp_device - Device module parameters. + * + * @p2p_enable: Legacy P2P0 enable (old wpa_supplicant). + * @feature_disable: Feature_disable bitmask. + * @fcmode: FWS flow control. + * @roamoff: Firmware roaming off? + * @eap_restrict: Not allow data tx/rx until 802.1X auth succeeds + * @default_pm: default power management (PM) mode. + * @ignore_probe_fail: Ignore probe failure. + * @fw_ap_select: Allow FW to select AP. + * @disable_6ghz: Disable 6GHz operation + * @sdio_in_isr: Handle SDIO DPC in ISR. 
+ * @offload_prof: Enable offloads configuration power profile (Low,Mid,High) + * @offload_feat: offloads feature flags to be enabled for selected pwr profile + * @country_codes: If available, pointer to struct for translating country codes + * @bus: Bus specific platform data. Only SDIO at the mmoment. + * @pkt_prio: Support customer dscp to WMM up mapping. + * @idleclk_disable: SDIO bus clock output disable when bus is idle. + * @idle_time_zero: Set idle interval to zero. + * @commonring_depth: Set commonring depth. + * @flowring_depth: Set flowring depth. + * @logring_depth: Set logring depth. + * @logger_level: Set logger level. + */ +#define INFF_MAX_FEATURE_BYTES DIV_ROUND_UP(INFF_FEAT_LAST, 8) +struct inff_mp_device { + char firmware_path[INFF_FW_ALTPATH_LEN]; + bool p2p_enable; + unsigned char feature_disable[INFF_MAX_FEATURE_BYTES]; + int fcmode; + unsigned int roamoff; + bool iapp; + bool eap_restrict; + int default_pm; + bool ignore_probe_fail; + bool fw_ap_select; + bool disable_6ghz; + bool sdio_in_isr; + bool sdio_rxf_in_kthread_enabled; + unsigned int offload_prof; + unsigned int offload_feat; + bool bt_over_sdio; + bool short_psq; + const char *board_type; + unsigned char mac[ETH_ALEN]; + union { + struct inff_sdio_platform_data sdio; + } bus; + bool pkt_prio; + int idleclk_disable; + int sdio_bus_idle_time; + int tx_cpu; + int napi_cpu; + int napi_enable; + int commonring_depth[5]; + int flowring_depth; + int logring_depth; + int logger_level; +}; + +/** + * enum inff_roamoff_mode - using fw roaming and report event mode if not use it. + * + * @INFF_ROAMOFF_DISABLE: use firmware roaming engine + * @INFF_ROAMOFF_EN_BCNLOST_MSG: + * don't use firmware roaming engine, and report to cfg80211 layer by BCNLOST_MSG event + * @INFF_ROAMOFF_EN_DISCONNECT_EVT: + * don't use firmware roaming engine, and report to cfg80211 layer by DISCONNECT event + * @INFF_ROAMOFF_MAX: + * for sanity checking purpose. 
+ */ + +enum inff_roamoff_mode { + INFF_ROAMOFF_DISABLE = 0, + INFF_ROAMOFF_EN_BCNLOST_MSG = 1, + INFF_ROAMOFF_EN_DISCONNECT_EVT = 2, + INFF_ROAMOFF_MAX +}; + +void inff_c_set_joinpref_default(struct inff_if *ifp); + +struct inff_mp_device *inff_get_module_param(struct device *dev, + enum inff_bus_type bus_type, + u32 chip, u32 chiprev); +int inff_debugfs_param_read(struct seq_file *s, void *data); +void inff_release_module_param(struct inff_mp_device *module_param); + +/* Sets dongle media info (drv_version, mac address). */ +int inff_c_preinit_dcmds(struct inff_if *ifp); +int inff_c_set_cur_etheraddr(struct inff_if *ifp, const u8 *addr); + +u8 inff_map_prio_to_prec(void *cfg, u8 prio); + +u8 inff_map_prio_to_aci(void *cfg, u8 prio); + +#endif /* INFF_COMMON_H */ -- 2.25.1 Integrates the driver with the cfg80211 subsystem for standard wireless configuration, and handles notifications from the driver. Also has the function prototypes and structures for cfg80211 integration. Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/cfg80211.c | 6749 +++++++++++++++++ .../net/wireless/infineon/inffmac/cfg80211.h | 604 ++ 2 files changed, 7353 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/cfg80211.c create mode 100644 drivers/net/wireless/infineon/inffmac/cfg80211.h diff --git a/drivers/net/wireless/infineon/inffmac/cfg80211.c b/drivers/net/wireless/infineon/inffmac/cfg80211.c new file mode 100644 index 000000000000..11e2a6f08024 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/cfg80211.c @@ -0,0 +1,6749 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "utils.h" +#include "defs.h" +#include "hw_ids.h" +#include "core.h" +#include "debug.h" +#include "tracepoint.h" +#include "fwil_types.h" +#include "p2p.h" +#include "btcoex.h" +#include "pno.h" +#include "fwsignal.h" +#include "cfg80211.h" +#include "feature.h" +#include "fwil.h" +#include "proto.h" +#include "vendor.h" +#include "vendor_inf.h" +#include "bus.h" +#include "common.h" +#include "he.h" +#include "eht.h" +#include "twt.h" +#include "offload.h" +#include "pmsr.h" + +#define INFF_ND_INFO_TIMEOUT msecs_to_jiffies(2000) + +#define MGMT_AUTH_FRAME_DWELL_TIME 4000 +#define MGMT_AUTH_FRAME_WAIT_TIME (MGMT_AUTH_FRAME_DWELL_TIME + 100) + +#define INFF_ASSOC_PARAMS_FIXED_SIZE \ + (sizeof(struct inff_assoc_params_le) - sizeof(u16)) + +#define INFF_MAX_CHANSPEC_LIST \ + (INFF_DCMD_MEDLEN / sizeof(__le32) - 1) + +#define RATE_TO_BASE100KBPS(rate) (((rate) * 10) / 2) +#define RATETAB_ENT(_bitrate, _rateid, _flags) \ + { \ + .bitrate = (_bitrate), \ + .hw_value = (_rateid), \ + .flags = (_flags), \ + } + +static struct ieee80211_rate __wl_rates[] = { + RATETAB_ENT(RATE_TO_BASE100KBPS(INF_RATE_1M), INF_RATE_1M, 0), + RATETAB_ENT(RATE_TO_BASE100KBPS(INF_RATE_2M), INF_RATE_2M, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(RATE_TO_BASE100KBPS(INF_RATE_5M5), INF_RATE_5M5, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(RATE_TO_BASE100KBPS(INF_RATE_11M), INF_RATE_11M, IEEE80211_RATE_SHORT_PREAMBLE), + RATETAB_ENT(RATE_TO_BASE100KBPS(INF_RATE_6M), INF_RATE_6M, 0), + RATETAB_ENT(RATE_TO_BASE100KBPS(INF_RATE_9M), INF_RATE_9M, 0), + RATETAB_ENT(RATE_TO_BASE100KBPS(INF_RATE_12M), INF_RATE_12M, 0), + RATETAB_ENT(RATE_TO_BASE100KBPS(INF_RATE_18M), INF_RATE_18M, 0), + RATETAB_ENT(RATE_TO_BASE100KBPS(INF_RATE_24M), INF_RATE_24M, 0), + RATETAB_ENT(RATE_TO_BASE100KBPS(INF_RATE_36M), INF_RATE_36M, 0), + RATETAB_ENT(RATE_TO_BASE100KBPS(INF_RATE_48M), INF_RATE_48M, 0), + 
RATETAB_ENT(RATE_TO_BASE100KBPS(INF_RATE_54M), INF_RATE_54M, 0), +}; + +#define wl_g_rates (__wl_rates + 0) +#define wl_g_rates_size ARRAY_SIZE(__wl_rates) +#define wl_a_rates (__wl_rates + 4) +#define wl_a_rates_size (wl_g_rates_size - 4) + +#define CHAN2G(_channel, _freq) { \ + .band = NL80211_BAND_2GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define CH5G_FREQ(_channel) (5000 + (5 * (_channel))) +#define CHAN5G(_channel, _freq) { \ + .band = NL80211_BAND_5GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +#define CH6G_FREQ(_channel) (5950 + (5 * (_channel))) +#define CHAN6G(_channel, _freq) { \ + .band = NL80211_BAND_6GHZ, \ + .center_freq = (_freq), \ + .hw_value = (_channel), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + +static struct ieee80211_channel __wl_2ghz_channels[] = { + CHAN2G(1, 2412), CHAN2G(2, 2417), CHAN2G(3, 2422), CHAN2G(4, 2427), + CHAN2G(5, 2432), CHAN2G(6, 2437), CHAN2G(7, 2442), CHAN2G(8, 2447), + CHAN2G(9, 2452), CHAN2G(10, 2457), CHAN2G(11, 2462), CHAN2G(12, 2467), + CHAN2G(13, 2472), CHAN2G(14, 2484) +}; + +static struct ieee80211_channel __wl_5ghz_channels[] = { + CHAN5G(34, CH5G_FREQ(34)), CHAN5G(36, CH5G_FREQ(36)), CHAN5G(38, CH5G_FREQ(38)), + CHAN5G(40, CH5G_FREQ(40)), CHAN5G(42, CH5G_FREQ(42)), CHAN5G(44, CH5G_FREQ(44)), + CHAN5G(46, CH5G_FREQ(46)), CHAN5G(48, CH5G_FREQ(48)), CHAN5G(52, CH5G_FREQ(52)), + CHAN5G(56, CH5G_FREQ(56)), CHAN5G(60, CH5G_FREQ(60)), CHAN5G(64, CH5G_FREQ(64)), + CHAN5G(100, CH5G_FREQ(100)), CHAN5G(104, CH5G_FREQ(104)), CHAN5G(108, CH5G_FREQ(108)), + CHAN5G(112, CH5G_FREQ(112)), CHAN5G(116, CH5G_FREQ(116)), CHAN5G(120, CH5G_FREQ(120)), + CHAN5G(124, CH5G_FREQ(124)), CHAN5G(128, CH5G_FREQ(128)), CHAN5G(132, CH5G_FREQ(132)), + CHAN5G(136, CH5G_FREQ(136)), CHAN5G(140, CH5G_FREQ(140)), CHAN5G(144, CH5G_FREQ(144)), + CHAN5G(149, CH5G_FREQ(149)), CHAN5G(153, 
CH5G_FREQ(153)), CHAN5G(157, CH5G_FREQ(157)), + CHAN5G(161, CH5G_FREQ(161)), CHAN5G(165, CH5G_FREQ(165)) +}; + +static struct ieee80211_channel __wl_6ghz_channels[] = { + CHAN6G(1, CH6G_FREQ(1)), CHAN6G(5, CH6G_FREQ(5)), CHAN6G(9, CH6G_FREQ(9)), + CHAN6G(13, CH6G_FREQ(13)), CHAN6G(17, CH6G_FREQ(17)), + CHAN6G(21, CH6G_FREQ(21)), CHAN6G(25, CH6G_FREQ(25)), CHAN6G(29, CH6G_FREQ(29)), + CHAN6G(33, CH6G_FREQ(33)), CHAN6G(37, CH6G_FREQ(37)), + CHAN6G(41, CH6G_FREQ(41)), CHAN6G(45, CH6G_FREQ(45)), CHAN6G(49, CH6G_FREQ(49)), + CHAN6G(53, CH6G_FREQ(53)), CHAN6G(57, CH6G_FREQ(57)), + CHAN6G(61, CH6G_FREQ(61)), CHAN6G(65, CH6G_FREQ(65)), CHAN6G(69, CH6G_FREQ(69)), + CHAN6G(73, CH6G_FREQ(73)), CHAN6G(77, CH6G_FREQ(77)), + CHAN6G(81, CH6G_FREQ(81)), CHAN6G(85, CH6G_FREQ(85)), CHAN6G(89, CH6G_FREQ(89)), + CHAN6G(93, CH6G_FREQ(93)), CHAN6G(97, CH6G_FREQ(97)), + CHAN6G(101, CH6G_FREQ(101)), CHAN6G(105, CH6G_FREQ(105)), CHAN6G(109, CH6G_FREQ(109)), + CHAN6G(113, CH6G_FREQ(113)), CHAN6G(117, CH6G_FREQ(117)), + CHAN6G(121, CH6G_FREQ(121)), CHAN6G(125, CH6G_FREQ(125)), CHAN6G(129, CH6G_FREQ(129)), + CHAN6G(133, CH6G_FREQ(133)), CHAN6G(137, CH6G_FREQ(137)), + CHAN6G(141, CH6G_FREQ(141)), CHAN6G(145, CH6G_FREQ(145)), CHAN6G(149, CH6G_FREQ(149)), + CHAN6G(153, CH6G_FREQ(153)), CHAN6G(157, CH6G_FREQ(157)), + CHAN6G(161, CH6G_FREQ(161)), CHAN6G(165, CH6G_FREQ(165)), CHAN6G(169, CH6G_FREQ(169)), + CHAN6G(173, CH6G_FREQ(173)), CHAN6G(177, CH6G_FREQ(177)), + CHAN6G(181, CH6G_FREQ(181)), CHAN6G(185, CH6G_FREQ(185)), CHAN6G(189, CH6G_FREQ(189)), + CHAN6G(193, CH6G_FREQ(193)), CHAN6G(197, CH6G_FREQ(197)), + CHAN6G(201, CH6G_FREQ(201)), CHAN6G(205, CH6G_FREQ(205)), CHAN6G(209, CH6G_FREQ(209)), + CHAN6G(213, CH6G_FREQ(213)), CHAN6G(217, CH6G_FREQ(217)), + CHAN6G(221, CH6G_FREQ(221)), CHAN6G(225, CH6G_FREQ(225)), CHAN6G(229, CH6G_FREQ(229)), + CHAN6G(233, CH6G_FREQ(233)) +}; + +/* Band templates duplicated per wiphy. The channel info + * above is added to the band during setup. 
+ */ +static const struct ieee80211_supported_band __wl_band_2ghz = { + .band = NL80211_BAND_2GHZ, + .bitrates = wl_g_rates, + .n_bitrates = wl_g_rates_size, +}; + +static const struct ieee80211_supported_band __wl_band_5ghz = { + .band = NL80211_BAND_5GHZ, + .bitrates = wl_a_rates, + .n_bitrates = wl_a_rates_size, +}; + +static struct ieee80211_supported_band __wl_band_6ghz = { + .band = NL80211_BAND_6GHZ, + .bitrates = wl_a_rates, + .n_bitrates = wl_a_rates_size, +}; + +/* This is to override regulatory domains defined in cfg80211 module (reg.c) + * By default world regulatory domain defined in reg.c puts the flags + * NL80211_RRF_NO_IR for 5GHz channels (for 36..48 and 149..165). + * With respect to these flags, wpa_supplicant doesn't start p2p + * operations on 5GHz channels. All the changes in world regulatory + * domain are to be done here. + */ +static const struct ieee80211_regdomain inff_regdom = { + .n_reg_rules = 5, + .alpha2 = "99", + .reg_rules = { + /* IEEE 802.11b/g, channels 1..11 */ + REG_RULE(2412 - 10, 2472 + 10, 40, 6, 20, 0), + /* If any */ + /* IEEE 802.11 channel 14 - Only JP enables + * this and for 802.11b only + */ + REG_RULE(2484 - 10, 2484 + 10, 20, 6, 20, 0), + /* IEEE 802.11a, channel 36..64 */ + REG_RULE(5150 - 10, 5350 + 10, 160, 6, 20, 0), + /* IEEE 802.11a, channel 100..165 */ + REG_RULE(5470 - 10, 5850 + 10, 160, 6, 20, 0), + /* IEEE 802.11ax, 6E */ + REG_RULE(5935 - 10, 7115 + 10, 160, 6, 20, 0), + } +}; + +/* Note: inff_cipher_suites is an array of int defining which cipher suites + * are supported. A pointer to this array and the number of entries is passed + * on to upper layers. AES_CMAC defines whether or not the driver supports MFP.
+ * So the four MFP cipher suites (AES_CMAC and the BIP suites) have to be the + * last entries in the array, and when the device does not support MFP then + * the number of suites will be decreased by 4 + */ +static const u32 inff_cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + WLAN_CIPHER_SUITE_CCMP_256, + WLAN_CIPHER_SUITE_GCMP, + WLAN_CIPHER_SUITE_GCMP_256, + /* Keep the MFP suites as the last four entries: */ + WLAN_CIPHER_SUITE_AES_CMAC, + WLAN_CIPHER_SUITE_BIP_CMAC_256, + WLAN_CIPHER_SUITE_BIP_GMAC_128, + WLAN_CIPHER_SUITE_BIP_GMAC_256 +}; + +static int inff_setup_wiphybands(struct inff_cfg80211_info *cfg); + +u8 nl80211_band_to_fwil(enum nl80211_band band) +{ + switch (band) { + case NL80211_BAND_2GHZ: + return WLC_BAND_2G; + case NL80211_BAND_5GHZ: + return WLC_BAND_5G; + case NL80211_BAND_6GHZ: + return WLC_BAND_6G; + default: + WARN_ON(1); + break; + } + return 0; +} + +u16 chandef_to_chanspec(struct inff_d11inf *d11inf, struct cfg80211_chan_def *ch) +{ + struct inff_chan ch_inf; + s32 primary_offset; + + inff_dbg(TRACE, "chandef: control %d center %d width %d\n", + ch->chan->center_freq, ch->center_freq1, ch->width); + ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1); + primary_offset = ch->chan->center_freq - ch->center_freq1; + switch (ch->width) { + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_20_NOHT: + ch_inf.bw = INFF_CHAN_BW_20; + WARN_ON(primary_offset != 0); + break; + case NL80211_CHAN_WIDTH_40: + ch_inf.bw = INFF_CHAN_BW_40; + if (primary_offset > 0) + ch_inf.sb = INFF_CHAN_SB_U; + else + ch_inf.sb = INFF_CHAN_SB_L; + break; + case NL80211_CHAN_WIDTH_80: + ch_inf.bw = INFF_CHAN_BW_80; + if (primary_offset == -30) + ch_inf.sb = INFF_CHAN_SB_LL; + else if (primary_offset == -10) + ch_inf.sb = INFF_CHAN_SB_LU; + else if (primary_offset == 10) + ch_inf.sb = INFF_CHAN_SB_UL; + else + ch_inf.sb = INFF_CHAN_SB_UU; + break; + case NL80211_CHAN_WIDTH_160: + ch_inf.bw = INFF_CHAN_BW_160; + if (primary_offset == -70) +
ch_inf.sb = INFF_CHAN_SB_LLL; + else if (primary_offset == -50) + ch_inf.sb = INFF_CHAN_SB_LLU; + else if (primary_offset == -30) + ch_inf.sb = INFF_CHAN_SB_LUL; + else if (primary_offset == -10) + ch_inf.sb = INFF_CHAN_SB_LUU; + else if (primary_offset == 10) + ch_inf.sb = INFF_CHAN_SB_ULL; + else if (primary_offset == 30) + ch_inf.sb = INFF_CHAN_SB_ULU; + else if (primary_offset == 50) + ch_inf.sb = INFF_CHAN_SB_UUL; + else + ch_inf.sb = INFF_CHAN_SB_UUU; + break; + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: + default: + WARN_ON_ONCE(1); + } + switch (ch->chan->band) { + case NL80211_BAND_2GHZ: + ch_inf.band = INFF_CHAN_BAND_2G; + break; + case NL80211_BAND_5GHZ: + ch_inf.band = INFF_CHAN_BAND_5G; + break; + case NL80211_BAND_6GHZ: + ch_inf.band = INFF_CHAN_BAND_6G; + break; + case NL80211_BAND_60GHZ: + default: + WARN_ON_ONCE(1); + } + d11inf->encchspec(&ch_inf); + + inff_dbg(TRACE, "chanspec: 0x%x\n", ch_inf.chspec); + return ch_inf.chspec; +} + +u16 channel_to_chanspec(struct inff_d11inf *d11inf, + struct ieee80211_channel *ch) +{ + struct inff_chan ch_inf; + + switch (ch->band) { + case NL80211_BAND_2GHZ: + ch_inf.band = INFF_CHAN_BAND_2G; + break; + case NL80211_BAND_5GHZ: + ch_inf.band = INFF_CHAN_BAND_5G; + break; + case NL80211_BAND_6GHZ: + ch_inf.band = INFF_CHAN_BAND_6G; + break; + case NL80211_BAND_60GHZ: + default: + WARN_ON_ONCE(1); + } + ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq); + ch_inf.bw = INFF_CHAN_BW_20; + d11inf->encchspec(&ch_inf); + + return ch_inf.chspec; +} + +static int inff_vif_change_validate(struct inff_cfg80211_info *cfg, + struct inff_cfg80211_vif *vif, + enum nl80211_iftype new_type) +{ + struct inff_cfg80211_vif *pos; + bool check_combos = false; + int ret = 0; + struct iface_combination_params params = { + .num_different_channels = 1, + }; + + list_for_each_entry(pos, &cfg->vif_list, list) + if (pos == vif) { + params.iftype_num[new_type]++; + } else { + /* 
concurrent interfaces so need check combinations */ + check_combos = true; + params.iftype_num[pos->wdev.iftype]++; + } + + if (check_combos) + ret = cfg80211_check_combinations(cfg->wiphy, ¶ms); + + return ret; +} + +static int inff_vif_add_validate(struct inff_cfg80211_info *cfg, + enum nl80211_iftype new_type) +{ + struct inff_cfg80211_vif *pos; + struct iface_combination_params params = { + .num_different_channels = 1, + }; + + list_for_each_entry(pos, &cfg->vif_list, list) + params.iftype_num[pos->wdev.iftype]++; + + params.iftype_num[new_type]++; + return cfg80211_check_combinations(cfg->wiphy, ¶ms); +} + +void +inff_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev) +{ + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + if (wdev->iftype == NL80211_IFTYPE_ADHOC || + wdev->iftype == NL80211_IFTYPE_AP || + wdev->iftype == NL80211_IFTYPE_P2P_GO) + inff_proto_configure_addr_mode(ifp->drvr, ifp->ifidx, + ADDR_DIRECT); + else + inff_proto_configure_addr_mode(ifp->drvr, ifp->ifidx, + ADDR_INDIRECT); +} + +static struct wireless_dev *inff_cfg80211_add_iface(struct wiphy *wiphy, + const char *name, + unsigned char name_assign_type, + enum nl80211_iftype type, + struct vif_params *params) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_pub *drvr = cfg->pub; + struct wireless_dev *wdev = NULL; + int err; + + inff_dbg(TRACE, "enter: %s type %d\n", name, type); + err = inff_vif_add_validate(wiphy_to_cfg(wiphy), type); + if (err) { + iphy_err(drvr, "iface validation failed: err=%d\n", err); + return ERR_PTR(err); + } + switch (type) { + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MESH_POINT: + return ERR_PTR(-EOPNOTSUPP); + case NL80211_IFTYPE_MONITOR: + return inff_mon_add_vif(wiphy, name); + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_AP: + wdev = inff_apsta_add_vif(wiphy, name, 
params, type); + break; + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_P2P_DEVICE: + wdev = inff_p2p_add_vif(wiphy, name, name_assign_type, type, params); + break; + case NL80211_IFTYPE_WLAN_SENSE: + wdev = inff_wlan_sense_add_vif(wiphy, name, name_assign_type, type, params); + break; + case NL80211_IFTYPE_UNSPECIFIED: + default: + return ERR_PTR(-EOPNOTSUPP); + } + + if (IS_ERR_OR_NULL(wdev)) + iphy_err(drvr, "add iface %s type %d failed: err=%d\n", name, + type, (int)PTR_ERR(wdev)); + else + inff_cfg80211_update_proto_addr_mode(wdev); + + return wdev; +} + +void inff_set_mpc(struct inff_if *ifp, int mpc) +{ + struct inff_pub *drvr = ifp->drvr; + s32 err = 0; + + ifp->drvr->req_mpc = mpc; + if (check_vif_up(ifp->vif)) { + err = inff_fil_iovar_int_set(ifp, + "mpc", + ifp->drvr->req_mpc); + if (err) { + iphy_err(drvr, "fail to set mpc\n"); + return; + } + inff_dbg(INFO, "MPC : %d\n", mpc); + } +} + +static +int inff_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct net_device *ndev = wdev->netdev; + + if (ndev && ndev == cfg_to_ndev(cfg)) + return -EOPNOTSUPP; + + /* vif event pending in firmware */ + if (inff_cfg80211_vif_event_armed(cfg)) + return -EBUSY; + + if (ndev) { + if (test_bit(INFF_SCAN_STATUS_BUSY, &cfg->scan_status) && + cfg->escan_info.ifp == netdev_priv(ndev)) + inff_notify_escan_complete(cfg, netdev_priv(ndev), + true, true); + + inff_fil_iovar_int_set(netdev_priv(ndev), "mpc", 1); + } + + switch (wdev->iftype) { + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_AP_VLAN: + case NL80211_IFTYPE_WDS: + case NL80211_IFTYPE_MESH_POINT: + return -EOPNOTSUPP; + case NL80211_IFTYPE_MONITOR: + return inff_mon_del_vif(wiphy, wdev); + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_AP: + return inff_cfg80211_del_apsta_iface(wiphy, wdev); + case NL80211_IFTYPE_P2P_CLIENT: + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_P2P_DEVICE: + 
return inff_p2p_del_vif(wiphy, wdev); + case NL80211_IFTYPE_WLAN_SENSE: + return inff_wlan_sense_del_vif(wiphy, wdev); + case NL80211_IFTYPE_UNSPECIFIED: + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static s32 +inff_cfg80211_change_iface(struct wiphy *wiphy, struct net_device *ndev, + enum nl80211_iftype type, + struct vif_params *params) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_cfg80211_vif *vif = ifp->vif; + struct inff_pub *drvr = cfg->pub; + s32 infra = 0; + s32 ap = 0; + s32 err = 0; + + inff_dbg(TRACE, "Enter, bsscfgidx=%d, type=%d\n", ifp->bsscfgidx, + type); + + /* WAR: There are a number of p2p interface related problems which + * need to be handled initially (before doing the validate). + * wpa_supplicant tends to do iface changes on p2p device/client/go + * which are not always possible/allowed. However we need to return + * OK otherwise the wpa_supplicant won't start. The situation differs + * on configuration and setup (p2pon=1 module param). The first check + * is to see if the request is a change to station for p2p iface. + */ + if (type == NL80211_IFTYPE_STATION && + (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT || + vif->wdev.iftype == NL80211_IFTYPE_P2P_GO || + vif->wdev.iftype == NL80211_IFTYPE_P2P_DEVICE)) { + inff_dbg(TRACE, "Ignoring cmd for p2p if\n"); + /* Now depending on whether module param p2pon=1 was used the + * response needs to be either 0 or EOPNOTSUPP. The reason is + * that if p2pon=1 is used, but a newer supplicant is used then + * we should return an error, as this combination won't work. + * In other situations 0 is returned and supplicant will start + * normally. It will give a trace in cfg80211, but it is the + * only way to get it working. Unfortunately this will result + * in situation where we won't support new supplicant in + * combination with module param p2pon=1, but that is the way + * it is. 
If the user tries this then unloading of driver might + * fail/lock. + */ + if (cfg->p2p.p2pdev_dynamically) + return -EOPNOTSUPP; + else + return 0; + } + err = inff_vif_change_validate(wiphy_to_cfg(wiphy), vif, type); + if (err) { + iphy_err(drvr, "iface validation failed: err=%d\n", err); + return err; + } + switch (type) { + case NL80211_IFTYPE_MONITOR: + case NL80211_IFTYPE_WDS: + iphy_err(drvr, "type (%d) : currently we do not support this type\n", + type); + return -EOPNOTSUPP; + case NL80211_IFTYPE_ADHOC: + infra = 0; + break; + case NL80211_IFTYPE_STATION: + infra = 1; + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + ap = 1; + break; + default: + err = -EINVAL; + goto done; + } + + if (ap) { + if (type == NL80211_IFTYPE_P2P_GO) { + inff_dbg(INFO, "IF Type = P2P GO\n"); + err = inff_p2p_ifchange(cfg, INFF_FIL_P2P_IF_GO); + } + if (!err) + inff_dbg(INFO, "IF Type = AP\n"); + } else { + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_INFRA, infra); + if (err) { + iphy_err(drvr, "WLC_SET_INFRA error (%d)\n", err); + err = -EAGAIN; + goto done; + } + inff_dbg(INFO, "IF Type = %s\n", inff_is_ibssmode(vif) ? 
+ "Adhoc" : "Infra"); + } + ndev->ieee80211_ptr->iftype = type; + + inff_cfg80211_update_proto_addr_mode(&vif->wdev); + inff_setup_wiphybands(cfg); + +done: + inff_dbg(TRACE, "Exit\n"); + + return err; +} + +static s32 inff_set_rts(struct net_device *ndev, u32 rts_threshold) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = ifp->drvr; + s32 err = 0; + + err = inff_fil_iovar_int_set(ifp, "rtsthresh", rts_threshold); + if (err) + iphy_err(drvr, "Error (%d)\n", err); + + return err; +} + +static s32 inff_set_frag(struct net_device *ndev, u32 frag_threshold) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = ifp->drvr; + s32 err = 0; + + err = inff_fil_iovar_int_set(ifp, "fragthresh", + frag_threshold); + if (err) + iphy_err(drvr, "Error (%d)\n", err); + + return err; +} + +static s32 inff_set_retry(struct net_device *ndev, u32 retry, bool l) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = ifp->drvr; + s32 err = 0; + u32 cmd = (l ? 
INFF_C_SET_LRL : INFF_C_SET_SRL); + + err = inff_fil_cmd_int_set(ifp, cmd, retry); + if (err) { + iphy_err(drvr, "cmd (%d) , error (%d)\n", cmd, err); + return err; + } + return err; +} + +static s32 inff_cfg80211_set_wiphy_params(struct wiphy *wiphy, int radio_idx, + u32 changed) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct net_device *ndev = cfg_to_ndev(cfg); + struct inff_if *ifp = netdev_priv(ndev); + s32 err = 0; + + inff_dbg(TRACE, "Enter\n"); + if (!check_vif_up(ifp->vif)) + return -EIO; + + if (changed & WIPHY_PARAM_RTS_THRESHOLD && + cfg->conf->rts_threshold != wiphy->rts_threshold) { + cfg->conf->rts_threshold = wiphy->rts_threshold; + err = inff_set_rts(ndev, cfg->conf->rts_threshold); + if (!err) + goto done; + } + if (changed & WIPHY_PARAM_FRAG_THRESHOLD && + cfg->conf->frag_threshold != wiphy->frag_threshold) { + cfg->conf->frag_threshold = wiphy->frag_threshold; + err = inff_set_frag(ndev, cfg->conf->frag_threshold); + if (!err) + goto done; + } + if (changed & WIPHY_PARAM_RETRY_LONG && + cfg->conf->retry_long != wiphy->retry_long) { + cfg->conf->retry_long = wiphy->retry_long; + err = inff_set_retry(ndev, cfg->conf->retry_long, true); + if (!err) + goto done; + } + if (changed & WIPHY_PARAM_RETRY_SHORT && + cfg->conf->retry_short != wiphy->retry_short) { + cfg->conf->retry_short = wiphy->retry_short; + err = inff_set_retry(ndev, cfg->conf->retry_short, false); + if (!err) + goto done; + } + +done: + inff_dbg(TRACE, "Exit\n"); + return err; +} + +static int inff_set_sae_password(struct inff_if *ifp, const u8 *pwd_data, + u16 pwd_len) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_wsec_sae_pwd_le sae_pwd; + int err; + + if (pwd_len > INFF_WSEC_MAX_SAE_PASSWORD_LEN) { + iphy_err(drvr, "sae_password must be less than %d\n", + INFF_WSEC_MAX_SAE_PASSWORD_LEN); + return -EINVAL; + } + + sae_pwd.key_len = cpu_to_le16(pwd_len); + memcpy(sae_pwd.key, pwd_data, pwd_len); + + err = inff_fil_iovar_data_set(ifp, "sae_password", 
&sae_pwd, + sizeof(sae_pwd)); + if (err < 0) + iphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n", + pwd_len); + + return err; +} + +void inff_link_down(struct inff_cfg80211_vif *vif, u16 reason, + bool locally_generated) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy); + struct inff_pub *drvr = cfg->pub; + bool bus_up = drvr->bus_if->state == INFF_BUS_UP; + s32 err = 0; + + inff_dbg(TRACE, "Enter\n"); + + if (test_and_clear_bit(INFF_VIF_STATUS_CONNECTED, &vif->sme_state)) { + if (bus_up) { + inff_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n"); + err = inff_fil_cmd_data_set(vif->ifp, + INFF_C_DISASSOC, NULL, 0); + if (err) { + iphy_err(drvr, "WLC_DISASSOC failed (%d)\n", + err); + } else { + if (inff_feat_is_enabled(vif->ifp, INFF_FEAT_TWT)) { + /* Cleanup TWT Session list */ + inff_twt_cleanup_all_sess(vif->ifp); + } + } + } + + if (vif->wdev.iftype == NL80211_IFTYPE_STATION || + vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) + cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0, + locally_generated, GFP_KERNEL); + } + clear_bit(INFF_VIF_STATUS_CONNECTING, &vif->sme_state); + clear_bit(INFF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state); + clear_bit(INFF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state); + clear_bit(INFF_SCAN_STATUS_SUPPRESS, &cfg->scan_status); + inff_btcoex_set_mode(vif, INFF_BTCOEX_ENABLED, 0); + if (vif->profile.use_fwsup != INFF_PROFILE_FWSUP_NONE) { + if (bus_up) + inff_set_pmk(vif->ifp, NULL, 0); + vif->profile.use_fwsup = INFF_PROFILE_FWSUP_NONE; + } + + inff_dbg(TRACE, "Exit\n"); +} + +static s32 +inff_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_ibss_params *params) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_cfg80211_profile *profile = &ifp->vif->profile; + struct inff_pub *drvr = cfg->pub; + struct inff_join_params join_params; + size_t join_params_size = 0; + s32 err = 0; + s32 wsec = 0; + 
s32 bcnprd; + u16 chanspec; + u32 ssid_len; + + inff_dbg(TRACE, "Enter\n"); + if (!check_vif_up(ifp->vif)) + return -EIO; + + if (params->ssid) { + inff_dbg(CONN, "SSID: %s\n", params->ssid); + } else { + inff_dbg(CONN, "SSID: NULL, Not supported\n"); + return -EOPNOTSUPP; + } + + set_bit(INFF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state); + + if (params->bssid) + inff_dbg(CONN, "BSSID: %pM\n", params->bssid); + else + inff_dbg(CONN, "No BSSID specified\n"); + + if (params->chandef.chan) + inff_dbg(CONN, "channel: %d\n", + params->chandef.chan->center_freq); + else + inff_dbg(CONN, "no channel specified\n"); + + if (params->channel_fixed) + inff_dbg(CONN, "fixed channel required\n"); + else + inff_dbg(CONN, "no fixed channel required\n"); + + if (params->ie && params->ie_len) + inff_dbg(CONN, "ie len: %d\n", params->ie_len); + else + inff_dbg(CONN, "no ie specified\n"); + + if (params->beacon_interval) + inff_dbg(CONN, "beacon interval: %d\n", + params->beacon_interval); + else + inff_dbg(CONN, "no beacon interval specified\n"); + + if (params->basic_rates) + inff_dbg(CONN, "basic rates: %08X\n", params->basic_rates); + else + inff_dbg(CONN, "no basic rates specified\n"); + + if (params->privacy) + inff_dbg(CONN, "privacy required\n"); + else + inff_dbg(CONN, "no privacy required\n"); + + /* Configure Privacy for starter */ + if (params->privacy) + wsec |= WEP_ENABLED; + + err = inff_fil_iovar_int_set(ifp, "wsec", wsec); + if (err) { + iphy_err(drvr, "wsec failed (%d)\n", err); + goto done; + } + + /* Configure Beacon Interval for starter */ + if (params->beacon_interval) + bcnprd = params->beacon_interval; + else + bcnprd = 100; + + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_BCNPRD, bcnprd); + if (err) { + iphy_err(drvr, "WLC_SET_BCNPRD failed (%d)\n", err); + goto done; + } + + /* Configure required join parameter */ + memset(&join_params, 0, sizeof(struct inff_join_params)); + + /* SSID */ + ssid_len = min_t(u32, params->ssid_len, IEEE80211_MAX_SSID_LEN); + 
memcpy(join_params.ssid_le.SSID, params->ssid, ssid_len); + join_params.ssid_le.SSID_len = cpu_to_le32(ssid_len); + join_params_size = sizeof(join_params.ssid_le); + + /* BSSID */ + if (params->bssid) { + memcpy(join_params.params_le.bssid, params->bssid, ETH_ALEN); + join_params_size += INFF_ASSOC_PARAMS_FIXED_SIZE; + memcpy(profile->bssid, params->bssid, ETH_ALEN); + } else { + eth_broadcast_addr(join_params.params_le.bssid); + eth_zero_addr(profile->bssid); + } + + /* Channel */ + if (params->chandef.chan) { + u32 target_channel; + + cfg->channel = + ieee80211_frequency_to_channel(params->chandef.chan->center_freq); + if (params->channel_fixed) { + /* adding chanspec */ + chanspec = chandef_to_chanspec(&cfg->d11inf, + ¶ms->chandef); + join_params.params_le.chanspec_list = + cpu_to_le16(chanspec); + join_params.params_le.chanspec_num = cpu_to_le32(1); + join_params_size += sizeof(join_params.params_le); + } + + /* set channel for starter */ + target_channel = cfg->channel; + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_CHANNEL, + target_channel); + if (err) { + iphy_err(drvr, "WLC_SET_CHANNEL failed (%d)\n", err); + goto done; + } + } else { + cfg->channel = 0; + } + + cfg->ibss_starter = false; + + err = inff_fil_cmd_data_set(ifp, INFF_C_SET_SSID, + &join_params, join_params_size); + if (err) { + iphy_err(drvr, "WLC_SET_SSID failed (%d)\n", err); + goto done; + } + +done: + if (err) + clear_bit(INFF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state); + inff_dbg(TRACE, "Exit\n"); + return err; +} + +static s32 +inff_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev) +{ + struct inff_if *ifp = netdev_priv(ndev); + + inff_dbg(TRACE, "Enter\n"); + if (!check_vif_up(ifp->vif)) { + /* When driver is being unloaded, it can end up here. If an + * error is returned then later on a debug trace in the wireless + * core module will be printed. To avoid this 0 is returned. 
+ */ + return 0; + } + + inff_link_down(ifp->vif, WLAN_REASON_DEAUTH_LEAVING, true); + inff_net_setcarrier(ifp, false); + + inff_dbg(TRACE, "Exit\n"); + + return 0; +} + +static s32 inff_set_wpa_version(struct net_device *ndev, + struct cfg80211_connect_params *sme) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_cfg80211_profile *profile = ndev_to_prof(ndev); + struct inff_pub *drvr = ifp->drvr; + struct inff_cfg80211_security *sec; + s32 val = 0; + s32 err = 0; + + if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) { + val = WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED; + } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) { + if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_SAE) + val = WPA3_AUTH_SAE_PSK; + else if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_OWE) + val = WPA3_AUTH_OWE; + else if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_8021X_SHA256) + val = WPA3_AUTH_1X_SHA256; + else + val = WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED; + } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_3) { + if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_FT_OVER_SAE) + val = WPA3_AUTH_SAE_FBT; + else if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_FT_8021X_SHA384) + val = WPA3_AUTH_SAE_FT_1X; + else + val = WPA3_AUTH_SAE_PSK; + } else { + val = WPA_AUTH_DISABLED; + } + inff_dbg(CONN, "setting wpa_auth to 0x%0x\n", val); + err = inff_fil_bsscfg_int_set(ifp, "wpa_auth", val); + if (err) { + iphy_err(drvr, "set wpa_auth failed (%d)\n", err); + return err; + } + sec = &profile->sec; + sec->wpa_versions = sme->crypto.wpa_versions; + return err; +} + +static s32 inff_set_auth_type(struct net_device *ndev, + struct cfg80211_connect_params *sme) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_cfg80211_profile *profile = ndev_to_prof(ndev); + struct inff_pub *drvr = ifp->drvr; + struct inff_cfg80211_security *sec; + s32 val = 0; + s32 err = 0; + + switch (sme->auth_type) { + case NL80211_AUTHTYPE_OPEN_SYSTEM: + val = 0; + inff_dbg(CONN, "open 
system\n"); + break; + case NL80211_AUTHTYPE_SHARED_KEY: + val = 1; + inff_dbg(CONN, "shared key\n"); + break; + case NL80211_AUTHTYPE_SAE: + val = 3; + inff_dbg(CONN, "SAE authentication\n"); + break; + default: + val = 2; + inff_dbg(CONN, "automatic, auth type (%d)\n", sme->auth_type); + break; + } + + err = inff_fil_bsscfg_int_set(ifp, "auth", val); + if (err) { + iphy_err(drvr, "set auth failed (%d)\n", err); + return err; + } + sec = &profile->sec; + sec->auth_type = sme->auth_type; + return err; +} + +static s32 +inff_set_wsec_mode(struct net_device *ndev, + struct cfg80211_connect_params *sme) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_cfg80211_profile *profile = ndev_to_prof(ndev); + struct inff_pub *drvr = ifp->drvr; + struct inff_cfg80211_security *sec; + s32 pval = 0; + s32 gval = 0; + s32 wsec; + s32 err = 0; + u32 algos = 0, mask = 0; + + if (sme->crypto.n_ciphers_pairwise) { + switch (sme->crypto.ciphers_pairwise[0]) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + pval = WEP_ENABLED; + break; + case WLAN_CIPHER_SUITE_TKIP: + pval = TKIP_ENABLED; + break; + case WLAN_CIPHER_SUITE_CCMP: + pval = AES_ENABLED; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + pval = AES_ENABLED; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + if (!inff_feat_is_enabled(ifp, INFF_FEAT_GCMP)) { + inff_err("the low layer not support GCMP\n"); + return -EOPNOTSUPP; + } + pval = AES_ENABLED; + algos = KEY_ALGO_MASK(CRYPTO_ALGO_AES_GCM256); + mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM); + break; + default: + iphy_err(drvr, "invalid cipher pairwise (%d)\n", + sme->crypto.ciphers_pairwise[0]); + return -EINVAL; + } + } + if (sme->crypto.cipher_group) { + switch (sme->crypto.cipher_group) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + gval = WEP_ENABLED; + break; + case WLAN_CIPHER_SUITE_TKIP: + gval = TKIP_ENABLED; + break; + case WLAN_CIPHER_SUITE_CCMP: + gval = AES_ENABLED; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + 
gval = AES_ENABLED; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + if (!inff_feat_is_enabled(ifp, INFF_FEAT_GCMP)) { + inff_err("the low layer not support GCMP\n"); + return -EOPNOTSUPP; + } + gval = AES_ENABLED; + algos = KEY_ALGO_MASK(CRYPTO_ALGO_AES_GCM256); + mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM); + break; + default: + iphy_err(drvr, "invalid cipher group (%d)\n", + sme->crypto.cipher_group); + return -EINVAL; + } + } + + inff_dbg(CONN, "pval (%d) gval (%d)\n", pval, gval); + inff_dbg(CONN, "algos (0x%x) mask (0x%x)\n", algos, mask); + /* In case of privacy, but no security and WPS then simulate */ + /* setting AES. WPS-2.0 allows no security */ + if (inff_find_wpsie(sme->ie, sme->ie_len) && !pval && !gval && + sme->privacy) + pval = AES_ENABLED; + + wsec = pval | gval; + err = inff_fil_bsscfg_int_set(ifp, "wsec", wsec); + if (err) { + iphy_err(drvr, "error (%d)\n", err); + return err; + } + + if (inff_feat_is_enabled(ifp, INFF_FEAT_GCMP)) { + inff_dbg(CONN, + "set_wsec_info algos (0x%x) mask (0x%x)\n", + algos, mask); + err = wl_set_wsec_info_algos(ifp, algos, mask); + if (err) { + inff_err("set wsec_info error (%d)\n", err); + return err; + } + } + + sec = &profile->sec; + sec->cipher_pairwise = sme->crypto.ciphers_pairwise[0]; + sec->cipher_group = sme->crypto.cipher_group; + + return err; +} + +static s32 +inff_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_cfg80211_profile *profile = &ifp->vif->profile; + struct inff_pub *drvr = ifp->drvr; + s32 val; + s32 err; + s32 okc_enable; + const struct inff_tlv *rsn_ie; + const u8 *ie; + u32 ie_len; + u32 offset; + u16 rsn_cap; + u32 mfp; + u16 count; + u16 pmkid_count; + const u8 *group_mgmt_cs = NULL; + + profile->use_fwsup = INFF_PROFILE_FWSUP_NONE; + profile->is_ft = false; + profile->is_okc = false; + + if (!sme->crypto.n_akm_suites) + return 0; + + err = inff_fil_bsscfg_int_get(netdev_priv(ndev), "wpa_auth", 
&val); + if (err) { + iphy_err(drvr, "could not get wpa_auth (%d)\n", err); + return err; + } + if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) { + switch (sme->crypto.akm_suites[0]) { + case WLAN_AKM_SUITE_8021X: + val = WPA_AUTH_UNSPECIFIED; + if (sme->want_1x) + profile->use_fwsup = INFF_PROFILE_FWSUP_1X; + else + profile->use_fwsup = INFF_PROFILE_FWSUP_ROAM; + break; + case WLAN_AKM_SUITE_PSK: + val = WPA_AUTH_PSK; + break; + default: + iphy_err(drvr, "invalid akm suite (%d)\n", + sme->crypto.akm_suites[0]); + return -EINVAL; + } + } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED | WPA3_AUTH_OWE)) { + switch (sme->crypto.akm_suites[0]) { + case WLAN_AKM_SUITE_8021X: + val = WPA2_AUTH_UNSPECIFIED; + if (sme->want_1x) + profile->use_fwsup = INFF_PROFILE_FWSUP_1X; + else + profile->use_fwsup = INFF_PROFILE_FWSUP_ROAM; + break; + case WLAN_AKM_SUITE_8021X_SHA256: + val = WPA2_AUTH_1X_SHA256; + if (sme->want_1x) + profile->use_fwsup = INFF_PROFILE_FWSUP_1X; + else + profile->use_fwsup = INFF_PROFILE_FWSUP_ROAM; + break; + case WLAN_AKM_SUITE_PSK_SHA256: + val = WPA2_AUTH_PSK_SHA256; + break; + case WLAN_AKM_SUITE_PSK: + val = WPA2_AUTH_PSK; + break; + case WLAN_AKM_SUITE_FT_8021X: + val = WPA2_AUTH_UNSPECIFIED | WPA2_AUTH_FT; + profile->is_ft = true; + if (sme->want_1x) + profile->use_fwsup = INFF_PROFILE_FWSUP_1X; + else + profile->use_fwsup = INFF_PROFILE_FWSUP_ROAM; + break; + case WLAN_AKM_SUITE_FT_PSK: + val = WPA2_AUTH_PSK | WPA2_AUTH_FT; + profile->is_ft = true; + if (inff_feat_is_enabled(ifp, INFF_FEAT_FWSUP)) + profile->use_fwsup = INFF_PROFILE_FWSUP_PSK; + else + profile->use_fwsup = INFF_PROFILE_FWSUP_ROAM; + break; + case WLAN_AKM_SUITE_WFA_DPP: + val = WFA_AUTH_DPP; + profile->use_fwsup = INFF_PROFILE_FWSUP_NONE; + break; + case WLAN_AKM_SUITE_OWE: + val = WPA3_AUTH_OWE; + profile->use_fwsup = INFF_PROFILE_FWSUP_ROAM; + break; + case WLAN_AKM_SUITE_8021X_SUITE_B_192: + val = WPA3_AUTH_1X_SUITE_B_SHA384; + if (sme->want_1x) + profile->use_fwsup = 
INFF_PROFILE_FWSUP_1X; + else + profile->use_fwsup = INFF_PROFILE_FWSUP_ROAM; + + /*Disable intrnal sup for SuiteB*/ + if (inff_feat_is_enabled(ifp, INFF_FEAT_FWSUP)) + profile->use_fwsup = INFF_PROFILE_FWSUP_NONE; + break; + default: + iphy_err(drvr, "invalid akm suite (%d)\n", + sme->crypto.akm_suites[0]); + return -EINVAL; + } + } else if (val & (WPA3_AUTH_SAE_PSK | WPA3_AUTH_SAE_FBT)) { + switch (sme->crypto.akm_suites[0]) { + case WLAN_AKM_SUITE_SAE: + val = WPA3_AUTH_SAE_PSK; + if (sme->crypto.sae_pwd) { + inff_dbg(INFO, "using SAE offload\n"); + profile->use_fwsup = INFF_PROFILE_FWSUP_SAE; + } + break; + case WLAN_AKM_SUITE_FT_OVER_SAE: + val = WPA3_AUTH_SAE_FBT; + profile->is_ft = true; + if (sme->crypto.sae_pwd) { + inff_dbg(INFO, "using SAE offload\n"); + profile->use_fwsup = INFF_PROFILE_FWSUP_SAE; + } else { + profile->use_fwsup = INFF_PROFILE_FWSUP_ROAM; + } + break; + default: + iphy_err(drvr, "invalid akm suite (%d)\n", + sme->crypto.akm_suites[0]); + return -EINVAL; + } + } + + if (profile->use_fwsup == INFF_PROFILE_FWSUP_1X || + profile->use_fwsup == INFF_PROFILE_FWSUP_ROAM) { + inff_dbg(INFO, "using 1X offload\n"); + err = inff_fil_bsscfg_int_get(netdev_priv(ndev), "okc_enable", + &okc_enable); + if (err) { + iphy_err(drvr, "get okc_enable failed (%d)\n", err); + } else { + inff_dbg(INFO, "get okc_enable (%d)\n", okc_enable); + profile->is_okc = okc_enable; + } + } else if (profile->use_fwsup != INFF_PROFILE_FWSUP_SAE && + (val == WPA3_AUTH_SAE_PSK)) { + inff_dbg(INFO, "not using SAE offload\n"); + err = inff_fil_bsscfg_int_get(netdev_priv(ndev), "okc_enable", + &okc_enable); + if (err) { + iphy_err(drvr, "get okc_enable failed (%d)\n", err); + } else { + inff_dbg(INFO, "get okc_enable (%d)\n", okc_enable); + profile->is_okc = okc_enable; + } + } + + if (!inff_feat_is_enabled(ifp, INFF_FEAT_MFP)) + goto skip_mfp_config; + /* The MFP mode (1 or 2) needs to be determined, parse IEs. 
The + * IE will not be verified, just a quick search for MFP config + */ + rsn_ie = inff_parse_tlvs((const u8 *)sme->ie, sme->ie_len, + WLAN_EID_RSN); + if (!rsn_ie) + goto skip_mfp_config; + ie = (const u8 *)rsn_ie; + ie_len = rsn_ie->len + TLV_HDR_LEN; + /* Skip unicast suite */ + offset = TLV_HDR_LEN + WPA_IE_VERSION_LEN + WPA_IE_MIN_OUI_LEN; + if (offset + WPA_IE_SUITE_COUNT_LEN >= ie_len) + goto skip_mfp_config; + /* Skip multicast suite */ + count = ie[offset] + (ie[offset + 1] << 8); + offset += WPA_IE_SUITE_COUNT_LEN + (count * WPA_IE_MIN_OUI_LEN); + if (offset + WPA_IE_SUITE_COUNT_LEN >= ie_len) + goto skip_mfp_config; + /* Skip auth key management suite(s) */ + count = ie[offset] + (ie[offset + 1] << 8); + offset += WPA_IE_SUITE_COUNT_LEN + (count * WPA_IE_MIN_OUI_LEN); + if (offset + WPA_IE_SUITE_COUNT_LEN > ie_len) + goto skip_mfp_config; + /* Ready to read capabilities */ + mfp = INFF_MFP_NONE; + rsn_cap = ie[offset] + (ie[offset + 1] << 8); + if (rsn_cap & RSN_CAP_MFPR_MASK) + mfp = INFF_MFP_REQUIRED; + else if (rsn_cap & RSN_CAP_MFPC_MASK) + mfp = INFF_MFP_CAPABLE; + + /* In case of dpp, very low tput is observed if MFPC is set in + * firmmare. Firmware needs to ensure that MFPC is not set when + * MFPR was requested from fmac. However since this change being + * specific to DPP, fmac needs to set wpa_auth prior to mfp, so + * that firmware can use this info to prevent MFPC being set in + * case of dpp. 
+ */ + if (val == WFA_AUTH_DPP) { + inff_dbg(CONN, "setting wpa_auth to 0x%0x\n", val); + err = inff_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val); + if (err) { + iphy_err(drvr, "could not set wpa_auth (%d)\n", err); + return err; + } + } + inff_fil_bsscfg_int_set(netdev_priv(ndev), "mfp", mfp); + + offset += RSN_CAP_LEN; + if (mfp && (ie_len - offset >= RSN_PMKID_COUNT_LEN)) { + pmkid_count = ie[offset] + (ie[offset + 1] << 8); + offset += RSN_PMKID_COUNT_LEN + (pmkid_count * WLAN_PMKID_LEN); + if (ie_len - offset >= WPA_IE_MIN_OUI_LEN) { + group_mgmt_cs = &ie[offset]; + if (memcmp(group_mgmt_cs, RSN_OUI, TLV_OUI_LEN) == 0) { + inff_fil_bsscfg_data_set(ifp, "bip", + (void *)group_mgmt_cs, + WPA_IE_MIN_OUI_LEN); + } + } + } + +skip_mfp_config: + if (val != WFA_AUTH_DPP) { + inff_dbg(CONN, "setting wpa_auth to 0x%0x\n", val); + err = inff_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val); + if (err) { + iphy_err(drvr, "could not set wpa_auth (%d)\n", err); + return err; + } + } + + return err; +} + +static s32 +inff_set_sharedkey(struct net_device *ndev, + struct cfg80211_connect_params *sme) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = ifp->drvr; + struct inff_cfg80211_profile *profile = ndev_to_prof(ndev); + struct inff_cfg80211_security *sec; + struct inff_wsec_key key; + s32 val; + s32 err = 0; + + inff_dbg(CONN, "key len (%d)\n", sme->key_len); + + if (sme->key_len == 0) + return 0; + + sec = &profile->sec; + inff_dbg(CONN, "wpa_versions 0x%x cipher_pairwise 0x%x\n", + sec->wpa_versions, sec->cipher_pairwise); + + if (sec->wpa_versions & (NL80211_WPA_VERSION_1 | NL80211_WPA_VERSION_2 | + NL80211_WPA_VERSION_3)) + return 0; + + if (!(sec->cipher_pairwise & + (WLAN_CIPHER_SUITE_WEP40 | WLAN_CIPHER_SUITE_WEP104))) + return 0; + + memset(&key, 0, sizeof(key)); + key.len = (u32)sme->key_len; + key.index = (u32)sme->key_idx; + if (key.len > sizeof(key.data)) { + iphy_err(drvr, "Too long key length (%u)\n", key.len); + return 
-EINVAL; + } + memcpy(key.data, sme->key, key.len); + key.flags = INFF_PRIMARY_KEY; + switch (sec->cipher_pairwise) { + case WLAN_CIPHER_SUITE_WEP40: + key.algo = CRYPTO_ALGO_WEP1; + break; + case WLAN_CIPHER_SUITE_WEP104: + key.algo = CRYPTO_ALGO_WEP128; + break; + default: + iphy_err(drvr, "Invalid algorithm (%d)\n", + sme->crypto.ciphers_pairwise[0]); + return -EINVAL; + } + /* Set the new key/index */ + inff_dbg(CONN, "key length (%d) key index (%d) algo (%d)\n", + key.len, key.index, key.algo); + inff_dbg(CONN, "key \"%s\"\n", key.data); + err = send_key_to_dongle(ifp, &key); + if (err) + return err; + + if (sec->auth_type == NL80211_AUTHTYPE_SHARED_KEY) { + inff_dbg(CONN, "set auth_type to shared key\n"); + val = WL_AUTH_SHARED_KEY; /* shared key */ + err = inff_fil_bsscfg_int_set(ifp, "auth", val); + if (err) + iphy_err(drvr, "set auth failed (%d)\n", err); + } + return err; +} + +static +enum nl80211_auth_type inff_war_auth_type(struct inff_if *ifp, + enum nl80211_auth_type type) +{ + if (type == NL80211_AUTHTYPE_AUTOMATIC && + inff_feat_is_quirk_enabled(ifp, INFF_FEAT_QUIRK_AUTO_AUTH)) { + inff_dbg(CONN, "WAR: use OPEN instead of AUTO\n"); + type = NL80211_AUTHTYPE_OPEN_SYSTEM; + } + return type; +} + +static void inff_set_join_pref(struct inff_if *ifp, + struct cfg80211_bss_selection *bss_select) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_join_pref_params join_pref_params[2]; + enum nl80211_band band; + int err, i = 0; + + join_pref_params[i].len = 2; + join_pref_params[i].rssi_gain = 0; + + if (bss_select->behaviour != NL80211_BSS_SELECT_ATTR_BAND_PREF) + inff_fil_cmd_int_set(ifp, INFF_C_SET_ASSOC_PREFER, WLC_BAND_AUTO); + + switch (bss_select->behaviour) { + case __NL80211_BSS_SELECT_ATTR_INVALID: + inff_c_set_joinpref_default(ifp); + return; + case NL80211_BSS_SELECT_ATTR_BAND_PREF: + join_pref_params[i].type = INFF_JOIN_PREF_BAND; + band = bss_select->param.band_pref; + join_pref_params[i].band = nl80211_band_to_fwil(band); + i++; + break; 
+ case NL80211_BSS_SELECT_ATTR_RSSI_ADJUST: + join_pref_params[i].type = INFF_JOIN_PREF_RSSI_DELTA; + band = bss_select->param.adjust.band; + join_pref_params[i].band = nl80211_band_to_fwil(band); + join_pref_params[i].rssi_gain = bss_select->param.adjust.delta; + i++; + break; + case NL80211_BSS_SELECT_ATTR_RSSI: + default: + break; + } + join_pref_params[i].type = INFF_JOIN_PREF_RSSI; + join_pref_params[i].len = 2; + join_pref_params[i].rssi_gain = 0; + join_pref_params[i].band = 0; + err = inff_fil_iovar_data_set(ifp, "join_pref", join_pref_params, + sizeof(join_pref_params)); + if (err) + iphy_err(drvr, "Set join_pref error (%d)\n", err); +} + +static bool +wl_cfg80211_is_oce_ap(struct inff_if *ifp, + struct wiphy *wiphy, const u8 *bssid_hint) +{ + struct inff_pub *drvr = ifp->drvr; + const struct inff_tlv *ie; + const struct cfg80211_bss_ies *ies; + struct cfg80211_bss *bss; + const u8 *parse = NULL; + u32 len; + + bss = cfg80211_get_bss(wiphy, NULL, bssid_hint, 0, 0, + IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); + if (!bss) { + iphy_err(drvr, "Unable to find AP in the cache"); + return false; + } + + if (rcu_access_pointer(bss->ies)) { + ies = rcu_access_pointer(bss->ies); + parse = ies->data; + len = ies->len; + } else { + iphy_err(drvr, "ies is NULL"); + return false; + } + + while ((ie = inff_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) { + if (wl_cfgoce_is_oce_ie((const u8 *)ie, + (u8 const **)&parse, &len) == true) { + return true; + } + } + inff_dbg(TRACE, "OCE IE NOT found"); + return false; +} + +static s32 +inff_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_connect_params *sme) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_cfg80211_profile *profile = &ifp->vif->profile; + struct ieee80211_channel *chan = sme->channel; + struct inff_pub *drvr = ifp->drvr; + struct inff_join_params join_params; + size_t join_params_size; + const struct 
inff_tlv *rsn_ie; + const struct inff_vs_tlv *wpa_ie; + const void *ie; + u32 ie_len; + struct inff_ext_join_params_le *ext_join_params; + u16 chanspec; + s32 err = 0; + u32 ssid_len; + bool skip_hints = ifp->drvr->settings->fw_ap_select; + + inff_dbg(TRACE, "Enter\n"); + + if (cfg->pfn_enable && cfg->pfn_connection) { + err = inff_fil_cmd_data_set(ifp, + INFF_C_DISASSOC, NULL, 0); + if (err) { + inff_err("INFF_C_DISASSOC error:%d\n", err); + return -1; + } + cfg->pfn_connection = 0; + + /* Disable pfn */ + err = inff_fil_iovar_int_set(ifp, "pfn", 0); + if (err < 0) { + inff_err("pfn disable error:%d\n", err); + } else { + /* clear pfn */ + err = inff_fil_iovar_data_set(ifp, "pfnclear", NULL, 0); + if (err) + inff_err("pfnclear error:%d\n", err); + } + } + + if (!check_vif_up(ifp->vif)) + return -EIO; + + if (!sme->ssid) { + iphy_err(drvr, "Invalid ssid\n"); + return -EOPNOTSUPP; + } + + /* override bssid_hint for oce networks */ + skip_hints = (skip_hints && + wl_cfg80211_is_oce_ap(ifp, wiphy, sme->bssid_hint)); + if (skip_hints) { + /* Let fw choose the best AP */ + inff_dbg(TRACE, "Skipping bssid & channel hint\n"); + } else { + if (sme->channel_hint) + chan = sme->channel_hint; + + if (sme->bssid_hint) + sme->bssid = sme->bssid_hint; + } + + /* FT Cert: Handling the roam request from supplicant for FT roaming */ + if (sme->prev_bssid && sme->bssid && + inff_feat_is_enabled(ifp, INFF_FEAT_FBT) && + wpa_akm_ft(sme->crypto.akm_suites[0])) { + /* Only reassoc IOVAR required for Roam skip additional IOVAR */ + struct inff_assoc_params_le ext_roam_params; + + inff_dbg(CONN, "Trying to REASSOC For FT\n"); + memset(&ext_roam_params, 0, sizeof(ext_roam_params)); + memcpy(&ext_roam_params.bssid, sme->bssid, ETH_ALEN); + set_bit(INFF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state); + + err = inff_fil_cmd_data_set(ifp, INFF_C_REASSOC, + &ext_roam_params, + sizeof(ext_roam_params)); + goto done; + } + + if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) { + /* A 
normal (non P2P) connection request setup. */ + ie = NULL; + ie_len = 0; + /* find the WPA_IE */ + wpa_ie = inff_find_wpaie((u8 *)sme->ie, sme->ie_len); + if (wpa_ie) { + ie = wpa_ie; + ie_len = wpa_ie->len + TLV_HDR_LEN; + } else { + /* find the RSN_IE */ + rsn_ie = inff_parse_tlvs((const u8 *)sme->ie, + sme->ie_len, + WLAN_EID_RSN); + if (rsn_ie) { + ie = rsn_ie; + ie_len = rsn_ie->len + TLV_HDR_LEN; + } + } + inff_fil_iovar_data_set(ifp, "wpaie", ie, ie_len); + } + + err = inff_vif_set_mgmt_ie(ifp->vif, INFF_VNDR_IE_ASSOCREQ_FLAG, + sme->ie, sme->ie_len); + if (err) + iphy_err(drvr, "Set Assoc REQ IE Failed\n"); + else + inff_dbg(TRACE, "Applied Vndr IEs for Assoc request\n"); + + set_bit(INFF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state); + + if (chan) { + cfg->channel = + ieee80211_frequency_to_channel(chan->center_freq); + chanspec = channel_to_chanspec(&cfg->d11inf, chan); + inff_dbg(CONN, "channel=%d, center_req=%d, chanspec=0x%04x\n", + cfg->channel, chan->center_freq, chanspec); + } else { + cfg->channel = 0; + chanspec = 0; + } + + inff_dbg(INFO, "ie (%p), ie_len (%zd)\n", sme->ie, sme->ie_len); + + err = inff_set_wpa_version(ndev, sme); + if (err) { + iphy_err(drvr, "wl_set_wpa_version failed (%d)\n", err); + goto done; + } + + sme->auth_type = inff_war_auth_type(ifp, sme->auth_type); + err = inff_set_auth_type(ndev, sme); + if (err) { + iphy_err(drvr, "wl_set_auth_type failed (%d)\n", err); + goto done; + } + + err = inff_set_wsec_mode(ndev, sme); + if (err) { + iphy_err(drvr, "wl_set_set_cipher failed (%d)\n", err); + goto done; + } + + err = inff_set_key_mgmt(ndev, sme); + if (err) { + iphy_err(drvr, "wl_set_key_mgmt failed (%d)\n", err); + goto done; + } + + err = inff_set_sharedkey(ndev, sme); + if (err) { + iphy_err(drvr, "inff_set_sharedkey failed (%d)\n", err); + goto done; + } + + if (inff_feat_is_enabled(ifp, INFF_FEAT_FWSUP)) { + if (sme->crypto.psk) { + if (profile->use_fwsup != INFF_PROFILE_FWSUP_SAE) { + if (WARN_ON(profile->use_fwsup != + 
INFF_PROFILE_FWSUP_NONE)) { + err = -EINVAL; + goto done; + } + inff_dbg(INFO, "using PSK offload\n"); + profile->use_fwsup = INFF_PROFILE_FWSUP_PSK; + } + } + + if (profile->use_fwsup != INFF_PROFILE_FWSUP_NONE) { + /* enable firmware supplicant for this interface */ + err = inff_fil_iovar_int_set(ifp, "sup_wpa", 1); + if (err < 0) { + iphy_err(drvr, "failed to enable fw supplicant\n"); + goto done; + } + } else { + err = inff_fil_iovar_int_set(ifp, "sup_wpa", 0); + } + + if (profile->use_fwsup == INFF_PROFILE_FWSUP_PSK) + err = inff_set_pmk(ifp, sme->crypto.psk, + INFF_WSEC_MAX_PSK_LEN); + + /* if upper layer has passed sae_password, + * set it to firmware for the potential transit up roaming use. + */ + if (sme->crypto.sae_pwd && inff_feat_is_enabled(ifp, INFF_FEAT_SAE)) { + /* clean up user-space RSNE */ + if (inff_fil_iovar_data_set(ifp, "wpaie", NULL, 0)) { + iphy_err(drvr, "failed to clean up user-space RSNE\n"); + goto done; + } + err = inff_set_sae_password(ifp, sme->crypto.sae_pwd, + sme->crypto.sae_pwd_len); + if (!err && sme->crypto.psk) + err = inff_set_pmk(ifp, sme->crypto.psk, + INFF_WSEC_MAX_PSK_LEN); + } + if (err) + goto done; + + if (inff_feat_is_enabled(ifp, INFF_FEAT_OWE) && + sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_OWE) { + /* clean up user-space RSNE */ + if (inff_fil_iovar_data_set(ifp, "wpaie", NULL, 0)) { + iphy_err(drvr, "failed to clean up user-space RSNE\n"); + goto done; + } + } + } + /* Join with specific BSSID and cached SSID + * If SSID is zero join based on BSSID only + */ + join_params_size = offsetof(struct inff_ext_join_params_le, assoc_le) + + offsetof(struct inff_assoc_params_le, chanspec_list); + if (cfg->channel) + join_params_size += sizeof(u16); + ext_join_params = kzalloc(sizeof(*ext_join_params), GFP_KERNEL); + if (!ext_join_params) { + err = -ENOMEM; + goto done; + } + ssid_len = min_t(u32, sme->ssid_len, IEEE80211_MAX_SSID_LEN); + ext_join_params->ssid_le.SSID_len = cpu_to_le32(ssid_len); + 
memcpy(&ext_join_params->ssid_le.SSID, sme->ssid, ssid_len); + if (ssid_len < IEEE80211_MAX_SSID_LEN) + inff_dbg(CONN, "SSID \"%s\", len (%d)\n", + ext_join_params->ssid_le.SSID, ssid_len); + + /* Set up join scan parameters */ + ext_join_params->scan_le.scan_type = -1; + ext_join_params->scan_le.home_time = cpu_to_le32(-1); + + if (sme->bssid) + memcpy(&ext_join_params->assoc_le.bssid, sme->bssid, ETH_ALEN); + else + eth_broadcast_addr(ext_join_params->assoc_le.bssid); + + if (cfg->channel) { + ext_join_params->assoc_le.chanspec_num = cpu_to_le32(1); + ext_join_params->assoc_le.chanspec_list = + cpu_to_le16(chanspec); + + /* Increase dwell time to receive probe response or detect + * beacon from target AP at a noisy air only during connect + * command. + */ + if (INFF_CHSPEC_IS6G(chanspec)) { + ext_join_params->scan_le.active_time = + cpu_to_le32(INFF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS_6E); + ext_join_params->scan_le.passive_time = + cpu_to_le32(INFF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS_6E); + } else { + ext_join_params->scan_le.active_time = + cpu_to_le32(INFF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS); + ext_join_params->scan_le.passive_time = + cpu_to_le32(INFF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS); + } + + /* To sync with presence period of VSDB GO send probe request + * more frequently. Probe request will be stopped when it gets + * probe response from target AP/GO. + */ + ext_join_params->scan_le.nprobes = + cpu_to_le32(INFF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS / + INFF_SCAN_JOIN_PROBE_INTERVAL_MS); + } else { + ext_join_params->scan_le.active_time = cpu_to_le32(-1); + ext_join_params->scan_le.passive_time = cpu_to_le32(-1); + ext_join_params->scan_le.nprobes = cpu_to_le32(-1); + } + + inff_set_join_pref(ifp, &sme->bss_select); + + /* The internal supplicant judges to use assoc or reassoc itself. 
+ * it is not necessary to specify REASSOC + */ + if (sme->prev_bssid && !inff_feat_is_enabled(ifp, INFF_FEAT_FWSUP)) { + inff_dbg(CONN, "Trying to REASSOC\n"); + join_params_size = sizeof(ext_join_params->assoc_le); + err = inff_fil_cmd_data_set(ifp, INFF_C_REASSOC, + &ext_join_params->assoc_le, join_params_size); + } else { + err = inff_fil_bsscfg_data_set(ifp, "join", ext_join_params, + join_params_size); + } + kfree(ext_join_params); + if (!err) + /* This is it. join command worked, we are done */ + goto done; + + /* join command failed, fallback to set ssid */ + memset(&join_params, 0, sizeof(join_params)); + join_params_size = sizeof(join_params.ssid_le); + + memcpy(&join_params.ssid_le.SSID, sme->ssid, ssid_len); + join_params.ssid_le.SSID_len = cpu_to_le32(ssid_len); + + if (sme->bssid) + memcpy(join_params.params_le.bssid, sme->bssid, ETH_ALEN); + else + eth_broadcast_addr(join_params.params_le.bssid); + + if (cfg->channel) { + join_params.params_le.chanspec_list = cpu_to_le16(chanspec); + join_params.params_le.chanspec_num = cpu_to_le32(1); + join_params_size += sizeof(join_params.params_le); + } + err = inff_fil_cmd_data_set(ifp, INFF_C_SET_SSID, + &join_params, join_params_size); + if (err) + iphy_err(drvr, "INFF_C_SET_SSID failed (%d)\n", err); + +done: + if (err) + clear_bit(INFF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state); + inff_dbg(TRACE, "Exit\n"); + return err; +} + +static s32 +inff_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev, + u16 reason_code) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_cfg80211_profile *profile = &ifp->vif->profile; + struct inff_pub *drvr = cfg->pub; + struct inff_scb_val_le scbval; + s32 err = 0; + + inff_dbg(TRACE, "Enter. 
Reason code = %d\n", reason_code); + if (!check_vif_up(ifp->vif)) + return -EIO; + + clear_bit(INFF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state); + clear_bit(INFF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state); + clear_bit(INFF_VIF_STATUS_EAP_SUCCESS, &ifp->vif->sme_state); + clear_bit(INFF_VIF_STATUS_ASSOC_SUCCESS, &ifp->vif->sme_state); + cfg80211_disconnected(ndev, reason_code, NULL, 0, true, GFP_KERNEL); + + if (cfg->pfn_enable) { + cfg->pfn_connection = 0; + pfn_send_network_blob_fw(wiphy, &ifp->vif->wdev); + } + + memcpy(&scbval.ea, &profile->bssid, ETH_ALEN); + scbval.val = cpu_to_le32(reason_code); + err = inff_fil_cmd_data_set(ifp, INFF_C_DISASSOC, + &scbval, sizeof(scbval)); + if (err) { + iphy_err(drvr, "error (%d)\n", err); + } else { + if (inff_feat_is_enabled(ifp, INFF_FEAT_TWT)) { + /* Cleanup TWT Session list */ + inff_twt_cleanup_all_sess(ifp); + } + } + + inff_dbg(TRACE, "Exit\n"); + return err; +} + +static s32 +inff_cfg80211_set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, + int radio_idx, enum nl80211_tx_power_setting type, + s32 mbm) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct net_device *ndev = cfg_to_ndev(cfg); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = cfg->pub; + s32 err; + s32 disable; + u32 qdbm = 127; + + inff_dbg(TRACE, "Enter %d %d\n", type, mbm); + if (!check_vif_up(ifp->vif)) + return -EIO; + + switch (type) { + case NL80211_TX_POWER_AUTOMATIC: + break; + case NL80211_TX_POWER_LIMITED: + case NL80211_TX_POWER_FIXED: + if (mbm < 0) { + iphy_err(drvr, "TX_POWER_FIXED - dbm is negative\n"); + err = -EINVAL; + goto done; + } + qdbm = MBM_TO_DBM(4 * mbm); + if (qdbm > 127) + qdbm = 127; + qdbm |= WL_TXPWR_OVERRIDE; + break; + default: + iphy_err(drvr, "Unsupported type %d\n", type); + err = -EINVAL; + goto done; + } + /* Make sure radio is off or on as far as software is concerned */ + disable = WL_RADIO_SW_DISABLE << 16; + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_RADIO, 
disable); + if (err) + iphy_err(drvr, "WLC_SET_RADIO error (%d)\n", err); + + err = inff_fil_iovar_int_set(ifp, "qtxpower", qdbm); + if (err) + iphy_err(drvr, "qtxpower error (%d)\n", err); + +done: + inff_dbg(TRACE, "Exit %ld (qdbm)\n", qdbm & ~WL_TXPWR_OVERRIDE); + return err; +} + +static s32 +inff_cfg80211_get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, + int radio_idx, unsigned int link_id, s32 *dbm) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_cfg80211_vif *vif = wdev_to_vif(wdev); + struct inff_pub *drvr = cfg->pub; + s32 qdbm = 0; + s32 err; + + inff_dbg(TRACE, "Enter\n"); + if (!check_vif_up(vif)) + return -EIO; + + err = inff_fil_iovar_int_get(vif->ifp, "qtxpower", &qdbm); + if (err) { + iphy_err(drvr, "error (%d)\n", err); + goto done; + } + *dbm = (qdbm & ~WL_TXPWR_OVERRIDE) / 4; + +done: + inff_dbg(TRACE, "Exit (0x%x %d)\n", qdbm, *dbm); + return err; +} + +static s32 +inff_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *ndev, + int link_id, u8 key_idx, bool unicast, + bool multicast) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = ifp->drvr; + u32 index; + u32 wsec; + s32 err = 0; + + inff_dbg(TRACE, "Enter\n"); + inff_dbg(CONN, "key index (%d)\n", key_idx); + if (!check_vif_up(ifp->vif)) + return -EIO; + + err = inff_fil_bsscfg_int_get(ifp, "wsec", &wsec); + if (err) { + iphy_err(drvr, "WLC_GET_WSEC error (%d)\n", err); + goto done; + } + + if (wsec & WEP_ENABLED) { + /* Just select a new current key */ + index = key_idx; + err = inff_fil_cmd_int_set(ifp, + INFF_C_SET_KEY_PRIMARY, index); + if (err) + iphy_err(drvr, "error (%d)\n", err); + } +done: + inff_dbg(TRACE, "Exit\n"); + return err; +} + +static s32 +inff_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, + int link_id, u8 key_idx, bool pairwise, + const u8 *mac_addr) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_wsec_key *key; + s32 err; + + inff_dbg(TRACE, "Enter\n"); + 
inff_dbg(CONN, "key index (%d)\n", key_idx); + + if (!check_vif_up(ifp->vif)) + return -EIO; + + if (key_idx >= INFF_MAX_DEFAULT_KEYS) { + /* we ignore this key index in this case */ + return -EINVAL; + } + + key = &ifp->vif->profile.key[key_idx]; + + if (key->algo == CRYPTO_ALGO_OFF) { + inff_dbg(CONN, "Ignore clearing of (never configured) key\n"); + return -EINVAL; + } + + memset(key, 0, sizeof(*key)); + key->index = (u32)key_idx; + key->flags = INFF_PRIMARY_KEY; + + /* Clear the key/index */ + err = send_key_to_dongle(ifp, key); + + inff_dbg(TRACE, "Exit\n"); + return err; +} + +static s32 +inff_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, + int link_id, u8 key_idx, bool pairwise, + const u8 *mac_addr, struct key_params *params) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = cfg->pub; + struct inff_wsec_key *key; + s32 val; + s32 wsec; + s32 err; + u8 keybuf[8]; + bool ext_key; + u32 algos = 0, mask = 0; + + inff_dbg(TRACE, "Enter\n"); + inff_dbg(CONN, "key index (%d)\n", key_idx); + if (!check_vif_up(ifp->vif)) + return -EIO; + + if (key_idx >= INFF_MAX_DEFAULT_KEYS) { + /* we ignore this key index in this case */ + iphy_err(drvr, "invalid key index (%d)\n", key_idx); + return -EINVAL; + } + + if (params->key_len == 0) + return inff_cfg80211_del_key(wiphy, ndev, -1, key_idx, + pairwise, mac_addr); + + if (params->key_len > sizeof(key->data)) { + iphy_err(drvr, "Too long key length (%u)\n", params->key_len); + return -EINVAL; + } + + ext_key = false; + if (mac_addr && params->cipher != WLAN_CIPHER_SUITE_WEP40 && + params->cipher != WLAN_CIPHER_SUITE_WEP104) { + inff_dbg(TRACE, "Ext key, mac %pM", mac_addr); + ext_key = true; + } + + key = &ifp->vif->profile.key[key_idx]; + memset(key, 0, sizeof(*key)); + if ((ext_key) && (!is_multicast_ether_addr(mac_addr))) + memcpy((char *)&key->ea, (void *)mac_addr, ETH_ALEN); + key->len = params->key_len; + key->index = 
key_idx; + memcpy(key->data, params->key, key->len); + if (!ext_key) + key->flags = INFF_PRIMARY_KEY; + + if (params->seq && params->seq_len == 6) { + /* rx iv */ + u8 *ivptr; + + ivptr = (u8 *)params->seq; + key->rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) | + (ivptr[3] << 8) | ivptr[2]; + key->rxiv.lo = (ivptr[1] << 8) | ivptr[0]; + key->iv_initialized = true; + } + + switch (params->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + key->algo = CRYPTO_ALGO_WEP1; + val = WEP_ENABLED; + inff_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n"); + break; + case WLAN_CIPHER_SUITE_WEP104: + key->algo = CRYPTO_ALGO_WEP128; + val = WEP_ENABLED; + inff_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n"); + break; + case WLAN_CIPHER_SUITE_TKIP: + if (!inff_is_apmode(ifp->vif)) { + inff_dbg(CONN, "Swapping RX/TX MIC key\n"); + memcpy(keybuf, &key->data[24], sizeof(keybuf)); + memcpy(&key->data[24], &key->data[16], sizeof(keybuf)); + memcpy(&key->data[16], keybuf, sizeof(keybuf)); + } + key->algo = CRYPTO_ALGO_TKIP; + val = TKIP_ENABLED; + inff_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n"); + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + key->algo = CRYPTO_ALGO_AES_CCM; + val = AES_ENABLED; + inff_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); + break; + case WLAN_CIPHER_SUITE_CCMP: + key->algo = CRYPTO_ALGO_AES_CCM; + val = AES_ENABLED; + inff_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n"); + break; + case WLAN_CIPHER_SUITE_GCMP_256: + if (!inff_feat_is_enabled(ifp, INFF_FEAT_GCMP)) { + inff_err("the low layer not support GCMP\n"); + err = -EOPNOTSUPP; + goto done; + } + key->algo = CRYPTO_ALGO_AES_GCM256; + val = AES_ENABLED; + inff_dbg(CONN, "WLAN_CIPHER_SUITE_GCMP_256\n"); + algos = KEY_ALGO_MASK(CRYPTO_ALGO_AES_GCM256); + mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM); + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + if (!inff_feat_is_enabled(ifp, INFF_FEAT_GCMP)) { + inff_err("the low layer not support GCMP\n"); + err = -EOPNOTSUPP; + goto done; + } + key->algo = CRYPTO_ALGO_BIP_GMAC256; + val = AES_ENABLED; + algos 
= KEY_ALGO_MASK(CRYPTO_ALGO_BIP_GMAC256); + mask = algos | KEY_ALGO_MASK(CRYPTO_ALGO_AES_CCM); + inff_dbg(CONN, "WLAN_CIPHER_SUITE_BIP_GMAC_256\n"); + break; + default: + iphy_err(drvr, "Invalid cipher (0x%x)\n", params->cipher); + err = -EINVAL; + goto done; + } + + err = send_key_to_dongle(ifp, key); + if (ext_key || err) + goto done; + + err = inff_fil_bsscfg_int_get(ifp, "wsec", &wsec); + if (err) { + iphy_err(drvr, "get wsec error (%d)\n", err); + goto done; + } + wsec |= val; + err = inff_fil_bsscfg_int_set(ifp, "wsec", wsec); + if (err) { + iphy_err(drvr, "set wsec error (%d)\n", err); + goto done; + } + + if (inff_feat_is_enabled(ifp, INFF_FEAT_GCMP)) { + inff_dbg(CONN, + "set_wsdec_info algos (0x%x) mask (0x%x)\n", + algos, mask); + err = wl_set_wsec_info_algos(ifp, algos, mask); + if (err) { + inff_err("set wsec_info error (%d)\n", err); + return err; + } + } + +done: + inff_dbg(TRACE, "Exit\n"); + return err; +} + +static s32 +inff_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, + int link_id, u8 key_idx, bool pairwise, + const u8 *mac_addr, void *cookie, + void (*callback)(void *cookie, + struct key_params *params)) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct key_params params; + struct inff_if *ifp = netdev_priv(ndev); + struct inff_cfg80211_profile *profile = &ifp->vif->profile; + struct inff_pub *drvr = cfg->pub; + struct inff_cfg80211_security *sec; + s32 wsec; + s32 err = 0; + + inff_dbg(TRACE, "Enter\n"); + inff_dbg(CONN, "key index (%d)\n", key_idx); + if (!check_vif_up(ifp->vif)) + return -EIO; + + memset(¶ms, 0, sizeof(params)); + + err = inff_fil_bsscfg_int_get(ifp, "wsec", &wsec); + if (err) { + iphy_err(drvr, "WLC_GET_WSEC error (%d)\n", err); + /* Ignore this error, may happen during DISASSOC */ + err = -EAGAIN; + goto done; + } + if (wsec & WEP_ENABLED) { + sec = &profile->sec; + if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { + params.cipher = WLAN_CIPHER_SUITE_WEP40; + inff_dbg(CONN, 
"WLAN_CIPHER_SUITE_WEP40\n"); + } else if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP104) { + params.cipher = WLAN_CIPHER_SUITE_WEP104; + inff_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n"); + } + } else if (wsec & TKIP_ENABLED) { + params.cipher = WLAN_CIPHER_SUITE_TKIP; + inff_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n"); + } else if (wsec & AES_ENABLED) { + params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; + inff_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); + } else { + iphy_err(drvr, "Invalid algo (0x%x)\n", wsec); + err = -EINVAL; + goto done; + } + callback(cookie, ¶ms); + +done: + inff_dbg(TRACE, "Exit\n"); + return err; +} + +static s32 +inff_cfg80211_config_default_mgmt_key(struct wiphy *wiphy, + struct net_device *ndev, int link_id, + u8 key_idx) +{ + struct inff_if *ifp = netdev_priv(ndev); + + inff_dbg(TRACE, "Enter key_idx %d\n", key_idx); + + if (inff_feat_is_enabled(ifp, INFF_FEAT_MFP)) + return 0; + + inff_dbg(INFO, "Not supported\n"); + + return -EOPNOTSUPP; +} + +static void inff_convert_sta_flags(u32 fw_sta_flags, struct station_info *si) +{ + struct nl80211_sta_flag_update *sfu; + + inff_dbg(TRACE, "flags %08x\n", fw_sta_flags); + si->filled |= BIT_ULL(NL80211_STA_INFO_STA_FLAGS); + sfu = &si->sta_flags; + sfu->mask = BIT(NL80211_STA_FLAG_WME) | + BIT(NL80211_STA_FLAG_AUTHENTICATED) | + BIT(NL80211_STA_FLAG_ASSOCIATED) | + BIT(NL80211_STA_FLAG_AUTHORIZED); + if (fw_sta_flags & INFF_STA_WME) + sfu->set |= BIT(NL80211_STA_FLAG_WME); + if (fw_sta_flags & INFF_STA_AUTHE) + sfu->set |= BIT(NL80211_STA_FLAG_AUTHENTICATED); + if (fw_sta_flags & INFF_STA_ASSOC) + sfu->set |= BIT(NL80211_STA_FLAG_ASSOCIATED); + if (fw_sta_flags & INFF_STA_AUTHO) + sfu->set |= BIT(NL80211_STA_FLAG_AUTHORIZED); +} + +static void inff_fill_bss_param(struct inff_if *ifp, struct station_info *si) +{ + struct inff_pub *drvr = ifp->drvr; + struct { + __le32 len; + struct inff_bss_info_le bss_le; + } *buf; + u16 capability; + int err; + + buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL); + if (!buf) + 
return; + + buf->len = cpu_to_le32(WL_BSS_INFO_MAX); + err = inff_fil_cmd_data_get(ifp, INFF_C_GET_BSS_INFO, buf, + WL_BSS_INFO_MAX); + if (err) { + iphy_err(drvr, "Failed to get bss info (%d)\n", err); + goto out_kfree; + } + si->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM); + si->bss_param.beacon_interval = le16_to_cpu(buf->bss_le.beacon_period); + si->bss_param.dtim_period = buf->bss_le.dtim_period; + capability = le16_to_cpu(buf->bss_le.capability); + if (capability & IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT) + si->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT; + if (capability & WLAN_CAPABILITY_SHORT_PREAMBLE) + si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; + if (capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) + si->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; + +out_kfree: + kfree(buf); +} + +static s32 +inff_cfg80211_get_station_ibss(struct inff_if *ifp, + struct station_info *sinfo, + const u8 *mac) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_scb_val_le scbval; + struct inff_pktcnt_le pktcnt; + s32 err; + u32 rate; + u32 rssi; + + /* Get the current tx rate */ + err = inff_fil_cmd_int_get(ifp, INFF_C_GET_RATE, &rate); + if (err < 0) { + iphy_err(drvr, "INFF_C_GET_RATE error (%d)\n", err); + return err; + } + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + sinfo->txrate.legacy = rate * 5; + + memset(&scbval, 0, sizeof(scbval)); + memcpy(&scbval.ea[0], mac, ETH_ALEN); + err = inff_fil_cmd_data_get(ifp, INFF_C_GET_RSSI, &scbval, + sizeof(scbval)); + if (err) { + iphy_err(drvr, "INFF_C_GET_RSSI error (%d)\n", err); + return err; + } + rssi = le32_to_cpu(scbval.val); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); + sinfo->signal = rssi; + + err = inff_fil_cmd_data_get(ifp, INFF_C_GET_GET_PKTCNTS, &pktcnt, + sizeof(pktcnt)); + if (err) { + iphy_err(drvr, "INFF_C_GET_GET_PKTCNTS error (%d)\n", err); + return err; + } + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) | + 
BIT_ULL(NL80211_STA_INFO_TX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_TX_FAILED); + sinfo->rx_packets = le32_to_cpu(pktcnt.rx_good_pkt); + sinfo->rx_dropped_misc = le32_to_cpu(pktcnt.rx_bad_pkt); + sinfo->tx_packets = le32_to_cpu(pktcnt.tx_good_pkt); + sinfo->tx_failed = le32_to_cpu(pktcnt.tx_bad_pkt); + + return 0; +} + +static s32 +inff_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, + const u8 *mac, struct station_info *sinfo) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = cfg->pub; + struct inff_scb_val_le scb_val; + s32 err = 0; + struct inff_sta_info_le sta_info_le; + u32 sta_flags; + u32 is_tdls_peer; + s32 total_rssi_avg = 0; + s32 total_rssi = 0; + s32 count_rssi = 0; + int rssi; + u32 i; + + inff_dbg(TRACE, "Enter, MAC %pM\n", mac); + if (!check_vif_up(ifp->vif)) + return -EIO; + + if (inff_is_ibssmode(ifp->vif)) + return inff_cfg80211_get_station_ibss(ifp, sinfo, mac); + + memset(&sta_info_le, 0, sizeof(sta_info_le)); + memcpy(&sta_info_le, mac, ETH_ALEN); + err = inff_fil_iovar_data_get(ifp, "tdls_sta_info", + &sta_info_le, + sizeof(sta_info_le)); + is_tdls_peer = !err; + if (err) { + err = inff_fil_iovar_data_get(ifp, "sta_info", + &sta_info_le, + sizeof(sta_info_le)); + if (err < 0) { + iphy_err(drvr, "GET STA INFO failed, %d\n", err); + goto done; + } + } + inff_dbg(TRACE, "version %d\n", le16_to_cpu(sta_info_le.ver)); + sinfo->filled = BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME); + sinfo->inactive_time = le32_to_cpu(sta_info_le.idle) * 1000; + sta_flags = le32_to_cpu(sta_info_le.flags); + inff_convert_sta_flags(sta_flags, sinfo); + sinfo->sta_flags.mask |= BIT(NL80211_STA_FLAG_TDLS_PEER); + if (is_tdls_peer) + sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_TDLS_PEER); + else + sinfo->sta_flags.set &= ~BIT(NL80211_STA_FLAG_TDLS_PEER); + if (sta_flags & INFF_STA_ASSOC) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CONNECTED_TIME); + sinfo->connected_time = 
le32_to_cpu(sta_info_le.in); + inff_fill_bss_param(ifp, sinfo); + } + if (sta_flags & INFF_STA_SCBSTATS) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); + sinfo->tx_failed = le32_to_cpu(sta_info_le.tx_failures); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); + sinfo->tx_packets = le32_to_cpu(sta_info_le.tx_pkts); + sinfo->tx_packets += le32_to_cpu(sta_info_le.tx_mcast_pkts); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); + sinfo->rx_packets = le32_to_cpu(sta_info_le.rx_ucast_pkts); + sinfo->rx_packets += le32_to_cpu(sta_info_le.rx_mcast_pkts); + if (sinfo->tx_packets) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + sinfo->txrate.legacy = + le32_to_cpu(sta_info_le.tx_rate) / 100; + } + if (sinfo->rx_packets) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE); + sinfo->rxrate.legacy = + le32_to_cpu(sta_info_le.rx_rate) / 100; + } + if (le16_to_cpu(sta_info_le.ver) >= 4) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES); + sinfo->tx_bytes = le64_to_cpu(sta_info_le.tx_tot_bytes); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES); + sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes); + } + for (i = 0; i < INFF_ANT_MAX; i++) { + if (sta_info_le.rssi[i] == 0 || + sta_info_le.rx_lastpkt_rssi[i] == 0) + continue; + sinfo->chains |= BIT(count_rssi); + sinfo->chain_signal[count_rssi] = + sta_info_le.rx_lastpkt_rssi[i]; + sinfo->chain_signal_avg[count_rssi] = + sta_info_le.rssi[i]; + total_rssi += sta_info_le.rx_lastpkt_rssi[i]; + total_rssi_avg += sta_info_le.rssi[i]; + count_rssi++; + } + if (count_rssi) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL); + sinfo->filled |= + BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG); + sinfo->signal = total_rssi / count_rssi; + sinfo->signal_avg = total_rssi_avg / count_rssi; + } else if (test_bit(INFF_VIF_STATUS_CONNECTED, + &ifp->vif->sme_state)) { + 
memset(&scb_val, 0, sizeof(scb_val)); + memcpy(&scb_val.ea[0], mac, ETH_ALEN); + err = inff_fil_cmd_data_get(ifp, INFF_C_GET_RSSI, + &scb_val, sizeof(scb_val)); + if (err) { + iphy_err(drvr, "Could not get rssi (%d)\n", + err); + goto done; + } else { + rssi = le32_to_cpu(scb_val.val); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); + sinfo->signal = rssi; + inff_dbg(CONN, "RSSI %d dBm\n", rssi); + } + } + } +done: + inff_dbg(TRACE, "Exit\n"); + return err; +} + +static int +inff_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *ndev, + int idx, u8 *mac, struct station_info *sinfo) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = cfg->pub; + s32 err; + + inff_dbg(TRACE, "Enter, idx %d\n", idx); + + if (idx == 0) { + cfg->assoclist.count = cpu_to_le32(INFF_MAX_ASSOCLIST); + err = inff_fil_cmd_data_get(ifp, INFF_C_GET_ASSOCLIST, + &cfg->assoclist, + sizeof(cfg->assoclist)); + if (err) { + /* GET_ASSOCLIST unsupported by firmware of older chips */ + if (err == -EBADE) + iphy_info_once(drvr, "INFF_C_GET_ASSOCLIST unsupported\n"); + else + iphy_err(drvr, "INFF_C_GET_ASSOCLIST failed, err=%d\n", + err); + + cfg->assoclist.count = 0; + return -EOPNOTSUPP; + } + } + if (idx < le32_to_cpu(cfg->assoclist.count)) { + memcpy(mac, cfg->assoclist.mac[idx], ETH_ALEN); + return inff_cfg80211_get_station(wiphy, ndev, mac, sinfo); + } + return -ENOENT; +} + +static s32 +inff_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev, + bool enabled, s32 timeout) +{ + s32 pm; + s32 err = 0; + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = cfg->pub; + + inff_dbg(TRACE, "Enter\n"); + + /* + * Powersave enable/disable request is coming from the + * cfg80211 even before the interface is up. 
In that + * scenario, driver will be storing the power save + * preference in cfg struct to apply this to + * FW later while initializing the dongle + */ + cfg->pwr_save = enabled; + if (!check_vif_up(ifp->vif)) { + inff_dbg(INFO, "Device is not ready, storing the value in cfg_info struct\n"); + goto done; + } + + pm = enabled ? ifp->drvr->settings->default_pm : PM_OFF; + /* Do not enable the power save after assoc if it is a p2p interface */ + if (ifp->vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT) { + inff_dbg(INFO, "Do not enable power save for P2P clients\n"); + pm = PM_OFF; + } + + inff_dbg(INFO, "power save %s\n", (pm ? "enabled" : "disabled")); + + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_PM, pm); + if (err) { + if (err == -ENODEV) + iphy_err(drvr, "net_device is not ready yet\n"); + else + iphy_err(drvr, "error (%d)\n", err); + } + +done: + inff_dbg(TRACE, "Exit\n"); + return err; +} + +s32 inff_inform_single_bss(struct inff_cfg80211_info *cfg, + struct inff_bss_info_le *bi) +{ + struct wiphy *wiphy = cfg_to_wiphy(cfg); + struct inff_pub *drvr = cfg->pub; + struct cfg80211_bss *bss; + enum nl80211_band band; + struct inff_chan ch; + u16 channel; + u32 freq; + u16 notify_capability; + u16 notify_interval; + u8 *notify_ie; + size_t notify_ielen; + struct cfg80211_inform_bss bss_data = {}; + const struct inff_tlv *ssid = NULL; + + if (le32_to_cpu(bi->length) > WL_BSS_INFO_MAX) { + iphy_err(drvr, "Bss info is larger than buffer. 
Discarding\n"); + return -EINVAL; + } + + ch.chspec = le16_to_cpu(bi->chanspec); + cfg->d11inf.decchspec(&ch); + + if (!bi->ctl_ch) + bi->ctl_ch = ch.control_ch_num; + + channel = bi->ctl_ch; + band = inff_d11_chan_band_to_nl80211(ch.band); + + freq = ieee80211_channel_to_frequency(channel, band); + if (!freq) + return -EINVAL; + + bss_data.chan = ieee80211_get_channel(wiphy, freq); + if (!bss_data.chan) + return -EINVAL; + + bss_data.boottime_ns = ktime_to_ns(ktime_get_boottime()); + + notify_capability = le16_to_cpu(bi->capability); + notify_interval = le16_to_cpu(bi->beacon_period); + notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset); + notify_ielen = le32_to_cpu(bi->ie_length); + bss_data.signal = (s16)le16_to_cpu(bi->RSSI) * 100; + + ssid = inff_parse_tlvs(notify_ie, notify_ielen, WLAN_EID_SSID); + if (ssid && ssid->data[0] == '\0' && ssid->len == bi->SSID_len) { + /* Update SSID for hidden AP */ + memcpy((u8 *)ssid->data, bi->SSID, bi->SSID_len); + } + + inff_dbg(CONN, "bssid: %pM\n", bi->BSSID); + inff_dbg(CONN, "Channel: %d(%d)\n", channel, freq); + inff_dbg(CONN, "Capability: %X\n", notify_capability); + inff_dbg(CONN, "Beacon interval: %d\n", notify_interval); + inff_dbg(CONN, "Signal: %d\n", bss_data.signal); + + bss = cfg80211_inform_bss_data(wiphy, &bss_data, + CFG80211_BSS_FTYPE_PRESP, + (const u8 *)bi->BSSID, + 0, notify_capability, + notify_interval, notify_ie, + notify_ielen, GFP_KERNEL); + + if (!bss) + return -ENOMEM; + + cfg80211_put_bss(wiphy, bss); + + return 0; +} + +static struct inff_bss_info_le * +next_bss_le(struct inff_scan_results *list, struct inff_bss_info_le *bss) +{ + if (!bss) + return list->bss_info_le; + return (struct inff_bss_info_le *)((unsigned long)bss + + le32_to_cpu(bss->length)); +} + +s32 inff_inform_bss(struct inff_cfg80211_info *cfg) +{ + struct inff_pub *drvr = cfg->pub; + struct inff_scan_results *bss_list; + struct inff_bss_info_le *bi = NULL; /* must be initialized */ + s32 err = 0; + int i; + + bss_list = (struct 
inff_scan_results *)cfg->escan_info.escan_buf; + if (bss_list->count != 0 && + bss_list->version != INFF_BSS_INFO_VERSION) { + iphy_err(drvr, "Version %d != WL_BSS_INFO_VERSION\n", + bss_list->version); + return -EOPNOTSUPP; + } + inff_dbg(SCAN, "scanned AP count (%d)\n", bss_list->count); + for (i = 0; i < bss_list->count; i++) { + bi = next_bss_le(bss_list, bi); + err = inff_inform_single_bss(cfg, bi); + if (err) + break; + } + return err; +} + +s32 inff_inform_ibss(struct inff_cfg80211_info *cfg, + struct net_device *ndev, const u8 *bssid) +{ + struct wiphy *wiphy = cfg_to_wiphy(cfg); + struct inff_pub *drvr = cfg->pub; + struct ieee80211_channel *notify_channel; + struct inff_bss_info_le *bi = NULL; + struct ieee80211_supported_band *band; + struct cfg80211_bss *bss; + struct inff_chan ch; + u8 *buf = NULL; + s32 err = 0; + u32 freq; + u16 notify_capability; + u16 notify_interval; + u8 *notify_ie; + size_t notify_ielen; + s32 notify_signal; + + inff_dbg(TRACE, "Enter\n"); + + buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto cleanup; + } + + *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX); + + err = inff_fil_cmd_data_get(netdev_priv(ndev), INFF_C_GET_BSS_INFO, + buf, WL_BSS_INFO_MAX); + if (err) { + iphy_err(drvr, "WLC_GET_BSS_INFO failed: %d\n", err); + goto cleanup; + } + + bi = (struct inff_bss_info_le *)(buf + 4); + + ch.chspec = le16_to_cpu(bi->chanspec); + cfg->d11inf.decchspec(&ch); + + band = wiphy->bands[inff_d11_chan_band_to_nl80211(ch.band)]; + freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band); + if (!freq) { + err = -EINVAL; + goto cleanup; + } + cfg->channel = freq; + notify_channel = ieee80211_get_channel(wiphy, freq); + if (!notify_channel) { + err = -EINVAL; + goto cleanup; + } + + notify_capability = le16_to_cpu(bi->capability); + notify_interval = le16_to_cpu(bi->beacon_period); + notify_ie = (u8 *)bi + le16_to_cpu(bi->ie_offset); + notify_ielen = le32_to_cpu(bi->ie_length); + notify_signal = 
(s16)le16_to_cpu(bi->RSSI) * 100; + + inff_dbg(CONN, "channel: %d(%d)\n", ch.control_ch_num, freq); + inff_dbg(CONN, "capability: %X\n", notify_capability); + inff_dbg(CONN, "beacon interval: %d\n", notify_interval); + inff_dbg(CONN, "signal: %d\n", notify_signal); + + bss = cfg80211_inform_bss(wiphy, notify_channel, + CFG80211_BSS_FTYPE_UNKNOWN, bssid, 0, + notify_capability, notify_interval, + notify_ie, notify_ielen, notify_signal, + GFP_KERNEL); + + if (!bss) { + err = -ENOMEM; + goto cleanup; + } + + cfg80211_put_bss(wiphy, bss); + +cleanup: + + kfree(buf); + + inff_dbg(TRACE, "Exit\n"); + + return err; +} + +static const struct inff_bss_info_le * +inff_update_bss_info(struct inff_cfg80211_info *cfg, struct inff_if *ifp) +{ + struct inff_pub *drvr = cfg->pub; + struct inff_bss_info_le *bi = NULL; + s32 err = 0; + u8 null_mac[6] = {0}; + + inff_dbg(TRACE, "Enter\n"); + if (inff_is_ibssmode(ifp->vif)) + return NULL; + + *(__le32 *)cfg->extra_buf = cpu_to_le32(WL_EXTRA_BUF_MAX); + err = inff_fil_cmd_data_get(ifp, INFF_C_GET_BSS_INFO, + cfg->extra_buf, WL_EXTRA_BUF_MAX); + if (err) { + iphy_err(drvr, "Could not get bss info %d\n", err); + goto update_bss_info_out; + } + bi = (struct inff_bss_info_le *)(cfg->extra_buf + 4); + + if (!memcmp(null_mac, bi->BSSID, ETH_ALEN)) { + iphy_err(drvr, "NULL mac, don't update bss\n"); + goto update_bss_info_out; + } + + err = inff_inform_single_bss(cfg, bi); + + inff_dbg(TRACE, "Exit"); + return bi; + +update_bss_info_out: + inff_dbg(TRACE, "Exit"); + return NULL; +} + +s32 +inff_compare_update_same_bss(struct inff_cfg80211_info *cfg, + struct inff_bss_info_le *bss, + struct inff_bss_info_le *bss_info_le) +{ + struct inff_chan ch_bss, ch_bss_info_le; + + ch_bss.chspec = le16_to_cpu(bss->chanspec); + cfg->d11inf.decchspec(&ch_bss); + ch_bss_info_le.chspec = le16_to_cpu(bss_info_le->chanspec); + cfg->d11inf.decchspec(&ch_bss_info_le); + + if (!memcmp(&bss_info_le->BSSID, &bss->BSSID, ETH_ALEN) && + ch_bss.band == 
ch_bss_info_le.band && + bss_info_le->SSID_len == bss->SSID_len && + !memcmp(bss_info_le->SSID, bss->SSID, bss_info_le->SSID_len)) { + if ((bss->flags & INFF_BSS_RSSI_ON_CHANNEL) == + (bss_info_le->flags & INFF_BSS_RSSI_ON_CHANNEL)) { + s16 bss_rssi = le16_to_cpu(bss->RSSI); + s16 bss_info_rssi = le16_to_cpu(bss_info_le->RSSI); + + /* preserve max RSSI if the measurements are + * both on-channel or both off-channel + */ + if (bss_info_rssi > bss_rssi) + bss->RSSI = bss_info_le->RSSI; + } else if ((bss->flags & INFF_BSS_RSSI_ON_CHANNEL) && + (bss_info_le->flags & INFF_BSS_RSSI_ON_CHANNEL) == 0) { + /* preserve the on-channel rssi measurement + * if the new measurement is off channel + */ + bss->RSSI = bss_info_le->RSSI; + bss->flags |= INFF_BSS_RSSI_ON_CHANNEL; + } + return 1; + } + return 0; +} + +struct inff_pno_net_info_le * +inff_get_netinfo_array(struct inff_pno_scanresults_le *pfn_v1) +{ + struct inff_pno_scanresults_v2_le *pfn_v2; + struct inff_pno_net_info_le *netinfo; + + switch (pfn_v1->version) { + default: + WARN_ON(1); + fallthrough; + case cpu_to_le32(1): + netinfo = (struct inff_pno_net_info_le *)(pfn_v1 + 1); + break; + case cpu_to_le32(2): + pfn_v2 = (struct inff_pno_scanresults_v2_le *)pfn_v1; + netinfo = (struct inff_pno_net_info_le *)(pfn_v2 + 1); + break; + } + + return netinfo; +} + +static s32 inff_config_wowl_pattern(struct inff_if *ifp, u8 cmd[4], + u8 *pattern, u32 patternsize, u8 *mask, + u32 packet_offset) +{ + struct inff_fil_wowl_pattern_le *filter; + u32 masksize; + u32 patternoffset; + u8 *buf; + u32 bufsize; + s32 ret; + + masksize = (patternsize + 7) / 8; + patternoffset = sizeof(*filter) - sizeof(filter->cmd) + masksize; + + bufsize = sizeof(*filter) + patternsize + masksize; + buf = kzalloc(bufsize, GFP_KERNEL); + if (!buf) + return -ENOMEM; + filter = (struct inff_fil_wowl_pattern_le *)buf; + + memcpy(filter->cmd, cmd, 4); + filter->masksize = cpu_to_le32(masksize); + filter->offset = cpu_to_le32(packet_offset); + 
filter->patternoffset = cpu_to_le32(patternoffset); + filter->patternsize = cpu_to_le32(patternsize); + filter->type = cpu_to_le32(INFF_WOWL_PATTERN_TYPE_BITMAP); + + if ((mask) && (masksize)) + memcpy(buf + sizeof(*filter), mask, masksize); + if ((pattern) && (patternsize)) + memcpy(buf + sizeof(*filter) + masksize, pattern, patternsize); + + ret = inff_fil_iovar_data_set(ifp, "wowl_pattern", buf, bufsize); + + kfree(buf); + return ret; +} + +static s32 +inff_wowl_nd_results(struct inff_if *ifp, const struct inff_event_msg *e, + void *data) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_cfg80211_info *cfg = drvr->config; + struct inff_pno_scanresults_le *pfn_result; + struct inff_pno_net_info_le *netinfo; + + inff_dbg(SCAN, "Enter\n"); + + if (e->datalen < (sizeof(*pfn_result) + sizeof(*netinfo))) { + inff_dbg(SCAN, "Event data too small. Ignore\n"); + return 0; + } + + pfn_result = (struct inff_pno_scanresults_le *)data; + + if (e->event_code == INFF_E_PFN_NET_LOST) { + inff_dbg(SCAN, "PFN NET LOST event. Ignore\n"); + return 0; + } + + if (le32_to_cpu(pfn_result->count) < 1) { + iphy_err(drvr, "Invalid result count, expected 1 (%d)\n", + le32_to_cpu(pfn_result->count)); + return -EINVAL; + } + + netinfo = inff_get_netinfo_array(pfn_result); + if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN) + netinfo->SSID_len = IEEE80211_MAX_SSID_LEN; + memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len); + cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len; + cfg->wowl.nd->n_channels = 1; + cfg->wowl.nd->channels[0] = + ieee80211_channel_to_frequency(netinfo->channel, + netinfo->channel <= CH_MAX_2G_CHANNEL ? 
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ); + cfg->wowl.nd_info->n_matches = 1; + cfg->wowl.nd_info->matches[0] = cfg->wowl.nd; + + /* Inform (the resume task) that the net detect information was recvd */ + cfg->wowl.nd_data_completed = true; + wake_up(&cfg->wowl.nd_data_wait); + + return 0; +} + +#ifdef CONFIG_PM + +static void inff_report_wowl_wakeind(struct wiphy *wiphy, struct inff_if *ifp) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_pub *drvr = cfg->pub; + struct inff_wowl_wakeind_le wake_ind_le; + struct cfg80211_wowlan_wakeup wakeup_data; + struct cfg80211_wowlan_wakeup *wakeup; + u32 wakeind; + s32 err; + long time_left; + + err = inff_fil_iovar_data_get(ifp, "wowl_wakeind", &wake_ind_le, + sizeof(wake_ind_le)); + if (err) { + iphy_err(drvr, "Get wowl_wakeind failed, err = %d\n", err); + return; + } + + wakeind = le32_to_cpu(wake_ind_le.ucode_wakeind); + if (wakeind & (INFF_WOWL_MAGIC | INFF_WOWL_DIS | INFF_WOWL_BCN | + INFF_WOWL_RETR | INFF_WOWL_NET | + INFF_WOWL_PFN_FOUND)) { + wakeup = &wakeup_data; + memset(&wakeup_data, 0, sizeof(wakeup_data)); + wakeup_data.pattern_idx = -1; + + if (wakeind & INFF_WOWL_MAGIC) { + inff_dbg(INFO, "WOWL Wake indicator: INFF_WOWL_MAGIC\n"); + wakeup_data.magic_pkt = true; + } + if (wakeind & INFF_WOWL_DIS) { + inff_dbg(INFO, "WOWL Wake indicator: INFF_WOWL_DIS\n"); + wakeup_data.disconnect = true; + } + if (wakeind & INFF_WOWL_BCN) { + inff_dbg(INFO, "WOWL Wake indicator: INFF_WOWL_BCN\n"); + wakeup_data.disconnect = true; + } + if (wakeind & INFF_WOWL_RETR) { + inff_dbg(INFO, "WOWL Wake indicator: INFF_WOWL_RETR\n"); + wakeup_data.disconnect = true; + } + if (wakeind & INFF_WOWL_NET) { + inff_dbg(INFO, "WOWL Wake indicator: INFF_WOWL_NET\n"); + /* For now always map to pattern 0, no API to get + * correct information available at the moment. 
+ */ + wakeup_data.pattern_idx = 0; + } + if (wakeind & INFF_WOWL_PFN_FOUND) { + inff_dbg(INFO, "WOWL Wake indicator: INFF_WOWL_PFN_FOUND\n"); + time_left = wait_event_timeout(cfg->wowl.nd_data_wait, + cfg->wowl.nd_data_completed, + INFF_ND_INFO_TIMEOUT); + if (!time_left) + iphy_err(drvr, "No result for wowl net detect\n"); + else + wakeup_data.net_detect = cfg->wowl.nd_info; + } + if (wakeind & INFF_WOWL_GTK_FAILURE) { + inff_dbg(INFO, "WOWL Wake indicator: INFF_WOWL_GTK_FAILURE\n"); + wakeup_data.gtk_rekey_failure = true; + } + } else { + wakeup = NULL; + } + cfg80211_report_wowlan_wakeup(&ifp->vif->wdev, wakeup, GFP_KERNEL); +} + +#else + +static void inff_report_wowl_wakeind(struct wiphy *wiphy, struct inff_if *ifp) +{ +} + +#endif /* CONFIG_PM */ + +static s32 inff_cfg80211_resume(struct wiphy *wiphy) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct net_device *ndev = cfg_to_ndev(cfg); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = ifp->drvr; + struct inff_bus *bus_if = drvr->bus_if; + struct inff_cfg80211_info *config = drvr->config; + int retry = INFF_PM_WAIT_MAXRETRY; + s32 power_mode; + + power_mode = cfg->pwr_save ? 
ifp->drvr->settings->default_pm : PM_OFF; + + inff_dbg(TRACE, "Enter\n"); + + config->pm_state = INFF_CFG80211_PM_STATE_RESUMING; + + if (cfg->wowl.active) { + /* wait for bus resumed */ + while (retry && bus_if->state != INFF_BUS_UP) { + usleep_range(10000, 20000); + retry--; + } + if (!retry && bus_if->state != INFF_BUS_UP) + inff_err("timed out wait for bus resume\n"); + + inff_report_wowl_wakeind(wiphy, ifp); + inff_fil_iovar_int_set(ifp, "wowl_clear", 0); + inff_config_wowl_pattern(ifp, "clr", NULL, 0, NULL, 0); + if (!inff_feat_is_enabled(ifp, INFF_FEAT_WOWL_ARP_ND)) + inff_offload_configure_arp_nd(ifp, true); + inff_fil_cmd_int_set(ifp, INFF_C_SET_PM, + power_mode); + cfg->wowl.active = false; + if (cfg->wowl.nd_enabled) { + inff_cfg80211_sched_scan_stop(cfg->wiphy, ifp->ndev, 0); + inff_fweh_unregister(cfg->pub, INFF_E_PFN_NET_FOUND); + inff_fweh_register(cfg->pub, INFF_E_PFN_NET_FOUND, + inff_notify_sched_scan_results); + cfg->wowl.nd_enabled = false; + } + + /* disable packet filters */ + inff_pktfilter_enable(ifp->ndev, false); + } + /* During resume, disable all offload modules which are enabled + * previously while entering suspend. 
+ */ + if (inff_cfg80211_get_iftype(ifp) == NL80211_IFTYPE_STATION && + inff_feat_is_enabled(ifp, INFF_FEAT_OFFLOADS)) + inff_offload_enable(ifp, inff_offload_feat, false); + + config->pm_state = INFF_CFG80211_PM_STATE_RESUMED; + return 0; +} + +static void inff_configure_wowl(struct inff_cfg80211_info *cfg, + struct inff_if *ifp, + struct cfg80211_wowlan *wowl) +{ + u32 wowl_config; + struct inff_wowl_wakeind_le wowl_wakeind; + u32 i; + + inff_dbg(TRACE, "Suspend, wowl config.\n"); + + if (!inff_feat_is_enabled(ifp, INFF_FEAT_WOWL_ARP_ND)) + inff_offload_configure_arp_nd(ifp, false); + inff_fil_cmd_int_set(ifp, INFF_C_SET_PM, PM_MAX); + + wowl_config = 0; + if (wowl->disconnect) + wowl_config = INFF_WOWL_DIS | INFF_WOWL_BCN | INFF_WOWL_RETR; + if (wowl->magic_pkt) + wowl_config |= INFF_WOWL_MAGIC; + if (wowl->patterns && wowl->n_patterns) { + wowl_config |= INFF_WOWL_NET; + for (i = 0; i < wowl->n_patterns; i++) { + inff_config_wowl_pattern(ifp, "add", + (u8 *)wowl->patterns[i].pattern, + wowl->patterns[i].pattern_len, + (u8 *)wowl->patterns[i].mask, + wowl->patterns[i].pkt_offset); + } + } + if (wowl->nd_config) { + inff_cfg80211_sched_scan_start(cfg->wiphy, ifp->ndev, + wowl->nd_config); + wowl_config |= INFF_WOWL_PFN_FOUND; + + cfg->wowl.nd_data_completed = false; + cfg->wowl.nd_enabled = true; + /* Now reroute the event for PFN to the wowl function. 
*/ + inff_fweh_unregister(cfg->pub, INFF_E_PFN_NET_FOUND); + inff_fweh_register(cfg->pub, INFF_E_PFN_NET_FOUND, + inff_wowl_nd_results); + } + if (wowl->gtk_rekey_failure) + wowl_config |= INFF_WOWL_GTK_FAILURE; + if (!test_bit(INFF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state)) + wowl_config |= INFF_WOWL_UNASSOC; + + memcpy(&wowl_wakeind, "clear", 6); + inff_fil_iovar_data_set(ifp, "wowl_wakeind", &wowl_wakeind, + sizeof(wowl_wakeind)); + inff_fil_iovar_int_set(ifp, "wowl", wowl_config); + inff_fil_iovar_int_set(ifp, "wowl_activate", 1); + inff_bus_wowl_config(cfg->pub->bus_if, true); + cfg->wowl.active = true; + + /* enable packet filters */ + inff_pktfilter_enable(ifp->ndev, true); +} + +static int inff_keepalive_start(struct inff_if *ifp, unsigned int interval) +{ + struct inff_mkeep_alive_pkt_le kalive = {0}; + int ret = 0; + + /* Configure Null function/data keepalive */ + kalive.version = cpu_to_le16(1); + kalive.period_msec = cpu_to_le32(interval * MSEC_PER_SEC); + kalive.len_bytes = cpu_to_le16(0); + kalive.keep_alive_id = 0; + + ret = inff_fil_iovar_data_set(ifp, "mkeep_alive", &kalive, sizeof(kalive)); + if (ret) + inff_err("keep-alive packet config failed, ret=%d\n", ret); + + return ret; +} + +static s32 inff_cfg80211_suspend(struct wiphy *wiphy, + struct cfg80211_wowlan *wowl) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct net_device *ndev = cfg_to_ndev(cfg); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_cfg80211_vif *vif; + struct inff_cfg80211_info *config = ifp->drvr->config; + + inff_dbg(TRACE, "Enter\n"); + + config->pm_state = INFF_CFG80211_PM_STATE_SUSPENDING; + + /* if the primary net_device is not READY there is nothing + * we can do but pray resume goes smoothly. 
+ */ + if (!check_vif_up(ifp->vif)) + goto exit; + + /* Stop scheduled scan */ + if (inff_feat_is_enabled(ifp, INFF_FEAT_PNO)) + inff_cfg80211_sched_scan_stop(wiphy, ndev, 0); + + /* end any scanning */ + if (test_bit(INFF_SCAN_STATUS_BUSY, &cfg->scan_status)) + inff_abort_scanning(cfg); + + /* Enable offload features that were not in default (LOW) or user selected + * power profile but should be offloaded to fw in suspend as host goes to + * sleep. These will be disabled on resume. + */ + if (inff_cfg80211_get_iftype(ifp) == NL80211_IFTYPE_STATION && + inff_feat_is_enabled(ifp, INFF_FEAT_OFFLOADS)) + inff_offload_enable(ifp, inff_offload_feat, true); + + if (!wowl || !test_bit(INFF_VIF_STATUS_CONNECTED, + &ifp->vif->sme_state)) { + inff_bus_wowl_config(cfg->pub->bus_if, false); + list_for_each_entry(vif, &cfg->vif_list, list) { + if (!test_bit(INFF_VIF_STATUS_READY, &vif->sme_state)) + continue; + /* While going to suspend if associated with AP + * disassociate from AP to save power while system is + * in suspended state + */ + inff_link_down(vif, WLAN_REASON_UNSPECIFIED, true); + /* Make sure WPA_Supplicant receives all the event + * generated due to DISASSOC call to the fw to keep + * the state fw and WPA_Supplicant state consistent + */ + inff_delay(500); + } + /* Configure MPC */ + inff_set_mpc(ifp, 1); + + } else { + if (inff_feat_is_enabled(ifp, INFF_FEAT_WOWL)) { + /* Configure WOWL parameters */ + inff_configure_wowl(cfg, ifp, wowl); + + /* Prevent disassociation due to inactivity with keep-alive */ + inff_keepalive_start(ifp, 30); + } + if (inff_cfg80211_get_iftype(ifp) == NL80211_IFTYPE_STATION && + inff_feat_is_enabled(ifp, INFF_FEAT_OFFLOADS)) + inff_offload_enable(ifp, INFF_OFFLOAD_WOWLPF, true); + } + +exit: + /* set cfg80211 pm state to cfg80211 suspended state */ + config->pm_state = INFF_CFG80211_PM_STATE_SUSPENDED; + + /* clear any scanning activity */ + cfg->scan_status = 0; + + inff_dbg(TRACE, "Exit\n"); + return 0; +} + +static s32 
+inff_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_pmksa *pmksa) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = cfg->pub; + s32 err; + + inff_dbg(TRACE, "Enter\n"); + if (!check_vif_up(ifp->vif)) + return -EIO; + + inff_dbg(CONN, "set_pmksa - PMK bssid: %pM =\n", pmksa->bssid); + inff_dbg(CONN, "%*ph\n", WLAN_PMKID_LEN, pmksa->pmkid); + + err = inff_update_pmksa(cfg, ifp, pmksa->bssid, pmksa->pmkid, PMKSA_SET); + if (err < 0) { + iphy_err(drvr, + "PMKSA_SET inff_update_pmksa failed: ret=%d\n", + err); + goto exit; + } + + if (pmksa->pmk_len && pmksa->pmk_len < INFF_WSEC_PMK_LEN_SUITEB_192) { + /* external supplicant stores SUITEB-192 PMK */ + if (ifp->vif->profile.is_okc) { + err = inff_fil_iovar_data_set(ifp, "okc_info_pmk", pmksa->pmk, + pmksa->pmk_len); + if (err < 0) + iphy_err(drvr, "okc_info_pmk iovar failed: ret=%d\n", err); + } else { + inff_set_pmk(ifp, pmksa->pmk, pmksa->pmk_len); + } + } + +exit: + inff_dbg(TRACE, "Exit\n"); + return err; +} + +static s32 +inff_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_pmksa *pmksa) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = cfg->pub; + s32 err; + + inff_dbg(TRACE, "Enter\n"); + if (!check_vif_up(ifp->vif)) + return -EIO; + + inff_dbg(CONN, "del_pmksa - PMK bssid = %pM\n", pmksa->bssid); + + /* TODO: implement PMKID_V2 */ + err = inff_update_pmksa(cfg, ifp, pmksa->bssid, pmksa->pmkid, PMKSA_DELETE); + if (err < 0) { + iphy_err(drvr, + "PMKSA_DELETE inff_update_pmksa failed: ret=%d\n", + err); + return err; + } + + inff_dbg(TRACE, "Exit\n"); + return err; +} + +static s32 +inff_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + s32 err; + + inff_dbg(TRACE, 
"Enter\n"); + if (!check_vif_up(ifp->vif)) + return -EIO; + + memset(&cfg->pmk_list, 0, sizeof(cfg->pmk_list)); + err = inff_update_pmklist(cfg, ifp); + + inff_dbg(TRACE, "Exit\n"); + return err; +} + +static s32 +inff_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_ap_settings *settings) +{ + s32 ie_offset; + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = cfg->pub; + struct inff_cfg80211_profile *profile = &ifp->vif->profile; + struct cfg80211_crypto_settings *crypto = &settings->crypto; + const struct inff_tlv *ssid_ie; + const struct inff_tlv *country_ie; + struct inff_ssid_le ssid_le; + s32 err = -EPERM; + struct inff_join_params join_params; + enum nl80211_iftype dev_role; + struct inff_fil_bss_enable_le bss_enable; + u16 chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef); + bool mbss = false; + int is_11d; + bool supports_11d; + bool closednet; + struct inff_p2p_info *p2p = &cfg->p2p; + + inff_dbg(TRACE, "ctrlchn=%d, center=%d, bw=%d, beacon_interval=%d, dtim_period=%d,\n", + settings->chandef.chan->hw_value, + settings->chandef.center_freq1, settings->chandef.width, + settings->beacon_interval, settings->dtim_period); + inff_dbg(TRACE, "ssid=%s(%zu), auth_type=%d, inactivity_timeout=%d\n", + settings->ssid, settings->ssid_len, settings->auth_type, + settings->inactivity_timeout); + dev_role = ifp->vif->wdev.iftype; + + if (dev_role == NL80211_IFTYPE_AP && + inff_feat_is_enabled(ifp, INFF_FEAT_MBSS)) { + struct inff_cfg80211_vif *vif_walk; + + list_for_each_entry(vif_walk, &cfg->vif_list, list) { + if (inff_is_apmode(vif_walk) && + check_vif_up(vif_walk) && + vif_walk != ifp->vif) { + /* found another vif that is up and in AP mode, + * and it is not the current vif that calls start_ap, + * so this is the mbss case. + */ + mbss = true; + break; + } + } + } + inff_dbg(TRACE, "mbss %s\n", mbss ?
"enabled" : "disabled"); + + /* store current 11d setting */ + if (inff_fil_cmd_int_get(ifp, INFF_C_GET_REGULATORY, + &ifp->vif->is_11d)) { + is_11d = false; + supports_11d = false; + } else { + country_ie = inff_parse_tlvs((u8 *)settings->beacon.tail, + settings->beacon.tail_len, + WLAN_EID_COUNTRY); + is_11d = country_ie ? 1 : 0; + supports_11d = true; + } + + memset(&ssid_le, 0, sizeof(ssid_le)); + if (!settings->ssid || settings->ssid_len == 0) { + ie_offset = DOT11_MGMT_HDR_LEN + DOT11_BCN_PRB_FIXED_LEN; + ssid_ie = inff_parse_tlvs((u8 *)&settings->beacon.head[ie_offset], + settings->beacon.head_len - ie_offset, + WLAN_EID_SSID); + if (!ssid_ie || ssid_ie->len > IEEE80211_MAX_SSID_LEN) + return -EINVAL; + + memcpy(ssid_le.SSID, ssid_ie->data, ssid_ie->len); + ssid_le.SSID_len = cpu_to_le32(ssid_ie->len); + inff_dbg(TRACE, "SSID is (%s) in Head\n", ssid_le.SSID); + } else { + memcpy(ssid_le.SSID, settings->ssid, settings->ssid_len); + ssid_le.SSID_len = cpu_to_le32((u32)settings->ssid_len); + } + + if (!mbss) { + inff_set_mpc(ifp, 0); + inff_offload_configure_arp_nd(ifp, false); + } + + /* Parameters shared by all radio interfaces */ + if (!mbss) { + if ((supports_11d) && is_11d != ifp->vif->is_11d) { + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_REGULATORY, + is_11d); + if (err < 0) { + iphy_err(drvr, "Regulatory Set Error, %d\n", + err); + goto exit; + } + } + if (settings->beacon_interval) { + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_BCNPRD, + settings->beacon_interval); + if (err < 0) { + iphy_err(drvr, "Beacon Interval Set Error, %d\n", + err); + goto exit; + } + } + if (settings->dtim_period) { + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_DTIMPRD, + settings->dtim_period); + if (err < 0) { + iphy_err(drvr, "DTIM Interval Set Error, %d\n", + err); + goto exit; + } + } + + if (dev_role == NL80211_IFTYPE_AP && + (ifp->ifidx == 0 || + (!inff_feat_is_enabled(ifp, INFF_FEAT_RSDB) && + !inff_feat_is_enabled(ifp, INFF_FEAT_MCHAN)))) { + err = 
inff_fil_cmd_int_set(ifp, INFF_C_DOWN, 1); + if (err < 0) { + iphy_err(drvr, "INFF_C_DOWN error %d\n", + err); + goto exit; + } + inff_fil_iovar_int_set(ifp, "apsta", 0); + } + + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_INFRA, 1); + if (err < 0) { + iphy_err(drvr, "SET INFRA error %d\n", err); + goto exit; + } + } else if (WARN_ON(supports_11d && (is_11d != ifp->vif->is_11d))) { + /* Multiple-BSS should use same 11d configuration */ + err = -EINVAL; + goto exit; + } + ifp->isap = false; + /* Interface specific setup */ + if (dev_role == NL80211_IFTYPE_AP) { + u32 is_up; + + if ((inff_feat_is_enabled(ifp, INFF_FEAT_MBSS)) && !mbss) { + err = inff_fil_cmd_int_get(ifp, INFF_C_GET_UP, &is_up); + if (err < 0) { + iphy_err(drvr, "INFF_C_GET_UP error (%d)\n", err); + goto exit; + } + + /* mbss must be set in DOWN state. */ + if (is_up) { + err = inff_fil_cmd_int_set(ifp, INFF_C_DOWN, 1); + if (err < 0) { + iphy_err(drvr, "INFF_C_DOWN error (%d)\n", err); + goto exit; + } + } + err = inff_fil_iovar_int_set(ifp, "mbss", 1); + if (err < 0) { + iphy_err(drvr, "set mbss error (%d)\n", err); + goto exit; + } + } + + if (!test_bit(INFF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state)) { + bss_enable.bsscfgidx = cpu_to_le32(ifp->bsscfgidx); + bss_enable.enable = cpu_to_le32(WL_IOV_OP_MANUAL_AP_BSSCFG_CREATE); + err = inff_fil_iovar_data_set(ifp, "bss", &bss_enable, + sizeof(bss_enable)); + if (err < 0) { + iphy_err(drvr, "bss_enable config failed %d\n", err); + goto exit; + } + } + + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_AP, 1); + if (err < 0) { + iphy_err(drvr, "setting AP mode failed %d\n", + err); + goto exit; + } + + /* Firmware 10.x requires setting channel after enabling + * AP and before bringing interface up. 
+ */ + err = inff_fil_iovar_int_set(ifp, "chanspec", chanspec); + if (err < 0) { + iphy_err(drvr, "Set Channel failed: chspec=%d, %d\n", + chanspec, err); + goto exit; + } + + err = inff_fil_cmd_int_get(ifp, INFF_C_GET_UP, &is_up); + if (err < 0) { + iphy_err(drvr, "INFF_C_GET_UP error (%d)\n", err); + goto exit; + } + + if (!is_up) + err = inff_fil_cmd_int_set(ifp, INFF_C_UP, 1); + if (err < 0) { + iphy_err(drvr, "INFF_C_UP error (%d)\n", err); + goto exit; + } + + if (crypto->psk) { + inff_dbg(INFO, "using PSK offload\n"); + profile->use_fwauth |= BIT(INFF_PROFILE_FWAUTH_PSK); + err = inff_set_pmk(ifp, crypto->psk, + INFF_WSEC_MAX_PSK_LEN); + if (err < 0) + goto exit; + } + if (crypto->sae_pwd) { + inff_dbg(INFO, "using SAE offload\n"); + profile->use_fwauth |= BIT(INFF_PROFILE_FWAUTH_SAE); + err = inff_set_sae_password(ifp, crypto->sae_pwd, + crypto->sae_pwd_len); + if (err < 0) + goto exit; + } + if (profile->use_fwauth == 0) + profile->use_fwauth = BIT(INFF_PROFILE_FWAUTH_NONE); + + err = inff_parse_configure_security(ifp, settings, + NL80211_IFTYPE_AP); + if (err < 0) { + iphy_err(drvr, "inff_parse_configure_security error\n"); + goto exit; + } + + /* On DOWN the firmware removes the WEP keys, reconfigure + * them if they were set. + */ + inff_cfg80211_reconfigure_wep(ifp); + + memset(&join_params, 0, sizeof(join_params)); + /* join parameters starts with ssid */ + memcpy(&join_params.ssid_le, &ssid_le, sizeof(ssid_le)); + /* create softap */ + err = inff_fil_cmd_data_set(ifp, INFF_C_SET_SSID, + &join_params, sizeof(join_params)); + if (err < 0) { + iphy_err(drvr, "SET SSID error (%d)\n", err); + goto exit; + } + + closednet = + (settings->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE); + err = inff_fil_iovar_int_set(ifp, "closednet", closednet); + if (err) { + iphy_err(drvr, "%s closednet error (%d)\n", + (closednet ? 
"enabled" : "disabled"), + err); + goto exit; + } + ifp->isap = true; + inff_dbg(TRACE, "AP mode configuration complete\n"); + } else if (dev_role == NL80211_IFTYPE_P2P_GO) { + err = inff_fil_iovar_int_set(ifp, "chanspec", chanspec); + if (err < 0) { + iphy_err(drvr, "Set Channel failed: chspec=%d, %d\n", + chanspec, err); + goto exit; + } + + err = inff_parse_configure_security(ifp, settings, + NL80211_IFTYPE_P2P_GO); + if (err < 0) { + inff_err("inff_parse_configure_security error\n"); + goto exit; + } + + err = inff_fil_bsscfg_data_set(ifp, "ssid", &ssid_le, + sizeof(ssid_le)); + if (err < 0) { + iphy_err(drvr, "setting ssid failed %d\n", err); + goto exit; + } + bss_enable.bsscfgidx = cpu_to_le32(ifp->bsscfgidx); + bss_enable.enable = cpu_to_le32(WL_IOV_OP_BSSCFG_ENABLE); + err = inff_fil_iovar_data_set(ifp, "bss", &bss_enable, + sizeof(bss_enable)); + if (err < 0) { + iphy_err(drvr, "bss_enable config failed %d\n", err); + goto exit; + } + + p2p->afx_hdl.my_listen_chan = chanspec; + ifp->isap = true; + inff_dbg(TRACE, "GO mode configuration complete\n"); + } else { + WARN_ON(1); + } + + /* Set HE BSS Color */ + if (settings->beacon.he_bss_color.enabled) + inff_he_set_bss_color(ifp, settings->beacon.he_bss_color.color); + + inff_vif_clear_mgmt_ies(ifp->vif); + inff_config_ap_mgmt_ie(ifp->vif, &settings->beacon); + set_bit(INFF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); + inff_net_setcarrier(ifp, true); + +exit: + if ((err) && !mbss) { + inff_set_mpc(ifp, 1); + inff_offload_configure_arp_nd(ifp, true); + } else { + cfg->num_softap++; + inff_dbg(TRACE, "Num of SoftAP %u\n", cfg->num_softap); + } + return err; +} + +static int inff_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev, + unsigned int link_id) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = cfg->pub; + struct inff_cfg80211_profile *profile = &ifp->vif->profile; + s32 err; + struct inff_fil_bss_enable_le 
bss_enable; + struct inff_join_params join_params; + s32 apsta = 0; + + inff_dbg(TRACE, "Enter\n"); + + if (ifp->vif->wdev.iftype == NL80211_IFTYPE_AP) { + /* Due to most likely deauths outstanding we sleep */ + /* first to make sure they get processed by fw. */ + msleep(400); + + if (profile->use_fwauth != BIT(INFF_PROFILE_FWAUTH_NONE)) { + if (profile->use_fwauth & BIT(INFF_PROFILE_FWAUTH_PSK)) + inff_set_pmk(ifp, NULL, 0); + if (profile->use_fwauth & BIT(INFF_PROFILE_FWAUTH_SAE)) + inff_set_sae_password(ifp, NULL, 0); + profile->use_fwauth = BIT(INFF_PROFILE_FWAUTH_NONE); + } + + cfg->num_softap--; + + /* Clear bss configuration and SSID */ + bss_enable.bsscfgidx = cpu_to_le32(ifp->bsscfgidx); + bss_enable.enable = cpu_to_le32(WL_IOV_OP_BSSCFG_DISABLE); + err = inff_fil_iovar_data_set(ifp, "bss", &bss_enable, + sizeof(bss_enable)); + if (err < 0) + inff_err("bss_enable config failed %d\n", err); + + memset(&join_params, 0, sizeof(join_params)); + err = inff_fil_cmd_data_set(ifp, INFF_C_SET_SSID, + &join_params, sizeof(join_params)); + if (err < 0) + iphy_err(drvr, "SET SSID error (%d)\n", err); + + if (cfg->num_softap) { + inff_dbg(TRACE, "Num of SoftAP %u\n", cfg->num_softap); + return 0; + } + + /* First BSS doesn't get a full reset */ + if (ifp->bsscfgidx == 0) + inff_fil_iovar_int_set(ifp, "closednet", 0); + + err = inff_fil_iovar_int_get(ifp, "apsta", &apsta); + if (err < 0) + inff_err("wl apsta failed (%d)\n", err); + + if (!apsta) { + err = inff_fil_cmd_int_set(ifp, INFF_C_DOWN, 1); + if (err < 0) + iphy_err(drvr, "INFF_C_DOWN error %d\n", err); + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_AP, 0); + if (err < 0) + iphy_err(drvr, "Set AP mode error %d\n", err); + } + if (inff_feat_is_enabled(ifp, INFF_FEAT_MBSS)) + inff_fil_iovar_int_set(ifp, "mbss", 0); + inff_fil_cmd_int_set(ifp, INFF_C_SET_REGULATORY, + ifp->vif->is_11d); + /* Bring device back up so it can be used again */ + err = inff_fil_cmd_int_set(ifp, INFF_C_UP, 1); + if (err < 0) + iphy_err(drvr, 
"INFF_C_UP error %d\n", err); + + inff_vif_clear_mgmt_ies(ifp->vif); + } else { + bss_enable.bsscfgidx = cpu_to_le32(ifp->bsscfgidx); + bss_enable.enable = cpu_to_le32(WL_IOV_OP_BSSCFG_DISABLE); + err = inff_fil_iovar_data_set(ifp, "bss", &bss_enable, + sizeof(bss_enable)); + if (err < 0) + iphy_err(drvr, "bss_enable config failed %d\n", err); + } + inff_set_mpc(ifp, 1); + clear_bit(INFF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); + inff_offload_configure_arp_nd(ifp, true); + inff_net_setcarrier(ifp, false); + + return err; +} + +static s32 +inff_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_ap_update *info) +{ + struct inff_if *ifp = netdev_priv(ndev); + + inff_dbg(TRACE, "Enter\n"); + + return inff_config_ap_mgmt_ie(ifp->vif, &info->beacon); +} + +static int +inff_cfg80211_del_station(struct wiphy *wiphy, struct net_device *ndev, + struct station_del_parameters *params) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_pub *drvr = cfg->pub; + struct inff_scb_val_le scbval; + struct inff_if *ifp = netdev_priv(ndev); + s32 err; + + if (!params->mac) + return -EFAULT; + + inff_dbg(TRACE, "Enter %pM\n", params->mac); + + if (ifp->vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) + ifp = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; + if (!check_vif_up(ifp->vif)) + return -EIO; + + memcpy(&scbval.ea, params->mac, ETH_ALEN); + scbval.val = cpu_to_le32(params->reason_code); + err = inff_fil_cmd_data_set(ifp, INFF_C_SCB_DEAUTHENTICATE_FOR_REASON, + &scbval, sizeof(scbval)); + if (err) + iphy_err(drvr, "SCB_DEAUTHENTICATE_FOR_REASON failed %d\n", + err); + + inff_dbg(TRACE, "Exit\n"); + return err; +} + +static int +inff_cfg80211_change_station(struct wiphy *wiphy, struct net_device *ndev, + const u8 *mac, struct station_parameters *params) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_pub *drvr = cfg->pub; + struct inff_if *ifp = netdev_priv(ndev); + s32 err; + + 
inff_dbg(TRACE, "Enter, MAC %pM, mask 0x%04x set 0x%04x\n", mac, + params->sta_flags_mask, params->sta_flags_set); + + /* Ignore all 00 MAC */ + if (is_zero_ether_addr(mac)) + return 0; + + if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) + return 0; + + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED)) + err = inff_fil_cmd_data_set(ifp, INFF_C_SET_SCB_AUTHORIZE, + (void *)mac, ETH_ALEN); + else + err = inff_fil_cmd_data_set(ifp, INFF_C_SET_SCB_DEAUTHORIZE, + (void *)mac, ETH_ALEN); + if (err < 0) + iphy_err(drvr, "Setting SCB (de-)authorize failed, %d\n", err); + + return err; +} + +static void +inff_cfg80211_update_mgmt_frame_registrations(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct mgmt_frame_regs *upd) +{ + struct inff_cfg80211_vif *vif; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + + vif->mgmt_rx_reg = upd->interface_stypes; +} + +static int +inff_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_mgmt_tx_params *params, u64 *cookie) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct ieee80211_channel *chan = params->chan; + struct inff_pub *drvr = cfg->pub; + const u8 *buf = params->buf; + size_t len = params->len; + const struct ieee80211_mgmt *mgmt; + struct inff_cfg80211_vif *vif; + s32 err = 0; + s32 ie_offset; + s32 ie_len; + struct inff_fil_action_frame_le *action_frame; + struct inff_fil_af_params_le *af_params; + bool ack = false; + __le32 hw_ch; + struct inff_mf_params_le *mf_params; + u32 mf_params_len; + s32 timeout; + + inff_dbg(TRACE, "Enter\n"); + + *cookie = 0; + + mgmt = (const struct ieee80211_mgmt *)buf; + + if (!ieee80211_is_mgmt(mgmt->frame_control)) { + iphy_err(drvr, "Driver only allows MGMT packet type\n"); + return -EPERM; + } + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + + if (ieee80211_is_probe_resp(mgmt->frame_control)) { + /* Right now the only reason to get a probe response */ + /* is for p2p listen 
response or for p2p GO from */ + /* wpa_supplicant. Unfortunately the probe is sent */ + /* on primary ndev, while dongle wants it on the p2p */ + /* vif. Since this is the only reason for a probe */ + /* response to be sent, the vif is taken from cfg. */ + /* If ever desired to send proberesp for non p2p */ + /* response then data should be checked for */ + /* "DIRECT-". Note in future supplicant will take */ + /* dedicated p2p wdev to do this and then this 'hack'*/ + /* is not needed anymore. */ + ie_offset = DOT11_MGMT_HDR_LEN + + DOT11_BCN_PRB_FIXED_LEN; + ie_len = len - ie_offset; + if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif) + vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + err = inff_vif_set_mgmt_ie(vif, + INFF_VNDR_IE_PRBRSP_FLAG, + &buf[ie_offset], + ie_len); + cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, + GFP_KERNEL); + } else if (ieee80211_is_action(mgmt->frame_control)) { + if (len > INFF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) { + iphy_err(drvr, "invalid action frame length\n"); + err = -EINVAL; + goto exit; + } + af_params = kzalloc(sizeof(*af_params), GFP_KERNEL); + if (!af_params) { + err = -ENOMEM; + goto exit; + } + action_frame = &af_params->action_frame; + /* Add the packet Id */ + action_frame->packet_id = cpu_to_le32(*cookie); + /* Add BSSID */ + memcpy(&action_frame->da[0], &mgmt->da[0], ETH_ALEN); + memcpy(&af_params->bssid[0], &mgmt->bssid[0], ETH_ALEN); + /* Add the length, excluding the 802.11 header */ + action_frame->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN); + /* Add the channel.
Use the one specified as parameter if any or + * the current one (got from the firmware) otherwise + */ + if (chan) { + hw_ch = cpu_to_le32(chan->hw_value); + } else { + err = inff_fil_cmd_data_get(vif->ifp, + INFF_C_GET_CHANNEL, + &hw_ch, sizeof(hw_ch)); + if (err) { + iphy_err(drvr, + "unable to get current hw channel\n"); + goto free_af_params; + } + } + af_params->channel = hw_ch; + + af_params->dwell_time = cpu_to_le32(params->wait); + memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], + le16_to_cpu(action_frame->len)); + + inff_dbg(TRACE, "Action frame, cookie=%lld, len=%d, channel=%d\n", + *cookie, le16_to_cpu(action_frame->len), + le32_to_cpu(af_params->channel)); + + ack = inff_p2p_send_action_frame(cfg, cfg_to_ndev(cfg), + af_params, vif, chan); + + cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack, + GFP_KERNEL); +free_af_params: + kfree(af_params); + } else if (ieee80211_is_auth(mgmt->frame_control)) { + reinit_completion(&vif->mgmt_tx); + clear_bit(INFF_MGMT_TX_ACK, &vif->mgmt_tx_status); + clear_bit(INFF_MGMT_TX_NOACK, &vif->mgmt_tx_status); + clear_bit(INFF_MGMT_TX_OFF_CHAN_COMPLETED, + &vif->mgmt_tx_status); + + mf_params_len = offsetof(struct inff_mf_params_le, data) + + (len - DOT11_MGMT_HDR_LEN); + mf_params = kzalloc(mf_params_len, GFP_KERNEL); + if (!mf_params) { + err = -ENOMEM; + goto exit; + } + + mf_params->dwell_time = cpu_to_le32(MGMT_AUTH_FRAME_DWELL_TIME); + mf_params->len = cpu_to_le16(len - DOT11_MGMT_HDR_LEN); + mf_params->frame_control = mgmt->frame_control; + + if (chan) { + hw_ch = cpu_to_le16(chan->hw_value); + } else { + err = inff_fil_cmd_data_get(vif->ifp, INFF_C_GET_CHANNEL, + &hw_ch, sizeof(hw_ch)); + if (err) { + iphy_err(drvr, "unable to get current hw channel\n"); + goto free_mf_params; + } + } + mf_params->channel = hw_ch; + + memcpy(&mf_params->da[0], &mgmt->da[0], ETH_ALEN); + memcpy(&mf_params->bssid[0], &mgmt->bssid[0], ETH_ALEN); + *cookie = (u64)mf_params->data; + mf_params->packet_id = cpu_to_le32(*cookie); + 
unsafe_memcpy(mf_params->data, &buf[DOT11_MGMT_HDR_LEN], + le16_to_cpu(mf_params->len), /* alloc enough buf*/); + + inff_dbg(TRACE, "Auth frame, cookie=%d, fc=%04x, len=%d, channel=%d\n", + le32_to_cpu(mf_params->packet_id), + le16_to_cpu(mf_params->frame_control), + le16_to_cpu(mf_params->len), + le16_to_cpu(mf_params->channel)); + + vif->mgmt_tx_id = le32_to_cpu(mf_params->packet_id); + set_bit(INFF_MGMT_TX_SEND_FRAME, &vif->mgmt_tx_status); + + err = inff_fil_bsscfg_data_set(vif->ifp, "mgmt_frame", + mf_params, mf_params_len); + if (err) { + iphy_err(drvr, "Failed to send Auth frame: err=%d\n", + err); + goto tx_status; + } + + timeout = + wait_for_completion_timeout(&vif->mgmt_tx, + MGMT_AUTH_FRAME_WAIT_TIME); + if (test_bit(INFF_MGMT_TX_ACK, &vif->mgmt_tx_status)) { + inff_dbg(TRACE, "TX Auth frame operation is success\n"); + ack = true; + } else { + iphy_err(drvr, "TX Auth frame operation is failed: status=%ld)\n", + vif->mgmt_tx_status); + } + +tx_status: + cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack, + GFP_KERNEL); +free_mf_params: + kfree(mf_params); + } else { + inff_dbg(TRACE, "Unhandled, fc=%04x!!\n", mgmt->frame_control); + inff_dbg_hex_dump(true, buf, len, "payload, len=%zu\n", len); + } + +exit: + return err; +} + +static int inff_cfg80211_set_cqm_rssi_range_config(struct wiphy *wiphy, + struct net_device *ndev, + s32 rssi_low, s32 rssi_high) +{ + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + int err = 0; + + inff_dbg(TRACE, "low=%d high=%d", rssi_low, rssi_high); + + ifp = netdev_priv(ndev); + vif = ifp->vif; + + if (rssi_low != vif->cqm_rssi_low || rssi_high != vif->cqm_rssi_high) { + /* The firmware will send an event when the RSSI is less than or + * equal to a configured level and the previous RSSI event was + * less than or equal to a different level. Set a third level + * so that we also detect the transition from rssi <= rssi_high + * to rssi > rssi_high. 
+ */ + struct inff_rssi_event_le config = { + .rate_limit_msec = cpu_to_le32(0), + .rssi_level_num = 3, + .rssi_levels = { + clamp_val(rssi_low, S8_MIN, S8_MAX - 2), + clamp_val(rssi_high, S8_MIN + 1, S8_MAX - 1), + S8_MAX, + }, + }; + + err = inff_fil_iovar_data_set(ifp, "rssi_event", &config, + sizeof(config)); + if (err) { + err = -EINVAL; + } else { + vif->cqm_rssi_low = rssi_low; + vif->cqm_rssi_high = rssi_high; + } + } + + return err; +} + +static int +inff_cfg80211_cancel_remain_on_channel(struct wiphy *wiphy, + struct wireless_dev *wdev, + u64 cookie) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_pub *drvr = cfg->pub; + struct inff_cfg80211_vif *vif; + int err = 0; + + inff_dbg(TRACE, "Enter p2p listen cancel\n"); + + vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + if (!vif) { + iphy_err(drvr, "No p2p device available for probe response\n"); + err = -ENODEV; + goto exit; + } + inff_p2p_cancel_remain_on_channel(vif->ifp); +exit: + return err; +} + +int inff_cfg80211_get_channel(struct wiphy *wiphy, + struct wireless_dev *wdev, + unsigned int link_id, + struct cfg80211_chan_def *chandef) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_cfg80211_vif *vif = wdev_to_vif(wdev); + struct net_device *ndev = wdev->netdev; + struct inff_pub *drvr = cfg->pub; + struct inff_chan ch; + enum nl80211_band band = 0; + enum nl80211_chan_width width = 0; + u32 chanspec; + int freq, err; + + if (!ndev || drvr->bus_if->state != INFF_BUS_UP) + return -ENODEV; + if (!check_vif_up(vif)) + return -EIO; + err = inff_fil_iovar_int_get(netdev_priv(ndev), "chanspec", &chanspec); + if (err) { + iphy_err(drvr, "chanspec failed (%d)\n", err); + return err; + } + + ch.chspec = chanspec; + cfg->d11inf.decchspec(&ch); + band = inff_d11_chan_band_to_nl80211(ch.band); + + switch (ch.bw) { + case INFF_CHAN_BW_80: + width = NL80211_CHAN_WIDTH_80; + break; + case INFF_CHAN_BW_40: + width = NL80211_CHAN_WIDTH_40; + break; + case 
INFF_CHAN_BW_20: + width = NL80211_CHAN_WIDTH_20; + break; + case INFF_CHAN_BW_80P80: + width = NL80211_CHAN_WIDTH_80P80; + break; + case INFF_CHAN_BW_160: + width = NL80211_CHAN_WIDTH_160; + break; + } + + freq = ieee80211_channel_to_frequency(ch.control_ch_num, band); + if (!freq) + return -EINVAL; + chandef->chan = ieee80211_get_channel(wiphy, freq); + if (!chandef->chan) + return -EINVAL; + chandef->width = width; + chandef->center_freq1 = ieee80211_channel_to_frequency(ch.chnum, band); + chandef->center_freq2 = 0; + + return 0; +} + +static int inff_cfg80211_crit_proto_start(struct wiphy *wiphy, + struct wireless_dev *wdev, + enum nl80211_crit_proto_id proto, + u16 duration) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_cfg80211_vif *vif; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + + /* only DHCP support for now */ + if (proto != NL80211_CRIT_PROTO_DHCP) + return -EINVAL; + + /* suppress and abort scanning */ + set_bit(INFF_SCAN_STATUS_SUPPRESS, &cfg->scan_status); + inff_abort_scanning(cfg); + + return inff_btcoex_set_mode(vif, INFF_BTCOEX_DISABLED, duration); +} + +static void inff_cfg80211_crit_proto_stop(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_cfg80211_vif *vif; + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + + inff_btcoex_set_mode(vif, INFF_BTCOEX_ENABLED, 0); + clear_bit(INFF_SCAN_STATUS_SUPPRESS, &cfg->scan_status); +} + +static s32 +inff_notify_tdls_peer_event(struct inff_if *ifp, + const struct inff_event_msg *e, void *data) +{ + switch (e->reason) { + case INFF_E_REASON_TDLS_PEER_DISCOVERED: + inff_dbg(TRACE, "TDLS Peer Discovered\n"); + break; + case INFF_E_REASON_TDLS_PEER_CONNECTED: + inff_dbg(TRACE, "TDLS Peer Connected\n"); + inff_proto_add_tdls_peer(ifp->drvr, ifp->ifidx, (u8 *)e->addr); + break; + case INFF_E_REASON_TDLS_PEER_DISCONNECTED: + inff_dbg(TRACE, "TDLS Peer Disconnected\n"); + 
inff_proto_delete_peer(ifp->drvr, ifp->ifidx, (u8 *)e->addr); + break; + } + + return 0; +} + +static int inff_convert_nl80211_tdls_oper(enum nl80211_tdls_operation oper) +{ + int ret; + + switch (oper) { + case NL80211_TDLS_DISCOVERY_REQ: + ret = INFF_TDLS_MANUAL_EP_DISCOVERY; + break; + case NL80211_TDLS_SETUP: + ret = INFF_TDLS_MANUAL_EP_CREATE; + break; + case NL80211_TDLS_TEARDOWN: + ret = INFF_TDLS_MANUAL_EP_DELETE; + break; + default: + inff_err("unsupported operation: %d\n", oper); + ret = -EOPNOTSUPP; + } + return ret; +} + +static int inff_cfg80211_tdls_oper(struct wiphy *wiphy, + struct net_device *ndev, const u8 *peer, + enum nl80211_tdls_operation oper) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_pub *drvr = cfg->pub; + struct inff_if *ifp; + struct inff_tdls_iovar_le info; + int ret = 0; + + ret = inff_convert_nl80211_tdls_oper(oper); + if (ret < 0) + return ret; + + ifp = netdev_priv(ndev); + memset(&info, 0, sizeof(info)); + info.mode = (u8)ret; + if (peer) + memcpy(info.ea, peer, ETH_ALEN); + + ret = inff_fil_iovar_data_set(ifp, "tdls_endpoint", + &info, sizeof(info)); + if (ret < 0) + iphy_err(drvr, "tdls_endpoint iovar failed: ret=%d\n", ret); + + return ret; +} + +static int +inff_cfg80211_update_conn_params(struct wiphy *wiphy, + struct net_device *ndev, + struct cfg80211_connect_params *sme, + u32 changed) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_pub *drvr = cfg->pub; + struct inff_if *ifp; + int err; + + if (!(changed & UPDATE_ASSOC_IES)) + return 0; + + ifp = netdev_priv(ndev); + err = inff_vif_set_mgmt_ie(ifp->vif, INFF_VNDR_IE_ASSOCREQ_FLAG, + sme->ie, sme->ie_len); + if (err) + iphy_err(drvr, "Set Assoc REQ IE Failed\n"); + else + inff_dbg(TRACE, "Applied Vndr IEs for Assoc request\n"); + + return err; +} + +static int inff_cfg80211_set_pmk(struct wiphy *wiphy, struct net_device *dev, + const struct cfg80211_pmk_conf *conf) +{ + struct inff_if *ifp; + struct inff_pub *drvr; + 
	int ret;

	inff_dbg(TRACE, "enter\n");

	/* expect using firmware supplicant for 1X */
	ifp = netdev_priv(dev);
	drvr = ifp->drvr;
	/* Only meaningful when the firmware supplicant owns the key
	 * exchange (1X or roam offload) or FT/OKC roaming is in use.
	 */
	if (WARN_ON(ifp->vif->profile.use_fwsup != INFF_PROFILE_FWSUP_1X &&
		    ifp->vif->profile.use_fwsup != INFF_PROFILE_FWSUP_ROAM &&
		    !ifp->vif->profile.is_ft &&
		    !ifp->vif->profile.is_okc))
		return -EINVAL;

	if (conf->pmk_len > INFF_WSEC_MAX_PMK_LEN)
		return -ERANGE;

	if (ifp->vif->profile.is_okc) {
		/* For OKC additionally seed the firmware OKC PMK info; a
		 * failure is logged but does not abort setting the regular
		 * PMK below (best effort).
		 */
		ret = inff_fil_iovar_data_set(ifp, "okc_info_pmk", conf->pmk,
					      conf->pmk_len);
		if (ret < 0)
			iphy_err(drvr, "okc_info_pmk iovar failed: ret=%d\n",
				 ret);
	}

	return inff_set_pmk(ifp, conf->pmk, conf->pmk_len);
}

/* cfg80211 .del_pmk callback: clear the previously configured PMK.
 * @aa: authenticator address (not used beyond the cfg80211 contract).
 */
static int inff_cfg80211_del_pmk(struct wiphy *wiphy, struct net_device *dev,
				 const u8 *aa)
{
	struct inff_if *ifp;

	inff_dbg(TRACE, "enter\n");
	ifp = netdev_priv(dev);
	if (WARN_ON(ifp->vif->profile.use_fwsup != INFF_PROFILE_FWSUP_1X))
		return -EINVAL;

	return inff_set_pmk(ifp, NULL, 0);
}

/* cfg80211 .change_bss callback: propagates the ap_isolate setting to
 * firmware and caches whether the firmware itself forwards packets
 * between associated stations.
 */
static int
inff_cfg80211_change_bss(struct wiphy *wiphy, struct net_device *dev,
			 struct bss_parameters *params)
{
	struct inff_if *ifp;
	int ret = 0;
	u32 ap_isolate, val;

	inff_dbg(TRACE, "Enter\n");
	ifp = netdev_priv(dev);
	if (params->ap_isolate >= 0) {	/* negative means "no change" */
		ap_isolate = (u32)params->ap_isolate;
		ret = inff_fil_iovar_int_set(ifp, "ap_isolate", ap_isolate);
		if (ret < 0)
			inff_err("ap_isolate iovar failed: ret=%d\n", ret);
	}

	/* Get ap_isolate value from firmware to determine whether fmac
	 * driver supports packet forwarding.
	 */
	if (inff_fil_iovar_int_get(ifp, "ap_isolate", &val) == 0) {
		ifp->fmac_pkt_fwd_en =
			((params->ap_isolate == 0) && (val == 1)) ?
			true : false;
	} else {
		/* NOTE(review): 'ret' here still holds the result of the
		 * earlier set (or 0 when no set was attempted), not the
		 * failing get — the logged value may be misleading.
		 */
		inff_err("get ap_isolate iovar failed: ret=%d\n", ret);
		ifp->fmac_pkt_fwd_en = false;
	}

	return ret;
}

/* cfg80211 .external_auth callback: report the outcome of a
 * userspace-driven authentication (e.g. SAE) back to firmware via the
 * "auth_status" iovar, and cache the derived PMKID when one is given.
 */
static int
inff_cfg80211_external_auth(struct wiphy *wiphy, struct net_device *dev,
			    struct cfg80211_external_auth_params *params)
{
	struct inff_if *ifp;
	struct inff_pub *drvr;
	struct inff_auth_req_status_le auth_status;
	int ret = 0;
	struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy);

	inff_dbg(TRACE, "Enter\n");

	ifp = netdev_priv(dev);
	drvr = ifp->drvr;
	if (params->status == WLAN_STATUS_SUCCESS) {
		auth_status.flags = cpu_to_le16(INFF_EXTAUTH_SUCCESS);
	} else {
		iphy_err(drvr, "External authentication failed: status=%d\n",
			 params->status);
		auth_status.flags = cpu_to_le16(INFF_EXTAUTH_FAIL);
	}

	memcpy(auth_status.peer_mac, params->bssid, ETH_ALEN);
	/* Clamp the SSID length to what the firmware structure can carry */
	auth_status.ssid_len = cpu_to_le32(min_t(u8, params->ssid.ssid_len,
						 IEEE80211_MAX_SSID_LEN));
	memcpy(auth_status.ssid, params->ssid.ssid, auth_status.ssid_len);
	memset(auth_status.pmkid, 0, WLAN_PMKID_LEN);
	if (params->pmkid)
		memcpy(auth_status.pmkid, params->pmkid, WLAN_PMKID_LEN);

	ret = inff_fil_iovar_data_set(ifp, "auth_status", &auth_status,
				      sizeof(auth_status));
	if (ret < 0)
		iphy_err(drvr, "auth_status iovar failed: ret=%d\n", ret);

	if (params->pmkid) {
		/* Also add the PMKID to the local/firmware PMKSA cache */
		ret = inff_update_pmksa(cfg,
					ifp,
					params->bssid,
					params->pmkid,
					PMKSA_SET);
		if (ret < 0) {
			iphy_err(drvr,
				 "PMKSA_SET inff_update_pmksa failed: ret=%d\n",
				 ret);
		}
	}

	return ret;
}

/* cfg80211 .set_bitrate_mask callback. Only HE MCS masks are applied;
 * returns -EOPNOTSUPP when the firmware reports HE disabled.
 */
static int
inff_cfg80211_set_bitrate(struct wiphy *wiphy, struct net_device *ndev,
			  unsigned int link_id, const u8 *addr,
			  const struct cfg80211_bitrate_mask *mask)
{
	struct inff_if *ifp = netdev_priv(ndev);
	s32 ret = TIME_OK;
	u8 he, band;

	ret = inff_he_get_enable(ifp, &he, sizeof(he));
	if (!ret && !he) {
		inff_dbg(INFO, "Only HE mode rate setting supported\n");
		return -EOPNOTSUPP;
	}

	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		/* Only 2.4/5/6 GHz bands carry HE rate masks here */
		if (band != NL80211_BAND_2GHZ && band != NL80211_BAND_5GHZ &&
		    band != NL80211_BAND_6GHZ) {
			continue;
		}

		/* Skip setting HE rates if legacy rate set is called from
		 * userspace. Also if any one of 2.4, 5 or 6GHz is being
		 * called then other two will have an invalid he mask of
		 * 0xFFF so skip setting he rates for other two bands.
		 */
		if (!mask->control[band].he_mcs[0] ||
		    mask->control[band].he_mcs[0] == 0xFFF)
			continue;

		ret = inff_he_set_bitrate(ifp, mask, band);
		if (ret)
			break;
	}

	return ret;
}

/* cfg80211 .set_cqm_rssi_config callback: program the firmware
 * "rssi_event" levels so it raises an event when RSSI crosses
 * @rssi_thold; a threshold of 0 disables reporting. @rssi_hyst is not
 * forwarded to the firmware.
 */
static int
inff_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev,
				  s32 rssi_thold, u32 rssi_hyst)
{
	struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
	struct inff_if *ifp;
	struct wl_rssi_event rssi;
	int err = 0;

	ifp = netdev_priv(dev);
	/* Nothing to do when the threshold is unchanged */
	if (rssi_thold == cfg->cqm_info.rssi_threshold)
		return err;

	if (rssi_thold == 0) {
		/* Disable: no levels, new-version event structure */
		rssi.rate_limit_msec = cpu_to_le32(0);
		rssi.num_rssi_levels = 0;
		rssi.version = WL_RSSI_EVENT_VERSION_NEW;
	} else {
		/* Single threshold bracketed by S8_MIN/S8_MAX so any
		 * crossing of rssi_thold generates an event.
		 */
		rssi.rate_limit_msec = cpu_to_le32(0);
		rssi.num_rssi_levels = 3;
		rssi.rssi_levels[0] = S8_MIN;
		rssi.rssi_levels[1] = rssi_thold;
		rssi.rssi_levels[2] = S8_MAX;
		rssi.version = WL_RSSI_EVENT_VERSION_OLD;
	}

	err = inff_fil_iovar_data_set(ifp, "rssi_event", &rssi, sizeof(rssi));
	if (err < 0) {
		inff_err("set rssi_event iovar failed (%d)\n", err);
	} else {
		/* Cache the applied config for the unchanged-check above */
		cfg->cqm_info.enable = rssi_thold ?
1 : 0; + cfg->cqm_info.rssi_threshold = rssi_thold; + } + + inff_dbg(TRACE, "enable = %d, rssi_threshold = %d\n", + cfg->cqm_info.enable, cfg->cqm_info.rssi_threshold); + return err; +} + +static int +inff_cfg80211_update_owe_info(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_update_owe_info *owe_info) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp; + int err = 0; + struct inff_owe_info_buf *owe_info_buf; + u8 *curr_ie_buf; + struct parsed_extension_ies owe_ecdh_ie; + struct parsed_ext_ie_info *extie_info; + u32 del_add_ie_buf_len = 0; + u32 total_owe_info_len = 0; + u32 pmkid_offset = 0; + struct inff_pub *drvr; + + ifp = netdev_priv(dev); + if (owe_info) { + owe_info_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); + if (!owe_info_buf) + return -ENOMEM; + + curr_ie_buf = owe_info_buf->ecdh_ie_info; + + memcpy(owe_info_buf->peer_mac, owe_info->peer, ETH_ALEN); + owe_info_buf->status_le16 = cpu_to_le16(owe_info->status); + + owe_info_buf->with_ecdh = false; + if (owe_info->ie) { + drvr = ifp->drvr; + if (inff_has_pmkid(owe_info->ie, + owe_info->ie_len, + &pmkid_offset)) { + err = inff_update_pmksa(cfg, + ifp, + owe_info_buf->peer_mac, + owe_info->ie + pmkid_offset, + PMKSA_SET); + if (err < 0) { + iphy_err(drvr, + "PMKSA_SET inff_update_pmksa failed: ret=%d\n", + err); + return err; + } + + owe_info_buf->with_pmkid = true; + memcpy(owe_info_buf->pmkid, + owe_info->ie + pmkid_offset, + WLAN_PMKID_LEN); + } + + inff_parse_extension_ies(owe_info->ie, owe_info->ie_len, &owe_ecdh_ie); + if (owe_ecdh_ie.count > 1) { + inff_err("more ecdh_cnt found in info: %d\n", owe_ecdh_ie.count); + return -EINVAL; + } + + if (owe_ecdh_ie.count == 1) { + owe_info_buf->with_ecdh = true; + extie_info = &owe_ecdh_ie.ie_info[0]; + + del_add_ie_buf_len = inff_vndr_ie(curr_ie_buf, + INFF_VNDR_IE_ASSOCRSP_FLAG | + INFF_VNDR_IE_CUSTOM_FLAG, + extie_info->ie_ptr, + extie_info->ie_len, + "add"); + } + } + + total_owe_info_len = sizeof(struct 
inff_owe_info_buf) + del_add_ie_buf_len; + err = inff_fil_bsscfg_data_set(ifp, "owe_info", owe_info_buf, + total_owe_info_len); + + kfree(owe_info_buf); + } + + if (err) + inff_err("update owe_info error :%d\n", err); + + return err; +} + +static struct cfg80211_ops inff_cfg80211_ops = { + .add_virtual_intf = inff_cfg80211_add_iface, + .del_virtual_intf = inff_cfg80211_del_iface, + .change_virtual_intf = inff_cfg80211_change_iface, + .scan = inff_cfg80211_scan, + .set_wiphy_params = inff_cfg80211_set_wiphy_params, + .join_ibss = inff_cfg80211_join_ibss, + .leave_ibss = inff_cfg80211_leave_ibss, + .get_station = inff_cfg80211_get_station, + .dump_station = inff_cfg80211_dump_station, + .set_tx_power = inff_cfg80211_set_tx_power, + .get_tx_power = inff_cfg80211_get_tx_power, + .add_key = inff_cfg80211_add_key, + .del_key = inff_cfg80211_del_key, + .get_key = inff_cfg80211_get_key, + .set_default_key = inff_cfg80211_config_default_key, + .set_default_mgmt_key = inff_cfg80211_config_default_mgmt_key, + .set_power_mgmt = inff_cfg80211_set_power_mgmt, + .connect = inff_cfg80211_connect, + .disconnect = inff_cfg80211_disconnect, + .suspend = inff_cfg80211_suspend, + .resume = inff_cfg80211_resume, + .set_pmksa = inff_cfg80211_set_pmksa, + .del_pmksa = inff_cfg80211_del_pmksa, + .flush_pmksa = inff_cfg80211_flush_pmksa, + .start_ap = inff_cfg80211_start_ap, + .stop_ap = inff_cfg80211_stop_ap, + .change_beacon = inff_cfg80211_change_beacon, + .set_bitrate_mask = inff_cfg80211_set_bitrate, + .del_station = inff_cfg80211_del_station, + .change_station = inff_cfg80211_change_station, + .sched_scan_start = inff_cfg80211_sched_scan_start, + .sched_scan_stop = inff_cfg80211_sched_scan_stop, + .update_mgmt_frame_registrations = + inff_cfg80211_update_mgmt_frame_registrations, + .mgmt_tx = inff_cfg80211_mgmt_tx, + .set_cqm_rssi_range_config = inff_cfg80211_set_cqm_rssi_range_config, + .remain_on_channel = inff_p2p_remain_on_channel, + .cancel_remain_on_channel = 
inff_cfg80211_cancel_remain_on_channel, + .get_channel = inff_cfg80211_get_channel, + .start_p2p_device = inff_p2p_start_device, + .stop_p2p_device = inff_p2p_stop_device, + .crit_proto_start = inff_cfg80211_crit_proto_start, + .crit_proto_stop = inff_cfg80211_crit_proto_stop, + .tdls_oper = inff_cfg80211_tdls_oper, + .update_connect_params = inff_cfg80211_update_conn_params, + .set_pmk = inff_cfg80211_set_pmk, + .del_pmk = inff_cfg80211_del_pmk, + .change_bss = inff_cfg80211_change_bss, + .external_auth = inff_cfg80211_external_auth, + .set_cqm_rssi_config = inff_cfg80211_set_cqm_rssi_config, + .update_owe_info = inff_cfg80211_update_owe_info, + .start_pmsr = inff_cfg80211_start_pmsr, + .abort_pmsr = inff_cfg80211_abort_pmsr, + .start_wlan_sense = inff_wlan_sense_start, + .stop_wlan_sense = inff_wlan_sense_stop, +}; + +struct cfg80211_ops *inff_cfg80211_get_ops(struct inff_mp_device *settings) +{ + struct cfg80211_ops *ops; + + ops = kmemdup(&inff_cfg80211_ops, sizeof(inff_cfg80211_ops), + GFP_KERNEL); + + if (ops && settings->roamoff) + ops->update_connect_params = NULL; + + return ops; +} + +void inff_cfg80211_free_netdev(struct net_device *ndev) +{ + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + + ifp = netdev_priv(ndev); + vif = ifp->vif; + + if (vif) { + inff_free_vif(vif); + ifp->vif = NULL; + } +} + +bool inff_is_linkup(struct inff_cfg80211_vif *vif, + const struct inff_event_msg *e) +{ + u32 event = e->event_code; + u32 status = e->status; + + if ((vif->profile.use_fwsup == INFF_PROFILE_FWSUP_PSK || + vif->profile.use_fwsup == INFF_PROFILE_FWSUP_SAE) && + event == INFF_E_PSK_SUP && + status == INFF_E_STATUS_FWSUP_COMPLETED) + set_bit(INFF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state); + if (event == INFF_E_LINK && status == INFF_E_STATUS_SUCCESS && + (e->flags & INFF_EVENT_MSG_LINK)) { + inff_dbg(CONN, "Processing set ssid\n"); + memcpy(vif->profile.bssid, e->addr, ETH_ALEN); + if (vif->profile.use_fwsup != INFF_PROFILE_FWSUP_PSK && + 
vif->profile.use_fwsup != INFF_PROFILE_FWSUP_SAE) + return true; + + set_bit(INFF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state); + } + + if (test_bit(INFF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state) && + test_bit(INFF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state)) { + clear_bit(INFF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state); + clear_bit(INFF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state); + return true; + } + return false; +} + +bool inff_is_linkdown(struct inff_cfg80211_vif *vif, + const struct inff_event_msg *e) +{ + u32 event = e->event_code; + u16 flags = e->flags; + u32 status = e->status; + + if (event == INFF_E_DEAUTH || event == INFF_E_DEAUTH_IND || + event == INFF_E_DISASSOC_IND || + (event == INFF_E_LINK && !(flags & INFF_EVENT_MSG_LINK)) || + (event == INFF_E_SET_SSID && status != INFF_E_STATUS_SUCCESS)) { + inff_dbg(CONN, "Processing link down\n"); + clear_bit(INFF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state); + clear_bit(INFF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state); + return true; + } + return false; +} + +bool inff_is_nonetwork(struct inff_cfg80211_info *cfg, + const struct inff_event_msg *e) +{ + u32 event = e->event_code; + u32 status = e->status; + + if (event == INFF_E_LINK && status == INFF_E_STATUS_NO_NETWORKS) { + inff_dbg(CONN, "Processing Link %s & no network found\n", + e->flags & INFF_EVENT_MSG_LINK ? "up" : "down"); + return true; + } + + if (event == INFF_E_SET_SSID && status != INFF_E_STATUS_SUCCESS) { + inff_dbg(CONN, "Processing connecting & no network found\n"); + return true; + } + + if (event == INFF_E_PSK_SUP && + status != INFF_E_STATUS_FWSUP_COMPLETED) { + inff_dbg(CONN, "Processing failed supplicant state: %u\n", + status); + return true; + } + + return false; +} + +u8 inff_map_prio_to_prec(void *config, u8 prio) +{ + struct inff_cfg80211_info *cfg = (struct inff_cfg80211_info *)config; + + if (!cfg) + return (prio == PRIO_8021D_NONE || prio == PRIO_8021D_BE) ? 
		(prio ^ 2) : prio;

	/* For those AC(s) with ACM flag set to 1, convert its 4-level priority
	 * to an 8-level precedence which is the same as BE's
	 */
	if (prio > PRIO_8021D_EE &&
	    cfg->ac_priority[prio] == cfg->ac_priority[PRIO_8021D_BE])
		return cfg->ac_priority[prio] * 2;

	/* Conversion of 4-level priority to 8-level precedence */
	if (prio == PRIO_8021D_BE || prio == PRIO_8021D_BK ||
	    prio == PRIO_8021D_CL || prio == PRIO_8021D_VO)
		return cfg->ac_priority[prio] * 2;
	else
		return cfg->ac_priority[prio] * 2 + 1;
}

/* Map an 802.1d priority (0-7) to a WLAN AC index (0-3) using the
 * table held in the cfg context; falls back to returning the priority
 * itself when no context is available.
 */
u8 inff_map_prio_to_aci(void *config, u8 prio)
{
	/* Prio here refers to the 802.1d priority in range of 0 to 7.
	 * ACI here refers to the WLAN AC Index in range of 0 to 3.
	 * This function will return ACI corresponding to input prio.
	 */
	struct inff_cfg80211_info *cfg = (struct inff_cfg80211_info *)config;

	if (cfg)
		return cfg->ac_priority[prio];

	return prio;
}

/* Reset the 802.1d -> AC mapping table to the standard WMM defaults. */
static void inff_init_wmm_prio(u8 *priority)
{
	/* Initialize AC priority array to default
	 * 802.1d priority as per following table:
	 * 802.1d prio 0,3 maps to BE
	 * 802.1d prio 1,2 maps to BK
	 * 802.1d prio 4,5 maps to VI
	 * 802.1d prio 6,7 maps to VO
	 */
	priority[0] = INFF_FWS_FIFO_AC_BE;
	priority[3] = INFF_FWS_FIFO_AC_BE;
	priority[1] = INFF_FWS_FIFO_AC_BK;
	priority[2] = INFF_FWS_FIFO_AC_BK;
	priority[4] = INFF_FWS_FIFO_AC_VI;
	priority[5] = INFF_FWS_FIFO_AC_VI;
	priority[6] = INFF_FWS_FIFO_AC_VO;
	priority[7] = INFF_FWS_FIFO_AC_VO;
}

/* Re-derive the 802.1d -> AC priority table from the EDCF AC
 * parameters (AIFSN/ECWmin/ECWmax/ACM) advertised by the AP. An AC
 * with ACM set (admission control required) is demoted to BE's
 * ranking, since this STA cannot use it directly.
 */
void inff_wifi_prioritize_acparams(const
	struct inff_cfg80211_edcf_acparam *acp, u8 *priority)
{
	u8 aci;
	u8 aifsn;
	u8 ecwmin;
	u8 ecwmax;
	u8 acm;
	u8 ranking_basis[EDCF_AC_COUNT];
	u8 aci_prio[EDCF_AC_COUNT]; /* AC_BE, AC_BK, AC_VI, AC_VO */
	u8 index;

	for (aci = 0; aci < EDCF_AC_COUNT; aci++, acp++) {
		aifsn = acp->ACI & EDCF_AIFSN_MASK;
		acm = (acp->ACI & EDCF_ACM_MASK) ?
1 : 0; + ecwmin = acp->ECW & EDCF_ECWMIN_MASK; + ecwmax = (acp->ECW & EDCF_ECWMAX_MASK) >> EDCF_ECWMAX_SHIFT; + inff_dbg(CONN, "ACI %d aifsn %d acm %d ecwmin %d ecwmax %d\n", + aci, aifsn, acm, ecwmin, ecwmax); + /* Default AC_VO will be the lowest ranking value */ + ranking_basis[aci] = aifsn + ecwmin + ecwmax; + /* Initialise priority starting at 0 (AC_BE) */ + aci_prio[aci] = 0; + + /* If ACM is set, STA can't use this AC as per 802.11. + * Change the ranking to BE + */ + if (aci != AC_BE && aci != AC_BK && acm == 1) + ranking_basis[aci] = ranking_basis[AC_BE]; + } + + /* Ranking method which works for AC priority + * swapping when values for cwmin, cwmax and aifsn are varied + * Compare each aci_prio against each other aci_prio + */ + for (aci = 0; aci < EDCF_AC_COUNT; aci++) { + for (index = 0; index < EDCF_AC_COUNT; index++) { + if (index != aci) { + /* Smaller ranking value has higher priority, + * so increment priority for each ACI which has + * a higher ranking value + */ + if (ranking_basis[aci] < ranking_basis[index]) + aci_prio[aci]++; + } + } + } + + /* By now, aci_prio[] will be in range of 0 to 3. + * Use ACI prio to get the new priority value for + * each 802.1d traffic type, in this range. 
	 */
	if (!(aci_prio[AC_BE] == aci_prio[AC_BK] &&
	      aci_prio[AC_BK] == aci_prio[AC_VI] &&
	      aci_prio[AC_VI] == aci_prio[AC_VO])) {
		/* 802.1d 0,3 maps to BE */
		priority[0] = aci_prio[AC_BE];
		priority[3] = aci_prio[AC_BE];

		/* 802.1d 1,2 maps to BK */
		priority[1] = aci_prio[AC_BK];
		priority[2] = aci_prio[AC_BK];

		/* 802.1d 4,5 maps to VI */
		priority[4] = aci_prio[AC_VI];
		priority[5] = aci_prio[AC_VI];

		/* 802.1d 6,7 maps to VO */
		priority[6] = aci_prio[AC_VO];
		priority[7] = aci_prio[AC_VO];
	} else {
		/* All ACs ranked equal: initialize to default priority */
		inff_init_wmm_prio(priority);
	}

	inff_dbg(CONN, "Adj prio BE 0->%d, BK 1->%d, BK 2->%d, BE 3->%d\n",
		 priority[0], priority[1], priority[2], priority[3]);

	inff_dbg(CONN, "Adj prio VI 4->%d, VI 5->%d, VO 6->%d, VO 7->%d\n",
		 priority[4], priority[5], priority[6], priority[7]);
}

/* Handle a firmware roam-complete event: refresh the association IEs
 * and BSS info, resolve the new operating channel, and report the roam
 * (and, for firmware-supplicant/FT/OKC profiles, port authorization)
 * to cfg80211.
 */
s32
inff_bss_roaming_done(struct inff_cfg80211_info *cfg,
		      struct net_device *ndev,
		      const struct inff_event_msg *e)
{
	struct inff_if *ifp = netdev_priv(ndev);
	struct inff_cfg80211_profile *profile = &ifp->vif->profile;
	struct inff_cfg80211_connect_info *conn_info = cfg_to_conn(cfg);
	struct wiphy *wiphy = cfg_to_wiphy(cfg);
	struct ieee80211_channel *notify_channel = NULL;
	struct ieee80211_supported_band *band;
	struct inff_bss_info_le *bi;
	struct inff_chan ch;
	struct cfg80211_roam_info roam_info = {};
	u32 freq;
	s32 err = 0;

	inff_dbg(TRACE, "Enter\n");

	inff_get_assoc_ies(cfg, ifp);
	memcpy(profile->bssid, e->addr, ETH_ALEN);
	bi = (struct inff_bss_info_le *)inff_update_bss_info(cfg, ifp);
	if (!bi)
		goto done;

	ch.chspec = le16_to_cpu(bi->chanspec);
	cfg->d11inf.decchspec(&ch);

	band = wiphy->bands[inff_d11_chan_band_to_nl80211(ch.band)];
	freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band);
	if (!freq)
		err = -EINVAL;
	notify_channel = ieee80211_get_channel(wiphy, freq);
	if (!notify_channel)
		err = -EINVAL;

done:
	/* NOTE(review): when the BSS info or channel lookup fails above,
	 * err is set but the roam is still reported below with a NULL
	 * channel — confirm this is intended.
	 */
	roam_info.links[0].channel
= notify_channel; + roam_info.links[0].bssid = profile->bssid; + roam_info.req_ie = conn_info->req_ie; + roam_info.req_ie_len = conn_info->req_ie_len; + roam_info.resp_ie = conn_info->resp_ie; + roam_info.resp_ie_len = conn_info->resp_ie_len; + + cfg80211_roamed(ndev, &roam_info, GFP_KERNEL); + inff_dbg(CONN, "Report roaming result\n"); + + if (((profile->use_fwsup == INFF_PROFILE_FWSUP_1X || + profile->use_fwsup == INFF_PROFILE_FWSUP_ROAM) && + (inff_has_pmkid(roam_info.req_ie, roam_info.req_ie_len, NULL) || + profile->is_ft || profile->is_okc)) || + profile->use_fwsup == INFF_PROFILE_FWSUP_PSK || + profile->use_fwsup == INFF_PROFILE_FWSUP_SAE) { + cfg80211_port_authorized(ndev, profile->bssid, NULL, 0, GFP_KERNEL); + inff_dbg(CONN, "Report port authorized\n"); + } + + clear_bit(INFF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state); + set_bit(INFF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state); + inff_dbg(TRACE, "Exit\n"); + return err; +} + +s32 +inff_bss_connect_done(struct inff_cfg80211_info *cfg, + struct net_device *ndev, const struct inff_event_msg *e, + bool completed) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_cfg80211_profile *profile = &ifp->vif->profile; + struct inff_cfg80211_connect_info *conn_info = cfg_to_conn(cfg); + struct cfg80211_connect_resp_params conn_params; + + inff_dbg(TRACE, "Enter cfg->pfn_enable %d\n", cfg->pfn_enable); + + if (test_and_clear_bit(INFF_VIF_STATUS_CONNECTING, + &ifp->vif->sme_state)) { + memset(&conn_params, 0, sizeof(conn_params)); + if (completed) { + inff_get_assoc_ies(cfg, ifp); + inff_update_bss_info(cfg, ifp); + set_bit(INFF_VIF_STATUS_CONNECTED, + &ifp->vif->sme_state); + conn_params.status = WLAN_STATUS_SUCCESS; + } else { + clear_bit(INFF_VIF_STATUS_EAP_SUCCESS, + &ifp->vif->sme_state); + clear_bit(INFF_VIF_STATUS_ASSOC_SUCCESS, + &ifp->vif->sme_state); + conn_params.status = WLAN_STATUS_AUTH_TIMEOUT; + } + conn_params.links[0].bssid = profile->bssid; + conn_params.req_ie = conn_info->req_ie; + 
conn_params.req_ie_len = conn_info->req_ie_len; + conn_params.resp_ie = conn_info->resp_ie; + conn_params.resp_ie_len = conn_info->resp_ie_len; + + cfg80211_connect_done(ndev, &conn_params, GFP_KERNEL); + inff_dbg(CONN, "Report connect result\n"); + + if ((profile->use_fwsup == INFF_PROFILE_FWSUP_1X && + inff_has_pmkid(conn_params.req_ie, conn_params.req_ie_len, NULL)) || + profile->use_fwsup == INFF_PROFILE_FWSUP_PSK || + profile->use_fwsup == INFF_PROFILE_FWSUP_SAE) { + cfg80211_port_authorized(ndev, profile->bssid, NULL, 0, GFP_KERNEL); + inff_dbg(CONN, "Report port authorized\n"); + } + + } else if (cfg->pfn_enable && completed) { + cfg->pfn_connection = 1; + } + inff_dbg(TRACE, "Exit\n"); + return 0; +} + +static void inff_init_conf(struct inff_cfg80211_conf *conf) +{ + conf->frag_threshold = (u32)-1; + conf->rts_threshold = (u32)-1; + conf->retry_short = (u32)-1; + conf->retry_long = (u32)-1; +} + +static void inff_deinit_priv_mem(struct inff_cfg80211_info *cfg) +{ + kfree(cfg->conf); + cfg->conf = NULL; + kfree(cfg->extra_buf); + cfg->extra_buf = NULL; + kfree(cfg->wowl.nd); + cfg->wowl.nd = NULL; + kfree(cfg->wowl.nd_info); + cfg->wowl.nd_info = NULL; + kfree(cfg->escan_info.escan_buf); + cfg->escan_info.escan_buf = NULL; +} + +static s32 inff_init_priv_mem(struct inff_cfg80211_info *cfg) +{ + cfg->conf = kzalloc(sizeof(*cfg->conf), GFP_KERNEL); + if (!cfg->conf) + goto init_priv_mem_out; + cfg->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); + if (!cfg->extra_buf) + goto init_priv_mem_out; + cfg->wowl.nd = kzalloc(sizeof(*cfg->wowl.nd) + sizeof(u32), GFP_KERNEL); + if (!cfg->wowl.nd) + goto init_priv_mem_out; + cfg->wowl.nd_info = kzalloc(sizeof(*cfg->wowl.nd_info) + + sizeof(struct cfg80211_wowlan_nd_match *), + GFP_KERNEL); + if (!cfg->wowl.nd_info) + goto init_priv_mem_out; + cfg->escan_info.escan_buf = kzalloc(INFF_ESCAN_BUF_SIZE, GFP_KERNEL); + if (!cfg->escan_info.escan_buf) + goto init_priv_mem_out; + + return 0; + +init_priv_mem_out: + 
inff_deinit_priv_mem(cfg); + + return -ENOMEM; +} + +static s32 wl_init_priv(struct inff_cfg80211_info *cfg) +{ + s32 err = 0; + + cfg->scan_request = NULL; + cfg->pwr_save = true; + cfg->dongle_up = false; /* dongle is not up yet */ + err = inff_init_priv_mem(cfg); + if (err) + return err; + inff_register_event_handlers(cfg); + mutex_init(&cfg->usr_sync); + inff_init_escan(cfg); + inff_init_conf(cfg->conf); + inff_init_wmm_prio(cfg->ac_priority); + init_completion(&cfg->vif_disabled); + return err; +} + +static void wl_deinit_priv(struct inff_cfg80211_info *cfg) +{ + cfg->dongle_up = false; /* dongle down */ + inff_abort_scanning(cfg); + inff_deinit_priv_mem(cfg); + inff_clear_assoc_ies(cfg); +} + +static void init_vif_event(struct inff_cfg80211_vif_event *event) +{ + init_waitqueue_head(&event->vif_wq); + spin_lock_init(&event->vif_event_lock); +} + +static s32 inff_dongle_roam(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + s32 err; + u32 bcn_timeout; + __le32 roamtrigger[2]; + __le32 roam_delta[2]; + __le32 bandlist[4]; + u32 n_bands; + int i; + + /* Configure beacon timeout value based upon roaming setting */ + if (ifp->drvr->settings->roamoff < INFF_ROAMOFF_DISABLE || + ifp->drvr->settings->roamoff >= INFF_ROAMOFF_MAX) { + iphy_err(drvr, + "roamoff setting is incorrect (%d), reset it\n", + ifp->drvr->settings->roamoff); + ifp->drvr->settings->roamoff = INFF_ROAMOFF_DISABLE; + } + + if (ifp->drvr->settings->roamoff) + bcn_timeout = INFF_DEFAULT_BCN_TIMEOUT_ROAM_OFF; + else + bcn_timeout = INFF_DEFAULT_BCN_TIMEOUT_ROAM_ON; + err = inff_fil_iovar_int_set(ifp, "bcn_timeout", bcn_timeout); + if (err) { + iphy_err(drvr, "bcn_timeout error (%d)\n", err); + goto roam_setup_done; + } + + /* Enable/Disable built-in roaming to allow supplicant to take care of + * roaming. + */ + inff_dbg(INFO, "Internal Roaming = %s, Mode:%d\n", + ifp->drvr->settings->roamoff ? 
"Off" : "On", ifp->drvr->settings->roamoff); + err = inff_fil_iovar_int_set(ifp, "roam_off", + ifp->drvr->settings->roamoff ? 1 : 0); + if (err) { + iphy_err(drvr, "roam_off error (%d)\n", err); + goto roam_setup_done; + } + + err = inff_fil_cmd_data_get(ifp, INFF_C_GET_BANDLIST, &bandlist, + sizeof(bandlist)); + if (err) { + iphy_err(drvr, "could not obtain band info: err=%d\n", err); + goto roam_setup_done; + } + /* To enhance compatibility set each band's roam properties instead of + * using all band. BAND_5G is 1, BAND_2G is 2 and BAND_6G is 3. + */ + n_bands = le32_to_cpu(bandlist[0]); + for (i = 1; i <= n_bands; i++) { + roamtrigger[0] = cpu_to_le32(WL_ROAM_TRIGGER_LEVEL); + roamtrigger[1] = cpu_to_le32(bandlist[i]); + err = inff_fil_cmd_data_set(ifp, INFF_C_SET_ROAM_TRIGGER, + (void *)roamtrigger, sizeof(roamtrigger)); + if (err) + iphy_err(drvr, "WLC_SET_ROAM_TRIGGER error (%d), band %d\n", + err, bandlist[i]); + + roam_delta[0] = cpu_to_le32(WL_ROAM_DELTA); + roam_delta[1] = cpu_to_le32(bandlist[i]); + err = inff_fil_cmd_data_set(ifp, INFF_C_SET_ROAM_DELTA, + (void *)roam_delta, sizeof(roam_delta)); + if (err) + iphy_err(drvr, "WLC_SET_ROAM_DELTA error (%d), band %d\n", + err, bandlist[i]); + } + + return 0; + +roam_setup_done: + return err; +} + +static s32 +inff_dongle_scantime(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + s32 err = 0; + + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_SCAN_CHANNEL_TIME, + INFF_SCAN_CHANNEL_TIME); + if (err) { + iphy_err(drvr, "Scan assoc time error (%d)\n", err); + goto dongle_scantime_out; + } + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_SCAN_UNASSOC_TIME, + INFF_SCAN_UNASSOC_TIME); + if (err) { + iphy_err(drvr, "Scan unassoc time error (%d)\n", err); + goto dongle_scantime_out; + } + + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_SCAN_PASSIVE_TIME, + INFF_SCAN_PASSIVE_TIME); + if (err) { + iphy_err(drvr, "Scan passive time error (%d)\n", err); + goto dongle_scantime_out; + } + +dongle_scantime_out: + return 
err; +} + +static void inff_update_bw40_channel_flag(struct ieee80211_channel *channel, + struct inff_chan *ch) +{ + u32 ht40_flag; + + ht40_flag = channel->flags & IEEE80211_CHAN_NO_HT40; + if (ch->sb == INFF_CHAN_SB_U) { + if (ht40_flag == IEEE80211_CHAN_NO_HT40) + channel->flags &= ~IEEE80211_CHAN_NO_HT40; + channel->flags |= IEEE80211_CHAN_NO_HT40PLUS; + } else { + /* It should be one of + * IEEE80211_CHAN_NO_HT40 or + * IEEE80211_CHAN_NO_HT40PLUS + */ + channel->flags &= ~IEEE80211_CHAN_NO_HT40; + if (ht40_flag == IEEE80211_CHAN_NO_HT40) + channel->flags |= IEEE80211_CHAN_NO_HT40MINUS; + } +} + +static void inff_wiphy_reset_band_and_channel(struct wiphy *wiphy) +{ + enum nl80211_band band; + struct ieee80211_supported_band *wiphy_band = NULL; + + for (band = 0; band < NUM_NL80211_BANDS; band++) { + wiphy_band = wiphy->bands[band]; + if (!wiphy_band) + continue; + + kfree(wiphy_band->channels); + wiphy_band->channels = NULL; + kfree(wiphy_band); + wiphy->bands[band] = NULL; + } +} + +static int inff_fill_band_with_default_chanlist(struct wiphy *wiphy, struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + struct ieee80211_supported_band *band; + int err, i; + __le32 bandlist[4]; + u32 n_bands; + + err = inff_fil_cmd_data_get(ifp, INFF_C_GET_BANDLIST, &bandlist, + sizeof(bandlist)); + if (err) { + iphy_err(drvr, "could not obtain band info: err=%d\n", err); + return err; + } + + inff_wiphy_reset_band_and_channel(wiphy); + + n_bands = le32_to_cpu(bandlist[0]); + for (i = 1; i <= n_bands && i < ARRAY_SIZE(bandlist); i++) { + if (bandlist[i] == cpu_to_le32(WLC_BAND_2G)) { + band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz), + GFP_KERNEL); + if (!band) + goto mem_err; + band->channels = kmemdup(&__wl_2ghz_channels, + sizeof(__wl_2ghz_channels), + GFP_KERNEL); + if (!band->channels) + goto mem_err; + + /* restore 2G channels info */ + band->n_channels = ARRAY_SIZE(__wl_2ghz_channels); + wiphy->bands[NL80211_BAND_2GHZ] = band; + } else if (bandlist[i] == 
cpu_to_le32(WLC_BAND_5G)) { + band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz), + GFP_KERNEL); + if (!band) + goto mem_err; + band->channels = kmemdup(&__wl_5ghz_channels, + sizeof(__wl_5ghz_channels), + GFP_KERNEL); + if (!band->channels) + goto mem_err; + + /* restore 5G channels info */ + band->n_channels = ARRAY_SIZE(__wl_5ghz_channels); + wiphy->bands[NL80211_BAND_5GHZ] = band; + } else if (bandlist[i] == cpu_to_le32(WLC_BAND_6G) && + inff_feat_is_6ghz_enabled(ifp)) { + band = kmemdup(&__wl_band_6ghz, sizeof(__wl_band_6ghz), + GFP_KERNEL); + if (!band) + goto mem_err; + band->channels = kmemdup(&__wl_6ghz_channels, + sizeof(__wl_6ghz_channels), + GFP_KERNEL); + if (!band->channels) + goto mem_err; + + /* restore 6G channels info */ + band->n_channels = ARRAY_SIZE(__wl_6ghz_channels); + wiphy->bands[NL80211_BAND_6GHZ] = band; + } + } + + if (wiphy->bands[NL80211_BAND_5GHZ] && + inff_feat_is_enabled(ifp, INFF_FEAT_DOT11H)) + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_DFS_OFFLOAD); + + return 0; + +mem_err: + inff_wiphy_reset_band_and_channel(wiphy); + return -ENOMEM; +} + +static void inff_wiphy_rm_disabled_band_and_channel(struct wiphy *wiphy) +{ + enum nl80211_band band; + struct ieee80211_supported_band *wiphy_band = NULL; + struct ieee80211_channel *cur = NULL; + struct ieee80211_channel *next = NULL; + u32 n_ch = 0; + u32 i, j; + + for (band = 0; band < NUM_NL80211_BANDS; band++) { + wiphy_band = wiphy->bands[band]; + if (!wiphy_band) + continue; + + n_ch = wiphy_band->n_channels; + for (i = 0; i < n_ch;) { + cur = &wiphy_band->channels[i]; + if (cur->flags == IEEE80211_CHAN_DISABLED) { + for (j = i; j < n_ch - 1; j++) { + cur = &wiphy_band->channels[j]; + next = &wiphy_band->channels[j + 1]; + memcpy(cur, next, sizeof(*cur)); + } + n_ch--; + } else { + i++; + } + } + + wiphy_band->n_channels = n_ch; + if (!n_ch) { + kfree(wiphy_band->channels); + wiphy_band->channels = NULL; + kfree(wiphy_band); + wiphy->bands[band] = NULL; + } + } +} + 
+static int inff_construct_chaninfo(struct inff_cfg80211_info *cfg, + u32 bw_cap[]) +{ + struct wiphy *wiphy = cfg_to_wiphy(cfg); + struct inff_pub *drvr = cfg->pub; + struct inff_if *ifp = inff_get_ifp(drvr, 0); + struct ieee80211_supported_band *band; + struct ieee80211_channel *channel; + struct inff_chanspec_list *list; + struct inff_chan ch; + int err; + u8 *pbuf; + u32 i, j; + u32 total; + u32 chaninfo; + + pbuf = kzalloc(INFF_DCMD_MEDLEN, GFP_KERNEL); + + if (!pbuf) + return -ENOMEM; + + list = (struct inff_chanspec_list *)pbuf; + + err = inff_fil_iovar_data_get(ifp, "chanspecs", pbuf, + INFF_DCMD_MEDLEN); + if (err) { + iphy_err(drvr, "get chanspecs error (%d)\n", err); + goto fail_pbuf; + } + + err = inff_fill_band_with_default_chanlist(wiphy, ifp); + if (err) { + iphy_err(drvr, "could not retrore band and channels: err=%d\n", err); + goto fail_pbuf; + } + + band = wiphy->bands[NL80211_BAND_2GHZ]; + if (band) { + for (i = 0; i < band->n_channels; i++) + band->channels[i].flags = IEEE80211_CHAN_DISABLED; + } + band = wiphy->bands[NL80211_BAND_5GHZ]; + if (band) { + for (i = 0; i < band->n_channels; i++) + band->channels[i].flags = IEEE80211_CHAN_DISABLED; + } + band = wiphy->bands[NL80211_BAND_6GHZ]; + if (band) { + for (i = 0; i < band->n_channels; i++) + band->channels[i].flags = IEEE80211_CHAN_DISABLED; + } + + total = le32_to_cpu(list->count); + if (total > INFF_MAX_CHANSPEC_LIST) { + iphy_err(drvr, "Invalid count of channel Spec. (%u)\n", + total); + err = -EINVAL; + goto fail_pbuf; + } + + for (i = 0; i < total; i++) { + ch.chspec = (u16)le32_to_cpu(list->element[i]); + cfg->d11inf.decchspec(&ch); + + if (ch.band == INFF_CHAN_BAND_2G) { + band = wiphy->bands[NL80211_BAND_2GHZ]; + } else if (ch.band == INFF_CHAN_BAND_5G) { + band = wiphy->bands[NL80211_BAND_5GHZ]; + } else if (ch.band == INFF_CHAN_BAND_6G) { + if (inff_feat_is_6ghz_enabled(ifp)) { + band = wiphy->bands[NL80211_BAND_6GHZ]; + } else { + inff_dbg(INFO, "Disabled channel Spec. 
0x%x.\n", + ch.chspec); + continue; + } + } else { + iphy_err(drvr, "Invalid channel Spec. 0x%x.\n", + ch.chspec); + continue; + } + if (!band) + continue; + if (!(bw_cap[band->band] & WLC_BW_40MHZ_BIT) && + ch.bw == INFF_CHAN_BW_40) + continue; + if (!(bw_cap[band->band] & WLC_BW_80MHZ_BIT) && + ch.bw == INFF_CHAN_BW_80) + continue; + + channel = NULL; + for (j = 0; j < band->n_channels; j++) { + if (band->channels[j].hw_value == ch.control_ch_num) { + channel = &band->channels[j]; + break; + } + } + if (!channel) { + /* It seems firmware supports some channel we never + * considered. Something new in IEEE standard? + */ + iphy_err(drvr, "Ignoring unexpected firmware channel %d\n", + ch.control_ch_num); + continue; + } + + if (channel->orig_flags & IEEE80211_CHAN_DISABLED) + continue; + + /* assuming the chanspecs order is HT20, + * HT40 upper, HT40 lower, and VHT80. + */ + switch (ch.bw) { + case INFF_CHAN_BW_160: + channel->flags &= ~IEEE80211_CHAN_NO_160MHZ; + break; + case INFF_CHAN_BW_80: + channel->flags &= ~IEEE80211_CHAN_NO_80MHZ; + break; + case INFF_CHAN_BW_40: + inff_update_bw40_channel_flag(channel, &ch); + break; + default: + wiphy_warn(wiphy, "Firmware reported unsupported bandwidth %d\n", + ch.bw); + fallthrough; + case INFF_CHAN_BW_20: + /* enable the channel and disable other bandwidths + * for now as mentioned order assure they are enabled + * for subsequent chanspecs. + */ + channel->flags = IEEE80211_CHAN_NO_HT40 | + IEEE80211_CHAN_NO_80MHZ | + IEEE80211_CHAN_NO_160MHZ; + ch.bw = INFF_CHAN_BW_20; + cfg->d11inf.encchspec(&ch); + chaninfo = ch.chspec; + err = inff_fil_bsscfg_int_get(ifp, "per_chan_info", + &chaninfo); + if (!err) { + if (chaninfo & WL_CHAN_RADAR) + channel->flags |= + (IEEE80211_CHAN_RADAR | + IEEE80211_CHAN_NO_IR); + if (chaninfo & WL_CHAN_PASSIVE) + channel->flags |= + IEEE80211_CHAN_NO_IR; + } + } + } + + /* Remove disabled channels and band to avoid unexpected restore. 
*/ + inff_wiphy_rm_disabled_band_and_channel(wiphy); + +fail_pbuf: + kfree(pbuf); + return err; +} + +static int inff_enable_bw40_2g(struct inff_cfg80211_info *cfg) +{ + struct inff_pub *drvr = cfg->pub; + struct inff_if *ifp = inff_get_ifp(drvr, 0); + struct ieee80211_supported_band *band; + struct inff_fil_bwcap_le band_bwcap; + struct inff_chanspec_list *list; + u8 *pbuf; + u32 val; + int err; + struct inff_chan ch; + u32 num_chan; + int i, j; + + /* verify support for bw_cap command */ + val = WLC_BAND_5G; + err = inff_fil_iovar_int_get(ifp, "bw_cap", &val); + + if (!err) { + /* only set 2G bandwidth using bw_cap command */ + band_bwcap.band = cpu_to_le32(WLC_BAND_2G); + band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ); + err = inff_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap, + sizeof(band_bwcap)); + } else { + inff_dbg(INFO, "fallback to mimo_bw_cap\n"); + val = WLC_N_BW_40ALL; + err = inff_fil_iovar_int_set(ifp, "mimo_bw_cap", val); + } + + if (!err) { + /* update channel info in 2G band */ + pbuf = kzalloc(INFF_DCMD_MEDLEN, GFP_KERNEL); + + if (!pbuf) + return -ENOMEM; + + ch.band = INFF_CHAN_BAND_2G; + ch.bw = INFF_CHAN_BW_40; + ch.sb = INFF_CHAN_SB_NONE; + ch.chnum = 0; + cfg->d11inf.encchspec(&ch); + + /* pass encoded chanspec in query */ + *(__le16 *)pbuf = cpu_to_le16(ch.chspec); + + err = inff_fil_iovar_data_get(ifp, "chanspecs", pbuf, + INFF_DCMD_MEDLEN); + if (err) { + iphy_err(drvr, "get chanspecs error (%d)\n", err); + kfree(pbuf); + return err; + } + + band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ]; + list = (struct inff_chanspec_list *)pbuf; + num_chan = le32_to_cpu(list->count); + if (num_chan > INFF_MAX_CHANSPEC_LIST) { + iphy_err(drvr, "Invalid count of channel Spec. 
(%u)\n", + num_chan); + kfree(pbuf); + return -EINVAL; + } + + for (i = 0; i < num_chan; i++) { + ch.chspec = (u16)le32_to_cpu(list->element[i]); + cfg->d11inf.decchspec(&ch); + if (WARN_ON(ch.band != INFF_CHAN_BAND_2G)) + continue; + if (WARN_ON(ch.bw != INFF_CHAN_BW_40)) + continue; + for (j = 0; j < band->n_channels; j++) { + if (band->channels[j].hw_value == ch.control_ch_num) + break; + } + if (WARN_ON(j == band->n_channels)) + continue; + + inff_update_bw40_channel_flag(&band->channels[j], &ch); + } + kfree(pbuf); + } + return err; +} + +static void inff_get_bwcap(struct inff_if *ifp, u32 bw_cap[]) +{ + struct inff_pub *drvr = ifp->drvr; + u32 band, mimo_bwcap; + int err; + + band = WLC_BAND_2G; + err = inff_fil_iovar_int_get(ifp, "bw_cap", &band); + if (!err) { + bw_cap[NL80211_BAND_2GHZ] = band; + band = WLC_BAND_5G; + err = inff_fil_iovar_int_get(ifp, "bw_cap", &band); + if (!err) { + bw_cap[NL80211_BAND_5GHZ] = band; + + if (!inff_feat_is_6ghz_enabled(ifp)) + return; + + band = WLC_BAND_6G; + err = inff_fil_iovar_int_get(ifp, "bw_cap", &band); + if (!err) { + bw_cap[NL80211_BAND_6GHZ] = band; + return; + } + return; + } + WARN_ON(1); + return; + } + inff_dbg(INFO, "fallback to mimo_bw_cap info\n"); + mimo_bwcap = 0; + err = inff_fil_iovar_int_get(ifp, "mimo_bw_cap", &mimo_bwcap); + if (err) + /* assume 20MHz if firmware does not give a clue */ + mimo_bwcap = WLC_N_BW_20ALL; + + switch (mimo_bwcap) { + case WLC_N_BW_40ALL: + bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT; + fallthrough; + case WLC_N_BW_20IN2G_40IN5G: + bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT; + fallthrough; + case WLC_N_BW_20ALL: + bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT; + bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT; + break; + default: + iphy_err(drvr, "invalid mimo_bw_cap value\n"); + } +} + +static void inff_ht_update_wiphy_cap(struct inff_if *ifp, + u32 bw_cap[2], u32 nchain) +{ + struct inff_pub *drvr = ifp->drvr; + struct wiphy *wiphy = drvr->wiphy; + struct 
ieee80211_supported_band *band; + u32 nmode = 0, sgi_rx = 1; + int i = 0; + + /* HT mode */ + if (inff_fil_iovar_int_get(ifp, "nmode", &nmode)) + iphy_err(drvr, "nmode error\n"); + + if (!nmode) + return; + + inff_dbg(INFO, "HT Enabled\n"); + + if (inff_fil_iovar_int_get(ifp, "sgi_rx", &sgi_rx)) + iphy_err(drvr, "sgi_rx error\n"); + + /* Update HT Capab for each Band */ + for (i = 0; i < ARRAY_SIZE(wiphy->bands); i++) { + band = wiphy->bands[i]; + if (!band) + continue; + + switch (band->band) { + case NL80211_BAND_6GHZ: + break; + + case NL80211_BAND_5GHZ: + /* Band 5GHz supports HT, so */ + fallthrough; + + case NL80211_BAND_2GHZ: + /* Band 2GHz supports HT */ + band->ht_cap.ht_supported = true; + + /* Bit 0 represents HT SGI 20MHz */ + if ((sgi_rx & BIT(0)) && + (bw_cap[band->band] & WLC_BW_20MHZ_BIT)) + band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; + + /* Bit 1 represents HT SGI 40MHz */ + if ((sgi_rx & BIT(1)) && + (bw_cap[band->band] & WLC_BW_40MHZ_BIT)) { + band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; + band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + } + + band->ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; + band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16; + memset(band->ht_cap.mcs.rx_mask, 0xff, nchain); + band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + break; + + default: + break; + } + } +} + +static __le16 inff_vht_get_mcs_map(u32 nchain, enum ieee80211_vht_mcs_support supp) +{ + u16 mcs_map; + int i; + + for (i = 0, mcs_map = 0xFFFF; i < nchain; i++) + mcs_map = (mcs_map << 2) | supp; + + return cpu_to_le16(mcs_map); +} + +static void inff_vht_update_wiphy_cap(struct inff_if *ifp, + u32 bw_cap[2], u32 nchain) +{ + struct inff_pub *drvr = ifp->drvr; + struct wiphy *wiphy = drvr->wiphy; + struct ieee80211_supported_band *band; + u32 vhtmode = 0; + u32 txstreams = 0; + u32 txbf_bfe_cap = 0; + u32 txbf_bfr_cap = 0; + __le16 mcs_map; + int i = 0; + + /* VHT mode */ + if 
(inff_fil_iovar_int_get(ifp, "vhtmode", &vhtmode)) + iphy_err(drvr, "vhtmode error\n"); + + if (!vhtmode) + return; + + inff_dbg(INFO, "VHT Enabled\n"); + + /* Create a MAP with MCS for each Stream */ + mcs_map = inff_vht_get_mcs_map(nchain, IEEE80211_VHT_MCS_SUPPORT_0_9); + + if (inff_fil_iovar_int_get(ifp, "txstreams", &txstreams)) + iphy_err(drvr, "txstreams error\n"); + + /* Beamforming support information */ + if (inff_fil_iovar_int_get(ifp, "txbf_bfe_cap", &txbf_bfe_cap)) + iphy_err(drvr, "txbf_bfe_cap error\n"); + if (inff_fil_iovar_int_get(ifp, "txbf_bfr_cap", &txbf_bfr_cap)) + iphy_err(drvr, "txbf_bfw_cap error\n"); + + /* Update VHT Capab for each Band */ + for (i = 0; i < ARRAY_SIZE(wiphy->bands); i++) { + band = wiphy->bands[i]; + if (!band) + continue; + + switch (band->band) { + case NL80211_BAND_6GHZ: + break; + + case NL80211_BAND_5GHZ: + /* Band 5GHz supports VHT */ + band->vht_cap.vht_supported = true; + band->vht_cap.vht_mcs.rx_mcs_map = mcs_map; + band->vht_cap.vht_mcs.tx_mcs_map = mcs_map; + + if (bw_cap[band->band] & WLC_BW_80MHZ_BIT) + band->vht_cap.cap |= IEEE80211_VHT_CAP_SHORT_GI_80; + + if (txbf_bfe_cap & INFF_TXBF_SU_BFE_CAP) + band->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; + if (txbf_bfe_cap & INFF_TXBF_MU_BFE_CAP) + band->vht_cap.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + if (txbf_bfr_cap & INFF_TXBF_SU_BFR_CAP) + band->vht_cap.cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; + if (txbf_bfr_cap & INFF_TXBF_MU_BFR_CAP) + band->vht_cap.cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE; + + if ((txbf_bfe_cap || txbf_bfr_cap) && txstreams > 1) { + band->vht_cap.cap |= + (2 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT); + band->vht_cap.cap |= ((txstreams - 1) << + IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT); + band->vht_cap.cap |= + IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB; + } + break; + + case NL80211_BAND_2GHZ: + break; + + default: + break; + } + } +} + +static int inff_setup_wiphybands(struct inff_cfg80211_info 
*cfg) +{ + struct inff_pub *drvr = cfg->pub; + struct inff_if *ifp = inff_get_ifp(drvr, 0); + u32 rxchain, nchain = 1; + u32 bw_cap[4] = { WLC_BW_20MHZ_BIT, /* 2GHz */ + WLC_BW_20MHZ_BIT, /* 5GHz */ + 0, /* 60GHz */ + 0 }; /* 6GHz */ + int err; + + inff_get_bwcap(ifp, bw_cap); + inff_dbg(INFO, "bw_cap=[2G(%d), 5G(%d), 6G(%d)]\n", + bw_cap[NL80211_BAND_2GHZ], bw_cap[NL80211_BAND_5GHZ], + bw_cap[NL80211_BAND_6GHZ]); + + err = inff_fil_iovar_int_get(ifp, "rxchain", &rxchain); + if (err) + iphy_err(drvr, "rxchain error (%d)\n", err); + else + for (nchain = 0; rxchain; nchain++) + rxchain = rxchain & (rxchain - 1); + inff_dbg(INFO, "nchain=%d\n", nchain); + + err = inff_construct_chaninfo(cfg, bw_cap); + if (err) { + iphy_err(drvr, "inff_construct_chaninfo failed (%d)\n", err); + return err; + } + + /* HT Capability Registration */ + inff_ht_update_wiphy_cap(ifp, bw_cap, nchain); + + /* VHT Capability Registration */ + inff_vht_update_wiphy_cap(ifp, bw_cap, nchain); + + /* HE Capability Registration */ + inff_he_update_wiphy_cap(ifp); + + /* EHT Capability Registration */ + inff_eht_update_wiphy_cap(ifp); + + return 0; +} + +static const struct ieee80211_txrx_stypes +inff_txrx_stypes[NUM_NL80211_IFTYPES] = { + [NL80211_IFTYPE_STATION] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_P2P_CLIENT] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_P2P_GO] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4) + }, + [NL80211_IFTYPE_P2P_DEVICE] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + 
[NL80211_IFTYPE_AP] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4) + } +}; + +/** + * inff_setup_ifmodes() - determine interface modes and combinations. + * + * @wiphy: wiphy object. + * @ifp: interface object needed for feat module api. + * + * The interface modes and combinations are determined dynamically here + * based on firmware functionality. + * + * no p2p and no mbss: + * + * #STA <= 1, #AP <= 1, channels = 1, 2 total + * + * no p2p and mbss: + * + * #STA <= 1, #AP <= 1, channels = 1, 2 total + * #AP <= 4, matching BI, channels = 1, 4 total + * + * no p2p and rsdb: + * #STA <= 1, #AP <= 2, channels = 2, 4 total + * + * p2p, no mchan, and mbss: + * + * #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 1, 3 total + * #STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total + * #AP <= 4, matching BI, channels = 1, 4 total + * + * p2p, mchan, and mbss: + * + * #STA <= 2, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 2, 3 total + * #STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total + * #AP <= 4, matching BI, channels = 1, 4 total + * + * p2p, rsdb, and no mbss: + * #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 2, AP <= 2, + * channels = 2, 4 total + * + * Return: 0 on success, negative errno on failure + */ +static int inff_setup_ifmodes(struct wiphy *wiphy, struct inff_if *ifp) +{ + struct ieee80211_iface_combination *combo = NULL; + struct ieee80211_iface_limit *c0_limits = NULL; + struct ieee80211_iface_limit *p2p_limits = NULL; + struct ieee80211_iface_limit *mbss_limits = NULL; + bool mon_flag, mbss, p2p, rsdb, mchan; + bool wlan_sense; + int i, c, n_combos, n_limits, p2p_num_infs; + + mon_flag = inff_feat_is_enabled(ifp, INFF_FEAT_MONITOR_FLAG); + mbss = 
inff_feat_is_enabled(ifp, INFF_FEAT_MBSS); + p2p = inff_feat_is_enabled(ifp, INFF_FEAT_P2P); + rsdb = inff_feat_is_enabled(ifp, INFF_FEAT_RSDB); + mchan = inff_feat_is_enabled(ifp, INFF_FEAT_MCHAN); + wlan_sense = inff_feat_is_enabled(ifp, INFF_FEAT_WLAN_SENSE); + + n_combos = 1 + !!(p2p && !rsdb) + !!mbss; + combo = kcalloc(n_combos, sizeof(*combo), GFP_KERNEL); + if (!combo) + goto err; + + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_ADHOC) | + BIT(NL80211_IFTYPE_AP); + if (mon_flag) + wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); + if (p2p) + wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_P2P_DEVICE); + if (wlan_sense) + wiphy->interface_modes |= BIT(NL80211_IFTYPE_WLAN_SENSE); + + c = 0; + i = 0; + n_limits = 1 + mon_flag + (p2p ? 2 : 0) + (rsdb || !p2p); + n_limits += wlan_sense; + + c0_limits = kcalloc(n_limits, sizeof(*c0_limits), GFP_KERNEL); + if (!c0_limits) + goto err; + + combo[c].num_different_channels = 1 + (rsdb || (p2p && mchan)); + c0_limits[i].max = 1 + (p2p && mchan); + c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION); + if (mon_flag) { + c0_limits[i].max = 1; + c0_limits[i++].types = BIT(NL80211_IFTYPE_MONITOR); + } + if (p2p) { + c0_limits[i].max = 1; + c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE); + c0_limits[i].max = 1 + rsdb; + c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) | + BIT(NL80211_IFTYPE_P2P_GO); + } + if (wlan_sense) { + c0_limits[i].max = 1; + c0_limits[i++].types = BIT(NL80211_IFTYPE_WLAN_SENSE); + } + if (p2p && rsdb) { + c0_limits[i].max = 2; + c0_limits[i++].types = BIT(NL80211_IFTYPE_AP); + combo[c].max_interfaces = 4; + } else if (p2p) { + combo[c].max_interfaces = i; + } else if (rsdb) { + c0_limits[i].max = 2; + c0_limits[i++].types = BIT(NL80211_IFTYPE_AP); + combo[c].max_interfaces = 3; + } else { + c0_limits[i].max = 1; + c0_limits[i++].types = BIT(NL80211_IFTYPE_AP); + combo[c].max_interfaces = i; 
+ } + combo[c].n_limits = i; + combo[c].limits = c0_limits; + + if (p2p && !rsdb) { + c++; + i = 0; + p2p_num_infs = 4; + if (wlan_sense) + p2p_num_infs += 1; + + p2p_limits = kcalloc(p2p_num_infs, sizeof(*p2p_limits), GFP_KERNEL); + if (!p2p_limits) + goto err; + p2p_limits[i].max = 1; + p2p_limits[i++].types = BIT(NL80211_IFTYPE_STATION); + p2p_limits[i].max = 1; + p2p_limits[i++].types = BIT(NL80211_IFTYPE_AP); + p2p_limits[i].max = 1; + p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT); + p2p_limits[i].max = 1; + p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE); + if (wlan_sense) { + p2p_limits[i].max = 1; + p2p_limits[i++].types = BIT(NL80211_IFTYPE_WLAN_SENSE); + } + + combo[c].num_different_channels = 1; + combo[c].max_interfaces = i; + combo[c].n_limits = i; + combo[c].limits = p2p_limits; + } + + if (mbss) { + c++; + i = 0; + n_limits = 1 + mon_flag; + mbss_limits = kcalloc(n_limits, sizeof(*mbss_limits), + GFP_KERNEL); + if (!mbss_limits) + goto err; + mbss_limits[i].max = 4; + mbss_limits[i++].types = BIT(NL80211_IFTYPE_AP); + if (mon_flag) { + mbss_limits[i].max = 1; + mbss_limits[i++].types = BIT(NL80211_IFTYPE_MONITOR); + } + combo[c].beacon_int_infra_match = true; + combo[c].num_different_channels = 1; + combo[c].max_interfaces = 4 + mon_flag; + combo[c].n_limits = i; + combo[c].limits = mbss_limits; + } + + wiphy->n_iface_combinations = n_combos; + wiphy->iface_combinations = combo; + return 0; + +err: + kfree(c0_limits); + kfree(p2p_limits); + kfree(mbss_limits); + kfree(combo); + return -ENOMEM; +} + +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support inff_wowlan_support = { + .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, + .n_patterns = INFF_WOWL_MAXPATTERNS, + .pattern_max_len = INFF_WOWL_MAXPATTERNSIZE, + .pattern_min_len = 1, + .max_pkt_offset = 1500, +}; +#endif + +static void inff_wiphy_wowl_params(struct wiphy *wiphy, struct inff_if *ifp) +{ +#ifdef CONFIG_PM + struct inff_cfg80211_info *cfg = 
wiphy_to_cfg(wiphy); + struct wiphy_wowlan_support *wowl; + struct cfg80211_wowlan *inff_wowlan_config = NULL; + + wowl = kmemdup(&inff_wowlan_support, sizeof(inff_wowlan_support), + GFP_KERNEL); + if (!wowl) { + wiphy->wowlan = &inff_wowlan_support; + return; + } + + if (inff_feat_is_enabled(ifp, INFF_FEAT_PNO)) { + if (inff_feat_is_enabled(ifp, INFF_FEAT_WOWL_ND)) { + wowl->flags |= WIPHY_WOWLAN_NET_DETECT; + wowl->max_nd_match_sets = INFF_PNO_MAX_PFN_COUNT; + init_waitqueue_head(&cfg->wowl.nd_data_wait); + } + } + + /* for backward compatibility, retain INFF_FEAT_WOWL_GTK */ + if (inff_feat_is_enabled(ifp, INFF_FEAT_WOWL_GTK) || + inff_feat_is_enabled(ifp, INFF_FEAT_GTKO)) + wowl->flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY; + + if (inff_feat_is_enabled(ifp, INFF_FEAT_WOWL_GTK)) + wowl->flags |= WIPHY_WOWLAN_GTK_REKEY_FAILURE; + + wiphy->wowlan = wowl; + + /* wowlan_config structure report for kernels */ + inff_wowlan_config = kzalloc(sizeof(*inff_wowlan_config), + GFP_KERNEL); + if (inff_wowlan_config) { + inff_wowlan_config->any = false; + inff_wowlan_config->disconnect = true; + inff_wowlan_config->eap_identity_req = true; + inff_wowlan_config->four_way_handshake = true; + inff_wowlan_config->rfkill_release = false; + inff_wowlan_config->patterns = NULL; + inff_wowlan_config->n_patterns = 0; + inff_wowlan_config->tcp = NULL; + if (inff_feat_is_enabled(ifp, INFF_FEAT_WOWL_GTK)) + inff_wowlan_config->gtk_rekey_failure = true; + else + inff_wowlan_config->gtk_rekey_failure = false; + } else { + inff_err("Can not allocate memory for inff_wowlan_config\n"); + } + wiphy->wowlan_config = inff_wowlan_config; +#endif +} + +static int inff_setup_wiphy(struct wiphy *wiphy, struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + const struct ieee80211_iface_combination *combo; + u16 max_interfaces = 0; + bool gscan; + int err, i; + enum nl80211_band band; + + wiphy->max_scan_ssids = WL_NUM_SCAN_MAX; + wiphy->max_scan_ie_len = INFF_SCAN_IE_LEN_MAX; + 
wiphy->max_num_pmkids = INFF_MAXPMKID; + + err = inff_setup_ifmodes(wiphy, ifp); + if (err) + return err; + + for (i = 0, combo = wiphy->iface_combinations; + i < wiphy->n_iface_combinations; i++, combo++) { + max_interfaces = max(max_interfaces, combo->max_interfaces); + } + + for (i = 0; i < max_interfaces && i < ARRAY_SIZE(drvr->addresses); + i++) { + u8 *addr = drvr->addresses[i].addr; + + memcpy(addr, drvr->mac, ETH_ALEN); + if (i) { + addr[0] |= BIT(1); + addr[ETH_ALEN - 1] ^= i; + } + } + wiphy->addresses = drvr->addresses; + wiphy->n_addresses = i; + + wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + wiphy->cipher_suites = inff_cipher_suites; + wiphy->n_cipher_suites = ARRAY_SIZE(inff_cipher_suites); + if (!inff_feat_is_enabled(ifp, INFF_FEAT_MFP)) + wiphy->n_cipher_suites -= 4; + wiphy->bss_select_support = BIT(NL80211_BSS_SELECT_ATTR_RSSI) | + BIT(NL80211_BSS_SELECT_ATTR_BAND_PREF) | + BIT(NL80211_BSS_SELECT_ATTR_RSSI_ADJUST); + + wiphy->bss_param_support = WIPHY_BSS_PARAM_AP_ISOLATE; + + wiphy->flags |= WIPHY_FLAG_NETNS_OK | + WIPHY_FLAG_PS_ON_BY_DEFAULT | + WIPHY_FLAG_HAVE_AP_SME | + WIPHY_FLAG_OFFCHAN_TX | + WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | + WIPHY_FLAG_SPLIT_SCAN_6GHZ; + if (inff_feat_is_enabled(ifp, INFF_FEAT_MLO)) + wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; + + if (inff_feat_is_enabled(ifp, INFF_FEAT_TDLS)) + wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + if (ifp->drvr->settings->roamoff == INFF_ROAMOFF_DISABLE) + wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM; + if (inff_feat_is_enabled(ifp, INFF_FEAT_FWSUP)) { + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK); + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X); + if (inff_feat_is_enabled(ifp, INFF_FEAT_OWE)) + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_OWE_OFFLOAD); + if (inff_feat_is_enabled(ifp, INFF_FEAT_SAE)) + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_SAE_OFFLOAD); + } + if (inff_feat_is_enabled(ifp, INFF_FEAT_FWAUTH)) { + 
wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK); + if (inff_feat_is_enabled(ifp, INFF_FEAT_SAE)) + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_SAE_OFFLOAD_AP); + } + if (inff_feat_is_enabled(ifp, INFF_FEAT_SAE_EXT)) { + wiphy->features |= NL80211_FEATURE_SAE; + wiphy_ext_feature_set(wiphy, + NL80211_EXT_FEATURE_AP_PMKSA_CACHING); + } + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + + wiphy->mgmt_stypes = inff_txrx_stypes; + wiphy->max_remain_on_channel_duration = 5000; + if (inff_feat_is_enabled(ifp, INFF_FEAT_PNO)) { + gscan = inff_feat_is_enabled(ifp, INFF_FEAT_GSCAN); + inff_pno_wiphy_params(wiphy, gscan); + } + /* vendor commands/events support */ + wiphy->vendor_commands = inff_vendor_cmds; + wiphy->n_vendor_commands = get_inff_num_vndr_cmds(); + + inff_wiphy_wowl_params(wiphy, ifp); + + /* first entry in bandlist is number of bands */ + for (band = 0; band < NUM_NL80211_BANDS; band++) + wiphy->bands[band] = NULL; + err = inff_fill_band_with_default_chanlist(wiphy, ifp); + if (err) { + iphy_err(drvr, "could not retrore band and channels: err=%d\n", err); + return err; + } + + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + + inff_wiphy_pmsr_params(wiphy, ifp); + + wiphy_read_of_freq_limits(wiphy); + return 0; +} + +static s32 inff_config_dongle(struct inff_cfg80211_info *cfg) +{ + struct inff_pub *drvr = cfg->pub; + struct net_device *ndev; + struct wireless_dev *wdev; + struct inff_if *ifp; + s32 power_mode; + s32 eap_restrict; + s32 err = 0; + u32 wowl_config = 0; + + if (cfg->dongle_up) + return err; + + ndev = cfg_to_ndev(cfg); + wdev = ndev->ieee80211_ptr; + ifp = netdev_priv(ndev); + + /* make sure RF is ready for work */ + inff_fil_cmd_int_set(ifp, INFF_C_UP, 0); + + inff_dongle_scantime(ifp); + + power_mode = cfg->pwr_save ? 
ifp->drvr->settings->default_pm : PM_OFF; + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_PM, power_mode); + if (err) + goto default_conf_out; + inff_dbg(INFO, "power save set to %s\n", + (power_mode ? "enabled" : "disabled")); + + err = inff_dongle_roam(ifp); + if (err) + goto default_conf_out; + + eap_restrict = ifp->drvr->settings->eap_restrict; + if (eap_restrict) { + err = inff_fil_iovar_int_set(ifp, "eap_restrict", + eap_restrict); + if (err) + inff_info("eap_restrict error (%d)\n", err); + } + err = inff_cfg80211_change_iface(wdev->wiphy, ndev, wdev->iftype, + NULL); + if (err) + goto default_conf_out; + + /* Configure user based power profile for offloads. + * Default profile is LOW_PWR. + */ + if (wdev->iftype == NL80211_IFTYPE_STATION && + inff_feat_is_enabled(ifp, INFF_FEAT_OFFLOADS)) { + inff_offload_config(ifp, inff_offload_feat, + inff_offload_prof, false); + + if (inff_feat_is_enabled(ifp, INFF_FEAT_ULP)) { + wowl_config = INFF_WOWL_DIS | INFF_WOWL_BCN; + err = inff_fil_iovar_int_set(ifp, "wowl", wowl_config); + if (err < 0) + inff_err("wowl_flags DIS,BCN not set"); + } + } else { + inff_offload_configure_arp_nd(ifp, true); + } + + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_FAKEFRAG, 1); + if (err) { + iphy_err(drvr, "failed to set frameburst mode\n"); + goto default_conf_out; + } + + cfg->dongle_up = true; +default_conf_out: + + return err; +} + +static s32 __inff_cfg80211_up(struct inff_if *ifp) +{ + set_bit(INFF_VIF_STATUS_READY, &ifp->vif->sme_state); + + return inff_config_dongle(ifp->drvr->config); +} + +static s32 __inff_cfg80211_down(struct inff_if *ifp) +{ + struct inff_cfg80211_info *cfg = ifp->drvr->config; + /* Disable all offloads started on inff_config_dongle before + * link is brought down. 
+ */ + if (inff_cfg80211_get_iftype(ifp) == NL80211_IFTYPE_STATION && + inff_feat_is_enabled(ifp, INFF_FEAT_OFFLOADS)) + inff_offload_config(ifp, inff_offload_feat, + inff_offload_prof, true); + + /* + * While going down, if associated with AP disassociate + * from AP to save power + */ + if (check_vif_up(ifp->vif)) { + inff_link_down(ifp->vif, WLAN_REASON_UNSPECIFIED, true); + + /* Make sure WPA_Supplicant receives all the event + * generated due to DISASSOC call to the fw to keep + * the state fw and WPA_Supplicant state consistent + */ + inff_delay(500); + cfg->dongle_up = false; + } + + inff_abort_scanning(cfg); + clear_bit(INFF_VIF_STATUS_READY, &ifp->vif->sme_state); + + return 0; +} + +s32 inff_cfg80211_up(struct net_device *ndev) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_cfg80211_info *cfg = ifp->drvr->config; + s32 err = 0; + + mutex_lock(&cfg->usr_sync); + err = __inff_cfg80211_up(ifp); + mutex_unlock(&cfg->usr_sync); + + return err; +} + +s32 inff_cfg80211_down(struct net_device *ndev) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_cfg80211_info *cfg = ifp->drvr->config; + s32 err = 0; + + mutex_lock(&cfg->usr_sync); + err = __inff_cfg80211_down(ifp); + mutex_unlock(&cfg->usr_sync); + + return err; +} + +bool inff_get_vif_state_any(struct inff_cfg80211_info *cfg, + unsigned long state) +{ + struct inff_cfg80211_vif *vif; + + list_for_each_entry(vif, &cfg->vif_list, list) { + if (test_bit(state, &vif->sme_state)) + return true; + } + return false; +} + +static inline bool vif_event_equals(struct inff_cfg80211_vif_event *event, + u8 action) +{ + u8 evt_action; + + spin_lock(&event->vif_event_lock); + evt_action = event->action; + spin_unlock(&event->vif_event_lock); + return evt_action == action; +} + +void inff_cfg80211_arm_vif_event(struct inff_cfg80211_info *cfg, + struct inff_cfg80211_vif *vif) +{ + struct inff_cfg80211_vif_event *event = &cfg->vif_event; + + spin_lock(&event->vif_event_lock); + event->vif = vif; + 
event->action = 0; + spin_unlock(&event->vif_event_lock); +} + +bool inff_cfg80211_vif_event_armed(struct inff_cfg80211_info *cfg) +{ + struct inff_cfg80211_vif_event *event = &cfg->vif_event; + bool armed; + + spin_lock(&event->vif_event_lock); + armed = (event->vif) ? true : false; + spin_unlock(&event->vif_event_lock); + + return armed; +} + +int inff_cfg80211_wait_vif_event(struct inff_cfg80211_info *cfg, + u8 action, ulong timeout) +{ + struct inff_cfg80211_vif_event *event = &cfg->vif_event; + + return wait_event_timeout(event->vif_wq, + vif_event_equals(event, action), timeout); +} + +static s32 inff_translate_country_code(struct inff_pub *drvr, char alpha2[2], + struct inff_fil_country_le *ccreq) +{ + if (alpha2[0] == ccreq->country_abbrev[0] && + alpha2[1] == ccreq->country_abbrev[1]) { + inff_dbg(INFO, "Country code already set\n"); + return -EAGAIN; + } + + inff_dbg(INFO, "For Country code, using ISO3166 code and 0 rev\n"); + memset(ccreq, 0, sizeof(*ccreq)); + ccreq->country_abbrev[0] = alpha2[0]; + ccreq->country_abbrev[1] = alpha2[1]; + ccreq->ccode[0] = alpha2[0]; + ccreq->ccode[1] = alpha2[1]; + + return 0; +} + +static void inff_cfg80211_reg_notifier(struct wiphy *wiphy, + struct regulatory_request *req) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = inff_get_ifp(cfg->pub, 0); + struct inff_pub *drvr = cfg->pub; + struct inff_fil_country_le ccreq; + s32 err; + int i; + + /* The country code gets set to "00" by default at boot, ignore */ + if (req->alpha2[0] == '0' && req->alpha2[1] == '0') + return; + + /* ignore non-ISO3166 country codes */ + for (i = 0; i < 2; i++) + if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') { + iphy_err(drvr, "not an ISO3166 code (0x%02x 0x%02x)\n", + req->alpha2[0], req->alpha2[1]); + return; + } + + inff_dbg(INFO, "Enter: initiator=%d, alpha=%c%c\n", req->initiator, + req->alpha2[0], req->alpha2[1]); + + err = inff_fil_iovar_data_get(ifp, "country", &ccreq, sizeof(ccreq)); + if (err) 
{ + iphy_err(drvr, "Country code iovar returned err = %d\n", err); + return; + } + + err = inff_translate_country_code(ifp->drvr, req->alpha2, &ccreq); + if (err) + return; + + /* Abort on-going scan before changing ccode */ + inff_abort_scanning(cfg); + + err = inff_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq)); + if (err) { + iphy_err(drvr, "Firmware rejected country setting\n"); + return; + } + inff_setup_wiphybands(cfg); +} + +static void inff_free_wiphy(struct wiphy *wiphy) +{ + int i; + + if (!wiphy) + return; + + if (wiphy->iface_combinations) { + for (i = 0; i < wiphy->n_iface_combinations; i++) + kfree(wiphy->iface_combinations[i].limits); + } + kfree(wiphy->iface_combinations); + if (wiphy->bands[NL80211_BAND_2GHZ]) { + kfree(wiphy->bands[NL80211_BAND_2GHZ]->channels); + kfree(wiphy->bands[NL80211_BAND_2GHZ]); + } + if (wiphy->bands[NL80211_BAND_5GHZ]) { + kfree(wiphy->bands[NL80211_BAND_5GHZ]->channels); + kfree(wiphy->bands[NL80211_BAND_5GHZ]); + } + if (wiphy->bands[NL80211_BAND_6GHZ]) { + kfree(wiphy->bands[NL80211_BAND_6GHZ]->channels); + kfree(wiphy->bands[NL80211_BAND_6GHZ]); + } + +#if IS_ENABLED(CONFIG_PM) + if (wiphy->wowlan != &inff_wowlan_support) + kfree(wiphy->wowlan); +#endif + kfree(wiphy->pmsr_capa); +} + +struct inff_cfg80211_info *inff_cfg80211_attach(struct inff_pub *drvr, + struct cfg80211_ops *ops, + bool p2pdev_forced) +{ + struct wiphy *wiphy = drvr->wiphy; + struct net_device *ndev = inff_get_ifp(drvr, 0)->ndev; + struct inff_cfg80211_info *cfg; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + s32 err = 0; + s32 io_type; + u16 *cap = NULL; + + if (!ndev) { + iphy_err(drvr, "ndev is invalid\n"); + return NULL; + } + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return NULL; + + cfg->wiphy = wiphy; + cfg->pub = drvr; + cfg->pm_state = INFF_CFG80211_PM_STATE_RESUMED; + cfg->num_softap = 0; + cfg->mchan_conf = INFF_MCHAN_CONF_DEFAULT; + init_vif_event(&cfg->vif_event); + INIT_LIST_HEAD(&cfg->vif_list); 
+ + vif = inff_alloc_vif(cfg, NL80211_IFTYPE_STATION); + if (IS_ERR(vif)) + goto wiphy_out; + + ifp = netdev_priv(ndev); + vif->ifp = ifp; + vif->wdev.netdev = ndev; + ndev->ieee80211_ptr = &vif->wdev; + SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy)); + + err = wl_init_priv(cfg); + if (err) { + iphy_err(drvr, "Failed to init iwm_priv (%d)\n", err); + inff_free_vif(vif); + goto wiphy_out; + } + ifp->vif = vif; + + /* determine d11 io type before wiphy setup */ + err = inff_fil_cmd_int_get(ifp, INFF_C_GET_VERSION, &io_type); + if (err) { + iphy_err(drvr, "Failed to get D11 version (%d)\n", err); + goto priv_out; + } + cfg->d11inf.io_type = (u8)io_type; + inff_d11_attach(&cfg->d11inf); + + /* regulatory notifier below needs access to cfg so + * assign it now. + */ + drvr->config = cfg; + + err = inff_setup_wiphy(wiphy, ifp); + if (err < 0) + goto priv_out; + + inff_dbg(INFO, "Registering custom regulatory\n"); + wiphy->reg_notifier = inff_cfg80211_reg_notifier; + wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; + wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE; + wiphy_apply_custom_regulatory(wiphy, &inff_regdom); + + /* firmware defaults to 40MHz disabled in 2G band. We signal + * cfg80211 here that we do and have it decide we can enable + * it. But first check if device does support 2G operation. + */ + if (wiphy->bands[NL80211_BAND_2GHZ]) { + cap = &wiphy->bands[NL80211_BAND_2GHZ]->ht_cap.cap; + *cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + } +#ifdef CONFIG_PM + if (inff_feat_is_enabled(ifp, INFF_FEAT_WOWL_GTK)) + ops->set_rekey_data = inff_cfg80211_set_rekey_data; +#endif + /* if the firmware has GTKO cap, + * user space can use NL80211_CMD_SET_REKEY_OFFLOAD command to pass gtk data. 
+ */ + if (inff_feat_is_enabled(ifp, INFF_FEAT_GTKO)) + ops->set_rekey_data = inff_cfg80211_set_rekey_data; + + if (inff_feat_is_enabled(ifp, INFF_FEAT_DUMP_OBSS)) + ops->dump_survey = inff_cfg80211_dump_survey; + else if (inff_feat_is_enabled(ifp, INFF_FEAT_SURVEY_DUMP)) + ops->dump_survey = inff_cfg80211_dump_survey_2; + else + ops->dump_survey = NULL; + + err = inff_setup_wiphybands(cfg); + if (err) { + iphy_err(drvr, "Setting wiphy bands failed (%d)\n", err); + goto wiphy_unreg_out; + } + + err = wiphy_register(wiphy); + if (err < 0) { + iphy_err(drvr, "Could not register wiphy device (%d)\n", err); + goto priv_out; + } + + /* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(), + * setup 40MHz in 2GHz band and enable OBSS scanning. + */ + if (cap && (*cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)) { + err = inff_enable_bw40_2g(cfg); + if (!err) + err = inff_fil_iovar_int_set(ifp, "obss_coex", + INFF_OBSS_COEX_AUTO); + else + *cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + } + + err = inff_fweh_activate_events(ifp); + if (err) { + iphy_err(drvr, "FWEH activation failed (%d)\n", err); + goto wiphy_unreg_out; + } + + err = inff_p2p_attach(cfg, p2pdev_forced); + if (err) { + iphy_err(drvr, "P2P initialisation failed (%d)\n", err); + goto wiphy_unreg_out; + } + err = inff_btcoex_attach(cfg); + if (err) { + iphy_err(drvr, "BT-coex initialisation failed (%d)\n", err); + goto detach_p2p; + } + err = inff_pno_attach(cfg); + if (err) { + iphy_err(drvr, "PNO initialisation failed (%d)\n", err); + goto detach_btcoex; + } + + err = inff_pmsr_attach(cfg); + if (err) { + iphy_err(drvr, "PMSR initialisation failed (%d)\n", err); + goto detach_pno; + } + + if (inff_feat_is_enabled(ifp, INFF_FEAT_TDLS)) { + err = inff_fil_iovar_int_set(ifp, "tdls_enable", 1); + if (err) { + inff_dbg(INFO, "TDLS not enabled (%d)\n", err); + wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_TDLS; + } else { + inff_fweh_register(cfg->pub, INFF_E_TDLS_PEER_EVENT, + inff_notify_tdls_peer_event); + } + } + + err = 
inff_vndr_cmdstr_hashtbl_init(); + if (err) { + iphy_err(drvr, "vendor_cmd_hashtable initialisation failed (%d)\n", err); + goto detach_pmsr; + } + /* (re-) activate FWEH event handling */ + err = inff_fweh_activate_events(ifp); + if (err) { + iphy_err(drvr, "FWEH activation failed (%d)\n", err); + goto detach; + } + + /* Fill in some of the advertised nl80211 supported features */ + if (inff_feat_is_enabled(ifp, INFF_FEAT_SCAN_RANDOM_MAC)) { + wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR; +#ifdef CONFIG_PM + if (wiphy->wowlan && + wiphy->wowlan->flags & WIPHY_WOWLAN_NET_DETECT) + wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR; +#endif + } + return cfg; + +detach: + inff_vndr_cmdstr_hashtbl_deinit(); +detach_pmsr: + inff_pmsr_detach(cfg); +detach_pno: + inff_pno_detach(cfg); +detach_btcoex: + inff_btcoex_detach(cfg); +detach_p2p: + inff_p2p_detach(&cfg->p2p); +wiphy_unreg_out: + wiphy_unregister(cfg->wiphy); +priv_out: + wl_deinit_priv(cfg); + inff_free_vif(vif); + ifp->vif = NULL; +wiphy_out: + inff_free_wiphy(wiphy); + kfree(cfg); + return NULL; +} + +void inff_cfg80211_detach(struct inff_cfg80211_info *cfg) +{ + if (!cfg) + return; + + inff_vndr_cmdstr_hashtbl_deinit(); + inff_pmsr_detach(cfg); + inff_pno_detach(cfg); + inff_btcoex_detach(cfg); + wiphy_unregister(cfg->wiphy); + wl_deinit_priv(cfg); + cancel_work_sync(&cfg->escan_timeout_work); + inff_free_wiphy(cfg->wiphy); + kfree(cfg); +} diff --git a/drivers/net/wireless/infineon/inffmac/cfg80211.h b/drivers/net/wireless/infineon/inffmac/cfg80211.h new file mode 100644 index 000000000000..549668b5d9bd --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/cfg80211.h @@ -0,0 +1,604 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_CFG80211_H +#define INFF_CFG80211_H + +#include "core.h" +#include "fwil_types.h" +#include "p2p.h" +#include "pno.h" +#include "scan.h" +#include "interface.h" +#include "ie.h" +#include "security.h" +#include "chanspec.h" + +/* Max length of Interworking element */ +#define INFF_IW_IES_MAX_BUF_LEN 8 + +#define INFF_SCAN_IE_LEN_MAX 2048 + +#define WL_NUM_SCAN_MAX 10 +#define WL_TLV_INFO_MAX 1024 +#define WL_BSS_INFO_MAX 2048 +#define WL_ASSOC_INFO_MAX 512 /* assoc related fil max buf */ +#define WL_EXTRA_BUF_MAX 2048 +#define WL_ROAM_TRIGGER_LEVEL -75 +#define WL_ROAM_DELTA 20 + +/* WME Access Category Indices (ACIs) */ +#define AC_BE 0 /* Best Effort */ +#define AC_BK 1 /* Background */ +#define AC_VI 2 /* Video */ +#define AC_VO 3 /* Voice */ + +#define EDCF_AC_COUNT 4 +#define MAX_8021D_PRIO 8 + +#define EDCF_ACI_MASK 0x60 +#define EDCF_ACI_SHIFT 5 +#define EDCF_ACM_MASK 0x10 +#define EDCF_ECWMIN_MASK 0x0f +#define EDCF_ECWMAX_SHIFT 4 +#define EDCF_AIFSN_MASK 0x0f +#define EDCF_AIFSN_MAX 15 +#define EDCF_ECWMAX_MASK 0xf0 + +/* Keep INFF_ESCAN_BUF_SIZE below 64K (65536). Allocing over 64K can be + * problematic on some systems and should be avoided. 
+ */ +#define INFF_ESCAN_BUF_SIZE 65000 +#define INFF_ESCAN_TIMER_INTERVAL_MS 10000 /* E-Scan timeout */ + +#define WL_ESCAN_ACTION_START 1 +#define WL_ESCAN_ACTION_CONTINUE 2 +#define WL_ESCAN_ACTION_ABORT 3 + +#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */ +#define IE_MAX_LEN 512 + +/* IE TLV processing */ +#define TLV_LEN_OFF 1 /* length offset */ +#define TLV_HDR_LEN 2 /* header length */ +#define TLV_BODY_OFF 2 /* body offset */ +#define TLV_OUI_LEN 3 /* oui id length */ + +/* 802.11 Mgmt Packet flags */ +#define INFF_VNDR_IE_BEACON_FLAG 0x1 +#define INFF_VNDR_IE_PRBRSP_FLAG 0x2 +#define INFF_VNDR_IE_ASSOCRSP_FLAG 0x4 +#define INFF_VNDR_IE_AUTHRSP_FLAG 0x8 +#define INFF_VNDR_IE_PRBREQ_FLAG 0x10 +#define INFF_VNDR_IE_ASSOCREQ_FLAG 0x20 +/* vendor IE in IW advertisement protocol ID field */ +#define INFF_VNDR_IE_IWAPID_FLAG 0x40 +/* allow custom IE id */ +#define INFF_VNDR_IE_CUSTOM_FLAG 0x100 + +/* P2P Action Frames flags (spec ordered) */ +#define INFF_VNDR_IE_GONREQ_FLAG 0x001000 +#define INFF_VNDR_IE_GONRSP_FLAG 0x002000 +#define INFF_VNDR_IE_GONCFM_FLAG 0x004000 +#define INFF_VNDR_IE_INVREQ_FLAG 0x008000 +#define INFF_VNDR_IE_INVRSP_FLAG 0x010000 +#define INFF_VNDR_IE_DISREQ_FLAG 0x020000 +#define INFF_VNDR_IE_DISRSP_FLAG 0x040000 +#define INFF_VNDR_IE_PRDREQ_FLAG 0x080000 +#define INFF_VNDR_IE_PRDRSP_FLAG 0x100000 + +#define INFF_VNDR_IE_P2PAF_SHIFT 12 + +#define INFF_MAX_DEFAULT_KEYS 6 + +/* beacon loss timeout defaults */ +#define INFF_DEFAULT_BCN_TIMEOUT_ROAM_ON 2 +#define INFF_DEFAULT_BCN_TIMEOUT_ROAM_OFF 4 + +#define INFF_VIF_EVENT_TIMEOUT msecs_to_jiffies(1500) + +#define INFF_PM_WAIT_MAXRETRY 100 + +/* cfg80211 wowlan definitions */ +#define WL_WOWLAN_MAX_PATTERNS 8 +#define WL_WOWLAN_MIN_PATTERN_LEN 1 +#define WL_WOWLAN_MAX_PATTERN_LEN 255 +#define WL_WOWLAN_PKT_FILTER_ID_FIRST 201 +#define WL_WOWLAN_PKT_FILTER_ID_LAST (WL_WOWLAN_PKT_FILTER_ID_FIRST + \ + WL_WOWLAN_MAX_PATTERNS - 1) + +#define WL_RSPEC_ENCODE_HE 0x03000000 /* HE MCS 
and Nss is stored in RSPEC_RATE_MASK */ +#define WL_RSPEC_HE_NSS_UNSPECIFIED 0xF +#define WL_RSPEC_HE_NSS_SHIFT 4 /* HE Nss value shift */ +#define WL_RSPEC_HE_GI_MASK 0x00000C00 /* HE GI indices */ +#define WL_RSPEC_HE_GI_SHIFT 10 +#define HE_GI_TO_RSPEC(gi) (((gi) << WL_RSPEC_HE_GI_SHIFT) & WL_RSPEC_HE_GI_MASK) + +/** + * enum inff_scan_status - scan engine status + * + * @INFF_SCAN_STATUS_BUSY: scanning in progress on dongle. + * @INFF_SCAN_STATUS_ABORT: scan being aborted on dongle. + * @INFF_SCAN_STATUS_SUPPRESS: scanning is suppressed in driver. + */ +enum inff_scan_status { + INFF_SCAN_STATUS_BUSY, + INFF_SCAN_STATUS_ABORT, + INFF_SCAN_STATUS_SUPPRESS, +}; + +/* dongle configuration */ +struct inff_cfg80211_conf { + u32 frag_threshold; + u32 rts_threshold; + u32 retry_short; + u32 retry_long; +}; + +/* security information with currently associated ap */ +struct inff_cfg80211_security { + u32 wpa_versions; + u32 auth_type; + u32 cipher_pairwise; + u32 cipher_group; +}; + +enum inff_profile_fwsup { + INFF_PROFILE_FWSUP_NONE, + INFF_PROFILE_FWSUP_PSK, + INFF_PROFILE_FWSUP_1X, + INFF_PROFILE_FWSUP_SAE, + INFF_PROFILE_FWSUP_ROAM +}; + +/** + * enum inff_profile_fwauth - firmware authenticator profile + * + * @INFF_PROFILE_FWAUTH_NONE: no firmware authenticator + * @INFF_PROFILE_FWAUTH_PSK: authenticator for WPA/WPA2-PSK + * @INFF_PROFILE_FWAUTH_SAE: authenticator for SAE + */ +enum inff_profile_fwauth { + INFF_PROFILE_FWAUTH_NONE, + INFF_PROFILE_FWAUTH_PSK, + INFF_PROFILE_FWAUTH_SAE +}; + +/** + * struct inff_cfg80211_profile - profile information. + * + * @bssid: bssid of joined/joining ibss. + * @sec: security information. + * @key: key information + */ +struct inff_cfg80211_profile { + u8 bssid[ETH_ALEN]; + struct inff_cfg80211_security sec; + struct inff_wsec_key key[INFF_MAX_DEFAULT_KEYS]; + enum inff_profile_fwsup use_fwsup; + u16 use_fwauth; + bool is_ft; + bool is_okc; +}; + +/** + * enum inff_vif_status - bit indices for vif status. 
+ * + * @INFF_VIF_STATUS_READY: ready for operation. + * @INFF_VIF_STATUS_CONNECTING: connect/join in progress. + * @INFF_VIF_STATUS_CONNECTED: connected/joined successfully. + * @INFF_VIF_STATUS_DISCONNECTING: disconnect/disable in progress. + * @INFF_VIF_STATUS_AP_CREATED: AP operation started. + * @INFF_VIF_STATUS_EAP_SUCCUSS: EAPOL handshake successful. + * @INFF_VIF_STATUS_ASSOC_SUCCESS: successful SET_SSID received. + */ +enum inff_vif_status { + INFF_VIF_STATUS_READY, + INFF_VIF_STATUS_CONNECTING, + INFF_VIF_STATUS_CONNECTED, + INFF_VIF_STATUS_DISCONNECTING, + INFF_VIF_STATUS_AP_CREATED, + INFF_VIF_STATUS_EAP_SUCCESS, + INFF_VIF_STATUS_ASSOC_SUCCESS, +}; + +enum inff_cfg80211_pm_state { + INFF_CFG80211_PM_STATE_RESUMED, + INFF_CFG80211_PM_STATE_RESUMING, + INFF_CFG80211_PM_STATE_SUSPENDED, + INFF_CFG80211_PM_STATE_SUSPENDING, +}; + +/** + * enum inff_mgmt_tx_status - mgmt frame tx status + * + * @INFF_MGMT_TX_ACK: mgmt frame acked + * @INFF_MGMT_TX_NOACK: mgmt frame not acked + * @INFF_MGMT_TX_OFF_CHAN_COMPLETED: off-channel complete + * @INFF_MGMT_TX_SEND_FRAME: mgmt frame tx is in progress + */ +enum inff_mgmt_tx_status { + INFF_MGMT_TX_ACK, + INFF_MGMT_TX_NOACK, + INFF_MGMT_TX_OFF_CHAN_COMPLETED, + INFF_MGMT_TX_SEND_FRAME +}; + +/** + * struct vif_saved_ie - holds saved IEs for a virtual interface. + * + * @probe_req_ie: IE info for probe request. + * @probe_res_ie: IE info for probe response. + * @beacon_ie: IE info for beacon frame. + * @assoc_res_ie: IE info for association response frame. + * @probe_req_ie_len: IE info length for probe request. + * @probe_res_ie_len: IE info length for probe response. + * @beacon_ie_len: IE info length for beacon frame. + * @assoc_res_ie_len: IE info length for association response frame. 
+ */ +struct vif_saved_ie { + u8 probe_req_ie[IE_MAX_LEN]; + u8 probe_res_ie[IE_MAX_LEN]; + u8 beacon_ie[IE_MAX_LEN]; + u8 assoc_req_ie[IE_MAX_LEN]; + u8 assoc_res_ie[IE_MAX_LEN]; + u32 probe_req_ie_len; + u32 probe_res_ie_len; + u32 beacon_ie_len; + u32 assoc_req_ie_len; + u32 assoc_res_ie_len; +}; + +/** + * struct inff_cfg80211_vif - virtual interface specific information. + * + * @ifp: lower layer interface pointer + * @wdev: wireless device. + * @profile: profile information. + * @sme_state: SME state using enum inff_vif_status bits. + * @list: linked list. + * @mgmt_rx_reg: registered rx mgmt frame types. + * @cqm_rssi_low: Lower RSSI limit for CQM monitoring + * @cqm_rssi_high: Upper RSSI limit for CQM monitoring + * @cqm_rssi_last: Last RSSI reading for CQM monitoring + */ +struct inff_cfg80211_vif { + struct inff_if *ifp; + struct wireless_dev wdev; + struct inff_cfg80211_profile profile; + unsigned long sme_state; + struct vif_saved_ie saved_ie; + struct list_head list; + struct completion mgmt_tx; + unsigned long mgmt_tx_status; + u32 mgmt_tx_id; + u16 mgmt_rx_reg; + int is_11d; + s32 cqm_rssi_low; + s32 cqm_rssi_high; + s32 cqm_rssi_last; +}; + +/* association inform */ +struct inff_cfg80211_connect_info { + u8 *req_ie; + s32 req_ie_len; + u8 *resp_ie; + s32 resp_ie_len; +}; + +/* assoc ie length */ +struct inff_cfg80211_assoc_ielen_le { + __le32 req_len; + __le32 resp_len; + __le32 flags; +}; + +struct inff_cfg80211_edcf_acparam { + u8 ACI; + u8 ECW; + u16 TXOP; /* stored in network order (ls octet first) */ +}; + +/* dongle escan state */ +enum wl_escan_state { + WL_ESCAN_STATE_IDLE, + WL_ESCAN_STATE_SCANNING +}; + +struct cqm_rssi_info { + bool enable; + s32 rssi_threshold; +}; + +/** + * struct inff_cfg80211_vif_event - virtual interface event information. + * + * @vif_wq: waitqueue awaiting interface event from firmware. + * @vif_event_lock: protects other members in this structure. + * @vif_complete: completion for net attach. 
+ * @action: either add, change, or delete. + * @vif: virtual interface object related to the event. + */ +struct inff_cfg80211_vif_event { + wait_queue_head_t vif_wq; + spinlock_t vif_event_lock; /* protects other members in this structure */ + u8 action; + struct inff_cfg80211_vif *vif; +}; + +/** + * struct inff_cfg80211_wowl - wowl related information. + * + * @active: set on suspend, cleared on resume. + * @pre_pmmode: firmware PM mode at entering suspend. + * @nd: net dectect data. + * @nd_info: helper struct to pass to cfg80211. + * @nd_data_wait: wait queue to sync net detect data. + * @nd_data_completed: completion for net detect data. + * @nd_enabled: net detect enabled. + */ +struct inff_cfg80211_wowl { + bool active; + u32 pre_pmmode; + struct cfg80211_wowlan_nd_match *nd; + struct cfg80211_wowlan_nd_info *nd_info; + wait_queue_head_t nd_data_wait; + bool nd_data_completed; + bool nd_enabled; +}; + +struct network_blob { + char ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len; + int key_mgmt; + char psk[WSEC_MAX_PASSWORD_LEN]; + char sae_password[WSEC_MAX_PASSWORD_LEN]; + u8 proto; + u8 pairwise_cipher; + u8 frequency; +}; + +struct drv_config_pfn_params { + u8 pfn_config; + u8 count; + struct network_blob *network_blob_data; +}; + +/** + * struct inff_cfg80211_info - dongle private data of cfg80211 interface + * + * @wiphy: wiphy object for cfg80211 interface. + * @ops: pointer to copy of ops as registered with wiphy object. + * @conf: dongle configuration. + * @p2p: peer-to-peer specific information. + * @btcoex: Bluetooth coexistence information. + * @scan_request: cfg80211 scan request object. + * @usr_sync: mainly for dongle up/down synchronization. + * @bss_list: bss_list holding scanned ap information. + * @bss_info: bss information for cfg80211 layer. + * @conn_info: association info. + * @pmk_list: wpa2 pmk list. + * @scan_status: scan activity on the dongle. + * @pub: common driver information. + * @channel: current channel. 
+ * @int_escan_map: bucket map for which internal e-scan is done. + * @ibss_starter: indicates this sta is ibss starter. + * @pwr_save: indicate whether dongle to support power save mode. + * @dongle_up: indicate whether dongle up or not. + * @roam_on: on/off switch for dongle self-roaming. + * @scan_tried: indicates if first scan attempted. + * @dcmd_buf: dcmd buffer. + * @extra_buf: mainly to grab assoc information. + * @debugfsdir: debugfs folder for this device. + * @escan_info: escan information. + * @escan_timeout: Timer for catch scan timeout. + * @escan_timeout_work: scan timeout worker. + * @vif_list: linked list of vif instances. + * @vif_cnt: number of vif instances. + * @vif_event: vif event signalling. + * @wowl: wowl related information. + * @pno: information of pno module. + * @wlan_sense: WLAN Sensing specific information. + */ +struct inff_cfg80211_info { + struct wiphy *wiphy; + struct inff_cfg80211_conf *conf; + struct inff_p2p_info p2p; + struct inff_btcoex_info *btcoex; + struct cfg80211_scan_request *scan_request; + struct mutex usr_sync; /* mainly for dongle up/down synchronization */ + struct wl_cfg80211_bss_info *bss_info; + struct inff_cfg80211_connect_info conn_info; + struct inff_pmk_list_le pmk_list; + unsigned long scan_status; + struct inff_pub *pub; + u32 channel; + u32 int_escan_map; + bool ibss_starter; + bool pwr_save; + bool dongle_up; + bool scan_tried; + u8 *dcmd_buf; + u8 *extra_buf; + struct dentry *debugfsdir; + struct escan_info escan_info; + struct timer_list escan_timeout; + struct work_struct escan_timeout_work; + struct cqm_rssi_info cqm_info; + struct list_head vif_list; + struct inff_cfg80211_vif_event vif_event; + struct completion vif_disabled; + struct inff_d11inf d11inf; + struct inff_assoclist_le assoclist; + struct inff_cfg80211_wowl wowl; + struct inff_pno_info *pno; + u8 ac_priority[MAX_8021D_PRIO]; + u8 pm_state; + u8 num_softap; + u8 pfn_enable; + u8 pfn_connection; + struct drv_config_pfn_params pfn_data; + 
struct inff_pmsr_info *pmsr_info; + u8 mchan_conf; +}; + +static inline enum nl80211_band inff_d11_chan_band_to_nl80211(u8 band) +{ + if (band == INFF_CHAN_BAND_2G) + return NL80211_BAND_2GHZ; + else if (band == INFF_CHAN_BAND_5G) + return NL80211_BAND_5GHZ; + else + return NL80211_BAND_6GHZ; +} + +static inline struct wiphy *cfg_to_wiphy(struct inff_cfg80211_info *cfg) +{ + return cfg->wiphy; +} + +static inline struct inff_cfg80211_info *wiphy_to_cfg(struct wiphy *w) +{ + struct inff_pub *drvr = wiphy_priv(w); + + return drvr->config; +} + +static inline struct inff_cfg80211_info *wdev_to_cfg(struct wireless_dev *wd) +{ + return wiphy_to_cfg(wd->wiphy); +} + +static inline struct inff_cfg80211_vif *wdev_to_vif(struct wireless_dev *wdev) +{ + return container_of(wdev, struct inff_cfg80211_vif, wdev); +} + +static inline +struct net_device *cfg_to_ndev(struct inff_cfg80211_info *cfg) +{ + return inff_get_ifp(cfg->pub, 0)->ndev; +} + +static inline struct inff_cfg80211_info *ndev_to_cfg(struct net_device *ndev) +{ + return wdev_to_cfg(ndev->ieee80211_ptr); +} + +static inline struct inff_cfg80211_profile *ndev_to_prof(struct net_device *nd) +{ + struct inff_if *ifp = netdev_priv(nd); + + return &ifp->vif->profile; +} + +static inline struct inff_cfg80211_vif *ndev_to_vif(struct net_device *ndev) +{ + struct inff_if *ifp = netdev_priv(ndev); + + return ifp->vif; +} + +static inline struct +inff_cfg80211_connect_info *cfg_to_conn(struct inff_cfg80211_info *cfg) +{ + return &cfg->conn_info; +} + +static inline void inff_init_prof(struct inff_cfg80211_profile *prof) +{ + memset(prof, 0, sizeof(*prof)); +} + +u16 chandef_to_chanspec(struct inff_d11inf *d11inf, struct cfg80211_chan_def *ch); +u8 nl80211_band_to_fwil(enum nl80211_band band); + +struct inff_cfg80211_info *inff_cfg80211_attach(struct inff_pub *drvr, + struct cfg80211_ops *ops, + bool p2pdev_forced); +void inff_cfg80211_detach(struct inff_cfg80211_info *cfg); +s32 inff_cfg80211_up(struct net_device *ndev); 
+s32 inff_cfg80211_down(struct net_device *ndev); +struct cfg80211_ops *inff_cfg80211_get_ops(struct inff_mp_device *settings); + +u16 channel_to_chanspec(struct inff_d11inf *d11inf, + struct ieee80211_channel *ch); +bool inff_get_vif_state_any(struct inff_cfg80211_info *cfg, + unsigned long state); +void inff_cfg80211_arm_vif_event(struct inff_cfg80211_info *cfg, + struct inff_cfg80211_vif *vif); +bool inff_cfg80211_vif_event_armed(struct inff_cfg80211_info *cfg); +int inff_cfg80211_wait_vif_event(struct inff_cfg80211_info *cfg, + u8 action, ulong timeout); +void inff_set_mpc(struct inff_if *ndev, int mpc); +void inff_cfg80211_free_netdev(struct net_device *ndev); +void inff_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev); +int inff_vndr_cmdstr_hashtbl_init(void); +void inff_vndr_cmdstr_hashtbl_deinit(void); +s32 +inff_set_channel(struct inff_cfg80211_info *cfg, struct ieee80211_channel *chan); +int inff_cfg80211_get_channel(struct wiphy *wiphy, + struct wireless_dev *wdev, + unsigned int link_id, + struct cfg80211_chan_def *chandef); + +s32 +inff_compare_update_same_bss(struct inff_cfg80211_info *cfg, + struct inff_bss_info_le *bss, + struct inff_bss_info_le *bss_info_le); + +s32 inff_inform_bss(struct inff_cfg80211_info *cfg); + +struct inff_pno_net_info_le * +inff_get_netinfo_array(struct inff_pno_scanresults_le *pfn_v1); + +s32 inff_mchan_config(struct inff_cfg80211_info *cfg); + +bool inff_is_linkup(struct inff_cfg80211_vif *vif, + const struct inff_event_msg *e); + +s32 inff_inform_ibss(struct inff_cfg80211_info *cfg, + struct net_device *ndev, const u8 *bssid); + +s32 +inff_bss_roaming_done(struct inff_cfg80211_info *cfg, + struct net_device *ndev, + const struct inff_event_msg *e); + +s32 +inff_bss_connect_done(struct inff_cfg80211_info *cfg, + struct net_device *ndev, const struct inff_event_msg *e, + bool completed); + +bool inff_is_linkdown(struct inff_cfg80211_vif *vif, + const struct inff_event_msg *e); + +void inff_link_down(struct 
inff_cfg80211_vif *vif, u16 reason, + bool locally_generated); + +bool inff_is_nonetwork(struct inff_cfg80211_info *cfg, + const struct inff_event_msg *e); + +s32 inff_inform_single_bss(struct inff_cfg80211_info *cfg, + struct inff_bss_info_le *bi); + +void inff_wifi_prioritize_acparams(const + struct inff_cfg80211_edcf_acparam *acp, u8 *priority); + +static __always_inline void inff_delay(u32 ms) +{ + if (ms < 1000 / HZ) { + cond_resched(); + mdelay(ms); + } else { + msleep(ms); + } +} + +#endif /* INFF_CFG80211_H */ -- 2.25.1 Driver implementation of network stack related operations and other core functionalities of the driver. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/core.c | 2326 ++++++++++++++++++ drivers/net/wireless/infineon/inffmac/core.h | 355 +++ 2 files changed, 2681 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/core.c create mode 100644 drivers/net/wireless/infineon/inffmac/core.h diff --git a/drivers/net/wireless/infineon/inffmac/core.c b/drivers/net/wireless/infineon/inffmac/core.c new file mode 100644 index 000000000000..0ddb16072a9d --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/core.c @@ -0,0 +1,2326 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "utils.h" +#include "defs.h" +#include "chanspec.h" +#include "core.h" +#include "bus.h" +#include "debug.h" +#include "fwil_types.h" +#include "p2p.h" +#include "pno.h" +#include "cfg80211.h" +#include "fwil.h" +#include "feature.h" +#include "proto.h" +#include "pcie.h" +#include "common.h" +#include "twt.h" +#include "bt_shared_sdio.h" +#include "sdio.h" +#include "offload.h" +#include "pmsr.h" +#include "xdp.h" +#include "logger.h" +#include "dfu.h" + +#define MAX_WAIT_FOR_8021X_TX msecs_to_jiffies(950) + +#define INFF_BSSIDX_INVALID -1 + +#define RXS_PBPRES BIT(2) + +#define D11_PHY_HDR_LEN 6 + +#define WL_CNT_XTLV_SLICE_IDX 256 + +#define IOVAR_XTLV_BEGIN 4 + +#define XTLV_TYPE_SIZE 2 + +#define XTLV_TYPE_LEN_SIZE 4 + +#define WL_CNT_IOV_BUF 2048 + +#define CNT_VER_6 6 +#define CNT_VER_10 10 +#define CNT_VER_30 30 + +/* Macro to calculate packing factor with scalar 4 in a xTLV */ +#define PACKING_FACTOR(args) ({ \ + typeof(args) _args = (args); \ + ((_args) % 4 == 0 ? 
0 : (4 - ((_args) % 4))); \ + }) + +struct d11rxhdr_le { + __le16 rx_frame_size; + u16 PAD; + __le16 phy_rx_status_0; + __le16 phy_rx_status_1; + __le16 phy_rx_status_2; + __le16 phy_rx_status_3; + __le16 phy_rx_status_4; + __le16 phy_rx_status_5; + __le16 rx_status_1; + __le16 rx_status_2; + __le16 rx_tsf_time; + __le16 rx_chan; + u8 unknown[12]; +} __packed; + +struct wlc_d11rxhdr { + struct d11rxhdr_le rxhdr; + __le32 tsf_l; + s8 rssi; + s8 rxpwr0; + s8 rxpwr1; + s8 do_rssi_ma; + s8 rxpwr[4]; +} __packed; + +static const char fmac_ethtool_string_stats_v6[][ETH_GSTRING_LEN] = { + "txbyte", "txerror", "txprshort", "txnobuf", "txrunt", "txcmiss", "txphyerr", + "rxframe", "rxerror", "rxnobuf", "rxbadds", "rxfragerr", + "rxgiant", "rxbadproto", "rxbadda", "rxoflo", "d11cnt_rxcrc_off", "dmade", + "dmape", "tbtt", "pkt_callback_reg_fail", "txackfrm", "txbcnfrm", "rxtoolate", + "txtplunfl", "rxinvmachdr", "rxbadplcp", "rxstrt", "rxmfrmucastmbss", + "rxrtsucast", "rxackucast", "rxmfrmocast", "rxrtsocast", "rxdfrmmcast", + "rxcfrmmcast", "rxdfrmucastobss", "rxrsptmout", "rxf0ovfl", "rxf2ovfl", "pmqovfl", + "frmscons", "rxback", "txfrag", "txfail", "txretrie", "txrts", "txnoack", "rxmulti", + + "txfrmsnt", "tkipmicfaill", "tkipreplay", "ccmpreplay", "fourwayfail", "wepicverr", + "tkipicverr", "tkipmicfaill_mcst", "tkipreplay_mcst", "ccmpreplay_mcst", + "fourwayfail_mcst", "wepicverr_mcst", "tkipicverr_mcst", "txexptime", "phywatchdog", + "prq_undirected_entries", "atim_suppress_count", "bcn_template_not_ready_done", + + "rx1mbps", "rx5mbps5", "rx9mbps", "rx12mbps", "rx24mbps", "rx48mbps", + "rx108mbps", "rx216mbps", "rx324mbps", "rx432mbps", "rx540mbps", + "pktengrxdmcast", "bphy_txmpdu_sgi", "txmpdu_stbc", "rxdrop20s", +}; + +static const char fmac_ethtool_string_stats_v10[][ETH_GSTRING_LEN] = { + "txframe", "txbyte", "txretrans", "txerror", "txctl", "txprshort", + "txserr", "txnobuf", "txnoassoc", "txrunt", + "txchit", "txcmiss", "txphyerr", "txphycrs", "rxframe", 
"rxbyte", + "rxerror", "rxctl", "rxnobuf", "rxnondata", + "rxbadds", "rxbadcm", "rxfragerr", "rxrunt", "rxgiant", "rxnoscb", + "rxbadprot", "rxbadsrcma", "rxbadda", "rxfilter", + "rxoflo", "rxuflo[0]", "rxuflo[1]", "rxuflo[2]", "rxuflo[3]", + "rxuflo[4]", "rxuflo[5]", "d11cnt_rxcrc_off", "d11cnt_txnocts_off", + "dmade", "dmada", "dmape", "reset", "tbtt", "txdmawar", + "pkt_callback_reg_fail", "txallfrm", "txrtsfrm", "txctsfrm", + "txackfrm", "txdnlfrm", "txbcnfrm", "txfunfl[0]", "txfunfl[1]", + "txfunfl[2]", "txfunfl[3]", "txfunfl[4]", "txfunfl[5]", "rxtoolate", + "txfbw", "txtplunfl", "txphyerror", "rxfrmtoolong", "rxfrmtooshrt", + "rxinvmachdr", "rxbadfcs", "rxbadplcp", "rxcrsglitch", + "rxstrt", "rxdfrmucastmbss", "rxmfrmucastmbss", "rxcfrmucast", + "rxrtsucast", "rxctsucast", "rxackucast", + "rxdfrmocast", "rxmfrmocast", "rxcfrmocast", "rxrtsocast", + "rxctsocast", "rxdfrmmcast", "rxmfrmmcast", "rxcfrmmcast", + "rxbeaconmbss", "rxdfrmucastobss", "rxbeaconobss", "rxrsptmout", + "bcntxcancl", "rxf0ovfl", "rxf1ovfl", "rxf2ovfl", "txsfovfl", + "pmqovfl", "rxcgprqfrm", "rxcgprsqovfl", "txcgprsfail", "txcgprssuc", "prs_timeout", + "rxnack", "frmscons", "txnack", "rxback", "txback", "txfrag", "txmulti", "txfail", + "txretry", "txretrie", "rxdup", "txrts", "txnocts", "txnoack", "rxfrag", "rxmulti", + "rxcrc", "txfrmsnt", "rxundec", "tkipmicfaill", "tkipcntrmsr", "tkipreplay", "ccmpfmterr", + "ccmpreplay", "ccmpundec", "fourwayfail", "wepundec", "wepicverr", "decsuccess", + "tkipicverr", "wepexcluded", "psmwds", "phywatchdog", "prq_entries_handled", + "prq_undirected_entries", "prq_bad_entries", "atim_suppress_count", + "bcn_template_not_ready", "bcn_template_not_ready_done", "late_tbtt_dpc", + "rx1mbps", "rx2mbps", "rx5mbps5", "rx6mbps", "rx9mbps", "rx11mbps", "rx12mbps", "rx18mbps", + "rx24mbps", "rx36mbps", "rx48mbps", "rx54mbps", "rx108mbps", "rx162mbps", + "rx216mbps", "rx270mbps", "rx324mbps", "rx378mbps", "rx432mbps", "rx486mbps", "rx540mbps", + "pktengrxducast", 
"pktengrxdmcast", "bphy_rxcrsglitch", "bphy_b", "txexptime", + "rxmpdu_sgi", "txmpdu_stbc", "rxmpdu_stbc", "tkipmicfaill_mcst", "tkipcntrmsr_mcst", + "tkipreplay_mcst", "ccmpfmterr_mcst", "ccmpreplay_mcst", "ccmpundec_mcst", + "fourwayfail_mcst", "wepundec_mcst", "wepicverr_mcst", "decsuccess_mcst", + "tkipicverr_mcst", "wepexcluded_mcst", "reinit", "pstatxnoassoc", + "pstarxucast", "pstarxbcmc", "pstatxbcmc", "cso_normal", "chained", + "chainedsz1", "unchained", "maxchainsz", "currchainsz", "rxdrop20s", + "pciereset", "cfgrestore", "reinitreason[0]", "reinitreason[1]", + "reinitreason[2]", "reinitreason[3]", "reinitreason[4]", + "reinitreason[5]", "reinitreason[6]", "reinitreason[7]", "rxrtry", +}; + +static const char fmac_ethtool_string_stats_v30[][ETH_GSTRING_LEN] = { + "txframe", "txbyte", "txretrans", "txerror", "txctl", "txprshort", "txserr", "txnobuf", + "txnoassoc", "txrunt", "txchit", "txcmiss", "txuflo", "txphyerr", "txphycrs", + "rxframe", "rxbyte", "rxerror", "rxctl", "rxnobuf", "rxnondata", "rxbadds", "rxbadcm", + "rxfragerr", "rxrunt", "rxgiant", "rxnoscb", "rxbadproto", "rxbadsrcmac", + "rxbadda", "rxfilter", "rxoflo", "rxuflo[0]", "rxuflo[1]", + "rxuflo[2]", "rxuflo[3]", "rxuflo[4]", "rxuflo[5]", + + "d11cnt_txrts_off", "d11cnt_rxcrc_off", "d11cnt_txnocts_off", "dmade", "dmada", + "dmape", "reset", "tbtt", "txdmawar", "pkt_callback_reg_fail", + "txfrag", "txmulti", "txfail", "txretry", + "txretrie", "rxdup", "txrts", "txnocts", "txnoack", "rxfrag", + "rxmulti", "rxcrc", "txfrmsnt", "rxundec", + "tkipmicfaill", "tkipcntrmsr", "tkipreplay", "ccmpfmterr", + "ccmpreplay", "ccmpundec", "fourwayfail", "wepundec", + "wepicverr", "decsuccess", "tkipicverr", "wepexcluded", + "txchanrej", "psmwds", "phywatchdog", + "prq_entries_handled", "prq_undirected_entries", "prq_bad_entries", + "atim_suppress_count", "bcn_template_not_ready", "bcn_template_not_ready_done", + "late_tbtt_dpc", + + "rx1mbps", "rx2mbps", "rx5mbps5", "rx6mbps", "rx9mbps", + "rx11mbps", 
"rx12mbps", "rx18mbps", "rx24mbps", "rx36mbps", + "rx48mbps", "rx54mbps", "rx108mbps", "rx162mbps", "rx216mbps", + "rx270mbps", "rx324mbps", "rx378mbps", "rx432mbps", "rx486mbps", + "rx540mbps", "rfdisable", "txexptime", "txmpdu_sgi", "rxmpdu_sgi", + "txmpdu_stbc", "rxmpdu_stbc", "rxundec_mcst", + + "tkipmicfaill_mcst", "tkipcntrmsr_mcst", "tkipreplay_mcst", + "ccmpfmterr_mcst", "ccmpreplay_mcst", "ccmpundec_mcst", + "fourwayfail_mcst", "wepundec_mcst", "wepicverr_mcst", + "decsuccess_mcst", "tkipicverr_mcst", "wepexcluded_mcst", + "dma_hang", "reinit", "pstatxucast", + "pstatxnoassoc", "pstarxucast", "pstarxbcmc", + "pstatxbcmc", "cso_passthrough", "cso_normal", + "chained", "chainedsz1", "unchained", + "maxchainsz", "currchainsz", "pciereset", + "cfgrestore", "reinitreason[0]", "reinitreason[1]", + "reinitreason[2]", "reinitreason[3]", "reinitreason[4]", + "reinitreason[5]", "reinitreason[6]", "reinitreason[7]", + "rxrtry", "rxmpdu_mu", + + "txbar", "rxbar", "txpspoll", "rxpspoll", "txnull", + "rxnull", "txqosnull", "rxqosnull", "txassocreq", "rxassocreq", + "txreassocreq", "rxreassocreq", "txdisassoc", "rxdisassoc", + "txassocrsp", "rxassocrsp", "txreassocrsp", "rxreassocrsp", + "txauth", "rxauth", "txdeauth", "rxdeauth", "txprobereq", + "rxprobereq", "txprobersp", "rxprobersp", "txaction", + "rxaction", "ampdu_wds", "txlost", "txdatamcast", + "txdatabcast", "psmxwds", "rxback", "txback", + "p2p_tbtt", "p2p_tbtt_miss", "txqueue_start", "txqueue_end", + "txbcast", "txdropped", "rxbcast", "rxdropped", + "txq_end_assoccb", +}; + +#define INFF_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock) +#define INFF_IF_STA_LIST_LOCK(ifp, flags) \ + spin_lock_irqsave(&(ifp)->sta_list_lock, (flags)) +#define INFF_IF_STA_LIST_UNLOCK(ifp, flags) \ + spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags)) + +#define INFF_STA_NULL ((struct inff_sta *)NULL) + +/* dscp exception format {dscp hex, up} */ +struct cfg80211_dscp_exception dscp_excpt[] = { +{DSCP_EF, 6}, 
{DSCP_CS4, 5}, {DSCP_AF41, 5}, {DSCP_CS3, 4} }; + +/* dscp range : up[0 ~ 7] */ +struct cfg80211_dscp_range dscp_range[8] = { +{0, 7}, {8, 15}, {16, 23}, {24, 31}, +{32, 39}, {40, 47}, {48, 55}, {56, 63} }; + +char *inff_ifname(struct inff_if *ifp) +{ + if (!ifp) + return ""; + + if (ifp->ndev) + return ifp->ndev->name; + + return ""; +} + +struct inff_if *inff_get_ifp(struct inff_pub *drvr, int ifidx) +{ + struct inff_if *ifp; + s32 bsscfgidx; + + if (ifidx < 0 || ifidx >= INFF_MAX_IFS) { + iphy_err(drvr, "ifidx %d out of range\n", ifidx); + return NULL; + } + + ifp = NULL; + bsscfgidx = drvr->if2bss[ifidx]; + if (bsscfgidx >= 0) + ifp = drvr->iflist[bsscfgidx]; + + return ifp; +} + +static void _inff_set_multicast_list(struct work_struct *work) +{ + struct inff_if *ifp = container_of(work, struct inff_if, + multicast_work); + struct inff_pub *drvr = ifp->drvr; + struct net_device *ndev; + struct netdev_hw_addr *ha; + u32 cmd_value, cnt; + __le32 cnt_le; + char *buf, *bufp; + u32 buflen; + s32 err; + + inff_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); + + ndev = ifp->ndev; + + /* Determine initial value of allmulti flag */ + cmd_value = (ndev->flags & IFF_ALLMULTI) ? true : false; + + /* Send down the multicast list first. */ + cnt = netdev_mc_count(ndev); + buflen = sizeof(cnt) + (cnt * ETH_ALEN); + buf = kmalloc(buflen, GFP_KERNEL); + if (!buf) + return; + bufp = buf; + + cnt_le = cpu_to_le32(cnt); + memcpy(bufp, &cnt_le, sizeof(cnt_le)); + bufp += sizeof(cnt_le); + + netdev_for_each_mc_addr(ha, ndev) { + if (!cnt) + break; + memcpy(bufp, ha->addr, ETH_ALEN); + bufp += ETH_ALEN; + cnt--; + } + + err = inff_fil_iovar_data_set(ifp, "mcast_list", buf, buflen); + if (err < 0) { + iphy_err(drvr, "Setting mcast_list failed, %d\n", err); + cmd_value = cnt ? true : cmd_value; + } + + kfree(buf); + + /* + * Now send the allmulti setting. 
This is based on the setting in the + * net_device flags, but might be modified above to be turned on if we + * were trying to set some addresses and dongle rejected it... + */ + err = inff_fil_iovar_int_set(ifp, "allmulti", cmd_value); + if (err < 0) + iphy_err(drvr, "Setting allmulti failed, %d\n", err); + + /*Finally, pick up the PROMISC flag */ + cmd_value = (ndev->flags & IFF_PROMISC) ? true : false; + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_PROMISC, cmd_value); + if (err < 0) { + /* PROMISC unsupported by firmware of older chips */ + if (err == -EBADE) + iphy_info_once(drvr, "INFF_C_SET_PROMISC unsupported\n"); + else + iphy_err(drvr, "Setting INFF_C_SET_PROMISC failed, err=%d\n", + err); + } + inff_offload_configure_arp_nd(ifp, !cmd_value); +} + +#if IS_ENABLED(CONFIG_IPV6) +static void inff_update_ipv6_addr(struct work_struct *work) +{ + struct inff_if *ifp = container_of(work, struct inff_if, + ndoffload_work); + struct inff_pub *drvr = ifp->drvr; + int i, ret; + struct ipv6_addr addr = {0}; + + /* clear the table in firmware */ + + if (inff_cfg80211_get_iftype(ifp) == NL80211_IFTYPE_STATION && + inff_feat_is_enabled(ifp, INFF_FEAT_OFFLOADS)) + ret = inff_offload_host_ipv6_update(ifp, INFF_OFFLOAD_ICMP | INFF_OFFLOAD_ND, + &addr, 1, false); + else + ret = inff_fil_iovar_data_set(ifp, "nd_hostip_clear", NULL, 0); + + if (ret) { + inff_dbg(TRACE, "fail to clear nd ip table err:%d\n", ret); + return; + } + + for (i = 0; i < ifp->ipv6addr_idx; i++) { + if (inff_cfg80211_get_iftype(ifp) == NL80211_IFTYPE_STATION && + inff_feat_is_enabled(ifp, INFF_FEAT_OFFLOADS)) + ret = inff_offload_host_ipv6_update(ifp, INFF_OFFLOAD_ICMP | + INFF_OFFLOAD_ND, + &ifp->ipv6_addr_tbl[i], 0, true); + else + ret = inff_fil_iovar_data_set(ifp, "nd_hostip", + &ifp->ipv6_addr_tbl[i], + sizeof(struct in6_addr)); + if (ret) + iphy_err(drvr, "add nd ip err %d\n", ret); + } +} +#else +static void inff_update_ipv6_addr(struct work_struct *work) +{ +} +#endif + +static int 
inff_netdev_set_mac_address(struct net_device *ndev, void *addr) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct sockaddr *sa = (struct sockaddr *)addr; + int err; + + inff_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); + + err = inff_c_set_cur_etheraddr(ifp, sa->sa_data); + if (err >= 0) { + inff_dbg(TRACE, "updated to %pM\n", sa->sa_data); + memcpy(ifp->mac_addr, sa->sa_data, ETH_ALEN); + eth_hw_addr_set(ifp->ndev, ifp->mac_addr); + } + return err; +} + +static void inff_netdev_set_multicast_list(struct net_device *ndev) +{ + struct inff_if *ifp = netdev_priv(ndev); + + schedule_work(&ifp->multicast_work); +} + +/** + * inff_skb_is_iapp - checks if skb is an IAPP packet + * + * @skb: skb to check + */ +static bool inff_skb_is_iapp(struct sk_buff *skb) +{ + static const u8 iapp_l2_update_packet[6] __aligned(2) = { + 0x00, 0x01, 0xaf, 0x81, 0x01, 0x00, + }; + unsigned char *eth_data; +#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + const u16 *a, *b; +#endif + + if (skb->len - skb->mac_len != 6 || + !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) + return false; + + eth_data = skb_mac_header(skb) + ETH_HLEN; +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) + return !(((*(const u32 *)eth_data) ^ (*(const u32 *)iapp_l2_update_packet)) | + ((*(const u16 *)(eth_data + 4)) ^ (*(const u16 *)(iapp_l2_update_packet + 4)))); +#else + a = (const u16 *)eth_data; + b = (const u16 *)iapp_l2_update_packet; + + return !((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])); +#endif +} + +static netdev_tx_t inff_netdev_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + int ret; + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = ifp->drvr; + struct ethhdr *eh; + int head_delta; + unsigned int tx_bytes = skb->len; + + inff_dbg(DATA, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); + + /* Can the device send data? 
*/ + if (drvr->bus_if->state != INFF_BUS_UP) { + iphy_err(drvr, "xmit rejected state=%d\n", drvr->bus_if->state); + netif_stop_queue(ndev); + dev_kfree_skb(skb); + ret = -ENODEV; + goto done; + } + + /* Some recent firmwares disassociate STA when they receive + * an 802.11f ADD frame. This behavior can lead to a local DoS security + * issue. Attacker may trigger disassociation of any STA by sending a + * proper Ethernet frame to the wireless interface. + * + * Moreover this feature may break AP interfaces in some specific + * setups. This applies e.g. to the bridge with hairpin mode enabled and + * IFLA_BRPORT_MCAST_TO_UCAST set. IAPP packet generated by a firmware + * will get passed back to the wireless interface and cause immediate + * disassociation of a just-connected STA. + */ + if (!drvr->settings->iapp && inff_skb_is_iapp(skb)) { + dev_kfree_skb(skb); + ret = -EINVAL; + goto done; + } + + /* Make sure there's enough writeable headroom */ + if (skb_headroom(skb) < drvr->hdrlen || skb_header_cloned(skb)) { + head_delta = max_t(int, drvr->hdrlen - skb_headroom(skb), 0); + + inff_dbg(INFO, "%s: %s headroom\n", inff_ifname(ifp), + head_delta ? 
"insufficient" : "unmodifiable"); + atomic_inc(&drvr->bus_if->stats.pktcowed); + ret = pskb_expand_head(skb, ALIGN(head_delta, NET_SKB_PAD), 0, + GFP_ATOMIC); + if (ret < 0) { + iphy_err(drvr, "%s: failed to expand headroom\n", + inff_ifname(ifp)); + atomic_inc(&drvr->bus_if->stats.pktcow_failed); + dev_kfree_skb(skb); + goto done; + } + } + + /* validate length for ether packet */ + if (skb->len < sizeof(*eh)) { + ret = -EINVAL; + dev_kfree_skb(skb); + goto done; + } + + eh = (struct ethhdr *)(skb->data); + + if (eh->h_proto == htons(ETH_P_PAE)) + atomic_inc(&ifp->pend_8021x_cnt); + + /* Look into dscp to WMM UP mapping with cfg80211_qos_map */ + if (drvr->settings->pkt_prio) { + skb->priority = cfg80211_classify8021d(skb, drvr->qos_map); + /* determine the priority */ + } else if ((skb->priority == 0) || (skb->priority > 7)) { + skb->priority = cfg80211_classify8021d(skb, NULL); + } + + /* set pacing shift for packet aggregation */ + sk_pacing_shift_update(skb->sk, 8); + + ret = inff_proto_tx_queue_data(drvr, ifp->ifidx, skb); + if (ret < 0) + inff_txfinalize(ifp, skb, false); + +done: + if (ret) { + ndev->stats.tx_dropped++; + } else { + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += tx_bytes; + } + + /* Return ok: we always eat the packet */ + return NETDEV_TX_OK; +} + +void inff_txflowblock_if(struct inff_if *ifp, + enum inff_netif_stop_reason reason, bool state) +{ + unsigned long flags; + + if (!ifp || !ifp->ndev) + return; + + inff_dbg(TRACE, "enter: bsscfgidx=%d stop=0x%X reason=%d state=%d\n", + ifp->bsscfgidx, ifp->netif_stop, reason, state); + + spin_lock_irqsave(&ifp->netif_stop_lock, flags); + if (state) { + if (!ifp->netif_stop) + netif_stop_queue(ifp->ndev); + ifp->netif_stop |= reason; + } else { + ifp->netif_stop &= ~reason; + if (!ifp->netif_stop) + netif_wake_queue(ifp->ndev); + } + spin_unlock_irqrestore(&ifp->netif_stop_lock, flags); +} + +void inff_netif_rx(struct inff_if *ifp, struct sk_buff *skb, bool inirq) +{ + /* Most firmwares send 
802.11f ADD frame every time a new + * STA connects to the AP interface. This is an obsoleted standard most + * users don't use, so don't pass these frames up unless requested. + */ + if (!ifp->drvr->settings->iapp && inff_skb_is_iapp(skb)) { + inff_pkt_buf_free_skb(skb); + return; + } + + if (skb->pkt_type == PACKET_MULTICAST) + ifp->ndev->stats.multicast++; + + if (!(ifp->ndev->flags & IFF_UP)) { + inff_pkt_buf_free_skb(skb); + return; + } + + ifp->ndev->stats.rx_bytes += skb->len; + ifp->ndev->stats.rx_packets++; + + inff_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol)); + if (ifp->drvr->settings->napi_enable && ifp->napi.poll && ifp->napi_gro) + napi_gro_receive(&ifp->napi, skb); + else + netif_rx(skb); +} + +void inff_netif_mon_rx(struct inff_if *ifp, struct sk_buff *skb) +{ + if (inff_feat_is_enabled(ifp, INFF_FEAT_MONITOR_FMT_RADIOTAP)) { + /* Do nothing */ + } else if (inff_feat_is_enabled(ifp, INFF_FEAT_MONITOR_FMT_HW_RX_HDR)) { + struct wlc_d11rxhdr *wlc_rxhdr = (struct wlc_d11rxhdr *)skb->data; + struct ieee80211_radiotap_header *radiotap; + unsigned int offset; + u16 rx_status_1; + + rx_status_1 = le16_to_cpu(wlc_rxhdr->rxhdr.rx_status_1); + + offset = sizeof(struct wlc_d11rxhdr); + /* MAC inserts 2 pad bytes for a4 headers or QoS or A-MSDU + * subframes + */ + if (rx_status_1 & RXS_PBPRES) + offset += 2; + offset += D11_PHY_HDR_LEN; + + skb_pull(skb, offset); + + /* TODO: use RX header to fill some radiotap data */ + radiotap = skb_push(skb, sizeof(*radiotap)); + memset(radiotap, 0, sizeof(*radiotap)); + radiotap->it_len = cpu_to_le16(sizeof(*radiotap)); + + /* TODO: 4 bytes with receive status? */ + skb->len -= 4; + } else { + struct ieee80211_radiotap_header *radiotap; + + /* TODO: use RX status to fill some radiotap data */ + radiotap = skb_push(skb, sizeof(*radiotap)); + memset(radiotap, 0, sizeof(*radiotap)); + radiotap->it_len = cpu_to_le16(sizeof(*radiotap)); + + /* TODO: 4 bytes with receive status? 
*/ + skb->len -= 4; + } + + skb->dev = ifp->ndev; + skb_reset_mac_header(skb); + skb->pkt_type = PACKET_OTHERHOST; + skb->protocol = htons(ETH_P_802_2); + /* NAPI disable or NAPI without gro will use skb_queue_tail() */ + if (ifp->drvr->settings->napi_enable) + inff_netif_rx(ifp, skb, false); +} + +static int inff_rx_hdrpull(struct inff_pub *drvr, struct sk_buff *skb, + struct inff_if **ifp) +{ + int ret; + + /* process and remove protocol-specific header */ + ret = inff_proto_hdrpull(drvr, true, skb, ifp); + + if (ret || !(*ifp) || !(*ifp)->ndev) { + if (ret != -ENODATA && *ifp && (*ifp)->ndev) + (*ifp)->ndev->stats.rx_errors++; + inff_pkt_buf_free_skb(skb); + return -ENODATA; + } + + skb->protocol = eth_type_trans(skb, (*ifp)->ndev); + inff_dbg(DATA, "protocol: 0x%04X\n", skb->protocol); + + return 0; +} + +struct sk_buff *inff_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event, + bool inirq) +{ + struct inff_if *ifp; + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pub *drvr = bus_if->drvr; + + inff_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb); + + if (inff_rx_hdrpull(drvr, skb, &ifp)) + return NULL; + + if (inff_proto_is_reorder_skb(skb)) { + inff_proto_rxreorder(ifp, skb, inirq); + } else { + /* Process special event packets */ + if (handle_event) { + gfp_t gfp = inirq ? GFP_ATOMIC : GFP_KERNEL; + + inff_fweh_process_skb(ifp->drvr, skb, + INFILCP_SUBTYPE_VENDOR_LONG, gfp); + } + + /* if sdio_rxf_in_kthread, enqueue it and process it later. 
*/ + if (inff_feat_is_sdio_rxf_in_kthread(drvr)) + return skb; + + inff_netif_rx(ifp, skb, inirq); + } + return NULL; +} + +void inff_rx_event(struct device *dev, struct sk_buff *skb) +{ + struct inff_if *ifp; + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pub *drvr = bus_if->drvr; + + inff_dbg(EVENT, "Enter: %s: rxp=%p\n", dev_name(dev), skb); + + if (inff_rx_hdrpull(drvr, skb, &ifp)) + return; + + inff_fweh_process_skb(ifp->drvr, skb, 0, GFP_KERNEL); + inff_pkt_buf_free_skb(skb); +} + +void inff_txfinalize(struct inff_if *ifp, struct sk_buff *txp, bool success) +{ + struct ethhdr *eh; + u16 type; + + if (!ifp) { + inff_pkt_buf_free_skb(txp); + return; + } + + eh = (struct ethhdr *)(txp->data); + type = ntohs(eh->h_proto); + + if (type == ETH_P_PAE) { + atomic_dec(&ifp->pend_8021x_cnt); + /* check is there any 8021x under waiting */ + if (waitqueue_active(&ifp->pend_8021x_wait)) + wake_up(&ifp->pend_8021x_wait); + } + + if (!success && ifp->ndev) + ifp->ndev->stats.tx_errors++; + + inff_pkt_buf_free_skb(txp); +} + +static void inff_ethtool_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = ifp->drvr; + char drev[INFF_DOTREV_LEN] = "n/a"; + + if (drvr->revinfo.result == 0) + inff_dotrev_str(drvr->revinfo.driverrev, drev); + strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strscpy(info->version, drev, sizeof(info->version)); + strscpy(info->fw_version, drvr->fwver, sizeof(info->fw_version)); + strscpy(info->bus_info, dev_name(drvr->bus_if->dev), + sizeof(info->bus_info)); + if (!drvr->cnt_ver) { + int ret; + u8 *iovar_out; + + iovar_out = kzalloc(WL_CNT_IOV_BUF, GFP_KERNEL); + if (!iovar_out) + return; + ret = inff_fil_iovar_data_get(ifp, "counters", iovar_out, WL_CNT_IOV_BUF); + if (ret) { + inff_err("Failed to get counters, code :%d\n", ret); + goto done; + } + memcpy(&drvr->cnt_ver, iovar_out, sizeof(drvr->cnt_ver)); +done: + kfree(iovar_out); 
+		iovar_out = NULL;
+	}
+}
+
+/* Fill @strings with the ethtool stat-name table matching the
+ * firmware's reported counters version, for the ETH_SS_STATS set.
+ */
+static void inff_et_get_strings(struct net_device *net_dev,
+				u32 sset, u8 *strings)
+{
+	struct inff_if *ifp = netdev_priv(net_dev);
+	struct inff_pub *drvr = ifp->drvr;
+
+	if (sset == ETH_SS_STATS) {
+		switch (drvr->cnt_ver) {
+		case CNT_VER_6:
+			memcpy(strings, fmac_ethtool_string_stats_v6,
+			       sizeof(fmac_ethtool_string_stats_v6));
+			break;
+		case CNT_VER_10:
+			memcpy(strings, fmac_ethtool_string_stats_v10,
+			       sizeof(fmac_ethtool_string_stats_v10));
+			break;
+		case CNT_VER_30:
+			memcpy(strings, fmac_ethtool_string_stats_v30,
+			       sizeof(fmac_ethtool_string_stats_v30));
+			break;
+		default:
+			inff_err("Unsupported counters version\n");
+		}
+	}
+}
+
+/* Locate the WL_CNT_XTLV_SLICE_IDX TLV inside the "counters" xTLV
+ * buffer @src.  On success return the byte offset of the TLV header
+ * and update *@len to the TLV data length; return -EINVAL when the
+ * TLV is not found before *@len bytes are consumed.
+ */
+static int inff_find_wlc_cntr_tlv(u8 *src, u16 *len)
+{
+	u16 tlv_id, data_len;
+	u16 packing_offset, cur_tlv = IOVAR_XTLV_BEGIN;
+
+	while (cur_tlv < *len) {
+		memcpy(&tlv_id, (src + cur_tlv), sizeof(tlv_id));
+		memcpy(&data_len, (src + cur_tlv + XTLV_TYPE_SIZE),
+		       sizeof(data_len));
+		if (tlv_id == WL_CNT_XTLV_SLICE_IDX) {
+			*len = data_len;
+			return cur_tlv;
+		}
+		/* xTLV data has 4 bytes packing. So calculate the packing
+		 * offset using the data length.
+		 */
+		packing_offset = PACKING_FACTOR(data_len);
+		cur_tlv += XTLV_TYPE_LEN_SIZE + data_len + packing_offset;
+	}
+	return -EINVAL;
+}
+
+/* Query the firmware "counters" iovar and copy the u32 counter values
+ * into @results_buf for ethtool -S.  Handles both the legacy flat
+ * layout and the xTLV layout used from counters version 30 onwards.
+ */
+static void inff_et_get_stats(struct net_device *netdev,
+			      struct ethtool_stats *et_stats, u64 *results_buf)
+{
+	struct inff_if *ifp = netdev_priv(netdev);
+	u8 *iovar_out, *src;
+	u16 version, len, xtlv_wl_cnt_offset = 0;
+	u16 soffset = 0, idx = 0;
+	int ret;
+
+	iovar_out = kzalloc(WL_CNT_IOV_BUF, GFP_KERNEL);
+	if (!iovar_out)
+		return;
+
+	/* ret must be a signed int: both the iovar call and the TLV
+	 * lookup report failure with negative error codes, which the
+	 * previous u8 declaration silently truncated.
+	 */
+	ret = inff_fil_iovar_data_get(ifp, "counters", iovar_out,
+				      WL_CNT_IOV_BUF);
+	if (ret) {
+		inff_err("Failed to get counters, code :%d\n", ret);
+		goto done;
+	}
+	src = iovar_out;
+
+	memcpy(&version, src, sizeof(version));
+	soffset += sizeof(version);
+	memcpy(&len, (src + soffset), sizeof(len));
+	soffset += sizeof(len);
+
+	/* Check counters version and decide if its non-TLV or TLV
+	 * (version >= 30).
+	 */
+	if (version >= CNT_VER_30) {
+		ret = inff_find_wlc_cntr_tlv(src, &len);
+		if (ret < 0) {
+			/* Previously a failed lookup (-EINVAL) was stored
+			 * unchecked into the u16 offset, producing a bogus
+			 * out-of-bounds read offset.
+			 */
+			inff_err("WLC counters TLV not found\n");
+			goto done;
+		}
+		xtlv_wl_cnt_offset = ret;
+		len = (len / sizeof(u32));
+	} else {
+		len = (len / sizeof(u32)) - sizeof(u32);
+	}
+
+	src = src + soffset + xtlv_wl_cnt_offset;
+	while (idx < len) {
+		results_buf[idx++] = *((u32 *)src);
+		src += sizeof(u32);
+	}
+done:
+	kfree(iovar_out);
+	iovar_out = NULL;
+}
+
+/* Report how many ETH_SS_STATS strings/values the driver exposes for
+ * the firmware's counters version, or -EOPNOTSUPP for anything else.
+ */
+static int inff_et_get_scount(struct net_device *dev, int sset)
+{
+	u16 array_size;
+	struct inff_if *ifp = netdev_priv(dev);
+	struct inff_pub *drvr = ifp->drvr;
+
+	if (sset == ETH_SS_STATS) {
+		switch (drvr->cnt_ver) {
+		case CNT_VER_6:
+			array_size = ARRAY_SIZE(fmac_ethtool_string_stats_v6);
+			break;
+		case CNT_VER_10:
+			array_size = ARRAY_SIZE(fmac_ethtool_string_stats_v10);
+			break;
+		case CNT_VER_30:
+			array_size = ARRAY_SIZE(fmac_ethtool_string_stats_v30);
+			break;
+		default:
+			inff_err("Unsupported counters version\n");
+			return -EOPNOTSUPP;
+		}
+	} else {
+		inff_dbg(INFO, "Does not support ethtool string set %d\n", sset);
+		return -EOPNOTSUPP;
+	}
+	return array_size;
+}
+
+static
const struct ethtool_ops inff_ethtool_ops = { + .get_drvinfo = inff_ethtool_get_drvinfo, + .get_ts_info = ethtool_op_get_ts_info, + .get_strings = inff_et_get_strings, + .get_ethtool_stats = inff_et_get_stats, + .get_sset_count = inff_et_get_scount, +}; + +static int inff_netdev_stop(struct net_device *ndev) +{ + struct inff_if *ifp = netdev_priv(ndev); + + inff_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); + + inff_cfg80211_down(ndev); + + inff_net_setcarrier(ifp, false); + + return 0; +} + +static int inff_netdev_open(struct net_device *ndev) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = ifp->drvr; + struct inff_bus *bus_if = drvr->bus_if; + u32 toe_ol; + + inff_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx); + + /* If bus is not ready, can't continue */ + if (bus_if->state != INFF_BUS_UP) { + iphy_err(drvr, "failed bus is not ready\n"); + return -EAGAIN; + } + + atomic_set(&ifp->pend_8021x_cnt, 0); + + /* Get current TOE mode from dongle */ + if (inff_fil_iovar_int_get(ifp, "toe_ol", &toe_ol) >= 0 && + (toe_ol & TOE_TX_CSUM_OL) != 0) + ndev->features |= NETIF_F_IP_CSUM; + else + ndev->features &= ~NETIF_F_IP_CSUM; + + if (inff_cfg80211_up(ndev)) { + iphy_err(drvr, "failed to bring up cfg80211\n"); + return -EIO; + } + + /* Clear, carrier, set when connected or AP mode. 
*/ + netif_carrier_off(ndev); + return 0; +} + +#ifdef CONFIG_INFFMAC_PCIE +static int inff_netdev_bpf(struct net_device *ndev, struct netdev_bpf *bpf) +{ + return inff_xdp_bpf_cmd(ndev, bpf); +} +#endif /* CONFIG_INFFMAC_PCIE */ + +static const struct net_device_ops inff_netdev_ops_pri = { + .ndo_open = inff_netdev_open, + .ndo_stop = inff_netdev_stop, + .ndo_start_xmit = inff_netdev_start_xmit, + .ndo_set_mac_address = inff_netdev_set_mac_address, + .ndo_set_rx_mode = inff_netdev_set_multicast_list, +#ifdef CONFIG_INFFMAC_PCIE + .ndo_bpf = inff_netdev_bpf, +#endif /* CONFIG_INFFMAC_PCIE */ +}; + +int inff_net_attach(struct inff_if *ifp, bool locked) +{ + struct inff_pub *drvr = ifp->drvr; + struct net_device *ndev; + s32 err; + + inff_dbg(TRACE, "Enter, bsscfgidx=%d mac=%pM\n", ifp->bsscfgidx, + ifp->mac_addr); + ndev = ifp->ndev; + + /* set appropriate operations */ + ndev->netdev_ops = &inff_netdev_ops_pri; + + ndev->needed_headroom += drvr->hdrlen; + ndev->ethtool_ops = &inff_ethtool_ops; + + /* set the mac address & netns */ + eth_hw_addr_set(ndev, ifp->mac_addr); + dev_net_set(ndev, wiphy_net(cfg_to_wiphy(drvr->config))); + + INIT_WORK(&ifp->multicast_work, _inff_set_multicast_list); + INIT_WORK(&ifp->ndoffload_work, inff_update_ipv6_addr); + + if (locked) + err = cfg80211_register_netdevice(ndev); + else + err = register_netdev(ndev); + if (err != 0) { + iphy_err(drvr, "couldn't register the net device\n"); + goto fail; + } + + netif_carrier_off(ndev); + + ndev->priv_destructor = inff_cfg80211_free_netdev; + inff_dbg(INFO, "%s: Infineon Dongle Host Driver\n", ndev->name); + return 0; + +fail: + drvr->iflist[ifp->bsscfgidx] = NULL; + ndev->netdev_ops = NULL; + return -EBADE; +} + +void inff_net_detach(struct net_device *ndev, bool locked) +{ + if (ndev->reg_state == NETREG_REGISTERED) { + if (locked) + cfg80211_unregister_netdevice(ndev); + else + unregister_netdev(ndev); + } else { + inff_cfg80211_free_netdev(ndev); + free_netdev(ndev); + } +} + +static int 
inff_net_mon_open(struct net_device *ndev) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = ifp->drvr; + u32 monitor; + int err; + + inff_dbg(TRACE, "Enter\n"); + + err = inff_fil_cmd_int_get(ifp, INFF_C_GET_MONITOR, &monitor); + if (err) { + iphy_err(drvr, "INFF_C_GET_MONITOR error (%d)\n", err); + return err; + } else if (monitor) { + iphy_err(drvr, "Monitor mode is already enabled\n"); + return -EEXIST; + } + + monitor = 3; + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_MONITOR, monitor); + if (err) + iphy_err(drvr, "INFF_C_SET_MONITOR error (%d)\n", err); + + return err; +} + +static int inff_net_mon_stop(struct net_device *ndev) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = ifp->drvr; + u32 monitor; + int err; + + inff_dbg(TRACE, "Enter\n"); + + monitor = 0; + err = inff_fil_cmd_int_set(ifp, INFF_C_SET_MONITOR, monitor); + if (err) + iphy_err(drvr, "INFF_C_SET_MONITOR error (%d)\n", err); + + return err; +} + +static netdev_tx_t inff_net_mon_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +static const struct net_device_ops inff_netdev_ops_mon = { + .ndo_open = inff_net_mon_open, + .ndo_stop = inff_net_mon_stop, + .ndo_start_xmit = inff_net_mon_start_xmit, +}; + +int inff_net_mon_attach(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + struct net_device *ndev; + int err; + + inff_dbg(TRACE, "Enter\n"); + + ndev = ifp->ndev; + ndev->netdev_ops = &inff_netdev_ops_mon; + + err = cfg80211_register_netdevice(ndev); + if (err) + iphy_err(drvr, "Failed to register %s device\n", ndev->name); + + return err; +} + +void inff_net_setcarrier(struct inff_if *ifp, bool on) +{ + struct net_device *ndev; + + inff_dbg(TRACE, "Enter, bsscfgidx=%d carrier=%d\n", ifp->bsscfgidx, + on); + + ndev = ifp->ndev; + inff_txflowblock_if(ifp, INFF_NETIF_STOP_REASON_DISCONNECTED, !on); + if (on) { + if (!netif_carrier_ok(ndev)) + netif_carrier_on(ndev); + + } 
else { + if (netif_carrier_ok(ndev)) + netif_carrier_off(ndev); + } +} + +static int inff_net_p2p_open(struct net_device *ndev) +{ + inff_dbg(TRACE, "Enter\n"); + + return inff_cfg80211_up(ndev); +} + +static int inff_net_p2p_stop(struct net_device *ndev) +{ + inff_dbg(TRACE, "Enter\n"); + + return inff_cfg80211_down(ndev); +} + +static netdev_tx_t inff_net_p2p_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + if (skb) + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +static const struct net_device_ops inff_netdev_ops_p2p = { + .ndo_open = inff_net_p2p_open, + .ndo_stop = inff_net_p2p_stop, + .ndo_start_xmit = inff_net_p2p_start_xmit +}; + +static int inff_net_p2p_attach(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + struct net_device *ndev; + + inff_dbg(TRACE, "Enter, bsscfgidx=%d mac=%pM\n", ifp->bsscfgidx, + ifp->mac_addr); + ndev = ifp->ndev; + + ndev->netdev_ops = &inff_netdev_ops_p2p; + + /* set the mac address */ + eth_hw_addr_set(ndev, ifp->mac_addr); + + if (register_netdev(ndev) != 0) { + iphy_err(drvr, "couldn't register the p2p net device\n"); + goto fail; + } + + inff_dbg(INFO, "%s: Infineon Dongle Host Driver\n", ndev->name); + + return 0; + +fail: + ifp->drvr->iflist[ifp->bsscfgidx] = NULL; + ndev->netdev_ops = NULL; + return -EBADE; +} + +struct inff_if *inff_add_if(struct inff_pub *drvr, s32 bsscfgidx, s32 ifidx, + const char *name, u8 *mac_addr, bool is_p2pdev, + bool is_wlan_sensedev) +{ + struct inff_if *ifp; + struct net_device *ndev; + + inff_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx, ifidx); + + ifp = drvr->iflist[bsscfgidx]; + /* + * Delete the existing interface before overwriting it + * in case we missed the INFF_E_IF_DEL event. 
+ */ + if (ifp) { + if (ifidx) { + iphy_err(drvr, "ERROR: netdev:%s already exists\n", + ifp->ndev->name); + netif_stop_queue(ifp->ndev); + inff_net_detach(ifp->ndev, false); + drvr->iflist[bsscfgidx] = NULL; + } else { + inff_dbg(INFO, "netdev:%s ignore IF event\n", + ifp->ndev->name); + return ERR_PTR(-EINVAL); + } + } + + if ((!drvr->settings->p2p_enable && is_p2pdev) || + is_wlan_sensedev) { + /* this is a P2P/WLAN Sense device interface */ + inff_dbg(INFO, "allocate non-netdev interface\n"); + ifp = kzalloc(sizeof(*ifp), GFP_KERNEL); + if (!ifp) + return ERR_PTR(-ENOMEM); + } else { + inff_dbg(INFO, "allocate netdev interface\n"); + /* Allocate netdev, including space for private structure */ + ndev = alloc_netdev(sizeof(*ifp), is_p2pdev ? "p2p%d" : name, + NET_NAME_UNKNOWN, ether_setup); + if (!ndev) + return ERR_PTR(-ENOMEM); + + ndev->needs_free_netdev = true; + ifp = netdev_priv(ndev); + ifp->ndev = ndev; + /* store mapping ifidx to bsscfgidx */ + if (drvr->if2bss[ifidx] == INFF_BSSIDX_INVALID) + drvr->if2bss[ifidx] = bsscfgidx; + } + + ifp->drvr = drvr; + drvr->iflist[bsscfgidx] = ifp; + ifp->ifidx = ifidx; + ifp->bsscfgidx = bsscfgidx; + + init_waitqueue_head(&ifp->pend_8021x_wait); + spin_lock_init(&ifp->netif_stop_lock); + INFF_IF_STA_LIST_LOCK_INIT(ifp); + /* Initialize STA info list */ + INIT_LIST_HEAD(&ifp->sta_list); + + spin_lock_init(&ifp->twt_sess_list_lock); + /* Initialize TWT Session list */ + INIT_LIST_HEAD(&ifp->twt_sess_list); + /* Setup the aperiodic TWT Session cleanup activity */ + timer_setup(&ifp->twt_evt_timeout, inff_twt_event_timeout_handler, 0); + + if (mac_addr) + memcpy(ifp->mac_addr, mac_addr, ETH_ALEN); + + inff_dbg(TRACE, " ==== pid:%x, if:%s (%pM) created ===\n", + current->pid, name, ifp->mac_addr); + + return ifp; +} + +static void inff_del_if(struct inff_pub *drvr, s32 bsscfgidx, + bool locked) +{ + struct inff_if *ifp; + int ifidx; + + ifp = drvr->iflist[bsscfgidx]; + if (!ifp) { + iphy_err(drvr, "Null interface, 
bsscfgidx=%d\n", bsscfgidx); + return; + } + inff_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx, + ifp->ifidx); + ifidx = ifp->ifidx; + + /* Stop the aperiodic TWT Session cleanup activity */ + if (timer_pending(&ifp->twt_evt_timeout)) + timer_delete_sync(&ifp->twt_evt_timeout); + + if (ifp->ndev) { + if (bsscfgidx == 0) { + if (ifp->ndev->netdev_ops == &inff_netdev_ops_pri) { + rtnl_lock(); + inff_netdev_stop(ifp->ndev); + rtnl_unlock(); + } + } else { + netif_stop_queue(ifp->ndev); + } + + if (ifp->ndev->netdev_ops == &inff_netdev_ops_pri) { + cancel_work_sync(&ifp->multicast_work); + cancel_work_sync(&ifp->ndoffload_work); + } + inff_net_detach(ifp->ndev, locked); + } else { + switch (ifp->vif->wdev.iftype) { + case NL80211_IFTYPE_P2P_DEVICE: + /* Only p2p device interfaces which get dynamically created + * end up here. In this case the p2p module should be informed + * about the removal of the interface within the firmware. If + * not then p2p commands towards the firmware will cause some + * serious troublesome side effects. The p2p module will clean + * up the ifp if needed. 
+ */ + inff_p2p_ifp_removed(ifp, locked); + break; + case NL80211_IFTYPE_WLAN_SENSE: + inff_wlan_sense_ifp_removed(ifp, locked); + break; + default: + inff_dbg(TRACE, "Unknown wdev iface type"); + } + + kfree(ifp); + } + + drvr->iflist[bsscfgidx] = NULL; + if (drvr->if2bss[ifidx] == bsscfgidx) + drvr->if2bss[ifidx] = INFF_BSSIDX_INVALID; +} + +void inff_remove_interface(struct inff_if *ifp, bool locked) +{ + if (!ifp || !(ifp->drvr) || WARN_ON(ifp->drvr->iflist[ifp->bsscfgidx] != ifp)) { + inff_err("Invalid interface or driver\n"); + return; + } + inff_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", ifp->bsscfgidx, ifp->ifidx); + inff_proto_del_if(ifp->drvr, ifp); + inff_del_if(ifp->drvr, ifp->bsscfgidx, locked); +} + +static int inff_psm_watchdog_notify(struct inff_if *ifp, + const struct inff_event_msg *evtmsg, + void *data) +{ + struct inff_pub *drvr = ifp->drvr; + int err; + + inff_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx); + + iphy_err(drvr, "PSM's watchdog has fired!\n"); + + err = inff_debug_create_memdump(ifp->drvr->bus_if, data, + evtmsg->datalen); + if (err) + iphy_err(drvr, "Failed to get memory dump, %d\n", err); + + return err; +} + +#ifdef CONFIG_INET +#define ARPOL_MAX_ENTRIES 8 +static int inff_inetaddr_changed(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct inff_pub *drvr = container_of(nb, struct inff_pub, + inetaddr_notifier); + struct in_ifaddr *ifa = data; + struct net_device *ndev = ifa->ifa_dev->dev; + struct inff_if *ifp; + int idx, i = 0, ret; + u32 val; + __be32 addr_table[ARPOL_MAX_ENTRIES] = {0}; + + /* Find out if the notification is meant for us */ + for (idx = 0; idx < INFF_MAX_IFS; idx++) { + ifp = drvr->iflist[idx]; + if (ifp && ifp->ndev == ndev) + break; + if (idx == INFF_MAX_IFS - 1) + return NOTIFY_DONE; + } + + if (!inff_feat_is_enabled(ifp, INFF_FEAT_OFFLOADS)) { + /* check if arp offload is supported */ + ret = inff_fil_iovar_int_get(ifp, "arpoe", &val); + if (ret) + return NOTIFY_OK; + + /* 
old version only support primary index */ + ret = inff_fil_iovar_int_get(ifp, "arp_version", &val); + if (ret) + val = 1; + if (val == 1) + ifp = drvr->iflist[0]; + + /* retrieve the table from firmware */ + ret = inff_fil_iovar_data_get(ifp, "arp_hostip", addr_table, + sizeof(addr_table)); + if (ret) { + iphy_err(drvr, "fail to get arp ip table err:%d\n", ret); + return NOTIFY_OK; + } + + for (i = 0; i < ARPOL_MAX_ENTRIES; i++) + if (ifa->ifa_address == addr_table[i]) + break; + } + + switch (action) { + case NETDEV_UP: + if (inff_feat_is_enabled(ifp, INFF_FEAT_OFFLOADS)) { + inff_offload_host_ipv4_update(ifp, INFF_OFFLOAD_ARP | INFF_OFFLOAD_ICMP, + ifa->ifa_address, true); + } else { + if (i == ARPOL_MAX_ENTRIES) { + inff_dbg(TRACE, "add %pI4 to arp table\n", + &ifa->ifa_address); + /* set it directly */ + ret = inff_fil_iovar_data_set(ifp, "arp_hostip", + &ifa->ifa_address, + sizeof(ifa->ifa_address)); + if (ret) + iphy_err(drvr, "add arp ip err %d\n", ret); + } + } + break; + case NETDEV_DOWN: + if (inff_feat_is_enabled(ifp, INFF_FEAT_OFFLOADS)) { + inff_offload_host_ipv4_update(ifp, INFF_OFFLOAD_ARP | INFF_OFFLOAD_ICMP, + ifa->ifa_address, false); + } else { + if (i < ARPOL_MAX_ENTRIES) { + addr_table[i] = 0; + inff_dbg(TRACE, "remove %pI4 from arp table\n", + &ifa->ifa_address); + /* clear the table in firmware */ + ret = inff_fil_iovar_data_set(ifp, "arp_hostip_clear", + NULL, 0); + if (ret) { + iphy_err(drvr, "fail to clear arp ip table err:%d\n", + ret); + return NOTIFY_OK; + } + for (i = 0; i < ARPOL_MAX_ENTRIES; i++) { + if (addr_table[i] == 0) + continue; + ret = inff_fil_iovar_data_set(ifp, "arp_hostip", + &addr_table[i], + sizeof(addr_table[i])); + if (ret) + iphy_err(drvr, "add arp ip err %d\n", + ret); + } + } + } + break; + default: + break; + } + + return NOTIFY_OK; +} +#endif + +#if IS_ENABLED(CONFIG_IPV6) +static int inff_inet6addr_changed(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct inff_pub *drvr = 
container_of(nb, struct inff_pub, + inet6addr_notifier); + struct inet6_ifaddr *ifa = data; + struct inff_if *ifp; + int i; + struct in6_addr *table; + + /* Only handle primary interface */ + ifp = drvr->iflist[0]; + if (!ifp) + return NOTIFY_DONE; + if (ifp->ndev != ifa->idev->dev) + return NOTIFY_DONE; + + table = ifp->ipv6_addr_tbl; + for (i = 0; i < NDOL_MAX_ENTRIES; i++) + if (ipv6_addr_equal(&ifa->addr, &table[i])) + break; + + switch (action) { + case NETDEV_UP: + if (i == NDOL_MAX_ENTRIES) { + if (ifp->ipv6addr_idx < NDOL_MAX_ENTRIES) { + table[ifp->ipv6addr_idx++] = ifa->addr; + } else { + for (i = 0; i < NDOL_MAX_ENTRIES - 1; i++) + table[i] = table[i + 1]; + table[NDOL_MAX_ENTRIES - 1] = ifa->addr; + } + } + break; + case NETDEV_DOWN: + if (i < NDOL_MAX_ENTRIES) { + for (; i < ifp->ipv6addr_idx - 1; i++) + table[i] = table[i + 1]; + memset(&table[i], 0, sizeof(table[i])); + ifp->ipv6addr_idx--; + } + break; + default: + break; + } + + schedule_work(&ifp->ndoffload_work); + + return NOTIFY_OK; +} +#endif + +int inff_fwlog_attach(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pub *drvr = bus_if->drvr; + + return inff_debug_fwlog_init(drvr); +} + +static int inff_revinfo_read(struct seq_file *s, void *data) +{ + struct inff_bus *bus_if = dev_get_drvdata(s->private); + struct inff_rev_info *ri = &bus_if->drvr->revinfo; + char drev[INFF_DOTREV_LEN]; + char brev[INFF_BOARDREV_LEN]; + + seq_printf(s, "vendorid: 0x%04x\n", ri->vendorid); + seq_printf(s, "deviceid: 0x%04x\n", ri->deviceid); + seq_printf(s, "radiorev: %s\n", inff_dotrev_str(ri->radiorev, drev)); + seq_printf(s, "chip: %s\n", ri->chipname); + seq_printf(s, "chippkg: %u\n", ri->chippkg); + seq_printf(s, "corerev: %u\n", ri->corerev); + seq_printf(s, "boardid: 0x%04x\n", ri->boardid); + seq_printf(s, "boardvendor: 0x%04x\n", ri->boardvendor); + seq_printf(s, "boardrev: %s\n", inff_boardrev_str(ri->boardrev, brev)); + seq_printf(s, "driverrev: %s\n", 
inff_dotrev_str(ri->driverrev, drev)); + seq_printf(s, "ucoderev: %u\n", ri->ucoderev); + seq_printf(s, "bus: %u\n", ri->bus); + seq_printf(s, "phytype: %u\n", ri->phytype); + seq_printf(s, "phyrev: %u\n", ri->phyrev); + seq_printf(s, "anarev: %u\n", ri->anarev); + seq_printf(s, "nvramrev: %08x\n", ri->nvramrev); + + seq_printf(s, "clmver: %s\n", bus_if->drvr->clmver); + + return 0; +} + +static void inff_core_bus_reset(struct work_struct *work) +{ + struct inff_pub *drvr = container_of(work, struct inff_pub, + bus_reset); + + inff_bus_reset(drvr->bus_if); +} + +static ssize_t bus_reset_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct inff_pub *drvr = file->private_data; + u8 value; + + if (kstrtou8_from_user(user_buf, count, 0, &value)) + return -EINVAL; + + if (value != 1) + return -EINVAL; + + schedule_work(&drvr->bus_reset); + + return count; +} + +static const struct file_operations bus_reset_fops = { + .open = simple_open, + .write = bus_reset_write, +}; + +static int inff_bus_started(struct inff_pub *drvr, struct cfg80211_ops *ops) +{ + int ret = -1; + struct inff_bus *bus_if = drvr->bus_if; + struct inff_if *ifp; + struct inff_if *p2p_ifp; + int i, num; + + inff_dbg(TRACE, "\n"); + + /* add primary networking interface */ + ifp = inff_add_if(drvr, 0, 0, "wlan%d", + is_valid_ether_addr(drvr->settings->mac) ? 
+ drvr->settings->mac : NULL, false, false); + if (IS_ERR(ifp)) + return PTR_ERR(ifp); + + p2p_ifp = NULL; + + /* signal bus ready */ + inff_bus_change_state(bus_if, INFF_BUS_UP); + + /* do bus specific preinit here */ + ret = inff_bus_preinit(bus_if); + if (ret < 0) + goto fail; + + /* Bus is ready, do any initialization */ + ret = inff_c_preinit_dcmds(ifp); + if (ret < 0) + goto fail; + + inff_feat_attach(drvr); + ret = inff_bus_set_fcmode(bus_if); + /* Set fcmode = 0 for PCIe */ + if (ret < 0) + drvr->settings->fcmode = 0; + + ret = inff_proto_init_done(drvr); + if (ret < 0) + goto fail; + + inff_proto_add_if(drvr, ifp); + + drvr->config = inff_cfg80211_attach(drvr, ops, + drvr->settings->p2p_enable); + if (!drvr->config) { + ret = -ENOMEM; + goto fail; + } + + /* update custom DSCP to PRIO mapping */ + if (drvr->settings->pkt_prio) { + drvr->qos_map = kzalloc(sizeof(*drvr->qos_map), GFP_KERNEL); + if (!drvr->qos_map) { + ret = -ENOMEM; + goto fail; + } + num = sizeof(dscp_excpt) / (sizeof(struct cfg80211_dscp_exception)); + drvr->qos_map->num_des = num; + for (i = 0; i < num; i++) { + drvr->qos_map->dscp_exception[i].dscp = dscp_excpt[i].dscp; + drvr->qos_map->dscp_exception[i].up = dscp_excpt[i].up; + } + memcpy(drvr->qos_map->up, dscp_range, sizeof(dscp_range[8])); + } + + ret = inff_net_attach(ifp, false); + + if (!ret && drvr->settings->p2p_enable) { + p2p_ifp = drvr->iflist[1]; + if (p2p_ifp) + ret = inff_net_p2p_attach(p2p_ifp); + } + + if (ret) + goto fail; + +#ifdef CONFIG_INET + drvr->inetaddr_notifier.notifier_call = inff_inetaddr_changed; + ret = register_inetaddr_notifier(&drvr->inetaddr_notifier); + if (ret) + goto fail; + +#if IS_ENABLED(CONFIG_IPV6) + drvr->inet6addr_notifier.notifier_call = inff_inet6addr_changed; + ret = register_inet6addr_notifier(&drvr->inet6addr_notifier); + if (ret) { + unregister_inetaddr_notifier(&drvr->inetaddr_notifier); + goto fail; + } +#endif +#endif /* CONFIG_INET */ + + INIT_WORK(&drvr->bus_reset, 
inff_core_bus_reset); + + /* populate debugfs */ + inff_debugfs_add_entry(drvr, "revinfo", inff_revinfo_read); + inff_debugfs_add_entry(drvr, "parameter", inff_debugfs_param_read); + debugfs_create_file("reset", 0600, inff_debugfs_get_devdir(drvr), drvr, + &bus_reset_fops); + inff_feat_debugfs_create(drvr); + inff_proto_debugfs_create(drvr); + inff_bus_debugfs_create(bus_if); + inff_twt_debugfs_create(drvr); + inff_pmsr_debugfs_create(drvr); +#ifdef CONFIG_INFFMAC_BT_SHARED_SDIO + inff_btsdio_debugfs_create(drvr); +#endif + return 0; + +fail: + iphy_err(drvr, "failed: %d\n", ret); + if (drvr->config) { + inff_cfg80211_detach(drvr->config); + drvr->config = NULL; + } + inff_net_detach(ifp->ndev, false); + if (p2p_ifp) + inff_net_detach(p2p_ifp->ndev, false); + drvr->iflist[0] = NULL; + drvr->iflist[1] = NULL; + if (drvr->settings->ignore_probe_fail) + ret = 0; + + return ret; +} + +int inff_alloc(struct device *dev, struct inff_mp_device *settings) +{ + struct wiphy *wiphy; + struct cfg80211_ops *ops; + struct inff_pub *drvr = NULL; + + inff_dbg(TRACE, "Enter\n"); + + ops = inff_cfg80211_get_ops(settings); + if (!ops) + return -ENOMEM; + + wiphy = wiphy_new(ops, sizeof(*drvr)); + if (!wiphy) { + kfree(ops); + return -ENOMEM; + } + + set_wiphy_dev(wiphy, dev); + drvr = wiphy_priv(wiphy); + drvr->wiphy = wiphy; + drvr->ops = ops; + drvr->bus_if = dev_get_drvdata(dev); + drvr->bus_if->drvr = drvr; + drvr->settings = settings; + + return 0; +} + +int inff_attach(struct device *dev, bool start_bus) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pub *drvr = bus_if->drvr; + int ret = 0; + int i; + + inff_dbg(TRACE, "Enter\n"); + + /* Initialize logger */ + ret = inff_logger_attach(drvr, drvr->settings->logger_level, + drvr->settings->logring_depth); + if (ret != 0) + goto fail; + + for (i = 0; i < ARRAY_SIZE(drvr->if2bss); i++) + drvr->if2bss[i] = INFF_BSSIDX_INVALID; + + mutex_init(&drvr->proto_block); + + /* Link to bus module */ + drvr->hdrlen = 0; + 
+ drvr->req_mpc = 1; + /* Attach and link in the protocol */ + ret = inff_proto_attach(drvr); + if (ret != 0) { + iphy_err(drvr, "inff_prot_attach failed\n"); + goto fail; + } + + /* Attach to events important for core code */ + inff_fweh_register(drvr, INFF_E_PSM_WATCHDOG, + inff_psm_watchdog_notify); + + /* attach firmware event handler */ + inff_fweh_attach(drvr); + + if (start_bus) { + ret = inff_bus_started(drvr, drvr->ops); + if (ret != 0) { + iphy_err(drvr, "dongle is not responding: err=%d\n", + ret); + goto fail; + } + } + + /* Initialize XDP */ + ret = inff_proto_xdp_init(drvr, inff_get_ifp(drvr, 0)); + if (ret != 0) + goto fail; +#ifdef CONFIG_INFFMAC_SDIO + /* Initialize DFU */ + ret = inff_dfu_attach(drvr); + if (ret != 0) + goto fail; +#endif /* CONFIG_INFFMAC_SDIO */ + return 0; +fail: + inff_detach(dev); + + return ret; +} + +void inff_bus_add_txhdrlen(struct device *dev, uint len) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pub *drvr = bus_if->drvr; + + if (drvr) + drvr->hdrlen += len; +} + +void inff_dev_reset(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pub *drvr = bus_if->drvr; + + if (!drvr) + return; + + if (drvr->iflist[0]) + inff_fil_cmd_int_set(drvr->iflist[0], INFF_C_TERMINATED, 1); +} + +void inff_dev_coredump(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + + if (inff_debug_create_memdump(bus_if, NULL, 0) < 0) + inff_dbg(TRACE, "failed to create coredump\n"); +} + +void inff_fw_crashed(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pub *drvr = bus_if->drvr; + + iphy_err(drvr, "Firmware has halted or crashed\n"); + + inff_dev_coredump(dev); + + if (drvr->bus_reset.func) + schedule_work(&drvr->bus_reset); +} + +void inff_detach(struct device *dev) +{ + s32 i; + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pub *drvr = bus_if->drvr; + + inff_dbg(TRACE, "Enter\n"); + + if (!drvr) + return; + 
+#ifdef CONFIG_INET + unregister_inetaddr_notifier(&drvr->inetaddr_notifier); +#endif + +#if IS_ENABLED(CONFIG_IPV6) + unregister_inet6addr_notifier(&drvr->inet6addr_notifier); +#endif +#ifdef CONFIG_INFFMAC_SDIO + inff_dfu_detach(drvr); +#endif /* CONFIG_INFFMAC_SDIO */ + inff_bus_change_state(bus_if, INFF_BUS_DOWN); + /* make sure primary interface removed last */ + for (i = INFF_MAX_IFS - 1; i > -1; i--) { + if (drvr->iflist[i]) + inff_remove_interface(drvr->iflist[i], false); + } + inff_bus_stop(drvr->bus_if); + + if (drvr->settings->pkt_prio) { + kfree(drvr->qos_map); + drvr->qos_map = NULL; + } + + inff_proto_xdp_deinit(drvr); + inff_fweh_detach(drvr); + inff_proto_detach(drvr); + inff_logger_detach(drvr); + + if (drvr->mon_if) { + inff_net_detach(drvr->mon_if->ndev, false); + drvr->mon_if = NULL; + } + + if (drvr->config) { + kfree(drvr->config->pfn_data.network_blob_data); + inff_p2p_detach(&drvr->config->p2p); + inff_cfg80211_detach(drvr->config); + drvr->config = NULL; + } +} + +void inff_free(struct device *dev) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_pub *drvr = bus_if->drvr; + + if (!drvr) + return; + + bus_if->drvr = NULL; + + kfree(drvr->ops); + + wiphy_free(drvr->wiphy); +} + +s32 inff_iovar_data_set(struct device *dev, char *name, void *data, u32 len) +{ + struct inff_bus *bus_if = dev_get_drvdata(dev); + struct inff_if *ifp = bus_if->drvr->iflist[0]; + + return inff_fil_iovar_data_set(ifp, name, data, len); +} + +static int inff_get_pend_8021x_cnt(struct inff_if *ifp) +{ + return atomic_read(&ifp->pend_8021x_cnt); +} + +int inff_netdev_wait_pend8021x(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + int err; + + err = wait_event_timeout(ifp->pend_8021x_wait, + !inff_get_pend_8021x_cnt(ifp), + MAX_WAIT_FOR_8021X_TX); + + if (!err) { + iphy_err(drvr, "Timed out waiting for no pending 802.1x packets\n"); + atomic_set(&ifp->pend_8021x_cnt, 0); + } + + return !err; +} + +void inff_bus_change_state(struct inff_bus 
*bus, enum inff_bus_state state) +{ + struct inff_pub *drvr = bus->drvr; + struct net_device *ndev; + int ifidx; + + inff_dbg(TRACE, "%d -> %d\n", bus->state, state); + + if (!drvr) { + inff_dbg(INFO, "ignoring transition, bus not attached yet\n"); + return; + } + + bus->state = state; + + if (state == INFF_BUS_UP) { + for (ifidx = 0; ifidx < INFF_MAX_IFS; ifidx++) { + if (drvr->iflist[ifidx] && + drvr->iflist[ifidx]->ndev) { + ndev = drvr->iflist[ifidx]->ndev; + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); + } + } + } +} + +int __init inff_core_init(void) +{ + int err; + + err = inff_sdio_register(); + if (err) + return err; + + err = inff_pcie_register(); + if (err) + goto error_pcie_register; + return 0; + +error_pcie_register: + inff_sdio_exit(); + return err; +} + +void inff_core_exit(void) +{ + inff_sdio_exit(); + inff_pcie_exit(); +} + +int +inff_pktfilter_add_remove(struct net_device *ndev, int filter_num, bool add) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = ifp->drvr; + struct inff_pkt_filter_le *pkt_filter; + int filter_fixed_len = offsetof(struct inff_pkt_filter_le, u); + int pattern_fixed_len = offsetof(struct inff_pkt_filter_pattern_le, + mask_and_pattern); + u16 mask_and_pattern[MAX_PKTFILTER_PATTERN_SIZE]; + int buflen = 0; + int ret = 0; + + inff_dbg(INFO, "%s packet filter number %d\n", + (add ? 
"add" : "remove"), filter_num); + + pkt_filter = kzalloc(sizeof(*pkt_filter) + + (MAX_PKTFILTER_PATTERN_FILL_SIZE), GFP_ATOMIC); + if (!pkt_filter) + return -ENOMEM; + + switch (filter_num) { + case INFF_UNICAST_FILTER_NUM: + pkt_filter->id = 100; + pkt_filter->type = 0; + pkt_filter->negate_match = 0; + pkt_filter->u.pattern.offset = 0; + pkt_filter->u.pattern.size_bytes = 1; + mask_and_pattern[0] = 0x0001; + break; + case INFF_BROADCAST_FILTER_NUM: + //filter_pattern = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; + pkt_filter->id = 101; + pkt_filter->type = 0; + pkt_filter->negate_match = 0; + pkt_filter->u.pattern.offset = 0; + pkt_filter->u.pattern.size_bytes = 6; + mask_and_pattern[0] = 0xFFFF; + mask_and_pattern[1] = 0xFFFF; + mask_and_pattern[2] = 0xFFFF; + mask_and_pattern[3] = 0xFFFF; + mask_and_pattern[4] = 0xFFFF; + mask_and_pattern[5] = 0xFFFF; + break; + case INFF_MULTICAST4_FILTER_NUM: + //filter_pattern = "102 0 0 0 0xFFFFFF 0x01005E"; + pkt_filter->id = 102; + pkt_filter->type = 0; + pkt_filter->negate_match = 0; + pkt_filter->u.pattern.offset = 0; + pkt_filter->u.pattern.size_bytes = 3; + mask_and_pattern[0] = 0xFFFF; + mask_and_pattern[1] = 0x01FF; + mask_and_pattern[2] = 0x5E00; + break; + case INFF_MULTICAST6_FILTER_NUM: + //filter_pattern = "103 0 0 0 0xFFFF 0x3333"; + pkt_filter->id = 103; + pkt_filter->type = 0; + pkt_filter->negate_match = 0; + pkt_filter->u.pattern.offset = 0; + pkt_filter->u.pattern.size_bytes = 2; + mask_and_pattern[0] = 0xFFFF; + mask_and_pattern[1] = 0x3333; + break; + case INFF_MDNS_FILTER_NUM: + //filter_pattern = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB"; + pkt_filter->id = 104; + pkt_filter->type = 0; + pkt_filter->negate_match = 0; + pkt_filter->u.pattern.offset = 0; + pkt_filter->u.pattern.size_bytes = 6; + mask_and_pattern[0] = 0xFFFF; + mask_and_pattern[1] = 0xFFFF; + mask_and_pattern[2] = 0xFFFF; + mask_and_pattern[3] = 0x0001; + mask_and_pattern[4] = 0x005E; + mask_and_pattern[5] = 0xFB00; + break; + case 
INFF_ARP_FILTER_NUM: + //filter_pattern = "105 0 0 12 0xFFFF 0x0806"; + pkt_filter->id = 105; + pkt_filter->type = 0; + pkt_filter->negate_match = 0; + pkt_filter->u.pattern.offset = 12; + pkt_filter->u.pattern.size_bytes = 2; + mask_and_pattern[0] = 0xFFFF; + mask_and_pattern[1] = 0x0608; + break; + case INFF_BROADCAST_ARP_FILTER_NUM: + //filter_pattern = "106 0 0 0 + //0xFFFFFFFFFFFF0000000000000806 + //0xFFFFFFFFFFFF0000000000000806"; + pkt_filter->id = 106; + pkt_filter->type = 0; + pkt_filter->negate_match = 0; + pkt_filter->u.pattern.offset = 0; + pkt_filter->u.pattern.size_bytes = 14; + mask_and_pattern[0] = 0xFFFF; + mask_and_pattern[1] = 0xFFFF; + mask_and_pattern[2] = 0xFFFF; + mask_and_pattern[3] = 0x0000; + mask_and_pattern[4] = 0x0000; + mask_and_pattern[5] = 0x0000; + mask_and_pattern[6] = 0x0608; + mask_and_pattern[7] = 0xFFFF; + mask_and_pattern[8] = 0xFFFF; + mask_and_pattern[9] = 0xFFFF; + mask_and_pattern[10] = 0x0000; + mask_and_pattern[11] = 0x0000; + mask_and_pattern[12] = 0x0000; + mask_and_pattern[13] = 0x0608; + break; + default: + ret = -EINVAL; + goto failed; + } + memcpy(pkt_filter->u.pattern.mask_and_pattern, mask_and_pattern, + pkt_filter->u.pattern.size_bytes * 2); + buflen = filter_fixed_len + pattern_fixed_len + + pkt_filter->u.pattern.size_bytes * 2; + + if (add) { + /* Add filter */ + ifp->fwil_fwerr = true; + ret = inff_fil_iovar_data_set(ifp, "pkt_filter_add", + pkt_filter, buflen); + ifp->fwil_fwerr = false; + if (ret) + goto failed; + drvr->pkt_filter[filter_num].id = pkt_filter->id; + drvr->pkt_filter[filter_num].enable = 0; + + } else { + /* Delete filter */ + ifp->fwil_fwerr = true; + ret = inff_fil_iovar_int_set(ifp, "pkt_filter_delete", + pkt_filter->id); + ifp->fwil_fwerr = false; + if (ret == -INFF_FW_BADARG) + ret = 0; + if (ret) + goto failed; + + drvr->pkt_filter[filter_num].id = 0; + drvr->pkt_filter[filter_num].enable = 0; + } +failed: + if (ret) + inff_err("%s packet filter failed, ret=%d\n", + (add ? 
"add" : "remove"), ret); + + kfree(pkt_filter); + return ret; +} + +int inff_pktfilter_enable(struct net_device *ndev, bool enable) +{ + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = ifp->drvr; + int ret = 0; + int idx = 0; + + for (idx = 0; idx < MAX_PKT_FILTER_COUNT; ++idx) { + if (drvr->pkt_filter[idx].id != 0) { + drvr->pkt_filter[idx].enable = enable; + ret = inff_fil_iovar_data_set(ifp, "pkt_filter_enable", + &drvr->pkt_filter[idx], + sizeof(struct inff_pkt_filter_enable_le)); + if (ret) { + inff_err("%s packet filter id(%d) failed, ret=%d\n", + (enable ? "enable" : "disable"), + drvr->pkt_filter[idx].id, ret); + } + } + } + return ret; +} + +/** Find STA with MAC address ea in an interface's STA list. */ +struct inff_sta * +inff_find_sta(struct inff_if *ifp, const u8 *ea) +{ + struct inff_sta *sta; + unsigned long flags; + + INFF_IF_STA_LIST_LOCK(ifp, flags); + list_for_each_entry(sta, &ifp->sta_list, list) { + if (!memcmp(sta->ea.octet, ea, ETH_ALEN)) { + inff_dbg(INFO, "Found STA: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x into sta list\n", + sta->ea.octet[0], sta->ea.octet[1], + sta->ea.octet[2], sta->ea.octet[3], + sta->ea.octet[4], sta->ea.octet[5]); + INFF_IF_STA_LIST_UNLOCK(ifp, flags); + return sta; + } + } + INFF_IF_STA_LIST_UNLOCK(ifp, flags); + + return INFF_STA_NULL; +} + +/** Add STA into the interface's STA list. 
*/ +static struct inff_sta * +inff_add_sta(struct inff_if *ifp, const u8 *ea) +{ + struct inff_sta *sta; + unsigned long flags; + + sta = kzalloc(sizeof(*sta), GFP_KERNEL); + if (sta == INFF_STA_NULL) { + inff_err("Alloc failed\n"); + return INFF_STA_NULL; + } + memcpy(sta->ea.octet, ea, ETH_ALEN); + inff_dbg(INFO, "Add STA: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x into sta list\n", + sta->ea.octet[0], sta->ea.octet[1], + sta->ea.octet[2], sta->ea.octet[3], + sta->ea.octet[4], sta->ea.octet[5]); + + /* link the sta and the inff_if interface */ + sta->ifp = ifp; + INIT_LIST_HEAD(&sta->list); + + INFF_IF_STA_LIST_LOCK(ifp, flags); + + list_add_tail(&sta->list, &ifp->sta_list); + + INFF_IF_STA_LIST_UNLOCK(ifp, flags); + return sta; +} + +/** Delete STA from the interface's STA list. */ +void +inff_del_sta(struct inff_if *ifp, const u8 *ea) +{ + struct inff_sta *sta, *next; + unsigned long flags; + + INFF_IF_STA_LIST_LOCK(ifp, flags); + list_for_each_entry_safe(sta, next, &ifp->sta_list, list) { + if (!memcmp(sta->ea.octet, ea, ETH_ALEN)) { + inff_dbg(INFO, "del STA: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x from sta list\n", + ea[0], ea[1], ea[2], ea[3], + ea[4], ea[5]); + list_del(&sta->list); + kfree(sta); + } + } + + INFF_IF_STA_LIST_UNLOCK(ifp, flags); +} + +/** Add STA if it doesn't exist. Not reentrant. 
*/ +struct inff_sta* +inff_findadd_sta(struct inff_if *ifp, const u8 *ea) +{ + struct inff_sta *sta = NULL; + + sta = inff_find_sta(ifp, ea); + + if (!sta) { + /* Add entry */ + sta = inff_add_sta(ifp, ea); + } + return sta; +} + +s32 +inff_set_channel(struct inff_cfg80211_info *cfg, struct ieee80211_channel *chan) +{ + u16 chspec = 0; + int err = 0; + struct inff_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + + if (chan->flags & IEEE80211_CHAN_DISABLED) + return -EINVAL; + + /* set_channel */ + chspec = channel_to_chanspec(&cfg->d11inf, chan); + if (chspec != INVCHANSPEC) { + err = inff_fil_iovar_int_set(ifp, "chanspec", chspec); + if (err) { + inff_err("set chanspec 0x%04x fail, reason %d\n", chspec, err); + err = -EINVAL; + } + } else { + inff_err("failed to convert host chanspec to fw chanspec\n"); + err = -EINVAL; + } + + return err; +} + +s32 inff_mchan_config(struct inff_cfg80211_info *cfg) +{ + struct inff_pub *drvr = cfg->pub; + struct inff_if *ifp; + int ifidx = 0; + s32 err = 0; + struct inff_assoclist_le assoclist = {0}; + u8 mac_addr[ETH_ALEN] = {0}, mac_null[ETH_ALEN] = {0}; + u32 mchan_arg = 0; + bool staif_connected = false, apif_connected = false; + + if (!inff_feat_is_enabled(drvr->iflist[0], INFF_FEAT_MCHAN_CONFIG)) + return 0; + + for (ifidx = 0; ifidx < INFF_MAX_IFS; ifidx++) { + ifp = drvr->iflist[ifidx]; + if (ifp && ifp->vif) { + if (ifp->vif->wdev.iftype == NL80211_IFTYPE_STATION) { + err = inff_fil_cmd_data_get(ifp, INFF_C_GET_BSSID, &mac_addr, + sizeof(mac_addr)); + if (err) + iphy_err(drvr, "INFF_C_GET_BSSID fail (%d)\n", err); + else if (memcmp(mac_null, mac_addr, ETH_ALEN)) + staif_connected = true; + } else if (ifp->vif->wdev.iftype == NL80211_IFTYPE_AP) { + memset(&assoclist, 0, sizeof(assoclist)); + assoclist.count = cpu_to_le32(INFF_MAX_ASSOCLIST); + err = inff_fil_cmd_data_get(ifp, INFF_C_GET_ASSOCLIST, + &assoclist, + sizeof(assoclist)); + if (err) + iphy_err(drvr, "INFF_C_GET_ASSOCLIST fail (%d)\n", err); + else if (assoclist.count 
> 0) + apif_connected = true; + } + } + } + + if (staif_connected && apif_connected) { + inff_dbg(TRACE, "apsta case, set mchan config %d\n", cfg->mchan_conf); + switch (cfg->mchan_conf) { + case INFF_MCHAN_CONF_DEFAULT: + mchan_arg = INFF_MCHAN_SI_ALGO; + err = inff_fil_iovar_data_set(drvr->iflist[0], "mchan_algo", + &mchan_arg, sizeof(mchan_arg)); + break; + case INFF_MCHAN_CONF_VEDIO: + mchan_arg = INFF_MCHAN_ASYMMETRIC_SI_ALGO; + err = inff_fil_iovar_data_set(drvr->iflist[0], "mchan_algo", + &mchan_arg, sizeof(mchan_arg)); + break; + case INFF_MCHAN_CONF_AUDIO: + mchan_arg = INFF_MCHAN_BANDWIDTH_ALGO; + err = inff_fil_iovar_data_set(drvr->iflist[0], "mchan_algo", + &mchan_arg, sizeof(mchan_arg)); + if (err) + goto cmd_fail; + mchan_arg = INFF_MCHAN_BANDWIDTH_VAL; + err = inff_fil_iovar_data_set(drvr->iflist[0], "mchan_bw", + &mchan_arg, sizeof(mchan_arg)); + break; + default: + inff_dbg(TRACE, "unexpected mchan conf %d\n", cfg->mchan_conf); + break; + } +cmd_fail: + if (err) + iphy_err(drvr, "cmd fail while set mchan config (%d)\n", err); + } + + return 0; +} diff --git a/drivers/net/wireless/infineon/inffmac/core.h b/drivers/net/wireless/infineon/inffmac/core.h new file mode 100644 index 000000000000..a1707ebc69dd --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/core.h @@ -0,0 +1,355 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_CORE_H +#define INFF_CORE_H + +#include +#include +#include "fweh.h" +#include "fwil_types.h" +#include "feature.h" +#include "logger.h" + +/* + * Priority definitions according 802.1D + */ +#define PRIO_8021D_NONE 2 +#define PRIO_8021D_BK 1 +#define PRIO_8021D_BE 0 +#define PRIO_8021D_EE 3 +#define PRIO_8021D_CL 4 +#define PRIO_8021D_VI 5 +#define PRIO_8021D_VO 6 +#define PRIO_8021D_NC 7 + +#define MAXPRIO 7 +#define NUMPRIO (MAXPRIO + 1) + +/* DSCP type definitions for RFC4594 */ +/* DF: Standard (RFC2474) */ +#define DSCP_DF 0x00u +/* AF1x: High-Throughput Data (RFC2597) */ +#define DSCP_AF11 0x0Au +#define DSCP_AF12 0x0Cu +#define DSCP_AF13 0x0Eu +/* CS1: Low-Priority Data (RFC3662) */ +#define DSCP_CS1 0x08u +/* AF2x: Low-Latency Data (RFC2597) */ +#define DSCP_AF21 0x12u +#define DSCP_AF22 0x14u +#define DSCP_AF23 0x16u +/* CS2: OAM (RFC2474) */ +#define DSCP_CS2 0x10u +/* AF3x: Multimedia Streaming (RFC2597) */ +#define DSCP_AF31 0x1Au +#define DSCP_AF32 0x1Cu +#define DSCP_AF33 0x1Eu +/* CS3: Broadcast Video (RFC2474) */ +#define DSCP_CS3 0x18u +/* AF4x: Multimedia Conferencing (RFC2597) */ +#define DSCP_AF41 0x22u +#define DSCP_AF42 0x24u +#define DSCP_AF43 0x26u +/* CS4: Real-Time Interactive (RFC2474) */ +#define DSCP_CS4 0x20u +/* CS5: Signaling (RFC2474) */ +#define DSCP_CS5 0x28u +/* VA: VOCIE-ADMIT (RFC5865) */ +#define DSCP_VA 0x2Cu +/* EF: Telephony (RFC3246) */ +#define DSCP_EF 0x2Eu +/* CS6: Network Control (RFC2474) */ +#define DSCP_CS6 0x30u +/* CS7: Network Control (RFC2474) */ +#define DSCP_CS7 0x38u + +/* Bit masks for radio disabled status - returned by WL_GET_RADIO */ + +#define WL_RADIO_SW_DISABLE BIT(0) +#define WL_RADIO_HW_DISABLE BIT(1) +/* some countries don't support any channel */ +#define WL_RADIO_COUNTRY_DISABLE BIT(3) + +/* Override bit for SET_TXPWR. 
if set, ignore other level limits */ +#define WL_TXPWR_OVERRIDE BIT(31) + +/* Values for PM */ +#define PM_OFF 0 +#define PM_MAX 1 +#define PM_FAST 2 + +/* + * Sonics Configuration Space Registers. + */ + +/* core sbconfig regs are top 256bytes of regs */ +#define SBCONFIGOFF 0xf00 + +#define TOE_TX_CSUM_OL 0x00000001 +#define TOE_RX_CSUM_OL 0x00000002 + +/* For supporting multiple interfaces */ +#define INFF_MAX_IFS 16 + +/* Small, medium and maximum buffer size for dcmd + */ +#define INFF_DCMD_SMLEN 256 +#define INFF_DCMD_MEDLEN 1536 +#define INFF_DCMD_MAXLEN 8192 + +/* IOCTL from host to device are limited in length. A device can only handle + * ethernet frame size. This limitation is to be applied by protocol layer. + */ +#define INFF_TX_IOCTL_MAX_MSG_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN) + +#define INFF_AMPDU_RX_REORDER_MAXFLOWS 256 + +/* Length of firmware version string stored for + * ethtool driver info which uses 32 bytes as well. + */ +#define INFF_DRIVER_FIRMWARE_VERSION_LEN 32 + +#define NDOL_MAX_ENTRIES 8 + +/** + * struct inff_ampdu_rx_reorder - AMPDU receive reorder info + * + * @flow_id: AMPDU flow identifier. + * @cur_idx: last AMPDU index from firmware. + * @exp_idx: expected next AMPDU index. + * @max_idx: maximum amount of packets per AMPDU. + * @pend_pkts: number of packets currently in @pktslots. + * @pktslots: array for ordering AMPDU packets. + */ +struct inff_ampdu_rx_reorder { + u8 flow_id; + u8 cur_idx; + u8 exp_idx; + u8 max_idx; + u8 pend_pkts; + struct sk_buff *pktslots[]; +}; + +/* Forward decls for struct inff_pub (see below) */ +struct inff_proto; /* device communication protocol info */ +struct inff_fws_info; /* firmware signalling info */ +struct inff_mp_device; /* module parameters, device specific */ + +/* + * struct inff_rev_info + * + * The result field stores the error code of the + * revision info request from firmware. 
For the + * other fields see struct inff_rev_info_le in + * fwil_types.h + */ +struct inff_rev_info { + int result; + u32 vendorid; + u32 deviceid; + u32 radiorev; + u32 corerev; + u32 boardid; + u32 boardvendor; + u32 boardrev; + u32 driverrev; + u32 ucoderev; + u32 bus; + char chipname[12]; + u32 phytype; + u32 phyrev; + u32 anarev; + u32 chippkg; + u32 nvramrev; +}; + +/** wlc interface version */ +struct inff_wlc_version { + /* wlc interface version numbers */ + u16 wlc_ver_major; /**< wlc interface major version number */ + u16 wlc_ver_minor; /**< wlc interface minor version number */ +}; + +/* Common structure for module and instance linkage */ +struct inff_pub { + /* Linkage ponters */ + struct inff_bus *bus_if; + struct inff_proto *proto; + struct wiphy *wiphy; + struct cfg80211_ops *ops; + struct inff_cfg80211_info *config; + + /* Internal inff items */ + uint hdrlen; /* Total INFF header length (proto + bus) */ + + /* Dongle media info */ + char fwver[INFF_DRIVER_FIRMWARE_VERSION_LEN]; + u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */ + + struct mac_address addresses[INFF_MAX_IFS]; + + struct inff_if *iflist[INFF_MAX_IFS]; + s32 if2bss[INFF_MAX_IFS]; + struct inff_if *mon_if; + + struct mutex proto_block; /* used to protect proto resource */ + unsigned char proto_buf[INFF_DCMD_MAXLEN]; + + struct inff_fweh_info fweh; + + struct inff_ampdu_rx_reorder + *reorder_flows[INFF_AMPDU_RX_REORDER_MAXFLOWS]; + + u8 feat_flags[DIV_ROUND_UP(INFF_FEAT_LAST, 8)]; + u32 chip_quirks; + int req_mpc; + + struct inff_rev_info revinfo; + struct inff_wlc_version wlc_ver; +#ifdef DEBUG + struct dentry *dbgfs_dir; +#endif + + struct notifier_block inetaddr_notifier; + struct notifier_block inet6addr_notifier; + struct inff_mp_device *settings; + + struct work_struct bus_reset; + + u8 clmver[INFF_DCMD_SMLEN]; + u8 sta_mac_idx; + struct inff_pkt_filter_enable_le pkt_filter[MAX_PKT_FILTER_COUNT]; + u16 cnt_ver; + + struct cfg80211_qos_map *qos_map; + struct inff_logger 
*logger; + struct inff_dfu *dfu; +}; + +/* forward declarations */ +struct inff_cfg80211_vif; +struct inff_fws_mac_descriptor; + +/** + * enum inff_netif_stop_reason - reason for stopping netif queue. + * + * @INFF_NETIF_STOP_REASON_FWS_FC: + * netif stopped due to firmware signalling flow control. + * @INFF_NETIF_STOP_REASON_FLOW: + * netif stopped due to flowring full. + * @INFF_NETIF_STOP_REASON_DISCONNECTED: + * netif stopped due to not being connected (STA mode). + */ +enum inff_netif_stop_reason { + INFF_NETIF_STOP_REASON_FWS_FC = BIT(0), + INFF_NETIF_STOP_REASON_FLOW = BIT(1), + INFF_NETIF_STOP_REASON_DISCONNECTED = BIT(2) +}; + +/** + * struct inff_if - interface control information. + * + * @drvr: points to device related information. + * @vif: points to cfg80211 specific interface information. + * @ndev: associated network device. + * @multicast_work: worker object for multicast provisioning. + * @ndoffload_work: worker object for neighbor discovery offload configuration. + * @fws_desc: interface specific firmware-signalling descriptor. + * @ifidx: interface index in device firmware. + * @bsscfgidx: index of bss associated with this interface. + * @mac_addr: assigned mac address. + * @netif_stop: bitmap indicates reason why netif queues are stopped. + * @netif_stop_lock: spinlock for update netif_stop from multiple sources. + * @pend_8021x_cnt: tracks outstanding number of 802.1x frames. + * @pend_8021x_wait: used for signalling change in count. + * @fwil_fwerr: flag indicating fwil layer should return firmware error codes. + * @sta_list: dll of associated stations. + * @sta_list_lock: station list lock. + * @twt_sess_list: dll of TWT sessions. + * @twt_sess_list_lock: TWT session list lock. + * @twt_evt_timeout: TWT firmware event timeout. + * @fmac_pkt_fwd_en: Driver packet forward enabled for AP Mode. + * @napi: NAPI structure for handling the RX packets received on this interface. + * @napi_gro: NAPI GRO Receive enabled flag. 
+ * @xdp_prog: XDP Program attached to the netdev for native mode execution. + */ +struct inff_if { + struct inff_pub *drvr; + struct inff_cfg80211_vif *vif; + struct net_device *ndev; + struct work_struct multicast_work; + struct work_struct ndoffload_work; + struct inff_fws_mac_descriptor *fws_desc; + int ifidx; + s32 bsscfgidx; + bool isap; + u8 mac_addr[ETH_ALEN]; + u8 netif_stop; + spinlock_t netif_stop_lock; /* used to protect netif_stop resource */ + atomic_t pend_8021x_cnt; + wait_queue_head_t pend_8021x_wait; + struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES]; + u8 ipv6addr_idx; + bool fwil_fwerr; + struct list_head sta_list; + spinlock_t sta_list_lock; /* used to protect STA list */ + struct list_head twt_sess_list; + spinlock_t twt_sess_list_lock; /* used to protect TWT session list */ + struct timer_list twt_evt_timeout; + bool fmac_pkt_fwd_en; + struct napi_struct napi; + bool napi_gro; + struct bpf_prog *xdp_prog; +}; + +struct ether_addr { + u8 octet[ETH_ALEN]; +}; + +/* + * struct inff_sta - Per STA param. + * + * @ifp: associated inff_if pointer. + * @ea: station ethernet MAC address. + * @list: link to inff_if::sta_list. 
+ */ +struct inff_sta { + void *ifp; + struct ether_addr ea; + struct list_head list; +}; + +int inff_netdev_wait_pend8021x(struct inff_if *ifp); + +/* Return pointer to interface name */ +char *inff_ifname(struct inff_if *ifp); +struct inff_if *inff_get_ifp(struct inff_pub *drvr, int ifidx); +int inff_net_attach(struct inff_if *ifp, bool locked); +struct inff_if *inff_add_if(struct inff_pub *drvr, s32 bsscfgidx, s32 ifidx, + const char *name, u8 *mac_addr, bool is_p2pdev, + bool is_wlan_sensedev); +void inff_remove_interface(struct inff_if *ifp, bool locked); +void inff_txflowblock_if(struct inff_if *ifp, + enum inff_netif_stop_reason reason, bool state); +void inff_txfinalize(struct inff_if *ifp, struct sk_buff *txp, bool success); +void inff_netif_rx(struct inff_if *ifp, struct sk_buff *skb, bool inirq); +void inff_netif_mon_rx(struct inff_if *ifp, struct sk_buff *skb); +void inff_net_detach(struct net_device *ndev, bool locked); +int inff_net_mon_attach(struct inff_if *ifp); +void inff_net_setcarrier(struct inff_if *ifp, bool on); +int __init inff_core_init(void); +void inff_core_exit(void); +int inff_pktfilter_add_remove(struct net_device *ndev, int filter_num, + bool add); +int inff_pktfilter_enable(struct net_device *ndev, bool enable); +void inff_del_sta(struct inff_if *ifp, const u8 *ea); +struct inff_sta *inff_find_sta(struct inff_if *ifp, const u8 *ea); +struct inff_sta *inff_findadd_sta(struct inff_if *ifp, const u8 *ea); + +#endif /* INFF_CORE_H */ -- 2.25.1 Driver implementation of Coexistence mechanism to mitigate interface between Wi-Fi and Bluetooth. 
Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/btcoex.c | 482 ++++++++++++++++++ .../net/wireless/infineon/inffmac/btcoex.h | 22 + 2 files changed, 504 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/btcoex.c create mode 100644 drivers/net/wireless/infineon/inffmac/btcoex.h diff --git a/drivers/net/wireless/infineon/inffmac/btcoex.c b/drivers/net/wireless/infineon/inffmac/btcoex.c new file mode 100644 index 000000000000..a1620501e947 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/btcoex.c @@ -0,0 +1,482 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2013 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ +#include +#include +#include + +#include "core.h" +#include "debug.h" +#include "fwil.h" +#include "fwil_types.h" +#include "btcoex.h" +#include "cfg80211.h" + +/* T1 start SCO/eSCO priority suppression */ +#define INFF_BTCOEX_OPPR_WIN_TIME msecs_to_jiffies(2000) + +/* BT registers values during DHCP */ +#define INFF_BT_DHCP_REG50 0x8022 +#define INFF_BT_DHCP_REG51 0 +#define INFF_BT_DHCP_REG64 0 +#define INFF_BT_DHCP_REG65 0 +#define INFF_BT_DHCP_REG71 0 +#define INFF_BT_DHCP_REG66 0x2710 +#define INFF_BT_DHCP_REG41 0x33 +#define INFF_BT_DHCP_REG68 0x190 + +/* number of samples for SCO detection */ +#define INFF_BT_SCO_SAMPLES 12 + +/** + * enum inff_btcoex_state - BT coex DHCP state machine states + * @INFF_BT_DHCP_IDLE: DCHP is idle + * @INFF_BT_DHCP_START: DHCP started, wait before + * boosting wifi priority + * @INFF_BT_DHCP_OPPR_WIN: graceful DHCP opportunity ended, + * boost wifi priority + * @INFF_BT_DHCP_FLAG_FORCE_TIMEOUT: wifi priority boost end, + * restore defaults + */ +enum inff_btcoex_state { + INFF_BT_DHCP_IDLE, + INFF_BT_DHCP_START, + INFF_BT_DHCP_OPPR_WIN, + INFF_BT_DHCP_FLAG_FORCE_TIMEOUT +}; + +/** + * struct inff_btcoex_info - BT coex related information + * @vif: interface for 
which request was done. + * @timer: timer for DHCP state machine + * @timeout: configured timeout. + * @timer_on: DHCP timer active + * @dhcp_done: DHCP finished before T1/T2 timer expiration + * @bt_state: DHCP state machine state + * @work: DHCP state machine work + * @cfg: driver private data for cfg80211 interface + * @reg66: saved value of btc_params 66 + * @reg41: saved value of btc_params 41 + * @reg68: saved value of btc_params 68 + * @saved_regs_part1: flag indicating regs 66,41,68 + * have been saved + * @reg50: saved value of btc_params 50 + * @reg51: saved value of btc_params 51 + * @reg64: saved value of btc_params 64 + * @reg65: saved value of btc_params 65 + * @reg71: saved value of btc_params 71 + * @saved_regs_part2: flag indicating regs 50,51,64,65,71 + * have been saved + */ +struct inff_btcoex_info { + struct inff_cfg80211_vif *vif; + struct timer_list timer; + u16 timeout; + bool timer_on; + bool dhcp_done; + enum inff_btcoex_state bt_state; + struct work_struct work; + struct inff_cfg80211_info *cfg; + u32 reg66; + u32 reg41; + u32 reg68; + bool saved_regs_part1; + u32 reg50; + u32 reg51; + u32 reg64; + u32 reg65; + u32 reg71; + bool saved_regs_part2; +}; + +/** + * inff_btcoex_params_write() - write btc_params firmware variable + * @ifp: interface + * @addr: btc_params register number + * @data: data to write + */ +static s32 inff_btcoex_params_write(struct inff_if *ifp, u32 addr, u32 data) +{ + struct { + __le32 addr; + __le32 data; + } reg_write; + + reg_write.addr = cpu_to_le32(addr); + reg_write.data = cpu_to_le32(data); + return inff_fil_iovar_data_set(ifp, "btc_params", + ®_write, sizeof(reg_write)); +} + +/** + * inff_btcoex_params_read() - read btc_params firmware variable + * @ifp: interface + * @addr: btc_params register number + * @data: read data + */ +static s32 inff_btcoex_params_read(struct inff_if *ifp, u32 addr, u32 *data) +{ + *data = addr; + + return inff_fil_iovar_int_get(ifp, "btc_params", data); +} + +/** + * 
inff_btcoex_boost_wifi() - control BT SCO/eSCO parameters + * @btci: BT coex info + * @trump_sco: + * true - set SCO/eSCO parameters for compatibility + * during DHCP window + * false - restore saved parameter values + * + * Enhanced BT COEX settings for eSCO compatibility during DHCP window + */ +static void inff_btcoex_boost_wifi(struct inff_btcoex_info *btci, + bool trump_sco) +{ + struct inff_if *ifp = inff_get_ifp(btci->cfg->pub, 0); + + if (trump_sco && !btci->saved_regs_part2) { + /* this should reduce eSCO agressive + * retransmit w/o breaking it + */ + + /* save current */ + inff_dbg(INFO, "new SCO/eSCO coex algo {save & override}\n"); + inff_btcoex_params_read(ifp, 50, &btci->reg50); + inff_btcoex_params_read(ifp, 51, &btci->reg51); + inff_btcoex_params_read(ifp, 64, &btci->reg64); + inff_btcoex_params_read(ifp, 65, &btci->reg65); + inff_btcoex_params_read(ifp, 71, &btci->reg71); + + btci->saved_regs_part2 = true; + inff_dbg(INFO, + "saved bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n", + btci->reg50, btci->reg51, btci->reg64, + btci->reg65, btci->reg71); + + /* pacify the eSco */ + inff_btcoex_params_write(ifp, 50, INFF_BT_DHCP_REG50); + inff_btcoex_params_write(ifp, 51, INFF_BT_DHCP_REG51); + inff_btcoex_params_write(ifp, 64, INFF_BT_DHCP_REG64); + inff_btcoex_params_write(ifp, 65, INFF_BT_DHCP_REG65); + inff_btcoex_params_write(ifp, 71, INFF_BT_DHCP_REG71); + + } else if (btci->saved_regs_part2) { + /* restore previously saved bt params */ + inff_dbg(INFO, "Do new SCO/eSCO coex algo {restore}\n"); + inff_btcoex_params_write(ifp, 50, btci->reg50); + inff_btcoex_params_write(ifp, 51, btci->reg51); + inff_btcoex_params_write(ifp, 64, btci->reg64); + inff_btcoex_params_write(ifp, 65, btci->reg65); + inff_btcoex_params_write(ifp, 71, btci->reg71); + + inff_dbg(INFO, + "restored bt_params[50,51,64,65,71]: 0x%x 0x%x 0x%x 0x%x 0x%x\n", + btci->reg50, btci->reg51, btci->reg64, + btci->reg65, btci->reg71); + + btci->saved_regs_part2 = false; + } else { + 
inff_dbg(INFO, "attempted to restore not saved BTCOEX params\n"); + } +} + +/** + * inff_btcoex_is_sco_active() - check if SCO/eSCO is active + * @ifp: interface + * + * return: true if SCO/eSCO session is active + */ +static bool inff_btcoex_is_sco_active(struct inff_if *ifp) +{ + int ioc_res = 0; + bool res = false; + int sco_id_cnt = 0; + u32 param27; + int i; + + for (i = 0; i < INFF_BT_SCO_SAMPLES; i++) { + ioc_res = inff_btcoex_params_read(ifp, 27, ¶m27); + + if (ioc_res < 0) { + inff_err("ioc read btc params error\n"); + break; + } + + inff_dbg(INFO, "sample[%d], btc_params 27:%x\n", i, param27); + + if ((param27 & 0x6) == 2) { /* count both sco & esco */ + sco_id_cnt++; + } + + if (sco_id_cnt > 2) { + inff_dbg(INFO, + "sco/esco detected, pkt id_cnt:%d samples:%d\n", + sco_id_cnt, i); + res = true; + break; + } + } + inff_dbg(TRACE, "exit: result=%d\n", res); + return res; +} + +/* + * btcmf_btcoex_save_part1() - save first step parameters. + */ +static void btcmf_btcoex_save_part1(struct inff_btcoex_info *btci) +{ + struct inff_if *ifp = btci->vif->ifp; + + if (!btci->saved_regs_part1) { + /* Retrieve and save original reg value */ + inff_btcoex_params_read(ifp, 66, &btci->reg66); + inff_btcoex_params_read(ifp, 41, &btci->reg41); + inff_btcoex_params_read(ifp, 68, &btci->reg68); + btci->saved_regs_part1 = true; + inff_dbg(INFO, + "saved btc_params regs (66,41,68) 0x%x 0x%x 0x%x\n", + btci->reg66, btci->reg41, + btci->reg68); + } +} + +/* + * inff_btcoex_restore_part1() - restore first step parameters. 
+ */ +static void inff_btcoex_restore_part1(struct inff_btcoex_info *btci) +{ + struct inff_if *ifp; + + if (btci->saved_regs_part1) { + btci->saved_regs_part1 = false; + ifp = btci->vif->ifp; + inff_btcoex_params_write(ifp, 66, btci->reg66); + inff_btcoex_params_write(ifp, 41, btci->reg41); + inff_btcoex_params_write(ifp, 68, btci->reg68); + inff_dbg(INFO, + "restored btc_params regs {66,41,68} 0x%x 0x%x 0x%x\n", + btci->reg66, btci->reg41, + btci->reg68); + } +} + +/* + * inff_btcoex_timerfunc() - BT coex timer callback + */ +static void inff_btcoex_timerfunc(struct timer_list *t) +{ + struct inff_btcoex_info *bt_local = timer_container_of(bt_local, t, + timer); + inff_dbg(TRACE, "enter\n"); + + bt_local->timer_on = false; + schedule_work(&bt_local->work); +} + +/** + * inff_btcoex_handler() - BT coex state machine work handler + * @work: work + */ +static void inff_btcoex_handler(struct work_struct *work) +{ + struct inff_btcoex_info *btci; + + btci = container_of(work, struct inff_btcoex_info, work); + if (btci->timer_on) { + btci->timer_on = false; + timer_delete_sync(&btci->timer); + } + + switch (btci->bt_state) { + case INFF_BT_DHCP_START: + /* DHCP started provide OPPORTUNITY window + * to get DHCP address + */ + inff_dbg(INFO, "DHCP started\n"); + btci->bt_state = INFF_BT_DHCP_OPPR_WIN; + if (btci->timeout < INFF_BTCOEX_OPPR_WIN_TIME) { + mod_timer(&btci->timer, btci->timer.expires); + } else { + btci->timeout -= INFF_BTCOEX_OPPR_WIN_TIME; + mod_timer(&btci->timer, + jiffies + INFF_BTCOEX_OPPR_WIN_TIME); + } + btci->timer_on = true; + break; + + case INFF_BT_DHCP_OPPR_WIN: + if (btci->dhcp_done) { + inff_dbg(INFO, "DHCP done before T1 expiration\n"); + goto idle; + } + + /* DHCP is not over yet, start lowering BT priority */ + inff_dbg(INFO, "DHCP T1:%d expired\n", + jiffies_to_msecs(INFF_BTCOEX_OPPR_WIN_TIME)); + inff_btcoex_boost_wifi(btci, true); + + btci->bt_state = INFF_BT_DHCP_FLAG_FORCE_TIMEOUT; + mod_timer(&btci->timer, jiffies + btci->timeout); + 
btci->timer_on = true;
+		break;
+
+	case INFF_BT_DHCP_FLAG_FORCE_TIMEOUT:
+		if (btci->dhcp_done)
+			inff_dbg(INFO, "DHCP done before T2 expiration\n");
+		else
+			/* report the remaining T2 window in ms (as the T1 print
+			 * does), not the state-machine constant
+			 */
+			inff_dbg(INFO, "DHCP T2:%d expired\n",
+				 jiffies_to_msecs(btci->timeout));
+
+		goto idle;
+
+	default:
+		inff_err("invalid state=%d !!!\n", btci->bt_state);
+		goto idle;
+	}
+
+	return;
+
+idle:
+	btci->bt_state = INFF_BT_DHCP_IDLE;
+	btci->timer_on = false;
+	inff_btcoex_boost_wifi(btci, false);
+	cfg80211_crit_proto_stopped(&btci->vif->wdev, GFP_KERNEL);
+	inff_btcoex_restore_part1(btci);
+	btci->vif = NULL;
+}
+
+/**
+ * inff_btcoex_attach() - initialize BT coex data
+ * @cfg: driver private cfg80211 data
+ *
+ * return: 0 on success
+ */
+int inff_btcoex_attach(struct inff_cfg80211_info *cfg)
+{
+	struct inff_btcoex_info *btci;
+
+	inff_dbg(TRACE, "enter\n");
+
+	/* zero-initialize so vif/dhcp_done are in a defined state before
+	 * the first DHCP session starts
+	 */
+	btci = kzalloc(sizeof(*btci), GFP_KERNEL);
+	if (!btci)
+		return -ENOMEM;
+
+	btci->bt_state = INFF_BT_DHCP_IDLE;
+
+	/* Set up timer for BT */
+	btci->timer_on = false;
+	btci->timeout = INFF_BTCOEX_OPPR_WIN_TIME;
+	timer_setup(&btci->timer, inff_btcoex_timerfunc, 0);
+	btci->cfg = cfg;
+	btci->saved_regs_part1 = false;
+	btci->saved_regs_part2 = false;
+
+	INIT_WORK(&btci->work, inff_btcoex_handler);
+
+	cfg->btcoex = btci;
+	return 0;
+}
+
+/**
+ * inff_btcoex_detach - clean BT coex data
+ * @cfg: driver private cfg80211 data
+ */
+void inff_btcoex_detach(struct inff_cfg80211_info *cfg)
+{
+	inff_dbg(TRACE, "enter\n");
+
+	if (!cfg->btcoex)
+		return;
+
+	timer_shutdown_sync(&cfg->btcoex->timer);
+	cfg->btcoex->timer_on = false;
+
+	cancel_work_sync(&cfg->btcoex->work);
+
+	inff_btcoex_boost_wifi(cfg->btcoex, false);
+	inff_btcoex_restore_part1(cfg->btcoex);
+
+	kfree(cfg->btcoex);
+	cfg->btcoex = NULL;
+}
+
+static void inff_btcoex_dhcp_start(struct inff_btcoex_info *btci)
+{
+	struct inff_if *ifp = btci->vif->ifp;
+
+	btcmf_btcoex_save_part1(btci);
+	/* set new regs values */
+	inff_btcoex_params_write(ifp, 66, INFF_BT_DHCP_REG66);
+
inff_btcoex_params_write(ifp, 41, INFF_BT_DHCP_REG41); + inff_btcoex_params_write(ifp, 68, INFF_BT_DHCP_REG68); + btci->dhcp_done = false; + btci->bt_state = INFF_BT_DHCP_START; + schedule_work(&btci->work); + inff_dbg(TRACE, "enable BT DHCP Timer\n"); +} + +static void inff_btcoex_dhcp_end(struct inff_btcoex_info *btci) +{ + /* Stop any bt timer because DHCP session is done */ + btci->dhcp_done = true; + if (btci->timer_on) { + inff_dbg(INFO, "disable BT DHCP Timer\n"); + btci->timer_on = false; + timer_delete_sync(&btci->timer); + + /* schedule worker if transition to IDLE is needed */ + if (btci->bt_state != INFF_BT_DHCP_IDLE) { + inff_dbg(INFO, "bt_state:%d\n", + btci->bt_state); + schedule_work(&btci->work); + } + } else { + /* Restore original values */ + inff_btcoex_restore_part1(btci); + } +} + +/* + * inff_btcoex_set_mode - set BT coex mode + * @mode: Wifi-Bluetooth coexistence mode + * + * return: 0 on success + */ +int inff_btcoex_set_mode(struct inff_cfg80211_vif *vif, + enum inff_btcoex_mode mode, u16 duration) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy); + struct inff_btcoex_info *btci = cfg->btcoex; + struct inff_if *ifp = inff_get_ifp(cfg->pub, 0); + + switch (mode) { + case INFF_BTCOEX_DISABLED: + inff_dbg(INFO, "DHCP session starts\n"); + if (btci->bt_state != INFF_BT_DHCP_IDLE) + return -EBUSY; + /* Start BT timer only for SCO connection */ + if (inff_btcoex_is_sco_active(ifp)) { + btci->timeout = msecs_to_jiffies(duration); + btci->vif = vif; + inff_btcoex_dhcp_start(btci); + } + break; + + case INFF_BTCOEX_ENABLED: + inff_dbg(INFO, "DHCP session ends\n"); + if (btci->bt_state != INFF_BT_DHCP_IDLE && + vif == btci->vif) { + inff_btcoex_dhcp_end(btci); + } + break; + default: + inff_dbg(INFO, "Unknown mode, ignored\n"); + } + return 0; +} diff --git a/drivers/net/wireless/infineon/inffmac/btcoex.h b/drivers/net/wireless/infineon/inffmac/btcoex.h new file mode 100644 index 000000000000..fc597d3a7575 --- /dev/null +++ 
b/drivers/net/wireless/infineon/inffmac/btcoex.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG.
+ * All rights reserved.
+ */
+
+#ifndef INFF_BTCOEX_H
+#define INFF_BTCOEX_H
+
+enum inff_btcoex_mode {
+	INFF_BTCOEX_DISABLED,
+	INFF_BTCOEX_ENABLED
+};
+
+int inff_btcoex_attach(struct inff_cfg80211_info *cfg);
+void inff_btcoex_detach(struct inff_cfg80211_info *cfg);
+int inff_btcoex_set_mode(struct inff_cfg80211_vif *vif,
+			 enum inff_btcoex_mode mode, u16 duration);
+
+#endif /* INFF_BTCOEX_H */
--
2.25.1

Driver implementation for the Preferred Network Offload (PNO) feature.
This helps in offloading the preferred BSS profiles to the device
firmware.

Signed-off-by: Gokul Sivakumar
---
 drivers/net/wireless/infineon/inffmac/pno.c | 770 ++++++++++++++++++++
 drivers/net/wireless/infineon/inffmac/pno.h | 140 ++++
 2 files changed, 910 insertions(+)
 create mode 100644 drivers/net/wireless/infineon/inffmac/pno.c
 create mode 100644 drivers/net/wireless/infineon/inffmac/pno.h

diff --git a/drivers/net/wireless/infineon/inffmac/pno.c b/drivers/net/wireless/infineon/inffmac/pno.c
new file mode 100644
index 000000000000..5d4026745f03
--- /dev/null
+++ b/drivers/net/wireless/infineon/inffmac/pno.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2016 Broadcom
+ *
+ * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG.
+ * All rights reserved.
+ */ +#include +#include +#include + +#include "core.h" +#include "debug.h" +#include "fwil.h" +#include "fwil_types.h" +#include "cfg80211.h" +#include "pno.h" +#include "feature.h" +#include "vendor_inf.h" +#include "chanspec.h" +#include "security.h" + +#define INFF_PNO_VERSION 2 +#define INFF_PNO_REPEAT 4 +#define INFF_PNO_FREQ_EXPO_MAX 3 +#define INFF_PNO_IMMEDIATE_SCAN_BIT 3 +#define INFF_PNO_ENABLE_BD_SCAN_BIT 5 +#define INFF_PNO_ENABLE_ADAPTSCAN_BIT 6 +#define INFF_PNO_REPORT_SEPARATELY_BIT 11 +#define INFF_PNO_SCAN_INCOMPLETE 0 +#define INFF_PNO_WPA_AUTH_ANY 0xFFFFFFFF +#define INFF_PNO_HIDDEN_BIT 2 +#define INFF_PNO_SCHED_SCAN_PERIOD 30 + +#define INFF_PNO_MAX_BUCKETS 16 +#define GSCAN_BATCH_NO_THR_SET 101 +#define GSCAN_RETRY_THRESHOLD 3 + +struct inff_pno_info { + int n_reqs; + struct cfg80211_sched_scan_request *reqs[INFF_PNO_MAX_BUCKETS]; + struct mutex req_lock; /* used to protect pno resource */ +}; + +#define ifp_to_pno(_ifp) ((_ifp)->drvr->config->pno) + +static int inff_pno_store_request(struct inff_pno_info *pi, + struct cfg80211_sched_scan_request *req) +{ + if (WARN(pi->n_reqs == INFF_PNO_MAX_BUCKETS, + "pno request storage full\n")) + return -ENOSPC; + + inff_dbg(SCAN, "reqid=%llu\n", req->reqid); + mutex_lock(&pi->req_lock); + pi->reqs[pi->n_reqs++] = req; + mutex_unlock(&pi->req_lock); + return 0; +} + +static int inff_pno_remove_request(struct inff_pno_info *pi, u64 reqid) +{ + int i, err = 0; + + mutex_lock(&pi->req_lock); + + /* Nothing to do if we have no requests */ + if (pi->n_reqs == 0) + goto done; + + /* find request */ + for (i = 0; i < pi->n_reqs; i++) { + if (pi->reqs[i]->reqid == reqid) + break; + } + /* request not found */ + if (WARN(i == pi->n_reqs, "reqid not found\n")) { + err = -ENOENT; + goto done; + } + + inff_dbg(SCAN, "reqid=%llu\n", reqid); + pi->n_reqs--; + + /* if last we are done */ + if (!pi->n_reqs || i == pi->n_reqs) + goto done; + + /* fill the gap with remaining requests */ + while (i <= pi->n_reqs - 1) { + 
pi->reqs[i] = pi->reqs[i + 1]; + i++; + } + +done: + mutex_unlock(&pi->req_lock); + return err; +} + +static int inff_pno_channel_config(struct inff_if *ifp, + struct inff_pno_config_le *cfg) +{ + cfg->reporttype = 0; + cfg->flags = 0; + + return inff_fil_iovar_data_set(ifp, "pfn_cfg", cfg, sizeof(*cfg)); +} + +static int inff_pno_config(struct inff_if *ifp, u32 scan_freq, + u32 mscan, u32 bestn) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_pno_param_le pfn_param; + u16 flags; + u32 pfnmem; + s32 err; + + memset(&pfn_param, 0, sizeof(pfn_param)); + pfn_param.version = cpu_to_le32(INFF_PNO_VERSION); + + /* set extra pno params */ + flags = BIT(INFF_PNO_IMMEDIATE_SCAN_BIT) | + BIT(INFF_PNO_ENABLE_ADAPTSCAN_BIT); + pfn_param.repeat = INFF_PNO_REPEAT; + pfn_param.exp = INFF_PNO_FREQ_EXPO_MAX; + + /* set up pno scan fr */ + pfn_param.scan_freq = cpu_to_le32(scan_freq); + + if (mscan) { + pfnmem = bestn; + + /* set bestn in firmware */ + err = inff_fil_iovar_int_set(ifp, "pfnmem", pfnmem); + if (err < 0) { + iphy_err(drvr, "failed to set pfnmem\n"); + goto exit; + } + /* get max mscan which the firmware supports */ + err = inff_fil_iovar_int_get(ifp, "pfnmem", &pfnmem); + if (err < 0) { + iphy_err(drvr, "failed to get pfnmem\n"); + goto exit; + } + mscan = min_t(u32, mscan, pfnmem); + pfn_param.mscan = mscan; + pfn_param.bestn = bestn; + flags |= BIT(INFF_PNO_ENABLE_BD_SCAN_BIT); + inff_dbg(INFO, "mscan=%d, bestn=%d\n", mscan, bestn); + } + + pfn_param.flags = cpu_to_le16(flags); + err = inff_fil_iovar_data_set(ifp, "pfn_set", &pfn_param, + sizeof(pfn_param)); + if (err) + iphy_err(drvr, "pfn_set failed, err=%d\n", err); + +exit: + return err; +} + +static int inff_pno_set_random(struct inff_if *ifp, struct inff_pno_info *pi) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_pno_macaddr_le pfn_mac; + u8 *mac_addr = NULL; + u8 *mac_mask = NULL; + int err, i, ri; + + for (ri = 0; ri < pi->n_reqs; ri++) + if (pi->reqs[ri]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 
{ + mac_addr = pi->reqs[ri]->mac_addr; + mac_mask = pi->reqs[ri]->mac_addr_mask; + break; + } + + /* no random mac requested */ + if (!mac_addr) + return 0; + + pfn_mac.version = INFF_PFN_MACADDR_CFG_VER; + pfn_mac.flags = INFF_PFN_MAC_OUI_ONLY | INFF_PFN_SET_MAC_UNASSOC; + + memcpy(pfn_mac.mac, mac_addr, ETH_ALEN); + for (i = 0; i < ETH_ALEN; i++) { + pfn_mac.mac[i] &= mac_mask[i]; + pfn_mac.mac[i] |= get_random_u8() & ~(mac_mask[i]); + } + /* Clear multi bit */ + pfn_mac.mac[0] &= 0xFE; + /* Set locally administered */ + pfn_mac.mac[0] |= 0x02; + + inff_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n", + pi->reqs[ri]->reqid, pfn_mac.mac); + err = inff_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac, + sizeof(pfn_mac)); + if (err) + iphy_err(drvr, "pfn_macaddr failed, err=%d\n", err); + + return err; +} + +static int inff_pno_add_ssid(struct inff_if *ifp, struct cfg80211_ssid *ssid, + bool active) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_pno_net_param_le pfn; + int err; + + pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN); + pfn.wpa_auth = cpu_to_le32(INFF_PNO_WPA_AUTH_ANY); + pfn.wsec = cpu_to_le32(0); + pfn.infra = cpu_to_le32(1); + pfn.flags = 0; + if (active) + pfn.flags = cpu_to_le32(1 << INFF_PNO_HIDDEN_BIT); + pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len); + memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len); + + inff_dbg(SCAN, "adding ssid=%.32s (active=%d)\n", ssid->ssid, active); + err = inff_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn)); + if (err < 0) + iphy_err(drvr, "adding failed: err=%d\n", err); + return err; +} + +static int inff_pno_add_bssid(struct inff_if *ifp, const u8 *bssid) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_pno_bssid_le bssid_cfg; + int err; + + memcpy(bssid_cfg.bssid, bssid, ETH_ALEN); + bssid_cfg.flags = 0; + + inff_dbg(SCAN, "adding bssid=%pM\n", bssid); + err = inff_fil_iovar_data_set(ifp, "pfn_add_bssid", &bssid_cfg, + sizeof(bssid_cfg)); + if (err < 0) + iphy_err(drvr, "adding failed: err=%d\n", 
err); + return err; +} + +static bool inff_is_ssid_active(struct cfg80211_ssid *ssid, + struct cfg80211_sched_scan_request *req) +{ + int i; + + if (!ssid || !req->ssids || !req->n_ssids) + return false; + + for (i = 0; i < req->n_ssids; i++) { + if (ssid->ssid_len == req->ssids[i].ssid_len) { + if (!strncmp(ssid->ssid, req->ssids[i].ssid, + ssid->ssid_len)) + return true; + } + } + return false; +} + +static int inff_pno_clean(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + int ret; + + /* Disable pfn */ + ret = inff_fil_iovar_int_set(ifp, "pfn", 0); + if (ret == 0) { + /* clear pfn */ + ret = inff_fil_iovar_data_set(ifp, "pfnclear", NULL, 0); + } + if (ret < 0) + iphy_err(drvr, "failed code %d\n", ret); + + return ret; +} + +static int inff_pno_get_bucket_channels(struct cfg80211_sched_scan_request *r, + struct inff_pno_config_le *pno_cfg) +{ + u32 n_chan = le32_to_cpu(pno_cfg->channel_num); + u16 chan; + int i, err = 0; + + for (i = 0; i < r->n_channels; i++) { + if (n_chan >= INFF_NUMCHANNELS) { + err = -ENOSPC; + goto done; + } + chan = r->channels[i]->hw_value; + inff_dbg(SCAN, "[%d] Chan : %u\n", n_chan, chan); + pno_cfg->channel_list[n_chan++] = cpu_to_le16(chan); + } + /* return number of channels */ + err = n_chan; +done: + pno_cfg->channel_num = cpu_to_le32(n_chan); + return err; +} + +static int inff_pno_prep_fwconfig(struct inff_pno_info *pi, + struct inff_pno_config_le *pno_cfg, + struct inff_gscan_bucket_config **buckets, + u32 *scan_freq) +{ + struct cfg80211_sched_scan_request *sr; + struct inff_gscan_bucket_config *fw_buckets; + int i, err, chidx; + + inff_dbg(SCAN, "n_reqs=%d\n", pi->n_reqs); + if (WARN_ON(!pi->n_reqs)) + return -ENODATA; + + /* + * actual scan period is determined using gcd() for each + * scheduled scan period. 
+ */ + *scan_freq = pi->reqs[0]->scan_plans[0].interval; + for (i = 1; i < pi->n_reqs; i++) { + sr = pi->reqs[i]; + *scan_freq = gcd(sr->scan_plans[0].interval, *scan_freq); + } + if (*scan_freq < INFF_PNO_SCHED_SCAN_MIN_PERIOD) { + inff_dbg(SCAN, "scan period too small, using minimum\n"); + *scan_freq = INFF_PNO_SCHED_SCAN_MIN_PERIOD; + } + + *buckets = NULL; + fw_buckets = kcalloc(pi->n_reqs, sizeof(*fw_buckets), GFP_KERNEL); + if (!fw_buckets) + return -ENOMEM; + + memset(pno_cfg, 0, sizeof(*pno_cfg)); + for (i = 0; i < pi->n_reqs; i++) { + sr = pi->reqs[i]; + chidx = inff_pno_get_bucket_channels(sr, pno_cfg); + if (chidx < 0) { + err = chidx; + goto fail; + } + fw_buckets[i].bucket_end_index = chidx - 1; + fw_buckets[i].bucket_freq_multiple = + sr->scan_plans[0].interval / *scan_freq; + /* assure period is non-zero */ + if (!fw_buckets[i].bucket_freq_multiple) + fw_buckets[i].bucket_freq_multiple = 1; + fw_buckets[i].flag = INFF_PNO_REPORT_NO_BATCH; + } + + if (INFF_SCAN_ON()) { + inff_err("base period=%u\n", *scan_freq); + for (i = 0; i < pi->n_reqs; i++) { + inff_err("[%d] period %u max %u repeat %u flag %x idx %u\n", + i, fw_buckets[i].bucket_freq_multiple, + le16_to_cpu(fw_buckets[i].max_freq_multiple), + fw_buckets[i].repeat, fw_buckets[i].flag, + fw_buckets[i].bucket_end_index); + } + } + *buckets = fw_buckets; + return pi->n_reqs; + +fail: + kfree(fw_buckets); + return err; +} + +static int inff_pno_config_networks(struct inff_if *ifp, + struct inff_pno_info *pi) +{ + struct cfg80211_sched_scan_request *r; + struct cfg80211_match_set *ms; + bool active; + int i, j, err = 0; + + for (i = 0; i < pi->n_reqs; i++) { + r = pi->reqs[i]; + + for (j = 0; j < r->n_match_sets; j++) { + ms = &r->match_sets[j]; + if (ms->ssid.ssid_len) { + active = inff_is_ssid_active(&ms->ssid, r); + err = inff_pno_add_ssid(ifp, &ms->ssid, + active); + } + if (!err && is_valid_ether_addr(ms->bssid)) + err = inff_pno_add_bssid(ifp, ms->bssid); + + if (err < 0) + return err; + } + } 
+ return 0; +} + +static int inff_pno_config_sched_scans(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_pno_info *pi; + struct inff_gscan_config *gscan_cfg; + struct inff_gscan_bucket_config *buckets; + struct inff_pno_config_le pno_cfg; + size_t gsz; + u32 scan_freq; + int err, n_buckets; + + pi = ifp_to_pno(ifp); + n_buckets = inff_pno_prep_fwconfig(pi, &pno_cfg, &buckets, + &scan_freq); + if (n_buckets < 0) + return n_buckets; + + gsz = struct_size(gscan_cfg, bucket, n_buckets); + gscan_cfg = kzalloc(gsz, GFP_KERNEL); + if (!gscan_cfg) { + err = -ENOMEM; + goto free_buckets; + } + + /* clean up everything */ + err = inff_pno_clean(ifp); + if (err < 0) { + iphy_err(drvr, "failed error=%d\n", err); + goto free_gscan; + } + + /* configure pno */ + err = inff_pno_config(ifp, scan_freq, 0, 0); + if (err < 0) + goto free_gscan; + + err = inff_pno_channel_config(ifp, &pno_cfg); + if (err < 0) + goto clean; + + gscan_cfg->version = cpu_to_le16(INFF_GSCAN_CFG_VERSION); + gscan_cfg->retry_threshold = GSCAN_RETRY_THRESHOLD; + gscan_cfg->buffer_threshold = GSCAN_BATCH_NO_THR_SET; + gscan_cfg->flags = INFF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN; + + gscan_cfg->count_of_channel_buckets = n_buckets; + memcpy(gscan_cfg->bucket, buckets, + array_size(n_buckets, sizeof(*buckets))); + + err = inff_fil_iovar_data_set(ifp, "pfn_gscan_cfg", gscan_cfg, gsz); + + if (err < 0) + goto clean; + + /* configure random mac */ + err = inff_pno_set_random(ifp, pi); + if (err < 0) + goto clean; + + err = inff_pno_config_networks(ifp, pi); + if (err < 0) + goto clean; + + /* Enable the PNO */ + err = inff_fil_iovar_int_set(ifp, "pfn", 1); + +clean: + if (err < 0) + inff_pno_clean(ifp); +free_gscan: + kfree(gscan_cfg); +free_buckets: + kfree(buckets); + return err; +} + +int inff_pno_start_sched_scan(struct inff_if *ifp, + struct cfg80211_sched_scan_request *req) +{ + struct inff_pno_info *pi; + int ret; + + inff_dbg(TRACE, "reqid=%llu\n", req->reqid); + + pi = 
ifp_to_pno(ifp); + ret = inff_pno_store_request(pi, req); + if (ret < 0) + return ret; + + ret = inff_pno_config_sched_scans(ifp); + if (ret < 0) { + inff_pno_remove_request(pi, req->reqid); + if (pi->n_reqs) + (void)inff_pno_config_sched_scans(ifp); + return ret; + } + return 0; +} + +int inff_pno_stop_sched_scan(struct inff_if *ifp, u64 reqid) +{ + struct inff_pno_info *pi; + int err; + + inff_dbg(TRACE, "reqid=%llu\n", reqid); + + pi = ifp_to_pno(ifp); + + /* No PNO request */ + if (!pi->n_reqs) + return 0; + + err = inff_pno_remove_request(pi, reqid); + if (err) + return err; + + inff_pno_clean(ifp); + + if (pi->n_reqs) + (void)inff_pno_config_sched_scans(ifp); + + return 0; +} + +int inff_pno_attach(struct inff_cfg80211_info *cfg) +{ + struct inff_pno_info *pi; + + inff_dbg(TRACE, "enter\n"); + pi = kzalloc(sizeof(*pi), GFP_KERNEL); + if (!pi) + return -ENOMEM; + + cfg->pno = pi; + mutex_init(&pi->req_lock); + return 0; +} + +void inff_pno_detach(struct inff_cfg80211_info *cfg) +{ + struct inff_pno_info *pi; + + inff_dbg(TRACE, "enter\n"); + pi = cfg->pno; + cfg->pno = NULL; + + WARN_ON(pi->n_reqs); + mutex_destroy(&pi->req_lock); + kfree(pi); +} + +void inff_pno_wiphy_params(struct wiphy *wiphy, bool gscan) +{ + /* scheduled scan settings */ + wiphy->max_sched_scan_reqs = gscan ? 
INFF_PNO_MAX_BUCKETS : 1; + wiphy->max_sched_scan_ssids = INFF_PNO_MAX_PFN_COUNT; + wiphy->max_match_sets = INFF_PNO_MAX_PFN_COUNT; + wiphy->max_sched_scan_ie_len = INFF_SCAN_IE_LEN_MAX; + wiphy->max_sched_scan_plan_interval = INFF_PNO_SCHED_SCAN_MAX_PERIOD; +} + +u64 inff_pno_find_reqid_by_bucket(struct inff_pno_info *pi, u32 bucket) +{ + u64 reqid = 0; + + mutex_lock(&pi->req_lock); + + if (bucket < pi->n_reqs) + reqid = pi->reqs[bucket]->reqid; + + mutex_unlock(&pi->req_lock); + return reqid; +} + +u32 inff_pno_get_bucket_map(struct inff_pno_info *pi, + struct inff_pno_net_info_le *ni) +{ + struct cfg80211_sched_scan_request *req; + struct cfg80211_match_set *ms; + u32 bucket_map = 0; + int i, j; + + mutex_lock(&pi->req_lock); + for (i = 0; i < pi->n_reqs; i++) { + req = pi->reqs[i]; + + if (!req->n_match_sets) + continue; + for (j = 0; j < req->n_match_sets; j++) { + ms = &req->match_sets[j]; + if (ms->ssid.ssid_len == ni->SSID_len && + !memcmp(ms->ssid.ssid, ni->SSID, ni->SSID_len)) { + bucket_map |= BIT(i); + break; + } + if (is_valid_ether_addr(ms->bssid) && + !memcmp(ms->bssid, ni->bssid, ETH_ALEN)) { + bucket_map |= BIT(i); + break; + } + } + } + mutex_unlock(&pi->req_lock); + return bucket_map; +} + +int pfn_send_network_blob_fw(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + int i, ret; + struct inff_cfg80211_vif *vif; + struct inff_if *ifp; + struct network_blob *network_blob_data; + struct inff_pfn_param pfn_param; + struct inff_pfn *pfn_list_buffer = NULL, *pssidnet; + int inff_pfn_length = 0; + u32 offset; + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + ifp = vif->ifp; + + inff_dbg(TRACE, "Enter\n"); + + ret = inff_fil_cmd_data_set(vif->ifp, + INFF_C_DISASSOC, NULL, 0); + if (ret) { + inff_err("INFF_C_DISASSOC error:%d\n", ret); + return ret; + } + inff_pno_clean(ifp); + + if (inff_feat_is_enabled(ifp, INFF_FEAT_FWSUP)) { + ret = inff_fil_iovar_int_set(ifp, "sup_wpa", 1); 
+ if (ret) { + inff_err("sup_wpa set error:%d\n", ret); + return ret; + } + } + + if (!cfg->pfn_data.count) + return 0; + + pfn_param.flags = (PFN_LIST_ORDER << SORT_CRITERIA_BIT | ENABLE << IMMEDIATE_SCAN_BIT); + pfn_param.bestn = DEFAULT_BESTN; + pfn_param.mscan = DEFAULT_MSCAN; + pfn_param.repeat = DEFAULT_REPEAT; + pfn_param.exp = DEFAULT_EXP; + + if (cfg->pfn_data.pfn_config == PFN_CONFIG_AUTOCONNECT) { + pfn_param.flags |= AUTO_CONNECT_MASK; + + } else if (cfg->pfn_data.pfn_config == PFN_CONFIG_AUTOSWITCH_LISTORDER) { + pfn_param.flags |= AUTO_NET_SWITCH_MASK; + pfn_param.flags |= (PFN_LIST_ORDER << SORT_CRITERIA_BIT); + + } else if (cfg->pfn_data.pfn_config == PFN_CONFIG_AUTOSWITCH_RSSI) { + pfn_param.flags |= AUTO_NET_SWITCH_MASK; + pfn_param.flags |= (PFN_RSSI << SORT_CRITERIA_BIT); + } + + pfn_param.version = cpu_to_le32(PFN_VERSION); + pfn_param.scan_freq = cpu_to_le32(30); + pfn_param.lost_network_timeout = cpu_to_le32(60); + pfn_param.flags = cpu_to_le16(pfn_param.flags); + pfn_param.rssi_margin = cpu_to_le16(10); + pfn_param.slow_freq = cpu_to_le32(0); + + ret = inff_fil_iovar_data_set(ifp, "pfn_set", (void *)&pfn_param, + sizeof(struct inff_pfn_param)); + if (ret) { + inff_err("set pfn_set enable error:%d\n", ret); + return ret; + } + + inff_pfn_length = (cfg->pfn_data.count) * sizeof(struct inff_pfn); + + pfn_list_buffer = kzalloc(inff_pfn_length, GFP_KERNEL); + + if (!pfn_list_buffer) + return -ENOMEM; + + pssidnet = pfn_list_buffer; + network_blob_data = cfg->pfn_data.network_blob_data; + + for (i = 0; i < cfg->pfn_data.count; i++) { + /* Default setting, open, no WPA, no WEP and bss */ + pssidnet->auth = WLAN_AUTH_OPEN; + pssidnet->wpa_auth = WPA_AUTH_DISABLED; + pssidnet->wsec = CRYPTO_ALGO_OFF; + pssidnet->infra = PFN_SSID_INFRA; + pssidnet->flags = 0; + memcpy((char *)pssidnet->ssid.SSID, network_blob_data->ssid, + network_blob_data->ssid_len); + pssidnet->ssid.SSID_len = cpu_to_le32(network_blob_data->ssid_len); + pssidnet->flags = 
cpu_to_le32(pssidnet->flags); + + if (strlen(network_blob_data->psk)) { + memcpy((char *)pssidnet->psk.key, + network_blob_data->psk, WSEC_MAX_PASSWORD_LEN); + pssidnet->psk.key_len = strlen(network_blob_data->psk); + } + + if (network_blob_data->proto == WPA_PROTO_WPA && + network_blob_data->key_mgmt == INFF_KEY_MGMT_ID_WPA) { + pssidnet->wpa_auth = WPA_AUTH_PSK; + + } else if (network_blob_data->proto == WPA_PROTO_RSN && + network_blob_data->key_mgmt == INFF_KEY_MGMT_ID_WPA2) { + pssidnet->wpa_auth = WPA2_AUTH_PSK; + + } else if (network_blob_data->proto == WPA_PROTO_RSN && + network_blob_data->key_mgmt == INFF_KEY_MGMT_ID_SAE) { + pssidnet->wpa_auth = WPA3_AUTH_SAE_PSK; + pssidnet->auth = WLAN_AUTH_SAE; + + } else if (network_blob_data->proto == WPA_PROTO_RSN && + network_blob_data->key_mgmt == INFF_KEY_MGMT_ID_OWE) { + pssidnet->wpa_auth = WPA3_AUTH_OWE; + } + + if (network_blob_data->pairwise_cipher == BIT(CRYPTO_ALGO_AES_CCM)) + pssidnet->wsec = AES_ENABLED; + + else if (network_blob_data->pairwise_cipher == BIT(CRYPTO_ALGO_TKIP)) + pssidnet->wsec = TKIP_ENABLED; + + inff_dbg(TRACE, "ssid %s key_mgmt %d proto %d wsec %d wpa_auth %d auth %d\n", + network_blob_data->ssid, network_blob_data->key_mgmt, + network_blob_data->proto, pssidnet->wsec, + pssidnet->wpa_auth, pssidnet->auth); + pssidnet++; + network_blob_data++; + } + + /* There is a limit in len of data that we can send to fw using an iovar at a time. + * Here max value of cfg->pfn_data.count could be 16 which is exceeding the limit, + * so sending it two times. 
+ */ + if (cfg->pfn_data.count > (INFF_PNO_MAX_PFN_COUNT / 2)) { + offset = sizeof(struct inff_pfn) * (INFF_PNO_MAX_PFN_COUNT / 2); + ret = inff_fil_iovar_data_set(ifp, "pfn_add", (void *)pfn_list_buffer, + offset); + if (ret) { + inff_err("set pfnadd enable error:%d\n", ret); + return ret; + } + + ret = inff_fil_iovar_data_set(ifp, "pfn_add", (void *)pfn_list_buffer + offset, + inff_pfn_length - offset); + if (ret) { + inff_err("set pfnadd enable error:%d\n", ret); + return ret; + } + + } else { + ret = inff_fil_iovar_data_set(ifp, "pfn_add", (void *)pfn_list_buffer, + inff_pfn_length); + } + + if (ret) { + inff_err("set pfnadd enable error:%d\n", ret); + return ret; + } + ret = inff_fil_iovar_int_set(ifp, "pfn", PFN_SET); + if (ret) { + inff_err("set pfn error:%d\n", ret); + return ret; + } + kfree(pfn_list_buffer); + inff_dbg(TRACE, "Exit\n"); + return 0; +} diff --git a/drivers/net/wireless/infineon/inffmac/pno.h b/drivers/net/wireless/infineon/inffmac/pno.h new file mode 100644 index 000000000000..9037107a648c --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/pno.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2016 Broadcom + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_PNO_H +#define INFF_PNO_H + +#define INFF_PNO_SCAN_COMPLETE 1 +#define INFF_PNO_MAX_PFN_COUNT 16 +#define INFF_PNO_SCHED_SCAN_MIN_PERIOD 10 +#define INFF_PNO_SCHED_SCAN_MAX_PERIOD 508 +#define AUTO_CONNECT_MASK 0x0010 +#define AUTO_NET_SWITCH_MASK 0x0002 +#define INFF_PNO_WPA_AUTH_ANY 0xFFFFFFFF +#define MAXNUM_SSID_PER_ADD 16 +#define WSEC_MIN_PASSWORD_LEN 8 +#define WSEC_MAX_PASSWORD_LEN 64 +#define PFN_VERSION 2 +#define PFN_LIST_ORDER 0 +#define PFN_RSSI 1 +#define SORT_CRITERIA_BIT 0 +#define ENABLE 1 +#define IMMEDIATE_SCAN_BIT 3 +#define DEFAULT_BESTN 2 +#define DEFAULT_MSCAN 0 +#define DEFAULT_REPEAT 10 +#define DEFAULT_EXP 2 +#define PFN_SET 1 +#define PFN_UNSET 0 +#define PFN_CONFIG_AND_COUNT_SIZE 2 +#define PFN_SSID_INFRA 1 + +struct inff_pfn_param { + s32 version; + s32 scan_freq; + s32 lost_network_timeout; + s16 flags; + s16 rssi_margin; + u8 bestn; + u8 mscan; + u8 repeat; + u8 exp; + s32 slow_freq; +}; + +struct inff_pfn { + struct inff_ssid_le ssid; /*ssid and its length*/ + s32 flags; /*bit2: hidden*/ + s32 infra; /*BSS Vs IBSS*/ + s32 auth; /*Open Vs Closed*/ + s32 wpa_auth; /*WPA type*/ + s32 wsec; /*wsec value*/ + struct inff_wsec_pmk_le psk; /*Password*/ +}; + +struct pfn_conn_info { + u8 SSID_len; + u8 SSID[IEEE80211_MAX_SSID_LEN]; + u8 BSSID[ETH_ALEN]; + s16 RSSI; + s8 phy_noise; + u16 channel; + s16 SNR; + u8 proto; + int key_mgmt; +}; + +enum { + PFN_CONFIG_AUTOCONNECT, + PFN_CONFIG_AUTOSWITCH_LISTORDER, + PFN_CONFIG_AUTOSWITCH_RSSI, +}; + +/* forward declaration */ +struct inff_pno_info; + +/** + * inff_pno_start_sched_scan - initiate scheduled scan on device. + * + * @ifp: interface object used. + * @req: configuration parameters for scheduled scan. + */ +int inff_pno_start_sched_scan(struct inff_if *ifp, + struct cfg80211_sched_scan_request *req); + +/** + * inff_pno_stop_sched_scan - terminate scheduled scan on device. + * + * @ifp: interface object used. + * @reqid: unique identifier of scan to be stopped. 
+ */ +int inff_pno_stop_sched_scan(struct inff_if *ifp, u64 reqid); + +/** + * inff_pno_wiphy_params - fill scheduled scan parameters in wiphy instance. + * + * @wiphy: wiphy instance to be used. + * @gscan: indicates whether the device has support for g-scan feature. + */ +void inff_pno_wiphy_params(struct wiphy *wiphy, bool gscan); + +/** + * inff_pno_attach - allocate and attach module information. + * + * @cfg: cfg80211 context used. + */ +int inff_pno_attach(struct inff_cfg80211_info *cfg); + +/** + * inff_pno_detach - detach and free module information. + * + * @cfg: cfg80211 context used. + */ +void inff_pno_detach(struct inff_cfg80211_info *cfg); + +/** + * inff_pno_find_reqid_by_bucket - find request id for given bucket index. + * + * @pi: pno instance used. + * @bucket: index of firmware bucket. + */ +u64 inff_pno_find_reqid_by_bucket(struct inff_pno_info *pi, u32 bucket); + +/** + * inff_pno_get_bucket_map - determine bucket map for given netinfo. + * + * @pi: pno instance used. + * @netinfo: netinfo to compare with bucket configuration. + */ +u32 inff_pno_get_bucket_map(struct inff_pno_info *pi, + struct inff_pno_net_info_le *netinfo); + +int pfn_send_network_blob_fw(struct wiphy *wiphy, + struct wireless_dev *wdev); + +#endif /* INFF_PNO_H */ -- 2.25.1 Driver implementation to configure, handle and parse Information Elements of the Wi-Fi frames. 
Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/ie.c | 801 +++++++++++++++++++++ drivers/net/wireless/infineon/inffmac/ie.h | 167 +++++ 2 files changed, 968 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/ie.c create mode 100644 drivers/net/wireless/infineon/inffmac/ie.h diff --git a/drivers/net/wireless/infineon/inffmac/ie.c b/drivers/net/wireless/infineon/inffmac/ie.c new file mode 100644 index 000000000000..b3fa20f960d7 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/ie.c @@ -0,0 +1,801 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "utils.h" +#include "defs.h" +#include "chanspec.h" +#include "hw_ids.h" +#include "core.h" +#include "debug.h" +#include "tracepoint.h" +#include "fwil_types.h" +#include "p2p.h" +#include "btcoex.h" +#include "pno.h" +#include "fwsignal.h" +#include "cfg80211.h" +#include "feature.h" +#include "fwil.h" +#include "proto.h" +#include "vendor.h" +#include "vendor_inf.h" +#include "bus.h" +#include "common.h" +#include "he.h" +#include "eht.h" +#include "twt.h" +#include "offload.h" +#include "pmsr.h" +#include "security.h" + +void inff_clear_assoc_req_ie(struct inff_cfg80211_info *cfg) +{ + struct inff_cfg80211_connect_info *conn_info = cfg_to_conn(cfg); + + kfree(conn_info->req_ie); + conn_info->req_ie = NULL; + conn_info->req_ie_len = 0; +} + +void inff_clear_assoc_resp_ie(struct inff_cfg80211_info *cfg) +{ + struct inff_cfg80211_connect_info *conn_info = cfg_to_conn(cfg); + + kfree(conn_info->resp_ie); + conn_info->resp_ie = NULL; + conn_info->resp_ie_len = 0; +} + +/* Traverse a string of 1-byte tag/1-byte length/variable-length value + * triples, returning a pointer to the substring whose first element + * matches tag 
+ */ +const struct inff_tlv * +inff_parse_tlvs(const void *buf, int buflen, uint key) +{ + const struct inff_tlv *elt = buf; + int totlen = buflen; + + /* find tagged parameter */ + while (totlen >= TLV_HDR_LEN) { + int len = elt->len; + + /* validate remaining totlen */ + if (elt->id == key && (totlen >= (len + TLV_HDR_LEN))) + return elt; + + elt = (struct inff_tlv *)((u8 *)elt + (len + TLV_HDR_LEN)); + totlen -= (len + TLV_HDR_LEN); + } + + return NULL; +} + +/* Is any of the tlvs the expected entry? If + * not update the tlvs buffer pointer/length. + */ +bool +inff_tlv_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, + const u8 *oui, u32 oui_len, u8 type) +{ + /* If the contents match the OUI and the type */ + if (ie[TLV_LEN_OFF] >= oui_len + 1 && + !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) && + type == ie[TLV_BODY_OFF + oui_len]) { + return true; + } + + if (!tlvs) + return false; + /* point to the next ie */ + ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN; + /* calculate the length of the rest of the buffer */ + *tlvs_len -= (int)(ie - *tlvs); + /* update the pointer to the start of the buffer */ + *tlvs = ie; + + return false; +} + +struct inff_vs_tlv * +inff_find_wpaie(const u8 *parse, u32 len) +{ + const struct inff_tlv *ie; + + while ((ie = inff_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) { + if (inff_tlv_has_ie((const u8 *)ie, &parse, &len, + WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE)) + return (struct inff_vs_tlv *)ie; + } + return NULL; +} + +struct inff_vs_tlv * +inff_find_wpsie(const u8 *parse, u32 len) +{ + const struct inff_tlv *ie; + + while ((ie = inff_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) { + if (inff_tlv_has_ie((u8 *)ie, &parse, &len, + WPA_OUI, TLV_OUI_LEN, WPS_OUI_TYPE)) + return (struct inff_vs_tlv *)ie; + } + return NULL; +} + +struct inff_tlv * +inff_find_iwie(const u8 *parse, u32 len) +{ + const struct inff_tlv *ie = NULL; + + /* unfortunately it's too much work to dispose the const cast - inff_parse_tlvs + * is used everywhere and 
changing its prototype to take const qualifier needs + * a massive change to all its callers... + */ + + ie = inff_parse_tlvs(parse, len, WLAN_EID_INTERWORKING); + if (ie) + return (struct inff_tlv *)ie; + return NULL; +} + +s32 +inff_clear_iwie(struct inff_cfg80211_info *cfg, struct inff_if *ifp) +{ + struct ie_set_buffer ie_setbuf = {0}; + + inff_dbg(TRACE, "clear interworking IE\n"); + + memset(&ie_setbuf, 0, sizeof(struct ie_set_buffer)); + + ie_setbuf.ie_buffer.iecount = cpu_to_le32(1); + ie_setbuf.ie_buffer.ie_list[0].ie_data.id = WLAN_EID_INTERWORKING; + ie_setbuf.ie_buffer.ie_list[0].ie_data.len = 0; + + return inff_fil_iovar_data_set(ifp, "ie", &ie_setbuf, sizeof(ie_setbuf)); +} + +s32 +inff_add_iwie(struct inff_cfg80211_info *cfg, struct inff_if *ifp, s32 pktflag, + u8 ie_id, u8 *data, u8 data_len) +{ + int err = 0; + u32 buf_len; + struct ie_set_buffer *ie_setbuf; + + if (ie_id != WLAN_EID_INTERWORKING) { + inff_err("unsupported (id=%d)\n", ie_id); + return -EINVAL; + } + + /* access network options (1 octet) is the mandatory field */ + if (!data || data_len == 0 || data_len > INFF_IW_IES_MAX_BUF_LEN) { + inff_err("wrong interworking IE (len=%d)\n", data_len); + return -EINVAL; + } + + /* Validate the pktflag parameter */ + if (pktflag & ~(INFF_VNDR_IE_CUSTOM_FLAG)) { + inff_err("invalid packet flag 0x%x\n", pktflag); + return -EINVAL; + } + + buf_len = sizeof(struct ie_set_buffer) + data_len - 1; + + /* if already set with previous values, delete it first */ + err = inff_clear_iwie(cfg, ifp); + if (err) + return err; + + ie_setbuf = kmalloc(buf_len, GFP_KERNEL); + if (!ie_setbuf) + return -ENOMEM; + + strscpy(ie_setbuf->cmd, "add", sizeof(ie_setbuf->cmd)); + + /* Buffer contains only 1 IE */ + ie_setbuf->ie_buffer.iecount = cpu_to_le32(1); + /* use VNDR_IE_CUSTOM_FLAG flags for none vendor IE . 
currently fixed value */ + ie_setbuf->ie_buffer.ie_list[0].pktflag = cpu_to_le32(pktflag); + + /* Now, add the IE to the buffer */ + ie_setbuf->ie_buffer.ie_list[0].ie_data.id = WLAN_EID_INTERWORKING; + ie_setbuf->ie_buffer.ie_list[0].ie_data.len = data_len; + /* Returning void here as max data_len can be 8 */ + (void)memcpy((u8 *)&ie_setbuf->ie_buffer.ie_list[0].ie_data.data[0], + data, data_len); + + err = inff_fil_iovar_data_set(ifp, "ie", ie_setbuf, buf_len); + if (err) + inff_err("Failed to add interworking IE\n"); + + kfree(ie_setbuf); + + return err; +} + +bool inff_valid_wpa_oui(u8 *oui, bool is_rsn_ie) +{ + if (is_rsn_ie) + return (memcmp(oui, RSN_OUI, TLV_OUI_LEN) == 0); + + return (memcmp(oui, WPA_OUI, TLV_OUI_LEN) == 0); +} + +bool inff_valid_dpp_suite(u8 *oui) +{ + return (memcmp(oui, WFA_OUI, TLV_OUI_LEN) == 0 && + *(oui + TLV_OUI_LEN) == DPP_AKM_SUITE_TYPE); +} + +s32 +inff_parse_vndr_ies(const u8 *vndr_ie_buf, u32 vndr_ie_len, + struct parsed_vndr_ies *vndr_ies) +{ + struct inff_vs_tlv *vndrie; + struct inff_tlv *ie; + struct parsed_vndr_ie_info *parsed_info; + s32 remaining_len; + + remaining_len = (s32)vndr_ie_len; + memset(vndr_ies, 0, sizeof(*vndr_ies)); + + ie = (struct inff_tlv *)vndr_ie_buf; + while (ie) { + if (ie->id != WLAN_EID_VENDOR_SPECIFIC) + goto next; + vndrie = (struct inff_vs_tlv *)ie; + /* len should be bigger than OUI length + one */ + if (vndrie->len < (VS_IE_FIXED_HDR_LEN - TLV_HDR_LEN + 1)) { + inff_err("invalid vndr ie. length is too small %d\n", + vndrie->len); + goto next; + } + /* if wpa or wme ie, do not add ie */ + if (!memcmp(vndrie->oui, (u8 *)WPA_OUI, TLV_OUI_LEN) && + (vndrie->oui_type == WPA_OUI_TYPE || + vndrie->oui_type == WME_OUI_TYPE)) { + inff_dbg(TRACE, "Found WPA/WME oui. 
Do not add it\n"); + goto next; + } + + parsed_info = &vndr_ies->ie_info[vndr_ies->count]; + + /* save vndr ie information */ + parsed_info->ie_ptr = (char *)vndrie; + parsed_info->ie_len = vndrie->len + TLV_HDR_LEN; + memcpy(&parsed_info->vndrie, vndrie, sizeof(*vndrie)); + + vndr_ies->count++; + + inff_dbg(TRACE, "** OUI %3ph, type 0x%02x\n", + parsed_info->vndrie.oui, + parsed_info->vndrie.oui_type); + + if (vndr_ies->count >= VNDR_IE_PARSE_LIMIT) + break; +next: + remaining_len -= (ie->len + TLV_HDR_LEN); + if (remaining_len <= TLV_HDR_LEN) + ie = NULL; + else + ie = (struct inff_tlv *)(((u8 *)ie) + ie->len + + TLV_HDR_LEN); + } + return 0; +} + +u32 +inff_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd) +{ + strscpy(iebuf, add_del_cmd, VNDR_IE_CMD_LEN); + + put_unaligned_le32(1, &iebuf[VNDR_IE_COUNT_OFFSET]); + + put_unaligned_le32(pktflag, &iebuf[VNDR_IE_PKTFLAG_OFFSET]); + + memcpy(&iebuf[VNDR_IE_VSIE_OFFSET], ie_ptr, ie_len); + + return ie_len + VNDR_IE_HDR_SIZE; +} + +s32 +inff_parse_extension_ies(const u8 *extension_ie_buf, u32 extension_ie_len, + struct parsed_extension_ies *extension_ies) +{ + struct inff_ext_tlv *ext_ie; + struct inff_tlv *ie; + struct parsed_ext_ie_info *parsed_info; + s32 remaining_len; + + remaining_len = (s32)extension_ie_len; + memset(extension_ies, 0, sizeof(*extension_ies)); + + ie = (struct inff_tlv *)extension_ie_buf; + while (ie) { + if (ie->id != WLAN_EID_EXTENSION) + goto next; + ext_ie = (struct inff_ext_tlv *)ie; + + /* len should be bigger than ext_id + one data */ + if (ext_ie->len < 2) { + inff_err("invalid ext_ie ie. length is too small %d\n", + ext_ie->len); + goto next; + } + + /* skip parsing the HE capab, HE_6G_capa & oper IE from upper layer + * to avoid sending it to the FW, as these IEs will be + * added by the FW based on the MAC & PHY capab if HE + * is enabled. 
+ */ + if (ext_ie->ext_id == WLAN_EID_EXT_HE_CAPABILITY || + ext_ie->ext_id == WLAN_EID_EXT_HE_OPERATION || + ext_ie->ext_id == WLAN_EID_EXT_HE_6GHZ_CAPA) + goto next; + + parsed_info = &extension_ies->ie_info[extension_ies->count]; + + parsed_info->ie_ptr = (char *)ext_ie; + parsed_info->ie_len = ext_ie->len + TLV_HDR_LEN; + memcpy(&parsed_info->ie_data, ext_ie, sizeof(*ext_ie)); + + extension_ies->count++; + + inff_dbg(TRACE, "** EXT_IE %d, len 0x%02x EXT_ID: %d\n", + parsed_info->ie_data.id, + parsed_info->ie_data.len, + parsed_info->ie_data.ext_id); + + /* temperory parsing at most 5 EXT_ID, will review it.*/ + if (extension_ies->count >= VNDR_IE_PARSE_LIMIT) + break; +next: + remaining_len -= (ie->len + TLV_HDR_LEN); + if (remaining_len <= TLV_HDR_LEN) + ie = NULL; + else + ie = (struct inff_tlv *)(((u8 *)ie) + ie->len + + TLV_HDR_LEN); + } + return 0; +} + +s32 inff_get_assoc_ies(struct inff_cfg80211_info *cfg, struct inff_if *ifp) +{ + struct inff_pub *drvr = cfg->pub; + struct inff_cfg80211_assoc_ielen_le *assoc_info; + struct inff_cfg80211_connect_info *conn_info = cfg_to_conn(cfg); + struct inff_cfg80211_edcf_acparam edcf_acparam_info[EDCF_AC_COUNT]; + u32 req_len; + u32 resp_len; + u32 flags; + s32 err = 0; + + inff_dbg(CONN, "req: %p, req len (%d) resp: %p resp len (%d)\n", conn_info->req_ie, + conn_info->req_ie_len, conn_info->resp_ie, conn_info->resp_ie_len); + + if (conn_info->req_ie_len && conn_info->resp_ie_len && + conn_info->req_ie && conn_info->resp_ie) + return 0; + + inff_clear_assoc_ies(cfg); + + memset(cfg->extra_buf, '\0', WL_EXTRA_BUF_MAX); + err = inff_fil_iovar_data_get(ifp, "assoc_info", + cfg->extra_buf, WL_ASSOC_INFO_MAX); + if (err) { + iphy_err(drvr, "could not get assoc info (%d)\n", err); + return err; + } + assoc_info = + (struct inff_cfg80211_assoc_ielen_le *)cfg->extra_buf; + req_len = le32_to_cpu(assoc_info->req_len); + resp_len = le32_to_cpu(assoc_info->resp_len); + flags = le32_to_cpu(assoc_info->flags); + if (req_len > 
WL_EXTRA_BUF_MAX || resp_len > WL_EXTRA_BUF_MAX) { + iphy_err(drvr, "invalid lengths in assoc info: req %u resp %u\n", + req_len, resp_len); + return -EINVAL; + } + if (req_len) { + memset(cfg->extra_buf, '\0', WL_EXTRA_BUF_MAX); + err = inff_fil_iovar_data_get(ifp, "assoc_req_ies", + cfg->extra_buf, + WL_ASSOC_INFO_MAX); + if (err) { + iphy_err(drvr, "could not get assoc req (%d)\n", err); + return err; + } + + if (flags & INFF_ASSOC_REQ_IS_REASSOC) + conn_info->req_ie_len = req_len - sizeof(struct dot11_reassoc_req); + else + conn_info->req_ie_len = req_len - sizeof(struct dot11_assoc_req); + + conn_info->req_ie = + kmemdup(cfg->extra_buf, conn_info->req_ie_len, + GFP_KERNEL); + if (!conn_info->req_ie) + conn_info->req_ie_len = 0; + } else { + conn_info->req_ie_len = 0; + conn_info->req_ie = NULL; + } + + /* resp_len is the total length of assoc resp + * which includes 6 bytes of aid/status code/capabilities. + * the assoc_resp_ie length should minus the 6 bytes which starts from rate_ie. 
+ */ + if (resp_len) { + memset(cfg->extra_buf, '\0', WL_EXTRA_BUF_MAX); + err = inff_fil_iovar_data_get(ifp, "assoc_resp_ies", + cfg->extra_buf, + WL_ASSOC_INFO_MAX); + if (err) { + iphy_err(drvr, "could not get assoc resp (%d)\n", err); + return err; + } + conn_info->resp_ie_len = resp_len - sizeof(struct dot11_assoc_resp); + conn_info->resp_ie = + kmemdup(cfg->extra_buf, conn_info->resp_ie_len, + GFP_KERNEL); + if (!conn_info->resp_ie) + conn_info->resp_ie_len = 0; + + err = inff_fil_iovar_data_get(ifp, "wme_ac_sta", + edcf_acparam_info, + sizeof(edcf_acparam_info)); + if (err) { + inff_err("could not get wme_ac_sta (%d)\n", err); + return err; + } + + inff_wifi_prioritize_acparams(edcf_acparam_info, + cfg->ac_priority); + } else { + conn_info->resp_ie_len = 0; + conn_info->resp_ie = NULL; + } + inff_dbg(CONN, "req len (%d) resp len (%d)\n", + conn_info->req_ie_len, conn_info->resp_ie_len); + + return err; +} + +void inff_clear_assoc_ies(struct inff_cfg80211_info *cfg) +{ + struct inff_cfg80211_connect_info *conn_info = cfg_to_conn(cfg); + + kfree(conn_info->req_ie); + conn_info->req_ie = NULL; + conn_info->req_ie_len = 0; + kfree(conn_info->resp_ie); + conn_info->resp_ie = NULL; + conn_info->resp_ie_len = 0; +} + +s32 +inff_config_ap_mgmt_ie(struct inff_cfg80211_vif *vif, + struct cfg80211_beacon_data *beacon) +{ + struct inff_pub *drvr = vif->ifp->drvr; + s32 err; + + /* Set Beacon IEs to FW */ + err = inff_vif_set_mgmt_ie(vif, INFF_VNDR_IE_BEACON_FLAG, + beacon->tail, beacon->tail_len); + if (err) { + iphy_err(drvr, "Set Beacon IE Failed\n"); + return err; + } + inff_dbg(TRACE, "Applied Vndr IEs for Beacon\n"); + + /* Set Probe Response IEs to FW */ + err = inff_vif_set_mgmt_ie(vif, INFF_VNDR_IE_PRBRSP_FLAG, + beacon->proberesp_ies, + beacon->proberesp_ies_len); + if (err) + iphy_err(drvr, "Set Probe Resp IE Failed\n"); + else + inff_dbg(TRACE, "Applied Vndr IEs for Probe Resp\n"); + + /* Set Assoc Response IEs to FW */ + err = inff_vif_set_mgmt_ie(vif, 
INFF_VNDR_IE_ASSOCRSP_FLAG, + beacon->assocresp_ies, + beacon->assocresp_ies_len); + if (err) + inff_err("Set Assoc Resp IE Failed\n"); + else + inff_dbg(TRACE, "Applied Vndr IEs for Assoc Resp\n"); + + return err; +} + +s32 inff_vif_set_mgmt_ie(struct inff_cfg80211_vif *vif, s32 pktflag, + const u8 *vndr_ie_buf, u32 vndr_ie_len) +{ + struct inff_pub *drvr; + struct inff_if *ifp; + struct vif_saved_ie *saved_ie; + s32 err = 0; + u8 *iovar_ie_buf; + u8 *curr_ie_buf; + u8 *mgmt_ie_buf = NULL; + int mgmt_ie_buf_len; + u32 *mgmt_ie_len; + u32 del_add_ie_buf_len = 0; + u32 total_ie_buf_len = 0; + u32 parsed_ie_buf_len = 0; + struct parsed_vndr_ies old_vndr_ies; + struct parsed_vndr_ies new_vndr_ies; + struct parsed_vndr_ie_info *vndrie_info; + s32 i; + u8 *ptr; + int remained_buf_len; + struct parsed_extension_ies new_ext_ies; + struct parsed_extension_ies old_ext_ies; + struct parsed_ext_ie_info *extie_info; + + if (!vif) + return -ENODEV; + ifp = vif->ifp; + drvr = ifp->drvr; + saved_ie = &vif->saved_ie; + + inff_dbg(TRACE, "bsscfgidx %d, pktflag : 0x%02X\n", ifp->bsscfgidx, + pktflag); + iovar_ie_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); + if (!iovar_ie_buf) + return -ENOMEM; + curr_ie_buf = iovar_ie_buf; + switch (pktflag) { + case INFF_VNDR_IE_PRBREQ_FLAG: + mgmt_ie_buf = saved_ie->probe_req_ie; + mgmt_ie_len = &saved_ie->probe_req_ie_len; + mgmt_ie_buf_len = sizeof(saved_ie->probe_req_ie); + break; + case INFF_VNDR_IE_PRBRSP_FLAG: + mgmt_ie_buf = saved_ie->probe_res_ie; + mgmt_ie_len = &saved_ie->probe_res_ie_len; + mgmt_ie_buf_len = sizeof(saved_ie->probe_res_ie); + break; + case INFF_VNDR_IE_BEACON_FLAG: + mgmt_ie_buf = saved_ie->beacon_ie; + mgmt_ie_len = &saved_ie->beacon_ie_len; + mgmt_ie_buf_len = sizeof(saved_ie->beacon_ie); + break; + case INFF_VNDR_IE_ASSOCREQ_FLAG: + mgmt_ie_buf = saved_ie->assoc_req_ie; + mgmt_ie_len = &saved_ie->assoc_req_ie_len; + mgmt_ie_buf_len = sizeof(saved_ie->assoc_req_ie); + break; + case INFF_VNDR_IE_ASSOCRSP_FLAG: + 
mgmt_ie_buf = saved_ie->assoc_res_ie; + mgmt_ie_len = &saved_ie->assoc_res_ie_len; + mgmt_ie_buf_len = sizeof(saved_ie->assoc_res_ie); + break; + default: + err = -EPERM; + iphy_err(drvr, "not suitable type\n"); + goto exit; + } + + if (vndr_ie_len > mgmt_ie_buf_len) { + err = -ENOMEM; + iphy_err(drvr, "extra IE size too big\n"); + goto exit; + } + + /* parse and save new vndr_ie in curr_ie_buff before comparing it */ + if (vndr_ie_buf && vndr_ie_len && curr_ie_buf) { + ptr = curr_ie_buf; + inff_parse_vndr_ies(vndr_ie_buf, vndr_ie_len, &new_vndr_ies); + for (i = 0; i < new_vndr_ies.count; i++) { + vndrie_info = &new_vndr_ies.ie_info[i]; + memcpy(ptr + parsed_ie_buf_len, vndrie_info->ie_ptr, + vndrie_info->ie_len); + parsed_ie_buf_len += vndrie_info->ie_len; + } + inff_parse_extension_ies(vndr_ie_buf, vndr_ie_len, &new_ext_ies); + for (i = 0; i < new_ext_ies.count; i++) { + extie_info = &new_ext_ies.ie_info[i]; + memcpy(ptr + parsed_ie_buf_len, extie_info->ie_ptr, + extie_info->ie_len); + parsed_ie_buf_len += extie_info->ie_len; + } + } + + if (mgmt_ie_buf && *mgmt_ie_len) { + if (parsed_ie_buf_len && parsed_ie_buf_len == *mgmt_ie_len && + (memcmp(mgmt_ie_buf, curr_ie_buf, + parsed_ie_buf_len) == 0)) { + inff_dbg(TRACE, "Previous mgmt IE equals to current IE\n"); + goto exit; + } + + /* parse old vndr_ie */ + inff_parse_vndr_ies(mgmt_ie_buf, *mgmt_ie_len, &old_vndr_ies); + /* parse old ext_ie */ + inff_parse_extension_ies(mgmt_ie_buf, *mgmt_ie_len, &old_ext_ies); + + /* make a command to delete old ie */ + for (i = 0; i < old_vndr_ies.count; i++) { + vndrie_info = &old_vndr_ies.ie_info[i]; + + inff_dbg(TRACE, "DEL ID : %d, Len: %d , OUI:%3ph\n", + vndrie_info->vndrie.id, + vndrie_info->vndrie.len, + vndrie_info->vndrie.oui); + + del_add_ie_buf_len = inff_vndr_ie(curr_ie_buf, pktflag, + vndrie_info->ie_ptr, + vndrie_info->ie_len, + "del"); + curr_ie_buf += del_add_ie_buf_len; + total_ie_buf_len += del_add_ie_buf_len; + } + /* make a command to delete old extension ie 
*/ + for (i = 0; i < old_ext_ies.count; i++) { + extie_info = &old_ext_ies.ie_info[i]; + + inff_dbg(TRACE, "DEL EXT_IE : %d, Len: %d , ext_id:%d\n", + extie_info->ie_data.id, + extie_info->ie_data.len, + extie_info->ie_data.ext_id); + + del_add_ie_buf_len = inff_vndr_ie(curr_ie_buf, + pktflag | INFF_VNDR_IE_CUSTOM_FLAG, + extie_info->ie_ptr, + extie_info->ie_len, + "del"); + curr_ie_buf += del_add_ie_buf_len; + total_ie_buf_len += del_add_ie_buf_len; + } + } + + *mgmt_ie_len = 0; + /* Add if there is any extra IE */ + if (mgmt_ie_buf && parsed_ie_buf_len) { + ptr = mgmt_ie_buf; + + remained_buf_len = mgmt_ie_buf_len; + + /* make a command to add new ie */ + for (i = 0; i < new_vndr_ies.count; i++) { + vndrie_info = &new_vndr_ies.ie_info[i]; + + /* verify remained buf size before copy data */ + if (remained_buf_len < (vndrie_info->vndrie.len + + VNDR_IE_VSIE_OFFSET)) { + iphy_err(drvr, "no space in mgmt_ie_buf: len left %d", + remained_buf_len); + break; + } + remained_buf_len -= (vndrie_info->ie_len + + VNDR_IE_VSIE_OFFSET); + + inff_dbg(TRACE, "ADDED ID : %d, Len: %d, OUI:%3ph\n", + vndrie_info->vndrie.id, + vndrie_info->vndrie.len, + vndrie_info->vndrie.oui); + + del_add_ie_buf_len = inff_vndr_ie(curr_ie_buf, pktflag, + vndrie_info->ie_ptr, + vndrie_info->ie_len, + "add"); + + /* save the parsed IE in wl struct */ + memcpy(ptr + (*mgmt_ie_len), vndrie_info->ie_ptr, + vndrie_info->ie_len); + *mgmt_ie_len += vndrie_info->ie_len; + + curr_ie_buf += del_add_ie_buf_len; + total_ie_buf_len += del_add_ie_buf_len; + } + /* make a command to add new EXT ie */ + for (i = 0; i < new_ext_ies.count; i++) { + extie_info = &new_ext_ies.ie_info[i]; + + /* verify remained buf size before copy data */ + if (remained_buf_len < (extie_info->ie_data.len + + VNDR_IE_VSIE_OFFSET)) { + iphy_err(drvr, "no space in mgmt_ie_buf: len left %d", + remained_buf_len); + break; + } + remained_buf_len -= (extie_info->ie_len + + VNDR_IE_VSIE_OFFSET); + + inff_dbg(TRACE, "ADDED EXT ID : %d, Len: 
%d, OUI:%d\n", + extie_info->ie_data.id, + extie_info->ie_data.len, + extie_info->ie_data.ext_id); + + del_add_ie_buf_len = inff_vndr_ie(curr_ie_buf, + pktflag | INFF_VNDR_IE_CUSTOM_FLAG, + extie_info->ie_ptr, + extie_info->ie_len, + "add"); + + /* save the parsed IE in wl struct */ + memcpy(ptr + (*mgmt_ie_len), extie_info->ie_ptr, + extie_info->ie_len); + *mgmt_ie_len += extie_info->ie_len; + + curr_ie_buf += del_add_ie_buf_len; + total_ie_buf_len += del_add_ie_buf_len; + } + } + if (total_ie_buf_len) { + err = inff_fil_bsscfg_data_set(ifp, "vndr_ie", iovar_ie_buf, + total_ie_buf_len); + if (err) + iphy_err(drvr, "vndr ie set error : %d\n", err); + } + +exit: + kfree(iovar_ie_buf); + return err; +} + +s32 inff_vif_clear_mgmt_ies(struct inff_cfg80211_vif *vif) +{ + static const s32 pktflags[] = { + INFF_VNDR_IE_PRBRSP_FLAG, + INFF_VNDR_IE_BEACON_FLAG, + INFF_VNDR_IE_ASSOCRSP_FLAG + }; + int i; + + if (vif->wdev.iftype == NL80211_IFTYPE_AP) + inff_vif_set_mgmt_ie(vif, INFF_VNDR_IE_ASSOCRSP_FLAG, NULL, 0); + else + inff_vif_set_mgmt_ie(vif, INFF_VNDR_IE_PRBREQ_FLAG, NULL, 0); + + for (i = 0; i < ARRAY_SIZE(pktflags); i++) + inff_vif_set_mgmt_ie(vif, pktflags[i], NULL, 0); + + memset(&vif->saved_ie, 0, sizeof(vif->saved_ie)); + return 0; +} + +/* Is any of the tlvs the expected entry? If + * not update the tlvs buffer pointer/length. 
+ */ +bool +wl_cfgoce_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, + const u8 *oui, u32 oui_len, u8 type) +{ + /* If the contents match the OUI and the type */ + if (ie[TLV_LEN_OFF] >= oui_len + 1 && + !memcmp(&ie[TLV_BODY_OFF], oui, oui_len) && + type == ie[TLV_BODY_OFF + oui_len]) { + return true; + } + + if (!tlvs) + return false; + /* point to the next ie */ + ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN; + /* calculate the length of the rest of the buffer */ + *tlvs_len -= (int)(ie - *tlvs); + /* update the pointer to the start of the buffer */ + *tlvs = ie; + + return false; +} diff --git a/drivers/net/wireless/infineon/inffmac/ie.h b/drivers/net/wireless/infineon/inffmac/ie.h new file mode 100644 index 000000000000..6716eb81eba0 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/ie.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_IE_H +#define INFF_IE_H + +#define DOT11_MGMT_HDR_LEN 24 /* d11 management header len */ +#define DOT11_BCN_PRB_FIXED_LEN 12 /* beacon/probe fixed length */ + +#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */ +#define WPA_OUI_TYPE 1 +#define RSN_OUI "\x00\x0F\xAC" /* RSN OUI */ +#define WME_OUI_TYPE 2 +#define WPS_OUI_TYPE 4 +#define WFA_OUI_TYPE_MBO_OCE 0x16 + +#define VS_IE_FIXED_HDR_LEN 6 +#define WPA_IE_VERSION_LEN 2 +#define WPA_IE_MIN_OUI_LEN 4 +#define WPA_IE_SUITE_COUNT_LEN 2 + +#define VNDR_IE_CMD_LEN 4 /* length of the set command + * string :"add", "del" (+ NUL) + */ +#define VNDR_IE_COUNT_OFFSET 4 +#define VNDR_IE_PKTFLAG_OFFSET 8 +#define VNDR_IE_VSIE_OFFSET 12 +#define VNDR_IE_HDR_SIZE 12 +#define VNDR_IE_PARSE_LIMIT 5 + +/* flags */ +#define INFF_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */ + +struct dot11_assoc_req { + u16 capability; /* capability information */ + u16 listen; /* listen interval */ +}; + +struct dot11_reassoc_req { + u16 capability; /* capability information */ + u16 listen; /* listen interval */ + u8 ap[ETH_ALEN]; /* Current AP address */ +}; + +struct dot11_assoc_resp { + u16 capability; /* capability information */ + u16 status; /* status code */ + u16 aid; /* association ID */ +}; + +/** + * struct inff_tlv - tag_ID/length/value_buffer tuple. + * + * @id: tag identifier. + * @len: number of bytes in value buffer. + * @data: value buffer. + */ +struct inff_tlv { + u8 id; + u8 len; + u8 data[]; +}; + +/* Vendor specific ie. 
id = 221, oui and type defines exact ie */ +struct inff_vs_tlv { + u8 id; + u8 len; + u8 oui[3]; + u8 oui_type; +}; + +struct parsed_vndr_ie_info { + u8 *ie_ptr; + u32 ie_len; /* total length including id & length field */ + struct inff_vs_tlv vndrie; +}; + +struct parsed_vndr_ies { + u32 count; + struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT]; +}; + +struct inff_ext_tlv { + u8 id; + u8 len; + u8 ext_id; +}; + +struct parsed_ext_ie_info { + u8 *ie_ptr; + u32 ie_len; /* total length including id & length field */ + struct inff_ext_tlv ie_data; +}; + +struct parsed_extension_ies { + u32 count; + struct parsed_ext_ie_info ie_info[VNDR_IE_PARSE_LIMIT]; +}; + +struct ie_info { + u32 pktflag; /* bitmask indicating which packet(s) contain this IE */ + struct inff_tlv ie_data; /* IE data */ +} __packed; + +struct ie_buf { + s32 iecount; /* number of entries in the ie_list[] array */ + struct ie_info ie_list[1]; /* variable size list of ie_info_t structs */ +} __packed; + +struct ie_set_buffer { + char cmd[VNDR_IE_CMD_LEN]; /* ie IOVar set command : "add" + NUL */ + struct ie_buf ie_buffer; /* buffer containing IE list information */ +} __packed; + +/* Check whether the given IE looks like WFA OCE IE. 
+ */ +#define wl_cfgoce_is_oce_ie(ie, tlvs, len) \ + wl_cfgoce_has_ie(ie, tlvs, len, \ + (const u8 *)WFA_OUI, TLV_OUI_LEN, WFA_OUI_TYPE_MBO_OCE) + +bool +wl_cfgoce_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, + const u8 *oui, u32 oui_len, u8 type); + +const struct inff_tlv *inff_parse_tlvs(const void *buf, int buflen, uint key); + +bool inff_tlv_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len, + const u8 *oui, u32 oui_len, u8 type); + +struct inff_vs_tlv *inff_find_wpaie(const u8 *parse, u32 len); + +struct inff_vs_tlv *inff_find_wpsie(const u8 *parse, u32 len); + +struct inff_tlv *inff_find_iwie(const u8 *parse, u32 len); + +s32 inff_clear_iwie(struct inff_cfg80211_info *cfg, struct inff_if *ifp); + +s32 inff_add_iwie(struct inff_cfg80211_info *cfg, struct inff_if *ifp, + s32 pktflag, u8 ie_id, u8 *data, u8 data_len); + +bool inff_valid_wpa_oui(u8 *oui, bool is_rsn_ie); +bool inff_valid_dpp_suite(u8 *oui); + +s32 inff_parse_vndr_ies(const u8 *vndr_ie_buf, u32 vndr_ie_len, struct parsed_vndr_ies *vndr_ies); + +u32 inff_vndr_ie(u8 *iebuf, s32 pktflag, u8 *ie_ptr, u32 ie_len, s8 *add_del_cmd); + +s32 inff_parse_extension_ies(const u8 *extension_ie_buf, u32 extension_ie_len, + struct parsed_extension_ies *extension_ies); + +s32 inff_get_assoc_ies(struct inff_cfg80211_info *cfg, struct inff_if *ifp); + +void inff_clear_assoc_ies(struct inff_cfg80211_info *cfg); + +s32 inff_vif_set_mgmt_ie(struct inff_cfg80211_vif *vif, s32 pktflag, + const u8 *vndr_ie_buf, u32 vndr_ie_len); +s32 inff_vif_clear_mgmt_ies(struct inff_cfg80211_vif *vif); +s32 inff_config_ap_mgmt_ie(struct inff_cfg80211_vif *vif, struct cfg80211_beacon_data *beacon); + +void inff_clear_assoc_req_ie(struct inff_cfg80211_info *cfg); +void inff_clear_assoc_resp_ie(struct inff_cfg80211_info *cfg); + +#endif /* INFF_IE_H */ -- 2.25.1 Driver implementation of the SCAN functionality to discover all active BSS in the medium. Also support collecting the channel survey dump. 
Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/scan.c | 1025 ++++++++++++++++++ drivers/net/wireless/infineon/inffmac/scan.h | 127 +++ 2 files changed, 1152 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/scan.c create mode 100644 drivers/net/wireless/infineon/inffmac/scan.h diff --git a/drivers/net/wireless/infineon/inffmac/scan.c b/drivers/net/wireless/infineon/inffmac/scan.c new file mode 100644 index 000000000000..5a1d8f1fc8ed --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/scan.c @@ -0,0 +1,1025 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "utils.h" +#include "defs.h" +#include "chanspec.h" +#include "hw_ids.h" +#include "core.h" +#include "debug.h" +#include "tracepoint.h" +#include "fwil_types.h" +#include "p2p.h" +#include "btcoex.h" +#include "pno.h" +#include "fwsignal.h" +#include "cfg80211.h" +#include "feature.h" +#include "fwil.h" +#include "proto.h" +#include "vendor.h" +#include "vendor_inf.h" +#include "bus.h" +#include "common.h" +#include "he.h" +#include "eht.h" +#include "twt.h" +#include "offload.h" +#include "pmsr.h" +#include "security.h" + +static int inff_parse_dump_obss(char *buf, struct inff_dump_survey *survey) +{ + int i; + char *token; + char delim[] = "\n "; + unsigned long val; + int err = 0; + + token = strsep(&buf, delim); + while (token) { + if (!strcmp(token, "OBSS")) { + for (i = 0; i < OBSS_TOKEN_IDX; i++) + token = strsep(&buf, delim); + err = kstrtoul(token, 10, &val); + if (err) + break; + survey->obss = val; + } + + if (!strcmp(token, "IBSS")) { + for (i = 0; i < IBSS_TOKEN_IDX; i++) + token = strsep(&buf, delim); + err = kstrtoul(token, 10, &val); + if (err) + break; + survey->ibss = val; + } + + if 
(!strcmp(token, "TXDur")) { + for (i = 0; i < TX_TOKEN_IDX; i++) + token = strsep(&buf, delim); + err = kstrtoul(token, 10, &val); + if (err) + break; + survey->tx = val; + } + + if (!strcmp(token, "Category")) { + for (i = 0; i < CTG_TOKEN_IDX; i++) + token = strsep(&buf, delim); + err = kstrtoul(token, 10, &val); + if (err) + break; + survey->no_ctg = val; + } + + if (!strcmp(token, "Packet")) { + for (i = 0; i < PKT_TOKEN_IDX; i++) + token = strsep(&buf, delim); + err = kstrtoul(token, 10, &val); + if (err) + break; + survey->no_pckt = val; + } + + if (!strcmp(token, "Opp(time):")) { + for (i = 0; i < IDLE_TOKEN_IDX; i++) + token = strsep(&buf, delim); + err = kstrtoul(token, 10, &val); + if (err) + break; + survey->idle = val; + } + + token = strsep(&buf, delim); + } + + return err; +} + +static int inff_dump_obss(struct inff_if *ifp, struct cca_msrmnt_query req, + struct inff_dump_survey *survey) +{ + struct cca_stats_n_flags *results; + char *buf; + int err; + + buf = kzalloc(sizeof(char) * INFF_DCMD_MEDLEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memcpy(buf, &req, sizeof(struct cca_msrmnt_query)); + err = inff_fil_iovar_data_get(ifp, "dump_obss", + buf, INFF_DCMD_MEDLEN); + if (err) { + inff_err("dump_obss error (%d)\n", err); + err = -EINVAL; + goto exit; + } + results = (struct cca_stats_n_flags *)(buf); + + if (req.msrmnt_query) + inff_parse_dump_obss(results->buf, survey); + +exit: + kfree(buf); + return err; +} + +int +inff_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev, + int idx, struct survey_info *info) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + struct inff_dump_survey survey = {}; + struct ieee80211_supported_band *band; + enum nl80211_band band_id; + struct cca_msrmnt_query req; + u32 noise; + int err; + + inff_dbg(TRACE, "Enter: channel idx=%d\n", idx); + + if (!inff_is_apmode(ifp->vif)) + return -ENOENT; + + /* Do not run survey when VIF in 
CONNECTING / CONNECTED states */ + if ((test_bit(INFF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state)) || + (test_bit(INFF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))) { + return -EBUSY; + } + + for (band_id = 0; band_id < NUM_NL80211_BANDS; band_id++) { + /* FIXME SWLINUX-4979, the firmware cannot report the survey info of 6GHz, + * once the firmware can report it in someday. + * This condition can be removed. + */ + if (band_id == NL80211_BAND_6GHZ) + continue; + + band = wiphy->bands[band_id]; + if (!band) + continue; + if (idx >= band->n_channels) { + idx -= band->n_channels; + continue; + } + + info->channel = &band->channels[idx]; + break; + } + if (band_id == NUM_NL80211_BANDS) + return -ENOENT; + + /* Setting current channel to the requested channel */ + info->filled = 0; + if (inff_set_channel(cfg, info->channel)) + return 0; + + /* Disable mpc */ + inff_set_mpc(ifp, 0); + + /* Set interface up, explicitly. */ + err = inff_fil_cmd_int_set(ifp, INFF_C_UP, 1); + if (err) { + inff_err("set interface up failed, err = %d\n", err); + goto exit; + } + + /* Get noise value */ + err = inff_fil_cmd_int_get(ifp, INFF_C_GET_PHY_NOISE, &noise); + if (err) { + inff_err("Get Phy Noise failed, use dummy value\n"); + noise = CHAN_NOISE_DUMMY; + } + + /* Start Measurement for obss stats on current channel */ + req.msrmnt_query = 0; + req.time_req = ACS_MSRMNT_DELAY; + err = inff_dump_obss(ifp, req, &survey); + if (err) + goto exit; + + /* Add 10 ms for IOVAR completion */ + msleep(ACS_MSRMNT_DELAY + 10); + + /* Issue IOVAR to collect measurement results */ + req.msrmnt_query = 1; + err = inff_dump_obss(ifp, req, &survey); + if (err) + goto exit; + + info->noise = noise; + info->time = ACS_MSRMNT_DELAY; + info->time_busy = ACS_MSRMNT_DELAY - survey.idle; + info->time_rx = survey.obss + survey.ibss + survey.no_ctg + + survey.no_pckt; + info->time_tx = survey.tx; + info->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | + SURVEY_INFO_TIME_BUSY | SURVEY_INFO_TIME_RX | + 
SURVEY_INFO_TIME_TX; + + inff_dbg(INFO, "OBSS dump: channel %d: survey duration %d\n", + ieee80211_frequency_to_channel(info->channel->center_freq), + ACS_MSRMNT_DELAY); + inff_dbg(INFO, "noise(%d) busy(%llu) rx(%llu) tx(%llu)\n", + info->noise, info->time_busy, info->time_rx, info->time_tx); + +exit: + if (!inff_is_apmode(ifp->vif)) + inff_set_mpc(ifp, 1); + return err; +} + +int +inff_cfg80211_dump_survey_2(struct wiphy *wiphy, struct net_device *ndev, + int idx, struct survey_info *info) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + struct ieee80211_supported_band *band; + struct cca_survey_req *survey = NULL; + struct cca_survey *secs; + struct cfg80211_chan_def chandef; + struct wireless_dev *wdev; + enum nl80211_band band_id; + int err = 0; + u32 noise; + + inff_dbg(TRACE, "Enter: channel idx=%d\n", idx); + + /* Do not run survey when VIF in CONNECTING / CONNECTED states */ + if ((test_bit(INFF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state))) + return -EBUSY; + + for (band_id = 0; band_id < NUM_NL80211_BANDS; band_id++) { + /* FIXME SWLINUX-4979, the firmware cannot report the survey info of 6GHz, + * once the firmware can report it in someday. + * This condition can be removed. + */ + if (band_id == NL80211_BAND_6GHZ) + continue; + + band = wiphy->bands[band_id]; + if (!band) + continue; + if (idx >= band->n_channels) { + idx -= band->n_channels; + continue; + } + + info->channel = &band->channels[idx]; + break; + } + if (band_id == NUM_NL80211_BANDS) + return -ENOENT; + + /* Set interface up, explicitly. 
*/ + err = inff_fil_cmd_int_set(ifp, INFF_C_UP, 1); + if (err) { + inff_err("set interface up failed, err = %d\n", err); + return err; + } + + /* Get noise value */ + err = inff_fil_cmd_int_get(ifp, INFF_C_GET_PHY_NOISE, &noise); + if (err) { + inff_err("Get Phy Noise failed, use dummy value\n"); + noise = CHAN_NOISE_DUMMY; + } + + survey = kzalloc(sizeof(*survey), GFP_KERNEL); + if (!survey) + return -ENOMEM; + + survey->chanspec = channel_to_chanspec(&cfg->d11inf, info->channel); + err = inff_fil_iovar_data_get(ifp, "cca_survey_dump", + survey, sizeof(struct cca_survey_req)); + if (err) { + inff_err("cca_survey_dump error (%d)\n", err); + err = -EINVAL; + goto exit; + } + + secs = &survey->secs[0]; + + info->noise = noise; + info->time = secs->usecs; + info->time_busy = secs->ibss + secs->txdur + secs->obss + + secs->noctg + secs->nopkt; + info->time_rx = secs->obss + secs->ibss + secs->noctg + + secs->nopkt; + info->time_tx = secs->txdur; + info->time_bss_rx = secs->ibss; + + info->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | + SURVEY_INFO_TIME_BUSY | SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_BSS_RX; + + wdev = ndev->ieee80211_ptr; + memset(&chandef, 0, sizeof(chandef)); + err = inff_cfg80211_get_channel(wiphy, wdev, 0, &chandef); + if (unlikely(err)) { + inff_err("Get chandef error: %d\n", err); + err = -EINVAL; + goto exit; + } + if (chandef.chan->center_freq == info->channel->center_freq) + info->filled = info->filled | SURVEY_INFO_IN_USE; + + inff_dbg(INFO, "survey dump: channel %d: survey duration %llu\n", + ieee80211_frequency_to_channel(info->channel->center_freq), + info->time); + inff_dbg(INFO, "noise(%d) busy(%llu) rx(%llu) tx(%llu)\n", + info->noise, info->time_busy, info->time_rx, info->time_tx); + +exit: + kfree(survey); + return err; +} + +s32 +inff_run_escan(struct inff_cfg80211_info *cfg, struct inff_if *ifp, + struct cfg80211_scan_request *request) +{ + s32 params_size = INFF_SCAN_PARAMS_V2_FIXED_SIZE + + 
offsetof(struct inff_escan_params_le, params_v2_le); + struct inff_escan_params_le *params; + s32 err = 0; + + inff_dbg(SCAN, "E-SCAN START\n"); + + if (request) { + /* Allocate space for populating ssids in struct */ + params_size += sizeof(u32) * ((request->n_channels + 1) / 2); + + /* Allocate space for populating ssids in struct */ + params_size += sizeof(struct inff_ssid_le) * request->n_ssids; + } + + params = kzalloc(params_size, GFP_KERNEL); + if (!params) { + err = -ENOMEM; + goto exit; + } + WARN_ON_ONCE(params_size + sizeof("escan") >= INFF_DCMD_MEDLEN); + inff_escan_prep(cfg, ¶ms->params_v2_le, request); + + params->version = cpu_to_le32(INFF_ESCAN_REQ_VERSION_V2); + + if (!inff_feat_is_enabled(ifp, INFF_FEAT_SCAN_V2)) { + struct inff_escan_params_le *params_v1; + + params_size -= INFF_SCAN_PARAMS_V2_FIXED_SIZE; + params_size += INFF_SCAN_PARAMS_FIXED_SIZE; + params_v1 = kzalloc(params_size, GFP_KERNEL); + if (!params_v1) { + err = -ENOMEM; + goto exit_params; + } + params_v1->version = cpu_to_le32(INFF_ESCAN_REQ_VERSION); + inff_scan_params_v2_to_v1(¶ms->params_v2_le, ¶ms_v1->params_le); + kfree(params); + params = params_v1; + } + + params->action = cpu_to_le16(WL_ESCAN_ACTION_START); + params->sync_id = cpu_to_le16(0x1234); + + err = inff_fil_iovar_data_set(ifp, "escan", params, params_size); + if (err) { + if (err == -EBUSY) + inff_dbg(INFO, "system busy : escan canceled\n"); + else + iphy_err(ifp->drvr, "error (%d)\n", err); + } + +exit_params: + kfree(params); +exit: + return err; +} + +static s32 +inff_do_escan(struct inff_if *ifp, struct cfg80211_scan_request *request) +{ + struct inff_cfg80211_info *cfg = ifp->drvr->config; + s32 err; + struct inff_scan_results *results; + struct escan_info *escan = &cfg->escan_info; + + inff_dbg(SCAN, "Enter\n"); + escan->ifp = ifp; + escan->wiphy = cfg->wiphy; + escan->escan_state = WL_ESCAN_STATE_SCANNING; + + inff_scan_config_mpc(ifp, 0); + results = (struct inff_scan_results *)cfg->escan_info.escan_buf; + 
results->version = 0;
+	results->count = 0;
+	results->buflen = WL_ESCAN_RESULTS_FIXED_SIZE;
+
+	err = escan->run(cfg, ifp, request);
+	if (err)
+		inff_scan_config_mpc(ifp, 1);
+	return err;
+}
+
+/*
+ * inff_start_internal_escan() - kick off a driver-initiated (PNO/scheduled)
+ * escan, aborting any scan already in flight.
+ *
+ * @fwmap records which PNO buckets this internal scan serves; it is only
+ * stored once the escan was successfully started.
+ */
+int inff_start_internal_escan(struct inff_if *ifp, u32 fwmap,
+			      struct cfg80211_scan_request *request)
+{
+	struct inff_cfg80211_info *cfg = ifp->drvr->config;
+	int err;
+
+	if (test_bit(INFF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
+		if (cfg->int_escan_map)
+			inff_dbg(SCAN, "aborting internal scan: map=%u\n",
+				 cfg->int_escan_map);
+		/* Abort any on-going scan */
+		inff_abort_scanning(cfg);
+	}
+
+	inff_dbg(SCAN, "start internal scan: map=%u\n", fwmap);
+	set_bit(INFF_SCAN_STATUS_BUSY, &cfg->scan_status);
+	cfg->escan_info.run = inff_run_escan;
+	err = inff_do_escan(ifp, request);
+	if (err) {
+		clear_bit(INFF_SCAN_STATUS_BUSY, &cfg->scan_status);
+		return err;
+	}
+	cfg->int_escan_map = fwmap;
+	return 0;
+}
+
+/* Abort a pending scan (internal or cfg80211) and report it as aborted. */
+void inff_abort_scanning(struct inff_cfg80211_info *cfg)
+{
+	struct escan_info *escan = &cfg->escan_info;
+
+	set_bit(INFF_SCAN_STATUS_ABORT, &cfg->scan_status);
+	if (cfg->int_escan_map || cfg->scan_request) {
+		escan->escan_state = WL_ESCAN_STATE_IDLE;
+		inff_notify_escan_complete(cfg, escan->ifp, true, true);
+	}
+	clear_bit(INFF_SCAN_STATUS_BUSY, &cfg->scan_status);
+	clear_bit(INFF_SCAN_STATUS_ABORT, &cfg->scan_status);
+}
+
+/*
+ * Escan watchdog: runs when the escan timeout timer fired because the
+ * firmware never signalled scan completion; flush collected results and
+ * report the scan as aborted.
+ *
+ * container_of() on a valid work pointer cannot yield NULL, so the
+ * previous "if (!cfg) return;" check was dead code and has been dropped.
+ */
+static void inff_cfg80211_escan_timeout_worker(struct work_struct *work)
+{
+	struct inff_cfg80211_info *cfg =
+		container_of(work, struct inff_cfg80211_info,
+			     escan_timeout_work);
+
+	inff_inform_bss(cfg);
+	inff_notify_escan_complete(cfg, cfg->escan_info.ifp, true, true);
+}
+
+void inff_escan_prep(struct inff_cfg80211_info *cfg,
+		     struct inff_scan_params_v2_le *params_le,
+		     struct cfg80211_scan_request *request)
+{
+	u32 n_ssids;
+	u32 n_channels;
+	s32 i;
+	s32 offset;
+	u16 chanspec;
+	char *ptr;
+	int length;
+	struct inff_ssid_le ssid_le;
+
+	eth_broadcast_addr(params_le->bssid);
+
+	length =
INFF_SCAN_PARAMS_V2_FIXED_SIZE; + + params_le->version = cpu_to_le16(INFF_SCAN_PARAMS_VERSION_V2); + params_le->bss_type = DOT11_BSSTYPE_ANY; + params_le->scan_type = cpu_to_le32(INFF_SCANTYPE_ACTIVE); + params_le->channel_num = 0; + params_le->nprobes = cpu_to_le32(-1); + params_le->active_time = cpu_to_le32(-1); + params_le->passive_time = cpu_to_le32(-1); + params_le->home_time = cpu_to_le32(-1); + memset(¶ms_le->ssid_le, 0, sizeof(params_le->ssid_le)); + + /* Scan abort */ + if (!request) { + length += sizeof(u16); + params_le->channel_num = cpu_to_le32(1); + params_le->channel_list[0] = cpu_to_le16(-1); + params_le->length = cpu_to_le16(length); + return; + } + + n_ssids = request->n_ssids; + n_channels = request->n_channels; + + /* Copy channel array if applicable */ + inff_dbg(SCAN, "### List of channelspecs to scan ### %d\n", + n_channels); + if (n_channels > 0) { + length += roundup(sizeof(u16) * n_channels, sizeof(u32)); + for (i = 0; i < n_channels; i++) { + chanspec = channel_to_chanspec(&cfg->d11inf, + request->channels[i]); + inff_dbg(SCAN, "Chan : %d, Channel spec: %x\n", + request->channels[i]->hw_value, chanspec); + params_le->channel_list[i] = cpu_to_le16(chanspec); + } + } else { + inff_dbg(SCAN, "Scanning all channels\n"); + } + + /* Copy ssid array if applicable */ + inff_dbg(SCAN, "### List of SSIDs to scan ### %d\n", n_ssids); + if (n_ssids > 0) { + offset = offsetof(struct inff_scan_params_v2_le, channel_list) + + n_channels * sizeof(u16); + offset = roundup(offset, sizeof(u32)); + length += sizeof(ssid_le) * n_ssids; + ptr = (char *)params_le + offset; + for (i = 0; i < n_ssids; i++) { + memset(&ssid_le, 0, sizeof(ssid_le)); + ssid_le.SSID_len = + cpu_to_le32(request->ssids[i].ssid_len); + memcpy(ssid_le.SSID, request->ssids[i].ssid, + request->ssids[i].ssid_len); + if (!ssid_le.SSID_len) + inff_dbg(SCAN, "%d: Broadcast scan\n", i); + else + inff_dbg(SCAN, "%d: scan for %.32s size=%d\n", + i, ssid_le.SSID, ssid_le.SSID_len); + memcpy(ptr, 
&ssid_le, sizeof(ssid_le)); + ptr += sizeof(ssid_le); + } + } else { + inff_dbg(SCAN, "Performing passive scan\n"); + params_le->scan_type = cpu_to_le32(INFF_SCANTYPE_PASSIVE); + } + params_le->length = cpu_to_le16(length); + /* Adding mask to channel numbers */ + params_le->channel_num = + cpu_to_le32((n_ssids << INFF_SCAN_PARAMS_NSSID_SHIFT) | + (n_channels & INFF_SCAN_PARAMS_COUNT_MASK)); +} + +s32 +inff_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_pub *drvr = cfg->pub; + struct inff_cfg80211_vif *vif; + s32 err = 0; + struct inff_tlv *interworking_ie = NULL; + + inff_dbg(TRACE, "Enter\n"); + vif = container_of(request->wdev, struct inff_cfg80211_vif, wdev); + if (!check_vif_up(vif)) + return -EIO; + + if (test_bit(INFF_SCAN_STATUS_BUSY, &cfg->scan_status)) { + iphy_err(drvr, "Scanning already: status (%lu)\n", + cfg->scan_status); + return -EAGAIN; + } + if (test_bit(INFF_SCAN_STATUS_ABORT, &cfg->scan_status)) { + iphy_err(drvr, "Scanning being aborted: status (%lu)\n", + cfg->scan_status); + return -EAGAIN; + } + if (test_bit(INFF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) { + iphy_err(drvr, "Scanning suppressed: status (%lu)\n", + cfg->scan_status); + return -EAGAIN; + } + if (test_bit(INFF_VIF_STATUS_CONNECTING, &vif->sme_state)) { + iphy_err(drvr, "Connecting: status (%lu)\n", vif->sme_state); + return -EAGAIN; + } + + inff_dbg(SCAN, "START ESCAN\n"); + + cfg->scan_request = request; + set_bit(INFF_SCAN_STATUS_BUSY, &cfg->scan_status); + + interworking_ie = inff_find_iwie(request->ie, request->ie_len); + if (interworking_ie) { + err = inff_add_iwie(cfg, vif->ifp, + INFF_VNDR_IE_CUSTOM_FLAG, + interworking_ie->id, + interworking_ie->data, + interworking_ie->len); + if (err) + inff_err("Failed to add interworking IE"); + } else { + /* we have to clear IW IE */ + inff_clear_iwie(cfg, vif->ifp); + } + + cfg->escan_info.run = inff_run_escan; + err = 
inff_p2p_scan_prep(wiphy, request, vif); + if (err) + goto scan_out; + + /* If scan req comes for p2p0, send it over primary I/F */ + if (vif == cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif) + vif = cfg->p2p.bss_idx[P2PAPI_BSSCFG_PRIMARY].vif; + + err = inff_do_escan(vif->ifp, request); + if (err) + goto scan_out; + + /* Arm scan timeout timer */ + mod_timer(&cfg->escan_timeout, + jiffies + msecs_to_jiffies(INFF_ESCAN_TIMER_INTERVAL_MS)); + + return 0; + +scan_out: + iphy_err(drvr, "scan error (%d)\n", err); + clear_bit(INFF_SCAN_STATUS_BUSY, &cfg->scan_status); + cfg->scan_request = NULL; + return err; +} + +static s32 +inff_cfg80211_escan_handler(struct inff_if *ifp, + const struct inff_event_msg *e, void *data) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_cfg80211_info *cfg = drvr->config; + s32 status; + struct inff_escan_result_le *escan_result_le; + u32 escan_buflen; + struct inff_bss_info_le *bss_info_le; + struct inff_bss_info_le *bss = NULL; + u32 bi_length; + struct inff_scan_results *list; + u32 i; + bool aborted; + + status = e->status; + + if (status == INFF_E_STATUS_ABORT) + goto exit; + + if (!test_bit(INFF_SCAN_STATUS_BUSY, &cfg->scan_status)) { + iphy_err(drvr, "scan not ready, bsscfgidx=%d\n", + ifp->bsscfgidx); + return -EPERM; + } + + if (status == INFF_E_STATUS_PARTIAL) { + inff_dbg(SCAN, "ESCAN Partial result\n"); + if (e->datalen < sizeof(*escan_result_le)) { + iphy_err(drvr, "invalid event data length\n"); + goto exit; + } + escan_result_le = (struct inff_escan_result_le *)data; + if (!escan_result_le) { + iphy_err(drvr, "Invalid escan result (NULL pointer)\n"); + goto exit; + } + escan_buflen = le32_to_cpu(escan_result_le->buflen); + if (escan_buflen > INFF_ESCAN_BUF_SIZE || + escan_buflen > e->datalen || + escan_buflen < sizeof(*escan_result_le)) { + iphy_err(drvr, "Invalid escan buffer length: %d\n", + escan_buflen); + goto exit; + } + if (le16_to_cpu(escan_result_le->bss_count) != 1) { + iphy_err(drvr, "Invalid bss_count %d: 
ignoring\n", + escan_result_le->bss_count); + goto exit; + } + bss_info_le = &escan_result_le->bss_info_le; + + if (inff_p2p_scan_finding_common_channel(cfg, bss_info_le)) + goto exit; + + if (!cfg->int_escan_map && !cfg->scan_request) { + inff_dbg(SCAN, "result without cfg80211 request\n"); + goto exit; + } + + bi_length = le32_to_cpu(bss_info_le->length); + if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) { + iphy_err(drvr, "Ignoring invalid bss_info length: %d\n", + bi_length); + goto exit; + } + + if (!(cfg_to_wiphy(cfg)->interface_modes & + BIT(NL80211_IFTYPE_ADHOC))) { + if (le16_to_cpu(bss_info_le->capability) & + WLAN_CAPABILITY_IBSS) { + iphy_err(drvr, "Ignoring IBSS result\n"); + goto exit; + } + } + + list = (struct inff_scan_results *) + cfg->escan_info.escan_buf; + if (bi_length > INFF_ESCAN_BUF_SIZE - list->buflen) { + iphy_err(drvr, "Buffer is too small: ignoring\n"); + goto exit; + } + + for (i = 0; i < list->count; i++) { + bss = bss ? (struct inff_bss_info_le *) + ((unsigned char *)bss + + le32_to_cpu(bss->length)) : list->bss_info_le; + if (inff_compare_update_same_bss(cfg, bss, + bss_info_le)) + goto exit; + } + memcpy(&cfg->escan_info.escan_buf[list->buflen], bss_info_le, + bi_length); + list->version = le32_to_cpu(bss_info_le->version); + list->buflen += bi_length; + list->count++; + } else { + cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + if (inff_p2p_scan_finding_common_channel(cfg, NULL)) + goto exit; + if (cfg->int_escan_map || cfg->scan_request) { + inff_inform_bss(cfg); + aborted = status != INFF_E_STATUS_SUCCESS; + inff_notify_escan_complete(cfg, ifp, aborted, false); + } else { + inff_dbg(SCAN, "Ignored scan complete result 0x%x\n", + status); + } + } +exit: + return 0; +} + +static void inff_escan_timeout(struct timer_list *t) +{ + struct inff_cfg80211_info *cfg = + timer_container_of(cfg, t, escan_timeout); + struct inff_pub *drvr = cfg->pub; + + if (cfg->int_escan_map || cfg->scan_request) { + iphy_err(drvr, 
"timer expired\n"); + schedule_work(&cfg->escan_timeout_work); + } +} + +void inff_init_escan(struct inff_cfg80211_info *cfg) +{ + inff_fweh_register(cfg->pub, INFF_E_ESCAN_RESULT, + inff_cfg80211_escan_handler); + cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE; + /* Init scan_timeout timer */ + timer_setup(&cfg->escan_timeout, inff_escan_timeout, 0); + INIT_WORK(&cfg->escan_timeout_work, + inff_cfg80211_escan_timeout_worker); +} + +void inff_scan_params_v2_to_v1(struct inff_scan_params_v2_le *params_v2_le, + struct inff_scan_params_le *params_le) +{ + size_t params_size; + u32 ch; + int n_channels, n_ssids; + + memcpy(¶ms_le->ssid_le, ¶ms_v2_le->ssid_le, + sizeof(params_le->ssid_le)); + memcpy(¶ms_le->bssid, ¶ms_v2_le->bssid, + sizeof(params_le->bssid)); + + params_le->bss_type = params_v2_le->bss_type; + params_le->scan_type = le32_to_cpu(params_v2_le->scan_type); + params_le->nprobes = params_v2_le->nprobes; + params_le->active_time = params_v2_le->active_time; + params_le->passive_time = params_v2_le->passive_time; + params_le->home_time = params_v2_le->home_time; + params_le->channel_num = params_v2_le->channel_num; + + ch = le32_to_cpu(params_v2_le->channel_num); + n_channels = ch & INFF_SCAN_PARAMS_COUNT_MASK; + n_ssids = ch >> INFF_SCAN_PARAMS_NSSID_SHIFT; + + params_size = sizeof(u16) * n_channels; + if (n_ssids > 0) { + params_size = roundup(params_size, sizeof(u32)); + params_size += sizeof(struct inff_ssid_le) * n_ssids; + } + + memcpy(¶ms_le->channel_list[0], + ¶ms_v2_le->channel_list[0], params_size); +} + +int +inff_cfg80211_sched_scan_start(struct wiphy *wiphy, + struct net_device *ndev, + struct cfg80211_sched_scan_request *req) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = cfg->pub; + + inff_dbg(SCAN, "Enter: n_match_sets=%d n_ssids=%d\n", + req->n_match_sets, req->n_ssids); + + if (test_bit(INFF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) { + iphy_err(drvr, 
"Scanning suppressed: status=%lu\n", + cfg->scan_status); + return -EAGAIN; + } + + if (req->n_match_sets <= 0) { + inff_dbg(SCAN, "invalid number of matchsets specified: %d\n", + req->n_match_sets); + return -EINVAL; + } + + return inff_pno_start_sched_scan(ifp, req); +} + +int inff_cfg80211_sched_scan_stop(struct wiphy *wiphy, + struct net_device *ndev, u64 reqid) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(ndev); + + inff_dbg(SCAN, "enter\n"); + inff_pno_stop_sched_scan(ifp, reqid); + if (cfg->int_escan_map) + inff_notify_escan_complete(cfg, ifp, true, true); + return 0; +} + +struct cfg80211_scan_request * +inff_alloc_internal_escan_request(struct wiphy *wiphy, u32 n_netinfo) +{ + struct cfg80211_scan_request *req; + size_t req_size; + size_t size_sanity = ~0; + + if (n_netinfo > ((size_sanity - sizeof(*req)) / + (sizeof(req->channels[0]) + sizeof(*req->ssids)))) { + inff_err("requesting a huge count:%d\n", n_netinfo); + return NULL; + } + + req_size = sizeof(*req) + + n_netinfo * sizeof(req->channels[0]) + + n_netinfo * sizeof(*req->ssids); + + req = kzalloc(req_size, GFP_KERNEL); + if (req) { + req->wiphy = wiphy; + req->ssids = (void *)(&req->channels[0]) + + n_netinfo * sizeof(req->channels[0]); + } + return req; +} + +void inff_scan_config_mpc(struct inff_if *ifp, int mpc) +{ + if (inff_feat_is_quirk_enabled(ifp, INFF_FEAT_QUIRK_NEED_MPC)) + inff_set_mpc(ifp, mpc); +} + +s32 inff_notify_escan_complete(struct inff_cfg80211_info *cfg, + struct inff_if *ifp, bool aborted, + bool fw_abort) +{ + struct inff_pub *drvr = cfg->pub; + struct inff_scan_params_v2_le params_v2_le; + struct cfg80211_scan_request *scan_request; + u64 reqid; + u32 bucket; + s32 err = 0; + + inff_dbg(SCAN, "Enter\n"); + + /* clear scan request, because the FW abort can cause a second call */ + /* to this functon and might cause a double cfg80211_scan_done */ + scan_request = cfg->scan_request; + cfg->scan_request = NULL; + + 
timer_delete_sync(&cfg->escan_timeout); + + if (fw_abort) { + /* Do a scan abort to stop the driver's scan engine */ + inff_dbg(SCAN, "ABORT scan in firmware\n"); + + inff_escan_prep(cfg, ¶ms_v2_le, NULL); + + /* E-Scan (or anyother type) can be aborted by SCAN */ + if (inff_feat_is_enabled(ifp, INFF_FEAT_SCAN_V2)) { + err = inff_fil_cmd_data_set(ifp, INFF_C_SCAN, + ¶ms_v2_le, + sizeof(params_v2_le)); + } else { + struct inff_scan_params_le params_le; + + inff_scan_params_v2_to_v1(¶ms_v2_le, ¶ms_le); + err = inff_fil_cmd_data_set(ifp, INFF_C_SCAN, + ¶ms_le, + sizeof(params_le)); + } + + if (err) + iphy_err(drvr, "Scan abort failed\n"); + } + + inff_scan_config_mpc(ifp, 1); + + /* + * e-scan can be initiated internally + * which takes precedence. + */ + if (cfg->int_escan_map) { + inff_dbg(SCAN, "scheduled scan completed (%x)\n", + cfg->int_escan_map); + while (cfg->int_escan_map) { + bucket = __ffs(cfg->int_escan_map); + cfg->int_escan_map &= ~BIT(bucket); + reqid = inff_pno_find_reqid_by_bucket(cfg->pno, + bucket); + if (!aborted) { + inff_dbg(SCAN, "report results: reqid=%llu\n", + reqid); + cfg80211_sched_scan_results(cfg_to_wiphy(cfg), + reqid); + } + } + } else if (scan_request) { + struct cfg80211_scan_info info = { + .aborted = aborted, + }; + + inff_dbg(SCAN, "ESCAN Completed scan: %s\n", + aborted ? 
"Aborted" : "Done"); + cfg80211_scan_done(scan_request, &info); + } + if (!test_and_clear_bit(INFF_SCAN_STATUS_BUSY, &cfg->scan_status)) + inff_dbg(SCAN, "Scan complete, probably P2P scan\n"); + + return err; +} + +int inff_internal_escan_add_info(struct cfg80211_scan_request *req, + u8 *ssid, u8 ssid_len, u8 channel) +{ + struct ieee80211_channel *chan; + enum nl80211_band band; + int freq, i; + + if (channel <= CH_MAX_2G_CHANNEL) + band = NL80211_BAND_2GHZ; + else + band = NL80211_BAND_5GHZ; + + freq = ieee80211_channel_to_frequency(channel, band); + if (!freq) + return -EINVAL; + + chan = ieee80211_get_channel(req->wiphy, freq); + if (!chan) + return -EINVAL; + + for (i = 0; i < req->n_channels; i++) { + if (req->channels[i] == chan) + break; + } + if (i == req->n_channels) { + req->n_channels++; + req->channels[i] = chan; + } + + for (i = 0; i < req->n_ssids; i++) { + if (req->ssids[i].ssid_len == ssid_len && + !memcmp(req->ssids[i].ssid, ssid, ssid_len)) + break; + } + if (i == req->n_ssids) { + memcpy(req->ssids[req->n_ssids].ssid, ssid, ssid_len); + req->ssids[req->n_ssids++].ssid_len = ssid_len; + } + return 0; +} diff --git a/drivers/net/wireless/infineon/inffmac/scan.h b/drivers/net/wireless/infineon/inffmac/scan.h new file mode 100644 index 000000000000..b1805a43c845 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/scan.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_SCAN_H +#define INFF_SCAN_H + +#define INFF_SCAN_IE_LEN_MAX 2048 + +#define INFF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS 320 +#define INFF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS 400 +#define INFF_SCAN_JOIN_ACTIVE_DWELL_TIME_MS_6E 80 +#define INFF_SCAN_JOIN_PASSIVE_DWELL_TIME_MS_6E 130 +#define INFF_SCAN_JOIN_PROBE_INTERVAL_MS 20 + +#define INFF_SCAN_CHANNEL_TIME 40 +#define INFF_SCAN_UNASSOC_TIME 40 +#define INFF_SCAN_PASSIVE_TIME 120 + +/* Dump obss definitions */ +#define ACS_MSRMNT_DELAY 80 +#define CHAN_NOISE_DUMMY (-80) +#define OBSS_TOKEN_IDX 15 +#define IBSS_TOKEN_IDX 15 +#define TX_TOKEN_IDX 14 +#define CTG_TOKEN_IDX 13 +#define PKT_TOKEN_IDX 15 +#define IDLE_TOKEN_IDX 12 + +struct inff_dump_survey { + u32 obss; + u32 ibss; + u32 no_ctg; + u32 no_pckt; + u32 tx; + u32 idle; +}; + +struct cca_stats_n_flags { + u32 msrmnt_time; /* Time for Measurement (msec) */ + u32 msrmnt_done; /* flag set when measurement complete */ + char buf[]; +}; + +struct cca_msrmnt_query { + u32 msrmnt_query; + u32 time_req; +}; + +enum cca_survey_config { + CCA_SURVEY_READ_CLEAR = 1, /* previous data only */ + CCA_SURVEY_READ = 2, /* sum for each of data */ + CCA_SURVEY_RESET = 3, /* Set for each of data */ +}; + +struct cca_survey { + u32 usecs; /**< millisecs spent sampling this channel */ + u32 ibss; /**< millisecs spent ibss */ + u32 obss; /**< millisecs spent obss */ + u32 txdur; /**< millisecs spent txdur */ + u32 noctg; /**< millisecs spent 802.11 of unknown type */ + u32 nopkt; /**< millisecs spent non 802.11 */ + u32 PM; /**< usecs MAC spent in doze mode for PM */ +}; + +struct cca_survey_req { + u16 chanspec; /**< Which channel? 
*/ + u16 num_secs; /**< How many secs worth of data */ + struct cca_survey secs[1]; /**< Data */ + enum cca_survey_config config; /**< enum */ +}; + +struct escan_info { + u32 escan_state; + u8 *escan_buf; + struct wiphy *wiphy; + struct inff_if *ifp; + s32 (*run)(struct inff_cfg80211_info *cfg, struct inff_if *ifp, + struct cfg80211_scan_request *request); +}; + +int inff_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev, + int idx, struct survey_info *info); + +int inff_cfg80211_dump_survey_2(struct wiphy *wiphy, struct net_device *ndev, + int idx, struct survey_info *info); + +s32 inff_run_escan(struct inff_cfg80211_info *cfg, struct inff_if *ifp, + struct cfg80211_scan_request *request); + +s32 inff_cfg80211_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request); + +int inff_start_internal_escan(struct inff_if *ifp, u32 fwmap, + struct cfg80211_scan_request *request); + +void inff_escan_prep(struct inff_cfg80211_info *cfg, + struct inff_scan_params_v2_le *params_le, + struct cfg80211_scan_request *request); + +void inff_init_escan(struct inff_cfg80211_info *cfg); + +int inff_cfg80211_sched_scan_stop(struct wiphy *wiphy, + struct net_device *ndev, u64 reqid); + +int inff_cfg80211_sched_scan_start(struct wiphy *wiphy, + struct net_device *ndev, + struct cfg80211_sched_scan_request *req); + +void inff_scan_params_v2_to_v1(struct inff_scan_params_v2_le *params_v2_le, + struct inff_scan_params_le *params_le); + +struct cfg80211_scan_request *inff_alloc_internal_escan_request(struct wiphy *wiphy, u32 n_netinfo); + +int inff_internal_escan_add_info(struct cfg80211_scan_request *req, + u8 *ssid, u8 ssid_len, u8 channel); + +s32 inff_notify_escan_complete(struct inff_cfg80211_info *cfg, + struct inff_if *ifp, bool aborted, + bool fw_abort); +void inff_scan_config_mpc(struct inff_if *ifp, int mpc); +void inff_abort_scanning(struct inff_cfg80211_info *cfg); + +#endif /* INFF_SCAN_H */ -- 2.25.1 Implements the functions for creating, managing
and deleting various type of WLAN interfaces. Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/interface.c | 523 ++++++++++++++++++ .../net/wireless/infineon/inffmac/interface.h | 80 +++ 2 files changed, 603 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/interface.c create mode 100644 drivers/net/wireless/infineon/inffmac/interface.h diff --git a/drivers/net/wireless/infineon/inffmac/interface.c b/drivers/net/wireless/infineon/inffmac/interface.c new file mode 100644 index 000000000000..9fa4a78dd000 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/interface.c @@ -0,0 +1,523 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "utils.h" +#include "defs.h" +#include "chanspec.h" +#include "hw_ids.h" +#include "core.h" +#include "debug.h" +#include "tracepoint.h" +#include "fwil_types.h" +#include "p2p.h" +#include "btcoex.h" +#include "pno.h" +#include "fwsignal.h" +#include "cfg80211.h" +#include "feature.h" +#include "fwil.h" +#include "proto.h" +#include "vendor.h" +#include "vendor_inf.h" +#include "bus.h" +#include "common.h" +#include "he.h" +#include "eht.h" +#include "twt.h" +#include "offload.h" +#include "pmsr.h" + +bool inff_is_apmode_operating(struct wiphy *wiphy) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_cfg80211_vif *vif; + bool ret = false; + + list_for_each_entry(vif, &cfg->vif_list, list) { + if (inff_is_apmode(vif) && + test_bit(INFF_VIF_STATUS_AP_CREATED, &vif->sme_state)) + ret = true; + } + + return ret; +} + +bool inff_is_apmode(struct inff_cfg80211_vif *vif) +{ + enum nl80211_iftype iftype; + + iftype = vif->wdev.iftype; + return iftype == NL80211_IFTYPE_AP || iftype == NL80211_IFTYPE_P2P_GO; +} + +bool 
inff_is_ibssmode(struct inff_cfg80211_vif *vif) +{ + return vif->wdev.iftype == NL80211_IFTYPE_ADHOC; +} + +bool check_vif_up(struct inff_cfg80211_vif *vif) +{ + if (!test_bit(INFF_VIF_STATUS_READY, &vif->sme_state)) { + inff_dbg(INFO, "device is not ready : status (%lu)\n", + vif->sme_state); + return false; + } + return true; +} + +enum nl80211_iftype inff_cfg80211_get_iftype(struct inff_if *ifp) +{ + struct wireless_dev *wdev = &ifp->vif->wdev; + + return wdev->iftype; +} + +static int inff_get_first_free_bsscfgidx(struct inff_pub *drvr) +{ + int bsscfgidx; + + for (bsscfgidx = 0; bsscfgidx < INFF_MAX_IFS; bsscfgidx++) { + /* bsscfgidx 1 is reserved for legacy P2P */ + if (bsscfgidx == 1) + continue; + if (!drvr->iflist[bsscfgidx]) + return bsscfgidx; + } + + return -ENOMEM; +} + +static void inff_set_vif_sta_macaddr(struct inff_if *ifp, u8 *mac_addr) +{ + u8 mac_idx = ifp->drvr->sta_mac_idx; + + /* set difference MAC address with locally administered bit */ + memcpy(mac_addr, ifp->mac_addr, ETH_ALEN); + mac_addr[0] |= 0x02; + mac_addr[3] ^= mac_idx ? 
0xC0 : 0xA0; + mac_idx++; + mac_idx = mac_idx % 2; + ifp->drvr->sta_mac_idx = mac_idx; +} + +static int inff_cfg80211_request_sta_if(struct inff_if *ifp, u8 *macaddr) +{ + struct wl_interface_create_v1 iface_v1; + struct wl_interface_create_v2 iface_v2; + struct wl_interface_create_v3 iface_v3; + u32 iface_create_ver; + int err; + + /* interface_create version 1 */ + memset(&iface_v1, 0, sizeof(iface_v1)); + iface_v1.ver = WL_INTERFACE_CREATE_VER_1; + iface_v1.flags = WL_INTERFACE_CREATE_STA | + WL_INTERFACE_MAC_USE; + if (!is_zero_ether_addr(macaddr)) + memcpy(iface_v1.mac_addr, macaddr, ETH_ALEN); + else + inff_set_vif_sta_macaddr(ifp, iface_v1.mac_addr); + + err = inff_fil_iovar_data_get(ifp, "interface_create", + &iface_v1, + sizeof(iface_v1)); + if (err) { + inff_dbg(INFO, "failed to create interface(v1), err=%d\n", + err); + } else { + inff_dbg(INFO, "interface created(v1)\n"); + return 0; + } + + /* interface_create version 2 */ + memset(&iface_v2, 0, sizeof(iface_v2)); + iface_v2.ver = WL_INTERFACE_CREATE_VER_2; + iface_v2.flags = WL_INTERFACE_MAC_USE; + iface_v2.iftype = WL_INTERFACE_CREATE_STA; + if (!is_zero_ether_addr(macaddr)) + memcpy(iface_v2.mac_addr, macaddr, ETH_ALEN); + else + inff_set_vif_sta_macaddr(ifp, iface_v2.mac_addr); + + err = inff_fil_iovar_data_get(ifp, "interface_create", + &iface_v2, + sizeof(iface_v2)); + if (err) { + inff_dbg(INFO, "failed to create interface(v2), err=%d\n", + err); + } else { + inff_dbg(INFO, "interface created(v2)\n"); + return 0; + } + + /* interface_create version 3+ */ + /* get supported version from firmware side */ + iface_create_ver = 0; + err = inff_fil_bsscfg_int_get(ifp, "interface_create", + &iface_create_ver); + if (err) { + inff_err("fail to get supported version, err=%d\n", err); + return -EOPNOTSUPP; + } + + switch (iface_create_ver) { + case WL_INTERFACE_CREATE_VER_3: + memset(&iface_v3, 0, sizeof(iface_v3)); + iface_v3.ver = WL_INTERFACE_CREATE_VER_3; + iface_v3.flags = WL_INTERFACE_MAC_USE; + 
iface_v3.iftype = WL_INTERFACE_CREATE_STA; + if (!is_zero_ether_addr(macaddr)) + memcpy(iface_v3.mac_addr, macaddr, ETH_ALEN); + else + inff_set_vif_sta_macaddr(ifp, iface_v3.mac_addr); + + err = inff_fil_iovar_data_get(ifp, "interface_create", + &iface_v3, + sizeof(iface_v3)); + + if (!err) + inff_dbg(INFO, "interface created(v3)\n"); + break; + default: + inff_err("not support interface create(v%d)\n", + iface_create_ver); + err = -EOPNOTSUPP; + break; + } + + if (err) { + inff_info("station interface creation failed (%d)\n", + err); + return -EIO; + } + + return 0; +} + +static int inff_cfg80211_request_ap_if(struct inff_if *ifp) +{ + struct wl_interface_create_v1 iface_v1; + struct wl_interface_create_v2 iface_v2; + struct wl_interface_create_v3 iface_v3; + u32 iface_create_ver; + struct inff_pub *drvr = ifp->drvr; + struct inff_mbss_ssid_le mbss_ssid_le; + int bsscfgidx; + int err; + + /* interface_create version 1 */ + memset(&iface_v1, 0, sizeof(iface_v1)); + iface_v1.ver = WL_INTERFACE_CREATE_VER_1; + iface_v1.flags = WL_INTERFACE_CREATE_AP | + WL_INTERFACE_MAC_USE; + + inff_set_vif_sta_macaddr(ifp, iface_v1.mac_addr); + + err = inff_fil_iovar_data_get(ifp, "interface_create", + &iface_v1, + sizeof(iface_v1)); + if (err) { + inff_dbg(INFO, "failed to create interface(v1), err=%d\n", + err); + } else { + inff_dbg(INFO, "interface created(v1)\n"); + return 0; + } + + /* interface_create version 2 */ + memset(&iface_v2, 0, sizeof(iface_v2)); + iface_v2.ver = WL_INTERFACE_CREATE_VER_2; + iface_v2.flags = WL_INTERFACE_MAC_USE; + iface_v2.iftype = WL_INTERFACE_CREATE_AP; + + inff_set_vif_sta_macaddr(ifp, iface_v2.mac_addr); + + err = inff_fil_iovar_data_get(ifp, "interface_create", + &iface_v2, + sizeof(iface_v2)); + if (err) { + inff_dbg(INFO, "failed to create interface(v2), err=%d\n", + err); + } else { + inff_dbg(INFO, "interface created(v2)\n"); + return 0; + } + + /* interface_create version 3+ */ + /* get supported version from firmware side */ + 
iface_create_ver = 0; + err = inff_fil_bsscfg_int_get(ifp, "interface_create", + &iface_create_ver); + if (err) { + inff_err("fail to get supported version, err=%d\n", err); + return -EOPNOTSUPP; + } + + switch (iface_create_ver) { + case WL_INTERFACE_CREATE_VER_3: + memset(&iface_v3, 0, sizeof(iface_v3)); + iface_v3.ver = WL_INTERFACE_CREATE_VER_3; + iface_v3.flags = WL_INTERFACE_MAC_USE; + iface_v3.iftype = WL_INTERFACE_CREATE_AP; + inff_set_vif_sta_macaddr(ifp, iface_v3.mac_addr); + + err = inff_fil_iovar_data_get(ifp, "interface_create", + &iface_v3, + sizeof(iface_v3)); + + if (!err) + inff_dbg(INFO, "interface created(v3)\n"); + break; + default: + inff_err("not support interface create(v%d)\n", + iface_create_ver); + err = -EOPNOTSUPP; + break; + } + + if (err) { + inff_info("Does not support interface_create (%d)\n", + err); + memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le)); + bsscfgidx = inff_get_first_free_bsscfgidx(ifp->drvr); + if (bsscfgidx < 0) + return bsscfgidx; + + mbss_ssid_le.bsscfgidx = cpu_to_le32(bsscfgidx); + mbss_ssid_le.SSID_len = cpu_to_le32(5); + sprintf(mbss_ssid_le.SSID, "ssid%d", bsscfgidx); + + err = inff_fil_bsscfg_data_set(ifp, "bsscfg:ssid", &mbss_ssid_le, + sizeof(mbss_ssid_le)); + + if (err < 0) + iphy_err(drvr, "setting ssid failed %d\n", err); + } + + return err; +} + +/** + * inff_apsta_add_vif() - create a new AP or STA virtual interface + * + * @wiphy: wiphy device of new interface. + * @name: name of the new interface. + * @params: contains mac address for AP or STA device. + * @type: interface type. 
+ * + * Return: pointer to new vif on success, ERR_PTR(-errno) if not + */ +struct wireless_dev *inff_apsta_add_vif(struct wiphy *wiphy, const char *name, + struct vif_params *params, + enum nl80211_iftype type) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + struct inff_pub *drvr = cfg->pub; + struct inff_cfg80211_vif *vif; + int err; + + if (type != NL80211_IFTYPE_STATION && type != NL80211_IFTYPE_AP) + return ERR_PTR(-EINVAL); + + if (inff_cfg80211_vif_event_armed(cfg)) + return ERR_PTR(-EBUSY); + + inff_dbg(INFO, "Adding vif \"%s\"\n", name); + + vif = inff_alloc_vif(cfg, type); + if (IS_ERR(vif)) + return (struct wireless_dev *)vif; + + inff_cfg80211_arm_vif_event(cfg, vif); + + if (type == NL80211_IFTYPE_STATION) + err = inff_cfg80211_request_sta_if(ifp, params->macaddr); + else + err = inff_cfg80211_request_ap_if(ifp); + if (err) { + inff_cfg80211_arm_vif_event(cfg, NULL); + goto fail; + } + + /* wait for firmware event */ + err = inff_cfg80211_wait_vif_event(cfg, INFF_E_IF_ADD, + INFF_VIF_EVENT_TIMEOUT); + inff_cfg80211_arm_vif_event(cfg, NULL); + if (!err) { + iphy_err(drvr, "timeout occurred\n"); + err = -EIO; + goto fail; + } + + /* interface created in firmware */ + ifp = vif->ifp; + if (!ifp) { + iphy_err(drvr, "no if pointer provided\n"); + err = -ENOENT; + goto fail; + } + + strscpy(ifp->ndev->name, name, sizeof(ifp->ndev->name)); + err = inff_net_attach(ifp, true); + if (err) { + iphy_err(drvr, "Registering netdevice failed\n"); + free_netdev(ifp->ndev); + goto fail; + } + + return &ifp->vif->wdev; + +fail: + inff_free_vif(vif); + return ERR_PTR(err); +} + +/** + * inff_mon_add_vif() - create monitor mode virtual interface + * + * @wiphy: wiphy device of new interface. + * @name: name of the new interface. 
+ * + * Return: pointer to new vif on success, ERR_PTR(-errno) if not + */ +struct wireless_dev *inff_mon_add_vif(struct wiphy *wiphy, const char *name) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_cfg80211_vif *vif; + struct net_device *ndev; + struct inff_if *ifp; + int err; + + if (cfg->pub->mon_if) { + err = -EEXIST; + goto err_out; + } + + vif = inff_alloc_vif(cfg, NL80211_IFTYPE_MONITOR); + if (IS_ERR(vif)) { + err = PTR_ERR(vif); + goto err_out; + } + + ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN, ether_setup); + if (!ndev) { + err = -ENOMEM; + goto err_free_vif; + } + ndev->type = ARPHRD_IEEE80211_RADIOTAP; + ndev->ieee80211_ptr = &vif->wdev; + ndev->needs_free_netdev = true; + ndev->priv_destructor = inff_cfg80211_free_netdev; + SET_NETDEV_DEV(ndev, wiphy_dev(cfg->wiphy)); + + ifp = netdev_priv(ndev); + ifp->vif = vif; + ifp->ndev = ndev; + ifp->drvr = cfg->pub; + + vif->ifp = ifp; + vif->wdev.netdev = ndev; + + err = inff_net_mon_attach(ifp); + if (err) { + inff_err("Failed to attach %s device\n", ndev->name); + free_netdev(ndev); + goto err_free_vif; + } + + cfg->pub->mon_if = ifp; + + return &vif->wdev; + +err_free_vif: + inff_free_vif(vif); +err_out: + return ERR_PTR(err); +} + +int inff_mon_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct net_device *ndev = wdev->netdev; + + ndev->netdev_ops->ndo_stop(ndev); + + inff_net_detach(ndev, true); + + cfg->pub->mon_if = NULL; + + return 0; +} + +int inff_cfg80211_del_apsta_iface(struct wiphy *wiphy, struct wireless_dev *wdev) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct net_device *ndev = wdev->netdev; + struct inff_if *ifp = netdev_priv(ndev); + struct inff_pub *drvr = cfg->pub; + int ret; + int err; + + inff_cfg80211_arm_vif_event(cfg, ifp->vif); + + err = inff_fil_bsscfg_data_set(ifp, "interface_remove", NULL, 0); + if (err) { + iphy_err(drvr, "interface_remove failed 
%d\n", err); + goto err_unarm; + } + + /* wait for firmware event */ + ret = inff_cfg80211_wait_vif_event(cfg, INFF_E_IF_DEL, INFF_VIF_EVENT_TIMEOUT); + if (!ret) { + iphy_err(drvr, "timeout occurred\n"); + err = -EIO; + goto err_unarm; + } + + inff_remove_interface(ifp, true); + +err_unarm: + inff_cfg80211_arm_vif_event(cfg, NULL); + return err; +} + +struct inff_cfg80211_vif *inff_alloc_vif(struct inff_cfg80211_info *cfg, + enum nl80211_iftype type) +{ + struct inff_cfg80211_vif *vif; + + inff_dbg(TRACE, "allocating virtual interface (size=%zu)\n", + sizeof(*vif)); + vif = kzalloc(sizeof(*vif), GFP_KERNEL); + if (!vif) + return ERR_PTR(-ENOMEM); + + vif->wdev.wiphy = cfg->wiphy; + vif->wdev.iftype = type; + + inff_init_prof(&vif->profile); + init_completion(&vif->mgmt_tx); + list_add_tail(&vif->list, &cfg->vif_list); + return vif; +} + +void inff_free_vif(struct inff_cfg80211_vif *vif) +{ + list_del(&vif->list); + kfree(vif); +} diff --git a/drivers/net/wireless/infineon/inffmac/interface.h b/drivers/net/wireless/infineon/inffmac/interface.h new file mode 100644 index 000000000000..3d35a67666f8 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/interface.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_INTERFACE_H +#define INFF_INTERFACE_H + +#define WLC_E_IF_ROLE_STA 0 /* Infra STA */ +#define WLC_E_IF_ROLE_AP 1 /* Access Point */ +#define WLC_E_IF_ROLE_WLAN_SENSE 10 /* WLAN Sensing interface */ + +#define WL_INTERFACE_CREATE_VER_1 1 +#define WL_INTERFACE_CREATE_VER_2 2 +#define WL_INTERFACE_CREATE_VER_3 3 +#define WL_INTERFACE_CREATE_VER_MAX WL_INTERFACE_CREATE_VER_3 + +#define WL_INTERFACE_MAC_DONT_USE 0x0 +#define WL_INTERFACE_MAC_USE 0x2 + +#define WL_INTERFACE_CREATE_STA 0x0 +#define WL_INTERFACE_CREATE_AP 0x1 + +struct wl_interface_create_v1 { + u16 ver; /* structure version */ + u32 flags; /* flags for operation */ + u8 mac_addr[ETH_ALEN]; /* MAC address */ + u32 wlc_index; /* optional for wlc index */ +}; + +struct wl_interface_create_v2 { + u16 ver; /* structure version */ + u8 pad1[2]; + u32 flags; /* flags for operation */ + u8 mac_addr[ETH_ALEN]; /* MAC address */ + u8 iftype; /* type of interface created */ + u8 pad2; + u32 wlc_index; /* optional for wlc index */ +}; + +struct wl_interface_create_v3 { + u16 ver; /* structure version */ + u16 len; /* length of structure + data */ + u16 fixed_len; /* length of structure */ + u8 iftype; /* type of interface created */ + u8 wlc_index; /* optional for wlc index */ + u32 flags; /* flags for operation */ + u8 mac_addr[ETH_ALEN]; /* MAC address */ + u8 bssid[ETH_ALEN]; /* optional for BSSID */ + u8 if_index; /* interface index request */ + u8 pad[3]; + u8 data[]; /* Optional for specific data */ +}; + +#define WL_IOV_OP_BSSCFG_DISABLE 0 +#define WL_IOV_OP_BSSCFG_ENABLE 1 +#define WL_IOV_OP_MANUAL_STA_BSSCFG_CREATE 2 +#define WL_IOV_OP_MANUAL_AP_BSSCFG_CREATE 3 + +bool check_vif_up(struct inff_cfg80211_vif *vif); +int inff_cfg80211_del_apsta_iface(struct wiphy *wiphy, struct wireless_dev *wdev); +struct wireless_dev *inff_mon_add_vif(struct wiphy *wiphy, const char *name); +int inff_mon_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev); + +struct wireless_dev 
*inff_apsta_add_vif(struct wiphy *wiphy, const char *name, + struct vif_params *params, + enum nl80211_iftype type); +enum nl80211_iftype inff_cfg80211_get_iftype(struct inff_if *ifp); + +struct inff_cfg80211_vif *inff_alloc_vif(struct inff_cfg80211_info *cfg, enum nl80211_iftype type); +void inff_free_vif(struct inff_cfg80211_vif *vif); +bool inff_is_apmode_operating(struct wiphy *wiphy); + +bool inff_is_apmode(struct inff_cfg80211_vif *vif); +bool inff_is_ibssmode(struct inff_cfg80211_vif *vif); + +#endif /* INFF_INTERFACE_H */ -- 2.25.1 Driver implementation of the Wi-Fi Connection security related operations. Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/security.c | 752 ++++++++++++++++++ .../net/wireless/infineon/inffmac/security.h | 212 +++++ 2 files changed, 964 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/security.c create mode 100644 drivers/net/wireless/infineon/inffmac/security.h diff --git a/drivers/net/wireless/infineon/inffmac/security.c b/drivers/net/wireless/infineon/inffmac/security.c new file mode 100644 index 000000000000..52a7d93847fd --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/security.c @@ -0,0 +1,752 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "utils.h" +#include "defs.h" +#include "chanspec.h" +#include "hw_ids.h" +#include "core.h" +#include "debug.h" +#include "tracepoint.h" +#include "fwil_types.h" +#include "p2p.h" +#include "btcoex.h" +#include "pno.h" +#include "fwsignal.h" +#include "cfg80211.h" +#include "feature.h" +#include "fwil.h" +#include "proto.h" +#include "vendor.h" +#include "vendor_inf.h" +#include "bus.h" +#include "common.h" +#include "he.h" +#include "eht.h" +#include "twt.h" +#include "offload.h" +#include "pmsr.h" + +bool +inff_has_pmkid(const u8 *parse, u32 len, u32 *offset_in_ie) +{ + const struct inff_tlv *rsn_ie; + const u8 *ie; + u32 ie_len; + u32 offset; + u16 count; + + rsn_ie = inff_parse_tlvs(parse, len, WLAN_EID_RSN); + if (!rsn_ie) + goto done; + ie = (const u8 *)rsn_ie; + ie_len = rsn_ie->len + TLV_HDR_LEN; + /* Skip group data cipher suite */ + offset = TLV_HDR_LEN + WPA_IE_VERSION_LEN + WPA_IE_MIN_OUI_LEN; + if (offset + WPA_IE_SUITE_COUNT_LEN >= ie_len) + goto done; + /* Skip pairwise cipher suite(s) */ + count = ie[offset] + (ie[offset + 1] << 8); + offset += WPA_IE_SUITE_COUNT_LEN + (count * WPA_IE_MIN_OUI_LEN); + if (offset + WPA_IE_SUITE_COUNT_LEN >= ie_len) + goto done; + /* Skip auth key management suite(s) */ + count = ie[offset] + (ie[offset + 1] << 8); + offset += WPA_IE_SUITE_COUNT_LEN + (count * WPA_IE_MIN_OUI_LEN); + if (offset + RSN_CAP_LEN >= ie_len) + goto done; + /* Skip rsn capabilities */ + offset += RSN_CAP_LEN; + if (offset + RSN_PMKID_COUNT_LEN > ie_len) + goto done; + /* Extract PMKID count */ + count = ie[offset] + (ie[offset + 1] << 8); + if (count) { + if (offset_in_ie) + *offset_in_ie = offset + RSN_PMKID_COUNT_LEN; + return true; + } + +done: + if (offset_in_ie) + *offset_in_ie = 0; + return false; +} + +int inff_set_pmk(struct inff_if *ifp, const u8 *pmk_data, u16 pmk_len) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_wsec_pmk_le 
pmk; + int err; + + memset(&pmk, 0, sizeof(pmk)); + + /* pass pmk directly */ + pmk.key_len = cpu_to_le16(pmk_len); + pmk.flags = cpu_to_le16(0); + memcpy(pmk.key, pmk_data, pmk_len); + + /* store psk in firmware */ + err = inff_fil_cmd_data_set(ifp, INFF_C_SET_WSEC_PMK, + &pmk, sizeof(pmk)); + if (err < 0) + iphy_err(drvr, "failed to change PSK in firmware (len=%u)\n", + pmk_len); + + return err; +} + +static void convert_key_from_CPU(struct inff_wsec_key *key, struct inff_wsec_key_le *key_le) +{ + key_le->index = cpu_to_le32(key->index); + key_le->len = cpu_to_le32(key->len); + key_le->algo = cpu_to_le32(key->algo); + key_le->flags = cpu_to_le32(key->flags); + key_le->rxiv.hi = cpu_to_le32(key->rxiv.hi); + key_le->rxiv.lo = cpu_to_le16(key->rxiv.lo); + key_le->iv_initialized = cpu_to_le32(key->iv_initialized); + memcpy(key_le->data, key->data, sizeof(key->data)); + memcpy(key_le->ea, key->ea, sizeof(key->ea)); +} + +int send_key_to_dongle(struct inff_if *ifp, struct inff_wsec_key *key) +{ + struct inff_pub *drvr = ifp->drvr; + int err; + struct inff_wsec_key_le key_le; + + convert_key_from_CPU(key, &key_le); + + inff_netdev_wait_pend8021x(ifp); + + err = inff_fil_bsscfg_data_set(ifp, "wsec_key", &key_le, + sizeof(key_le)); + + if (err) + iphy_err(drvr, "wsec_key error (%d)\n", err); + return err; +} + +s32 +wl_set_wsec_info_algos(struct inff_if *ifp, u32 algos, u32 mask) +{ + struct inff_pub *drvr = ifp->drvr; + s32 err = 0; + struct wl_wsec_info *wsec_info; + struct inff_xtlv *wsec_info_tlv; + u16 tlv_data_len; + u8 tlv_data[8]; + u32 param_len; + u8 *buf; + u32 buf_len; + + inff_dbg(TRACE, "Enter\n"); + + buf_len = sizeof(struct wl_wsec_info) + sizeof(tlv_data); + buf = kzalloc(buf_len, GFP_KERNEL); + if (!buf) { + iphy_err(drvr, "unable to allocate.\n"); + return -ENOMEM; + } + + wsec_info = (struct wl_wsec_info *)buf; + wsec_info->version = WL_WSEC_INFO_VERSION; + wsec_info_tlv = (struct inff_xtlv *)(buf + offsetof(struct wl_wsec_info, tlvs)); + + 
wsec_info->num_tlvs++; + tlv_data_len = sizeof(tlv_data); + memcpy(tlv_data, &algos, sizeof(algos)); + memcpy(tlv_data + sizeof(algos), &mask, sizeof(mask)); + + wsec_info_tlv->id = cpu_to_le16(WL_WSEC_INFO_BSS_ALGOS); + wsec_info_tlv->len = cpu_to_le16(tlv_data_len); + unsafe_memcpy(wsec_info_tlv->data, tlv_data, tlv_data_len, + /* alloc enough buf*/); + + param_len = offsetof(struct wl_wsec_info, tlvs) + + offsetof(struct wl_wsec_info_tlv, data) + tlv_data_len; + + err = inff_fil_bsscfg_data_set(ifp, "wsec_info", buf, param_len); + if (err) + inff_err("set wsec_info_error:%d\n", err); + + kfree(buf); + return err; +} + +void +inff_cfg80211_reconfigure_wep(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + s32 err; + u8 key_idx; + struct inff_wsec_key *key; + s32 wsec; + + for (key_idx = 0; key_idx < INFF_MAX_DEFAULT_KEYS; key_idx++) { + key = &ifp->vif->profile.key[key_idx]; + if (key->algo == CRYPTO_ALGO_WEP1 || + key->algo == CRYPTO_ALGO_WEP128) + break; + } + if (key_idx == INFF_MAX_DEFAULT_KEYS) + return; + + err = send_key_to_dongle(ifp, key); + if (err) { + iphy_err(drvr, "Setting WEP key failed (%d)\n", err); + return; + } + err = inff_fil_bsscfg_int_get(ifp, "wsec", &wsec); + if (err) { + iphy_err(drvr, "get wsec error (%d)\n", err); + return; + } + wsec |= WEP_ENABLED; + err = inff_fil_bsscfg_int_set(ifp, "wsec", wsec); + if (err) + iphy_err(drvr, "set wsec error (%d)\n", err); +} + +s32 +inff_update_pmklist(struct inff_cfg80211_info *cfg, struct inff_if *ifp) +{ + struct inff_pmk_list_le *pmk_list; + int i; + u32 npmk; + + pmk_list = &cfg->pmk_list; + npmk = le32_to_cpu(pmk_list->npmk); + + inff_dbg(CONN, "No of elements %d\n", npmk); + for (i = 0; i < npmk; i++) + inff_dbg(CONN, "PMK[%d]: %pM\n", i, &pmk_list->pmk[i].bssid); + + return inff_fil_iovar_data_set(ifp, "pmkid_info", pmk_list, + sizeof(*pmk_list)); +} + +s32 +inff_update_pmksa(struct inff_cfg80211_info *cfg, + struct inff_if *ifp, + const u8 *bssid, + const u8 *pmkid, + enum 
inff_pmksa_action action) +{ + struct inff_pmksa *pmk = &cfg->pmk_list.pmk[0]; + struct inff_pub *drvr = cfg->pub; + s32 err; + u32 npmk, i; + + if (!check_vif_up(ifp->vif)) + return -EIO; + + switch (action) { + case PMKSA_SET: + npmk = le32_to_cpu(cfg->pmk_list.npmk); + for (i = 0; i < npmk; i++) + if (!memcmp(bssid, pmk[i].bssid, ETH_ALEN)) + break; + if (i < INFF_MAXPMKID) { + memcpy(pmk[i].bssid, bssid, ETH_ALEN); + memcpy(pmk[i].pmkid, pmkid, WLAN_PMKID_LEN); + if (i == npmk) { + npmk++; + cfg->pmk_list.npmk = cpu_to_le32(npmk); + } + } else { + iphy_err(drvr, "Too many PMKSA entries cached %d\n", npmk); + return -EINVAL; + } + + inff_dbg(CONN, "set_pmksa - PMK bssid: %pM =\n", pmk[i].bssid); + inff_dbg(CONN, "%*ph\n", WLAN_PMKID_LEN, pmk[i].pmkid); + + err = inff_update_pmklist(cfg, ifp); + break; + case PMKSA_DELETE: + npmk = le32_to_cpu(cfg->pmk_list.npmk); + for (i = 0; i < npmk; i++) + if (!memcmp(bssid, pmk[i].bssid, ETH_ALEN)) + break; + + if (npmk > 0 && i < npmk) { + for (; i < (npmk - 1); i++) { + memcpy(&pmk[i].bssid, &pmk[i + 1].bssid, ETH_ALEN); + memcpy(&pmk[i].pmkid, &pmk[i + 1].pmkid, + WLAN_PMKID_LEN); + } + memset(&pmk[i], 0, sizeof(*pmk)); + cfg->pmk_list.npmk = cpu_to_le32(npmk - 1); + } else { + iphy_err(drvr, "Cache entry not found\n"); + return -EINVAL; + } + + err = inff_update_pmklist(cfg, ifp); + break; + default: + err = -EINVAL; + } + + return err; +} + +s32 inff_configure_opensecurity(struct inff_if *ifp) +{ + struct inff_pub *drvr = ifp->drvr; + s32 err; + s32 wpa_val; + + /* set auth */ + err = inff_fil_bsscfg_int_set(ifp, "auth", 0); + if (err < 0) { + iphy_err(drvr, "auth error %d\n", err); + return err; + } + /* set wsec */ + err = inff_fil_bsscfg_int_set(ifp, "wsec", 0); + if (err < 0) { + iphy_err(drvr, "wsec error %d\n", err); + return err; + } + /* set upper-layer auth */ + if (inff_is_ibssmode(ifp->vif)) + wpa_val = WPA_AUTH_NONE; + else + wpa_val = WPA_AUTH_DISABLED; + err = inff_fil_bsscfg_int_set(ifp, "wpa_auth", 
wpa_val); + if (err < 0) { + iphy_err(drvr, "wpa_auth error %d\n", err); + return err; + } + + return 0; +} + +static s32 +inff_configure_wpaie(struct inff_if *ifp, + const struct inff_vs_tlv *wpa_ie, + bool is_rsn_ie) +{ + struct inff_pub *drvr = ifp->drvr; + u32 auth = 0; /* d11 open authentication */ + u16 count; + s32 err = 0; + s32 len; + u32 i; + u32 wsec; + u32 pval = 0; + u32 gval = 0; + u32 wpa_auth = 0; + u32 offset; + u8 *data; + u16 rsn_cap; + u32 wme_bss_disable; + u32 mfp; + + inff_dbg(TRACE, "Enter\n"); + if (!wpa_ie) + goto exit; + + len = wpa_ie->len + TLV_HDR_LEN; + data = (u8 *)wpa_ie; + offset = TLV_HDR_LEN; + if (!is_rsn_ie) + offset += VS_IE_FIXED_HDR_LEN; + else + offset += WPA_IE_VERSION_LEN; + + /* check for multicast cipher suite */ + if (offset + WPA_IE_MIN_OUI_LEN > len) { + err = -EINVAL; + iphy_err(drvr, "no multicast cipher suite\n"); + goto exit; + } + + if (!inff_valid_wpa_oui(&data[offset], is_rsn_ie)) { + err = -EINVAL; + iphy_err(drvr, "invalid OUI\n"); + goto exit; + } + offset += TLV_OUI_LEN; + + /* pick up multicast cipher */ + switch (data[offset]) { + case WPA_CIPHER_NONE: + gval = 0; + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + gval = WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + gval = TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + gval = AES_ENABLED; + break; + default: + err = -EINVAL; + iphy_err(drvr, "Invalid multi cast cipher info\n"); + goto exit; + } + + offset++; + /* walk thru unicast cipher list and pick up what we recognize */ + count = data[offset] + (data[offset + 1] << 8); + offset += WPA_IE_SUITE_COUNT_LEN; + /* Check for unicast suite(s) */ + if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) { + err = -EINVAL; + iphy_err(drvr, "no unicast cipher suite\n"); + goto exit; + } + for (i = 0; i < count; i++) { + if (!inff_valid_wpa_oui(&data[offset], is_rsn_ie)) { + err = -EINVAL; + iphy_err(drvr, "invalid OUI\n"); + goto exit; + } + offset += TLV_OUI_LEN; + switch (data[offset]) { + case 
WPA_CIPHER_NONE: + break; + case WPA_CIPHER_WEP_40: + case WPA_CIPHER_WEP_104: + pval |= WEP_ENABLED; + break; + case WPA_CIPHER_TKIP: + pval |= TKIP_ENABLED; + break; + case WPA_CIPHER_AES_CCM: + pval |= AES_ENABLED; + break; + default: + iphy_err(drvr, "Invalid unicast security info\n"); + } + offset++; + } + /* walk thru auth management suite list and pick up what we recognize */ + count = data[offset] + (data[offset + 1] << 8); + offset += WPA_IE_SUITE_COUNT_LEN; + /* Check for auth key management suite(s) */ + if (offset + (WPA_IE_MIN_OUI_LEN * count) > len) { + err = -EINVAL; + iphy_err(drvr, "no auth key mgmt suite\n"); + goto exit; + } + for (i = 0; i < count; i++) { + if (inff_valid_dpp_suite(&data[offset])) { + wpa_auth |= WFA_AUTH_DPP; + offset += TLV_OUI_LEN; + } else if (inff_valid_wpa_oui(&data[offset], is_rsn_ie)) { + offset += TLV_OUI_LEN; + switch (data[offset]) { + case RSN_AKM_NONE: + inff_dbg(TRACE, "RSN_AKM_NONE\n"); + wpa_auth |= WPA_AUTH_NONE; + break; + case RSN_AKM_UNSPECIFIED: + inff_dbg(TRACE, "RSN_AKM_UNSPECIFIED\n"); + is_rsn_ie ? (wpa_auth |= WPA2_AUTH_UNSPECIFIED) : + (wpa_auth |= WPA_AUTH_UNSPECIFIED); + break; + case RSN_AKM_PSK: + inff_dbg(TRACE, "RSN_AKM_PSK\n"); + is_rsn_ie ? 
(wpa_auth |= WPA2_AUTH_PSK) : + (wpa_auth |= WPA_AUTH_PSK); + break; + case RSN_AKM_SHA256_PSK: + inff_dbg(TRACE, "RSN_AKM_MFP_PSK\n"); + wpa_auth |= WPA2_AUTH_PSK_SHA256; + break; + case RSN_AKM_SHA256_1X: + inff_dbg(TRACE, "RSN_AKM_MFP_1X\n"); + wpa_auth |= WPA2_AUTH_1X_SHA256; + break; + case RSN_AKM_SAE: + inff_dbg(TRACE, "RSN_AKM_SAE\n"); + wpa_auth |= WPA3_AUTH_SAE_PSK; + break; + case RSN_AKM_OWE: + inff_dbg(TRACE, "RSN_AKM_OWE\n"); + wpa_auth |= WPA3_AUTH_OWE; + break; + default: + iphy_err(drvr, "Invalid key mgmt info\n"); + } + } else { + err = -EINVAL; + iphy_err(drvr, "invalid OUI\n"); + goto exit; + } + offset++; + } + + mfp = INFF_MFP_NONE; + if (is_rsn_ie) { + wme_bss_disable = 1; + if ((offset + RSN_CAP_LEN) <= len) { + rsn_cap = data[offset] + (data[offset + 1] << 8); + if (rsn_cap & RSN_CAP_PTK_REPLAY_CNTR_MASK) + wme_bss_disable = 0; + if (rsn_cap & RSN_CAP_MFPR_MASK) { + inff_dbg(TRACE, "MFP Required\n"); + mfp = INFF_MFP_REQUIRED; + /* Firmware only supports mfp required in + * combination with WPA2_AUTH_PSK_SHA256, + * WPA2_AUTH_1X_SHA256, or WPA3_AUTH_SAE_PSK. + */ + if (!(wpa_auth & (WPA2_AUTH_PSK_SHA256 | + WPA2_AUTH_1X_SHA256 | + WFA_AUTH_DPP | + WPA3_AUTH_SAE_PSK | + WPA3_AUTH_OWE))) { + err = -EINVAL; + goto exit; + } + + /* Firmware has requirement that WPA2_AUTH_PSK/ + * WPA2_AUTH_UNSPECIFIED be set, if SHA256 OUI + * is to be included in the rsn ie. + */ + if (wpa_auth & WPA2_AUTH_PSK_SHA256) + wpa_auth |= WPA2_AUTH_PSK; + else if (wpa_auth & WPA2_AUTH_1X_SHA256) + wpa_auth |= WPA2_AUTH_UNSPECIFIED; + } else if (rsn_cap & RSN_CAP_MFPC_MASK) { + inff_dbg(TRACE, "MFP Capable\n"); + mfp = INFF_MFP_CAPABLE; + } + } + offset += RSN_CAP_LEN; + /* set wme_bss_disable to sync RSN Capabilities */ + err = inff_fil_bsscfg_int_set(ifp, "wme_bss_disable", + wme_bss_disable); + if (err < 0) { + iphy_err(drvr, "wme_bss_disable error %d\n", err); + goto exit; + } + + /* Skip PMKID cnt as it is know to be 0 for AP. 
*/ + offset += RSN_PMKID_COUNT_LEN; + + /* See if there is BIP wpa suite left for MFP */ + if (inff_feat_is_enabled(ifp, INFF_FEAT_MFP) && + ((offset + WPA_IE_MIN_OUI_LEN) <= len)) { + err = inff_fil_bsscfg_data_set(ifp, "bip", + &data[offset], + WPA_IE_MIN_OUI_LEN); + if (err < 0) { + iphy_err(drvr, "bip error %d\n", err); + goto exit; + } + } + } + /* FOR WPS , set SES_OW_ENABLED */ + wsec = (pval | gval | SES_OW_ENABLED); + + /* set auth */ + err = inff_fil_bsscfg_int_set(ifp, "auth", auth); + if (err < 0) { + iphy_err(drvr, "auth error %d\n", err); + goto exit; + } + /* set wsec */ + err = inff_fil_bsscfg_int_set(ifp, "wsec", wsec); + if (err < 0) { + iphy_err(drvr, "wsec error %d\n", err); + goto exit; + } + /* Configure MFP, this needs to go after wsec otherwise the wsec command + * will overwrite the values set by MFP + */ + if (inff_feat_is_enabled(ifp, INFF_FEAT_MFP)) { + err = inff_fil_bsscfg_int_set(ifp, "mfp", mfp); + if (err < 0) { + iphy_err(drvr, "mfp error %d\n", err); + goto exit; + } + } + /* set upper-layer auth */ + err = inff_fil_bsscfg_int_set(ifp, "wpa_auth", wpa_auth); + if (err < 0) { + iphy_err(drvr, "wpa_auth error %d\n", err); + goto exit; + } + +exit: + return err; +} + +static s32 +inff_parse_configure_sae_pwe(struct inff_if *ifp, + struct cfg80211_ap_settings *settings) +{ + s32 err = 0; + const struct inff_tlv *rsnx_ie; + const struct inff_tlv *ext_rate_ie; + const struct inff_tlv *supp_rate_ie; + u8 ie_len, i; + bool support_sae_h2e = false, must_sae_h2e = false; + u32 wpa_auth = 0; + + /* get configured wpa_auth */ + err = inff_fil_bsscfg_int_get(ifp, "wpa_auth", &wpa_auth); + if ((wpa_auth & WPA3_AUTH_SAE_PSK) == 0) { + /* wpa_auth is not SAE, ignore sae_pwe. 
*/ + inff_dbg(INFO, "wpa_auth is not SAE:0x%x\n", wpa_auth); + return 0; + } + + if (inff_feat_is_enabled(ifp, INFF_FEAT_SAE_EXT)) { + err = inff_fil_iovar_int_set(ifp, "extsae_pwe", 0); + if (err) { + inff_err("extsae_pwe iovar is not supported\n"); + return -EOPNOTSUPP; + } + + rsnx_ie = inff_parse_tlvs((u8 *)settings->beacon.tail, + settings->beacon.tail_len, + WLAN_EID_RSNX); + if (rsnx_ie) { + ie_len = rsnx_ie->len; + if (ie_len) { + if (rsnx_ie->data[0] & WLAN_RSNX_CAPA_SAE_H2E) + support_sae_h2e = true; + } + inff_dbg(INFO, "found RSNX IE, support_sae_h2e:%d\n", + support_sae_h2e); + } + + /* found rsnx_ie with SAE_H2E, check the bss selector to know if it is a H2E only */ + if (support_sae_h2e) { + supp_rate_ie = inff_parse_tlvs((u8 *)settings->beacon.head, + settings->beacon.head_len, + WLAN_EID_SUPP_RATES); + ext_rate_ie = inff_parse_tlvs((u8 *)settings->beacon.tail, + settings->beacon.tail_len, + WLAN_EID_EXT_SUPP_RATES); + if (ext_rate_ie) { + ie_len = ext_rate_ie->len; + for (i = 0; i < ie_len; i++) { + if (ext_rate_ie->data[i] == SAE_H2E_ONLY_ENABLE) { + must_sae_h2e = true; + break; + } + } + } + + /* if we cannot found H2E only selector in ext_supp_rate ie. + * traversal supp_rate ie to make sure it really doesn't exist. 
+ */ + if (!must_sae_h2e && supp_rate_ie) { + ie_len = supp_rate_ie->len; + for (i = 0; i < ie_len; i++) { + if (supp_rate_ie->data[i] == SAE_H2E_ONLY_ENABLE) { + must_sae_h2e = true; + break; + } + } + } + inff_dbg(INFO, "must_sae_h2e:%d\n", must_sae_h2e); + } + + if (must_sae_h2e) /* support SAE H2E only */ + err = inff_fil_iovar_int_set(ifp, "extsae_pwe", 1); + else if (support_sae_h2e) /* support SAE P&H and H2E both */ + err = inff_fil_iovar_int_set(ifp, "extsae_pwe", 2); + else /* support SAE P&H only */ + err = inff_fil_iovar_int_set(ifp, "extsae_pwe", 0); + } + + return err; +} + +s32 +inff_parse_configure_security(struct inff_if *ifp, + struct cfg80211_ap_settings *settings, + enum nl80211_iftype dev_role) +{ + const struct inff_tlv *rsn_ie; + const struct inff_vs_tlv *wpa_ie; + s32 err = 0; + + /* find the RSN_IE */ + rsn_ie = inff_parse_tlvs((u8 *)settings->beacon.tail, + settings->beacon.tail_len, WLAN_EID_RSN); + + /* find the WPA_IE */ + wpa_ie = inff_find_wpaie((u8 *)settings->beacon.tail, + settings->beacon.tail_len); + + if (wpa_ie || rsn_ie) { + inff_dbg(TRACE, "WPA(2) IE is found\n"); + if (wpa_ie) { + /* WPA IE */ + err = inff_configure_wpaie(ifp, wpa_ie, false); + if (err < 0) + return err; + } else { + struct inff_vs_tlv *tmp_ie; + + tmp_ie = (struct inff_vs_tlv *)rsn_ie; + + /* RSN IE */ + err = inff_configure_wpaie(ifp, tmp_ie, true); + if (err < 0) + return err; + + err = inff_parse_configure_sae_pwe(ifp, settings); + if (err < 0) + return err; + } + } else { + inff_dbg(TRACE, "No WPA(2) IEs found\n"); + inff_configure_opensecurity(ifp); + } + + return err; +} + +#ifdef CONFIG_PM +int +inff_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_gtk_rekey_data *gtk) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_pub *drvr = cfg->pub; + struct inff_if *ifp = netdev_priv(ndev); + struct inff_gtk_keyinfo_le gtk_le; + int ret; + + inff_dbg(TRACE, "Enter, bssidx=%d\n", ifp->bsscfgidx); + 
+ memcpy(gtk_le.kck, gtk->kck, sizeof(gtk_le.kck)); + memcpy(gtk_le.kek, gtk->kek, sizeof(gtk_le.kek)); + memcpy(gtk_le.replay_counter, gtk->replay_ctr, + sizeof(gtk_le.replay_counter)); + + ret = inff_fil_iovar_data_set(ifp, "gtk_key_info", >k_le, + sizeof(gtk_le)); + if (ret < 0) + iphy_err(drvr, "gtk_key_info iovar failed: ret=%d\n", ret); + + return ret; +} +#endif diff --git a/drivers/net/wireless/infineon/inffmac/security.h b/drivers/net/wireless/infineon/inffmac/security.h new file mode 100644 index 000000000000..ebb70175f4cf --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/security.h @@ -0,0 +1,212 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_SECURITY_H +#define INFF_SECURITY_H + +/* Enumerate crypto algorithms */ +#define CRYPTO_ALGO_OFF 0 +#define CRYPTO_ALGO_WEP1 1 +#define CRYPTO_ALGO_TKIP 2 +#define CRYPTO_ALGO_WEP128 3 +#define CRYPTO_ALGO_AES_CCM 4 +#define CRYPTO_ALGO_AES_RESERVED1 5 +#define CRYPTO_ALGO_AES_RESERVED2 6 +#define CRYPTO_ALGO_NALG 7 + +#define CRYPTO_ALGO_AES_GCM 14 /* 128 bit GCM */ +#define CRYPTO_ALGO_AES_CCM256 15 /* 256 bit CCM */ +#define CRYPTO_ALGO_AES_GCM256 16 /* 256 bit GCM */ +#define CRYPTO_ALGO_BIP_CMAC256 17 /* 256 bit BIP CMAC */ +#define CRYPTO_ALGO_BIP_GMAC 18 /* 128 bit BIP GMAC */ +#define CRYPTO_ALGO_BIP_GMAC256 19 /* 256 bit BIP GMAC */ + +/* wireless security bitvec */ + +#define WEP_ENABLED 0x0001 +#define TKIP_ENABLED 0x0002 +#define AES_ENABLED 0x0004 +#define WSEC_SWFLAG 0x0008 +/* to go into transition mode without setting wep */ +#define SES_OW_ENABLED 0x0040 +/* MFP */ +#define MFP_CAPABLE 0x0200 +#define MFP_REQUIRED 0x0400 + +/* WPA authentication mode bitvec */ +#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */ +#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */ +#define WPA_AUTH_UNSPECIFIED 0x0002 /* 
over 802.1x */ +#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */ +#define WPA_AUTH_RESERVED1 0x0008 +#define WPA_AUTH_RESERVED2 0x0010 + +#define WPA2_AUTH_RESERVED1 0x0020 +#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */ +#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */ +#define WPA2_AUTH_RESERVED3 0x0200 +#define WPA2_AUTH_RESERVED4 0x0400 +#define WPA2_AUTH_RESERVED5 0x0800 +#define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */ +#define WPA2_AUTH_FT 0x4000 /* Fast BSS Transition */ +#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */ + +#define WPA3_AUTH_SAE_PSK 0x40000 /* SAE with 4-way handshake */ +#define WPA3_AUTH_SAE_FBT 0x80000 /* FT authentication over SAE */ +#define WPA3_AUTH_SAE_FT_1X 0x2000000 /* SAE with FT 1X */ +#define WPA3_AUTH_OWE 0x100000 /* OWE */ +#define WFA_AUTH_DPP 0x200000 /* WFA DPP AUTH */ +#define WPA3_AUTH_1X_SUITE_B_SHA384 0x400000 /* Suite B-192 SHA384 */ + +#define DPP_AKM_SUITE_TYPE 2 + +/* WPA3 1x with SHA256 key derivation. 
+ * Shares the same value as WPA2_AUTH_1X_SHA256 since the value of + * akm-suite is the same for both + */ +#define WPA3_AUTH_1X_SHA256 WPA2_AUTH_1X_SHA256 + +#define WFA_OUI "\x50\x6F\x9A" /* WFA OUI */ + +#define DOT11_DEFAULT_RTS_LEN 2347 +#define DOT11_DEFAULT_FRAG_LEN 2346 + +#define DOT11_ICV_AES_LEN 8 +#define DOT11_QOS_LEN 2 +#define DOT11_IV_MAX_LEN 8 +#define DOT11_A4_HDR_LEN 30 + +#define HT_CAP_RX_STBC_NO 0x0 +#define HT_CAP_RX_STBC_ONE_STREAM 0x1 + +#define WPA_PROTO_WPA BIT(0) +#define WPA_PROTO_RSN BIT(1) + +/* algo bit vector */ +#define KEY_ALGO_MASK(_algo) (1 << (_algo)) +/* version of the wl_wsec_info structure */ +#define WL_WSEC_INFO_VERSION 0x01 + +/* start enum value for BSS properties */ +#define WL_WSEC_INFO_BSS_BASE 0x0100 +#define WL_WSEC_INFO_BSS_ALGOS (WL_WSEC_INFO_BSS_BASE + 6) + +#define BSS_MEMBERSHIP_SELECTOR_SAE_H2E_ONLY 123 +#define BSS_MEMBERSHIP_SELECTOR_SET 0x80 +#define SAE_H2E_ONLY_ENABLE (BSS_MEMBERSHIP_SELECTOR_SAE_H2E_ONLY | \ + BSS_MEMBERSHIP_SELECTOR_SET) + +#define WPA_CIPHER_NONE 0 /* None */ +#define WPA_CIPHER_WEP_40 1 /* WEP (40-bit) */ +#define WPA_CIPHER_TKIP 2 /* TKIP: default for WPA */ +#define WPA_CIPHER_AES_CCM 4 /* AES (CCM) */ +#define WPA_CIPHER_WEP_104 5 /* WEP (104-bit) */ + +#define RSN_AKM_NONE 0 /* None (IBSS) */ +#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */ +#define RSN_AKM_PSK 2 /* Pre-shared Key */ +#define RSN_AKM_SHA256_1X 5 /* SHA256, 802.1X */ +#define RSN_AKM_SHA256_PSK 6 /* SHA256, Pre-shared Key */ +#define RSN_AKM_SAE 8 /* SAE */ +#define RSN_AKM_OWE 18 /* OWE */ +#define RSN_CAP_LEN 2 /* Length of RSN capabilities */ +#define RSN_CAP_PTK_REPLAY_CNTR_MASK (BIT(2) | BIT(3)) +#define RSN_CAP_MFPR_MASK BIT(6) +#define RSN_CAP_MFPC_MASK BIT(7) +#define RSN_PMKID_COUNT_LEN 2 + +#define INFF_KEY_MGMT_ID_WPA BIT(0) +#define INFF_KEY_MGMT_ID_WPA2 BIT(1) +#define INFF_KEY_MGMT_ID_WPA_PSK BIT(2) +#define INFF_KEY_MGMT_ID_WPA2_PSK BIT(3) +#define INFF_KEY_MGMT_ID_WPA_NONE BIT(4) +#define 
INFF_KEY_MGMT_ID_FT BIT(5) +#define INFF_KEY_MGMT_ID_FT_PSK BIT(6) +#define INFF_KEY_MGMT_ID_WAPI_PSK BIT(7) +#define INFF_KEY_MGMT_ID_SUITE_B BIT(8) +#define INFF_KEY_MGMT_ID_SUITE_B_192 BIT(9) +#define INFF_KEY_MGMT_ID_OWE BIT(10) +#define INFF_KEY_MGMT_ID_DPP BIT(11) +#define INFF_KEY_MGMT_ID_FILS_SHA256 BIT(12) +#define INFF_KEY_MGMT_ID_FILS_SHA384 BIT(13) +#define INFF_KEY_MGMT_ID_FT_FILS_SHA256 BIT(14) +#define INFF_KEY_MGMT_ID_FT_FILS_SHA384 BIT(15) +#define INFF_KEY_MGMT_ID_SAE BIT(16) +#define INFF_KEY_MGMT_ID_802_1X_SHA256 BIT(17) +#define INFF_KEY_MGMT_ID_PSK_SHA256 BIT(18) +#define INFF_KEY_MGMT_ID_TPK_HANDSHAKE BIT(19) +#define INFF_KEY_MGMT_ID_FT_SAE BIT(20) +#define INFF_KEY_MGMT_ID_FT_802_1X_SHA384 BIT(21) +#define INFF_KEY_MGMT_ID_CCKM BIT(22) +#define INFF_KEY_MGMT_ID_OSEN BIT(23) + +static inline int wpa_akm_ft(int akm) +{ + return !!((akm == WLAN_AKM_SUITE_FT_PSK) || + (akm == WLAN_AKM_SUITE_FT_8021X) || + (akm == WLAN_AKM_SUITE_FT_8021X_SHA384) || + (akm == WLAN_AKM_SUITE_FT_OVER_SAE) || + (akm == WLAN_AKM_SUITE_FT_FILS_SHA256) || + (akm == WLAN_AKM_SUITE_FT_FILS_SHA384) || + (akm == WLAN_AKM_SUITE_FT_PSK_SHA384)); +} + +enum inff_pmksa_action { + PMKSA_SET = 0, + PMKSA_DELETE = 1 +}; + +/* tlv used to return wl_wsec_info properties */ +struct wl_wsec_info_tlv { + u16 type; + u16 len; /* data length */ + u8 data[1]; /* data follows */ +}; + +/* input/output data type for wsec_info iovar */ +struct wl_wsec_info { + u8 version; /* structure version */ + u8 pad[2]; + u8 num_tlvs; + struct wl_wsec_info_tlv tlvs[1]; /* tlv data follows */ +}; + +struct inff_owe_info_buf { + bool with_pmkid; + bool with_ecdh;/* if doing PMK Caching, might not have ECDH IE. 
*/ + __le16 status_le16;/* hostapd gives status of Assoc Resp */ + u8 peer_mac[ETH_ALEN]; + u8 pmkid[WLAN_PMKID_LEN]; + u8 ecdh_ie_info[]; +}; + +bool inff_has_pmkid(const u8 *parse, u32 len, u32 *offset_in_ie); +int inff_set_pmk(struct inff_if *ifp, const u8 *pmk_data, u16 pmk_len); +int send_key_to_dongle(struct inff_if *ifp, struct inff_wsec_key *key); +s32 wl_set_wsec_info_algos(struct inff_if *ifp, u32 algos, u32 mask); + +void inff_cfg80211_reconfigure_wep(struct inff_if *ifp); + +s32 inff_update_pmklist(struct inff_cfg80211_info *cfg, struct inff_if *ifp); +s32 inff_parse_configure_security(struct inff_if *ifp, + struct cfg80211_ap_settings *settings, + enum nl80211_iftype dev_role); +s32 inff_configure_opensecurity(struct inff_if *ifp); +s32 inff_update_pmksa(struct inff_cfg80211_info *cfg, + struct inff_if *ifp, + const u8 *bssid, + const u8 *pmkid, + enum inff_pmksa_action action); + +#ifdef CONFIG_PM +int +inff_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_gtk_rekey_data *gtk); +#endif /* CONFIG_PM*/ + +#endif /* INFF_SECURITY_H */ -- 2.25.1 Driver implementation of the Wi-Fi Direct (Peer-to-Peer) Mode of connection with a Peer device. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/p2p.c | 2605 +++++++++++++++++++ drivers/net/wireless/infineon/inffmac/p2p.h | 186 ++ 2 files changed, 2791 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/p2p.c create mode 100644 drivers/net/wireless/infineon/inffmac/p2p.h diff --git a/drivers/net/wireless/infineon/inffmac/p2p.c b/drivers/net/wireless/infineon/inffmac/p2p.c new file mode 100644 index 000000000000..da27bc89bb6f --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/p2p.c @@ -0,0 +1,2605 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2012 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ +#include +#include +#include +#include +#include + +#include "chanspec.h" +#include "utils.h" +#include "defs.h" +#include "core.h" +#include "debug.h" +#include "fwil.h" +#include "fwil_types.h" +#include "p2p.h" +#include "cfg80211.h" +#include "feature.h" +#include "security.h" + +/* parameters used for p2p escan */ +#define P2PAPI_SCAN_NPROBES 1 +#define P2PAPI_SCAN_DWELL_TIME_MS 80 +#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 40 +#define P2PAPI_SCAN_HOME_TIME_MS 60 +#define P2PAPI_SCAN_NPROBS_TIME_MS 30 +#define P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS 100 +#define WL_SCAN_CONNECT_DWELL_TIME_MS 200 +#define WL_SCAN_JOIN_PROBE_INTERVAL_MS 20 + +#define INFF_P2P_WILDCARD_SSID "DIRECT-" +#define INFF_P2P_WILDCARD_SSID_LEN (sizeof(INFF_P2P_WILDCARD_SSID) - 1) + +#define SOCIAL_CHAN_1 1 +#define SOCIAL_CHAN_2 6 +#define SOCIAL_CHAN_3 11 +#define IS_P2P_SOCIAL_CHANNEL(channel) ({ \ + typeof(channel) __channel = (channel); \ + (__channel == SOCIAL_CHAN_1) || \ + (__channel == SOCIAL_CHAN_2) || \ + (__channel == SOCIAL_CHAN_3); \ +}) + +#define INFF_P2P_TEMP_CHAN SOCIAL_CHAN_3 +#define SOCIAL_CHAN_CNT 3 +#define AF_PEER_SEARCH_CNT 2 + +#define INFF_SCB_TIMEOUT_VALUE 20 + +#define P2P_VER 9 /* P2P version: 9=WiFi P2P v1.0 */ +#define P2P_PUB_AF_CATEGORY 0x04 +#define P2P_PUB_AF_ACTION 0x09 +#define P2P_AF_CATEGORY 0x7f +#define P2P_OUI "\x50\x6F\x9A" /* P2P OUI */ +#define P2P_OUI_LEN 3 /* P2P OUI length */ + +/* Action Frame Constants */ +#define DOT11_ACTION_HDR_LEN 2 /* action frame category + action */ +#define DOT11_ACTION_CAT_OFF 0 /* category offset */ +#define DOT11_ACTION_ACT_OFF 1 /* action offset */ + +#define P2P_AF_DWELL_TIME 200 +#define P2P_AF_MIN_DWELL_TIME 100 +#define P2P_AF_MED_DWELL_TIME 400 +#define P2P_AF_LONG_DWELL_TIME 1000 +#define P2P_AF_TX_MAX_RETRY 5 +#define P2P_AF_MAX_WAIT_TIME msecs_to_jiffies(2000) +#define P2P_INVALID_CHANNEL -1 +#define P2P_INVALID_CHANSPEC 0 +#define P2P_CHANNEL_SYNC_RETRY 5 +#define P2P_AF_FRM_SCAN_MAX_WAIT 
msecs_to_jiffies(450) +#define P2P_DEFAULT_SLEEP_TIME_VSDB 200 +#define P2P_AF_RETRY_DELAY_TIME 40 + +/* WiFi P2P Public Action Frame OUI Subtypes */ +#define P2P_PAF_GON_REQ 0 /* Group Owner Negotiation Req */ +#define P2P_PAF_GON_RSP 1 /* Group Owner Negotiation Rsp */ +#define P2P_PAF_GON_CONF 2 /* Group Owner Negotiation Confirm */ +#define P2P_PAF_INVITE_REQ 3 /* P2P Invitation Request */ +#define P2P_PAF_INVITE_RSP 4 /* P2P Invitation Response */ +#define P2P_PAF_DEVDIS_REQ 5 /* Device Discoverability Request */ +#define P2P_PAF_DEVDIS_RSP 6 /* Device Discoverability Response */ +#define P2P_PAF_PROVDIS_REQ 7 /* Provision Discovery Request */ +#define P2P_PAF_PROVDIS_RSP 8 /* Provision Discovery Response */ +#define P2P_PAF_SUBTYPE_INVALID 255 /* Invalid Subtype */ + +/* WiFi P2P Action Frame OUI Subtypes */ +#define P2P_AF_NOTICE_OF_ABSENCE 0 /* Notice of Absence */ +#define P2P_AF_PRESENCE_REQ 1 /* P2P Presence Request */ +#define P2P_AF_PRESENCE_RSP 2 /* P2P Presence Response */ +#define P2P_AF_GO_DISC_REQ 3 /* GO Discoverability Request */ + +/* P2P Service Discovery related */ +#define P2PSD_ACTION_CATEGORY 0x04 /* Public action frame */ +#define P2PSD_ACTION_ID_GAS_IREQ 0x0a /* GAS Initial Request AF */ +#define P2PSD_ACTION_ID_GAS_IRESP 0x0b /* GAS Initial Response AF */ +#define P2PSD_ACTION_ID_GAS_CREQ 0x0c /* GAS Comeback Request AF */ +#define P2PSD_ACTION_ID_GAS_CRESP 0x0d /* GAS Comeback Response AF */ + +#define INFF_P2P_DISABLE_TIMEOUT msecs_to_jiffies(500) + +/* Mask for retry counter of custom dwell time */ +#define CUSTOM_RETRY_MASK 0xff000000 +/** + * struct inff_p2p_disc_st_le - set discovery state in firmware. + * + * @state: requested discovery state (see enum inff_p2p_disc_state). + * @chspec: channel parameter for %WL_P2P_DISC_ST_LISTEN state. + * @dwell: dwell time in ms for %WL_P2P_DISC_ST_LISTEN state. 
+ */ +struct inff_p2p_disc_st_le { + u8 state; + __le16 chspec; + __le16 dwell; +}; + +/** + * enum inff_p2p_disc_state - P2P discovery state values + * + * @WL_P2P_DISC_ST_SCAN: P2P discovery with wildcard SSID and P2P IE. + * @WL_P2P_DISC_ST_LISTEN: P2P discovery off-channel for specified time. + * @WL_P2P_DISC_ST_SEARCH: P2P discovery with P2P wildcard SSID and P2P IE. + */ +enum inff_p2p_disc_state { + WL_P2P_DISC_ST_SCAN, + WL_P2P_DISC_ST_LISTEN, + WL_P2P_DISC_ST_SEARCH +}; + +/** + * struct inff_p2p_scan_le - P2P specific scan request. + * + * @type: type of scan method requested (values: 'E' or 'S'). + * @reserved: reserved (ignored). + * @eparams: parameters used for type 'E'. + * @sparams: parameters used for type 'S'. + */ +struct inff_p2p_scan_le { + u8 type; + u8 reserved[3]; + union { + struct inff_escan_params_le eparams; + struct inff_scan_params_le sparams; + }; +}; + +/** + * struct inff_p2p_pub_act_frame - WiFi P2P Public Action Frame + * + * @category: P2P_PUB_AF_CATEGORY + * @action: P2P_PUB_AF_ACTION + * @oui: P2P_OUI + * @oui_type: OUI type - P2P_VER + * @subtype: OUI subtype - P2P_TYPE_* + * @dialog_token: nonzero, identifies req/rsp transaction + * @elts: Variable length information elements. + */ +struct inff_p2p_pub_act_frame { + u8 category; + u8 action; + u8 oui[3]; + u8 oui_type; + u8 subtype; + u8 dialog_token; + u8 elts[]; +}; + +/** + * struct inff_p2p_action_frame - WiFi P2P Action Frame + * + * @category: P2P_AF_CATEGORY + * @oui: OUI - P2P_OUI + * @type: OUI Type - P2P_VER + * @subtype: OUI Subtype - P2P_AF_* + * @dialog_token: nonzero, identifies req/resp transaction + * @elts: Variable length information elements. 
+ */ +struct inff_p2p_action_frame { + u8 category; + u8 oui[3]; + u8 type; + u8 subtype; + u8 dialog_token; + u8 elts[]; +}; + +/** + * struct inff_p2psd_gas_pub_act_frame - Wi-Fi GAS Public Action Frame + * + * @category: 0x04 Public Action Frame + * @action: 0x6c Advertisement Protocol + * @dialog_token: nonzero, identifies req/rsp transaction + * @query_data: Query Data. SD gas ireq SD gas iresp + */ +struct inff_p2psd_gas_pub_act_frame { + u8 category; + u8 action; + u8 dialog_token; + u8 query_data[]; +}; + +/** + * struct inff_config_af_params - Action Frame Parameters for tx. + * + * @mpc_onoff: To make sure to send successfully action frame, we have to + * turn off mpc 0: off, 1: on, (-1): do nothing + * @search_channel: 1: search peer's channel to send af + * @extra_listen: keep the dwell time to get af response frame. + */ +struct inff_config_af_params { + s32 mpc_onoff; + bool search_channel; + bool extra_listen; +}; + +/** + * inff_p2p_is_pub_action() - true if p2p public type frame. + * + * @frame: action frame data. + * @frame_len: length of action frame data. + * + * Determine if action frame is p2p public action type + */ +static bool inff_p2p_is_pub_action(void *frame, u32 frame_len) +{ + struct inff_p2p_pub_act_frame *pact_frm; + + if (!frame) + return false; + + pact_frm = (struct inff_p2p_pub_act_frame *)frame; + if (frame_len < sizeof(*pact_frm)) + return false; + + if (pact_frm->category == P2P_PUB_AF_CATEGORY && + pact_frm->action == P2P_PUB_AF_ACTION && + pact_frm->oui_type == P2P_VER && + memcmp(pact_frm->oui, WFA_OUI, P2P_OUI_LEN) == 0) + return true; + + return false; +} + +/** + * inff_p2p_is_dpp_pub_action() - true if dpp public type frame. + * + * @frame: action frame data. + * @frame_len: length of action frame data. 
+ * + * Determine if action frame is dpp public action type + */ +static bool inff_p2p_is_dpp_pub_action(void *frame, u32 frame_len) +{ + struct inff_p2p_pub_act_frame *pact_frm; + + if (!frame) + return false; + + pact_frm = (struct inff_p2p_pub_act_frame *)frame; + if (frame_len < sizeof(struct inff_p2p_pub_act_frame) - 1) + return false; + + if (pact_frm->category == WLAN_CATEGORY_PUBLIC && + pact_frm->action == WLAN_PUB_ACTION_VENDOR_SPECIFIC && + pact_frm->oui_type == WLAN_OUI_TYPE_WFA_DPP && + memcmp(pact_frm->oui, WFA_OUI, TLV_OUI_LEN) == 0) + return true; + + return false; +} + +/** + * inff_p2p_is_p2p_action() - true if p2p action type frame. + * + * @frame: action frame data. + * @frame_len: length of action frame data. + * + * Determine if action frame is p2p action type + */ +static bool inff_p2p_is_p2p_action(void *frame, u32 frame_len) +{ + struct inff_p2p_action_frame *act_frm; + + if (!frame) + return false; + + act_frm = (struct inff_p2p_action_frame *)frame; + if (frame_len < sizeof(*act_frm)) + return false; + + if (act_frm->category == P2P_AF_CATEGORY && + act_frm->type == P2P_VER && + memcmp(act_frm->oui, P2P_OUI, P2P_OUI_LEN) == 0) + return true; + + return false; +} + +/** + * inff_p2p_is_gas_action() - true if p2p gas action type frame. + * + * @frame: action frame data. + * @frame_len: length of action frame data. 
+ * + * Determine if action frame is p2p gas action type + */ +static bool inff_p2p_is_gas_action(void *frame, u32 frame_len) +{ + struct inff_p2psd_gas_pub_act_frame *sd_act_frm; + + if (!frame) + return false; + + sd_act_frm = (struct inff_p2psd_gas_pub_act_frame *)frame; + if (frame_len < sizeof(*sd_act_frm)) + return false; + + if (sd_act_frm->category != P2PSD_ACTION_CATEGORY) + return false; + + if (sd_act_frm->action == P2PSD_ACTION_ID_GAS_IREQ || + sd_act_frm->action == P2PSD_ACTION_ID_GAS_IRESP || + sd_act_frm->action == P2PSD_ACTION_ID_GAS_CREQ || + sd_act_frm->action == P2PSD_ACTION_ID_GAS_CRESP) + return true; + + return false; +} + +/** + * inff_p2p_print_actframe() - debug print routine. + * + * @tx: Received or to be transmitted + * @frame: action frame data. + * @frame_len: length of action frame data. + * + * Print information about the p2p action frame + */ + +#ifdef DEBUG + +static void inff_p2p_print_actframe(bool tx, void *frame, u32 frame_len) +{ + struct inff_p2p_pub_act_frame *pact_frm; + struct inff_p2p_action_frame *act_frm; + struct inff_p2psd_gas_pub_act_frame *sd_act_frm; + + if (!frame || frame_len <= 2) + return; + + if (inff_p2p_is_pub_action(frame, frame_len)) { + pact_frm = (struct inff_p2p_pub_act_frame *)frame; + switch (pact_frm->subtype) { + case P2P_PAF_GON_REQ: + inff_dbg(TRACE, "%s P2P Group Owner Negotiation Req Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_GON_RSP: + inff_dbg(TRACE, "%s P2P Group Owner Negotiation Rsp Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_GON_CONF: + inff_dbg(TRACE, "%s P2P Group Owner Negotiation Confirm Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_INVITE_REQ: + inff_dbg(TRACE, "%s P2P Invitation Request Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_INVITE_RSP: + inff_dbg(TRACE, "%s P2P Invitation Response Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_DEVDIS_REQ: + inff_dbg(TRACE, "%s P2P Device Discoverability Request Frame\n", + (tx) ? 
"TX" : "RX"); + break; + case P2P_PAF_DEVDIS_RSP: + inff_dbg(TRACE, "%s P2P Device Discoverability Response Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_PROVDIS_REQ: + inff_dbg(TRACE, "%s P2P Provision Discovery Request Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_PAF_PROVDIS_RSP: + inff_dbg(TRACE, "%s P2P Provision Discovery Response Frame\n", + (tx) ? "TX" : "RX"); + break; + default: + inff_dbg(TRACE, "%s Unknown P2P Public Action Frame\n", + (tx) ? "TX" : "RX"); + break; + } + } else if (inff_p2p_is_p2p_action(frame, frame_len)) { + act_frm = (struct inff_p2p_action_frame *)frame; + switch (act_frm->subtype) { + case P2P_AF_NOTICE_OF_ABSENCE: + inff_dbg(TRACE, "%s P2P Notice of Absence Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_AF_PRESENCE_REQ: + inff_dbg(TRACE, "%s P2P Presence Request Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_AF_PRESENCE_RSP: + inff_dbg(TRACE, "%s P2P Presence Response Frame\n", + (tx) ? "TX" : "RX"); + break; + case P2P_AF_GO_DISC_REQ: + inff_dbg(TRACE, "%s P2P Discoverability Request Frame\n", + (tx) ? "TX" : "RX"); + break; + default: + inff_dbg(TRACE, "%s Unknown P2P Action Frame\n", + (tx) ? "TX" : "RX"); + } + + } else if (inff_p2p_is_gas_action(frame, frame_len)) { + sd_act_frm = (struct inff_p2psd_gas_pub_act_frame *)frame; + switch (sd_act_frm->action) { + case P2PSD_ACTION_ID_GAS_IREQ: + inff_dbg(TRACE, "%s P2P GAS Initial Request\n", + (tx) ? "TX" : "RX"); + break; + case P2PSD_ACTION_ID_GAS_IRESP: + inff_dbg(TRACE, "%s P2P GAS Initial Response\n", + (tx) ? "TX" : "RX"); + break; + case P2PSD_ACTION_ID_GAS_CREQ: + inff_dbg(TRACE, "%s P2P GAS Comeback Request\n", + (tx) ? "TX" : "RX"); + break; + case P2PSD_ACTION_ID_GAS_CRESP: + inff_dbg(TRACE, "%s P2P GAS Comeback Response\n", + (tx) ? "TX" : "RX"); + break; + default: + inff_dbg(TRACE, "%s Unknown P2P GAS Frame\n", + (tx) ? 
"TX" : "RX"); + break; + } + } +} + +#else + +static void inff_p2p_print_actframe(bool tx, void *frame, u32 frame_len) +{ +} + +#endif + +/** + * inff_p2p_set_firmware() - prepare firmware for peer-to-peer operation. + * + * @ifp: ifp to use for iovars (primary). + * @p2p_mac: mac address to configure for p2p_da_override + */ +static int inff_p2p_set_firmware(struct inff_if *ifp, u8 *p2p_mac) +{ + struct inff_pub *drvr = ifp->drvr; + s32 ret = 0, apsta = 0; + + ret = inff_fil_iovar_int_get(ifp, "apsta", &apsta); + if (ret) { + iphy_err(drvr, "failed to query apsta IOVAR"); + } else if (!apsta) { + if (inff_fil_cmd_int_set(ifp, INFF_C_DOWN, 1) || + inff_fil_iovar_int_set(ifp, "apsta", 1) || + inff_fil_cmd_int_set(ifp, INFF_C_UP, 1)) { + iphy_err(drvr, "failed to set apsta IOVAR"); + } + } + + /* In case of COB type, firmware has default mac address + * After Initializing firmware, we have to set current mac address to + * firmware for P2P device address. This must be done with discovery + * disabled. + */ + inff_fil_iovar_int_set(ifp, "p2p_disc", 0); + + ret = inff_fil_iovar_data_set(ifp, "p2p_da_override", p2p_mac, + ETH_ALEN); + if (ret) + iphy_err(drvr, "failed to update device address ret %d\n", ret); + + return ret; +} + +/** + * inff_p2p_generate_bss_mac() - derive mac addresses for P2P. + * + * @p2p: P2P specific data. + * @dev_addr: optional device address. + * + * P2P needs mac addresses for P2P device and interface. If no device + * address it specified, these are derived from a random ethernet + * address. + */ +static void inff_p2p_generate_bss_mac(struct inff_p2p_info *p2p, u8 *dev_addr) +{ + struct inff_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; + bool random_addr = false; + bool local_admin = false; + + if (!dev_addr || is_zero_ether_addr(dev_addr)) { + /* If the primary interface address is already locally + * administered, create a new random address. 
+ */ + if (pri_ifp->mac_addr[0] & 0x02) { + random_addr = true; + } else { + dev_addr = pri_ifp->mac_addr; + local_admin = true; + } + } + + /* Generate the P2P Device Address obtaining a random ethernet + * address with the locally administered bit set. + */ + if (random_addr) + eth_random_addr(p2p->dev_addr); + else + memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); + + if (local_admin) + p2p->dev_addr[0] |= 0x02; + + /* Generate the P2P Interface Address. If the discovery and connection + * BSSCFGs need to simultaneously co-exist, then this address must be + * different from the P2P Device Address, but also locally administered. + */ + memcpy(p2p->conn_int_addr, p2p->dev_addr, ETH_ALEN); + p2p->conn_int_addr[0] |= 0x02; + p2p->conn_int_addr[4] ^= 0x80; + + memcpy(p2p->conn2_int_addr, p2p->dev_addr, ETH_ALEN); + p2p->conn2_int_addr[0] |= 0x02; + p2p->conn2_int_addr[4] ^= 0x90; +} + +/** + * inff_p2p_scan_is_p2p_request() - is cfg80211 scan request a P2P scan. + * + * @request: the scan request as received from cfg80211. + * + * returns true if one of the ssids in the request matches the + * P2P wildcard ssid; otherwise returns false. + */ +static bool inff_p2p_scan_is_p2p_request(struct cfg80211_scan_request *request) +{ + struct cfg80211_ssid *ssids = request->ssids; + int i; + + for (i = 0; i < request->n_ssids; i++) { + if (ssids[i].ssid_len != INFF_P2P_WILDCARD_SSID_LEN) + continue; + + inff_dbg(INFO, "comparing ssid \"%s\"", ssids[i].ssid); + if (!memcmp(INFF_P2P_WILDCARD_SSID, ssids[i].ssid, + INFF_P2P_WILDCARD_SSID_LEN)) + return true; + } + return false; +} + +/** + * inff_p2p_set_discover_state - set discover state in firmware. + * + * @ifp: low-level interface object. + * @state: discover state to set. + * @chanspec: channel parameters (for state @WL_P2P_DISC_ST_LISTEN only). + * @listen_ms: duration to listen (for state @WL_P2P_DISC_ST_LISTEN only). 
+ */ +static s32 inff_p2p_set_discover_state(struct inff_if *ifp, u8 state, + u16 chanspec, u16 listen_ms) +{ + struct inff_p2p_disc_st_le discover_state; + s32 ret = 0; + + inff_dbg(TRACE, "enter\n"); + + discover_state.state = state; + discover_state.chspec = cpu_to_le16(chanspec); + discover_state.dwell = cpu_to_le16(listen_ms); + ret = inff_fil_bsscfg_data_set(ifp, "p2p_state", &discover_state, + sizeof(discover_state)); + return ret; +} + +/** + * inff_p2p_deinit_discovery() - disable P2P device discovery. + * + * @p2p: P2P specific data. + * + * Resets the discovery state and disables it in firmware. + */ +static s32 inff_p2p_deinit_discovery(struct inff_p2p_info *p2p) +{ + struct inff_cfg80211_vif *vif; + + inff_dbg(TRACE, "enter\n"); + + /* Set the discovery state to SCAN */ + vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + (void)inff_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0); + + /* Disable P2P discovery in the firmware */ + vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif; + (void)inff_fil_iovar_int_set(vif->ifp, "p2p_disc", 0); + + return 0; +} + +/** + * inff_p2p_enable_discovery() - initialize and configure discovery. + * + * @p2p: P2P specific data. + * + * Initializes the discovery device and configure the virtual interface. 
+ */ +static int inff_p2p_enable_discovery(struct inff_p2p_info *p2p) +{ + struct inff_pub *drvr = p2p->cfg->pub; + struct inff_cfg80211_vif *vif; + s32 ret = 0; + + inff_dbg(TRACE, "enter\n"); + vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + if (!vif) { + iphy_err(drvr, "P2P config device not available\n"); + ret = -EPERM; + goto exit; + } + + if (test_bit(INFF_P2P_STATUS_ENABLED, &p2p->status)) { + inff_dbg(INFO, "P2P config device already configured\n"); + goto exit; + } + + /* Re-initialize P2P Discovery in the firmware */ + vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif; + ret = inff_fil_iovar_int_set(vif->ifp, "p2p_disc", 1); + if (ret < 0) { + iphy_err(drvr, "set p2p_disc error\n"); + goto exit; + } + vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + ret = inff_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_SCAN, 0, 0); + if (ret < 0) { + iphy_err(drvr, "unable to set WL_P2P_DISC_ST_SCAN\n"); + goto exit; + } + + /* + * Set wsec to any non-zero value in the discovery bsscfg + * to ensure our P2P probe responses have the privacy bit + * set in the 802.11 WPA IE. Some peer devices may not + * initiate WPS with us if this bit is not set. + */ + ret = inff_fil_bsscfg_int_set(vif->ifp, "wsec", AES_ENABLED); + if (ret < 0) { + iphy_err(drvr, "wsec error %d\n", ret); + goto exit; + } + + set_bit(INFF_P2P_STATUS_ENABLED, &p2p->status); +exit: + return ret; +} + +/** + * inff_p2p_escan() - initiate a P2P scan. + * + * @p2p: P2P specific data. + * @num_chans: number of channels to scan. + * @chanspecs: channel parameters for @num_chans channels. + * @search_state: P2P discover state to use. + * @bss_type: type of P2P bss. 
+ */ +static s32 inff_p2p_escan(struct inff_p2p_info *p2p, u32 num_chans, + u16 chanspecs[], s32 search_state, + enum p2p_bss_type bss_type) +{ + struct inff_pub *drvr = p2p->cfg->pub; + s32 ret = 0; + s32 memsize = offsetof(struct inff_p2p_scan_le, + eparams.params_le.channel_list); + s32 nprobes; + s32 active; + u32 i; + u8 *memblk; + struct inff_cfg80211_vif *vif; + struct inff_p2p_scan_le *p2p_params; + struct inff_scan_params_le *sparams; + + memsize += num_chans * sizeof(__le16); + memblk = kzalloc(memsize, GFP_KERNEL); + if (!memblk) + return -ENOMEM; + + vif = p2p->bss_idx[bss_type].vif; + if (!vif) { + iphy_err(drvr, "no vif for bss type %d\n", bss_type); + ret = -EINVAL; + goto exit; + } + p2p_params = (struct inff_p2p_scan_le *)memblk; + sparams = &p2p_params->eparams.params_le; + + switch (search_state) { + case WL_P2P_DISC_ST_SEARCH: + /* + * If we in SEARCH STATE, we don't need to set SSID explicitly + * because dongle use P2P WILDCARD internally by default, use + * null ssid, which it is already due to kzalloc. + */ + break; + case WL_P2P_DISC_ST_SCAN: + /* + * wpa_supplicant has p2p_find command with type social or + * progressive. For progressive, we need to set the ssid to + * P2P WILDCARD because we just do broadcast scan unless + * setting SSID. + */ + sparams->ssid_le.SSID_len = + cpu_to_le32(INFF_P2P_WILDCARD_SSID_LEN); + memcpy(sparams->ssid_le.SSID, INFF_P2P_WILDCARD_SSID, + INFF_P2P_WILDCARD_SSID_LEN); + break; + default: + iphy_err(drvr, " invalid search state %d\n", search_state); + ret = -EINVAL; + goto exit; + } + + inff_p2p_set_discover_state(vif->ifp, search_state, 0, 0); + + /* + * set p2p scan parameters. 
+ */ + p2p_params->type = 'E'; + + /* determine the scan engine parameters */ + sparams->bss_type = DOT11_BSSTYPE_ANY; + sparams->scan_type = INFF_SCANTYPE_ACTIVE; + + eth_broadcast_addr(sparams->bssid); + sparams->home_time = cpu_to_le32(P2PAPI_SCAN_HOME_TIME_MS); + + /* + * SOCIAL_CHAN_CNT + 1 takes care of the Progressive scan + * supported by the supplicant. + */ + if (num_chans == SOCIAL_CHAN_CNT || num_chans == (SOCIAL_CHAN_CNT + 1)) + active = P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS; + else if (num_chans == AF_PEER_SEARCH_CNT) + active = P2PAPI_SCAN_AF_SEARCH_DWELL_TIME_MS; + else if (inff_get_vif_state_any(p2p->cfg, INFF_VIF_STATUS_CONNECTED)) + active = -1; + else + active = P2PAPI_SCAN_DWELL_TIME_MS; + + /* Override scan params to find a peer for a connection */ + if (num_chans == 1) { + active = WL_SCAN_CONNECT_DWELL_TIME_MS; + /* WAR to sync with presence period of VSDB GO. + * send probe request more frequently + */ + nprobes = active / WL_SCAN_JOIN_PROBE_INTERVAL_MS; + } else { + nprobes = active / P2PAPI_SCAN_NPROBS_TIME_MS; + } + + if (nprobes <= 0) + nprobes = 1; + + inff_dbg(INFO, "nprobes # %d, active_time %d\n", nprobes, active); + sparams->active_time = cpu_to_le32(active); + sparams->nprobes = cpu_to_le32(nprobes); + sparams->passive_time = cpu_to_le32(-1); + sparams->channel_num = cpu_to_le32(num_chans & + INFF_SCAN_PARAMS_COUNT_MASK); + for (i = 0; i < num_chans; i++) + sparams->channel_list[i] = cpu_to_le16(chanspecs[i]); + + /* set the escan specific parameters */ + p2p_params->eparams.version = cpu_to_le32(INFF_ESCAN_REQ_VERSION); + p2p_params->eparams.action = cpu_to_le16(WL_ESCAN_ACTION_START); + p2p_params->eparams.sync_id = cpu_to_le16(0x1234); + /* perform p2p scan on primary device */ + ret = inff_fil_bsscfg_data_set(vif->ifp, "p2p_scan", memblk, memsize); + if (!ret) + set_bit(INFF_SCAN_STATUS_BUSY, &p2p->cfg->scan_status); +exit: + kfree(memblk); + return ret; +} + +/** + * inff_p2p_run_escan() - escan callback for peer-to-peer. 
+ * + * @cfg: driver private data for cfg80211 interface. + * @ifp: interface control. + * @request: scan request from cfg80211. + * + * Determines the P2P discovery state based to scan request parameters and + * validates the channels in the request. + */ +static s32 inff_p2p_run_escan(struct inff_cfg80211_info *cfg, + struct inff_if *ifp, + struct cfg80211_scan_request *request) +{ + struct inff_p2p_info *p2p = &cfg->p2p; + struct inff_pub *drvr = cfg->pub; + s32 err = 0; + s32 search_state = WL_P2P_DISC_ST_SCAN; + struct inff_cfg80211_vif *vif; + struct net_device *dev = NULL; + int i, num_nodfs = 0; + u16 *chanspecs; + + inff_dbg(TRACE, "enter\n"); + + if (!request) { + err = -EINVAL; + goto exit; + } + + if (request->n_channels) { + chanspecs = kcalloc(request->n_channels, sizeof(*chanspecs), + GFP_KERNEL); + if (!chanspecs) { + err = -ENOMEM; + goto exit; + } + vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif; + if (vif) + dev = vif->wdev.netdev; + if (request->n_channels == 3 && + request->channels[0]->hw_value == SOCIAL_CHAN_1 && + request->channels[1]->hw_value == SOCIAL_CHAN_2 && + request->channels[2]->hw_value == SOCIAL_CHAN_3) { + /* SOCIAL CHANNELS 1, 6, 11 */ + search_state = WL_P2P_DISC_ST_SEARCH; + inff_dbg(INFO, "P2P SEARCH PHASE START\n"); + } else if (dev && vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) { + /* If you are already a GO, then do SEARCH only */ + inff_dbg(INFO, "Already a GO. Do SEARCH Only\n"); + search_state = WL_P2P_DISC_ST_SEARCH; + } else { + inff_dbg(INFO, "P2P SCAN STATE START\n"); + } + + /* + * no P2P scanning on passive or DFS channels. 
+ */ + for (i = 0; i < request->n_channels; i++) { + struct ieee80211_channel *chan = request->channels[i]; + + if (chan->flags & (IEEE80211_CHAN_RADAR | + IEEE80211_CHAN_NO_IR)) + continue; + + chanspecs[i] = channel_to_chanspec(&p2p->cfg->d11inf, + chan); + inff_dbg(INFO, "%d: chan=%d, channel spec=%x\n", + num_nodfs, chan->hw_value, chanspecs[i]); + num_nodfs++; + } + err = inff_p2p_escan(p2p, num_nodfs, chanspecs, search_state, + P2PAPI_BSSCFG_DEVICE); + kfree(chanspecs); + } +exit: + if (err) + iphy_err(drvr, "error (%d)\n", err); + return err; +} + +/** + * inff_p2p_find_listen_channel() - find listen channel in ie string. + * + * @ie: string of information elements. + * @ie_len: length of string. + * + * Scan ie for p2p ie and look for attribute 6 channel. If available determine + * channel and return it. + */ +static s32 inff_p2p_find_listen_channel(const u8 *ie, u32 ie_len) +{ + u8 channel_ie[5]; + s32 listen_channel; + s32 err; + + err = cfg80211_get_p2p_attr(ie, ie_len, + IEEE80211_P2P_ATTR_LISTEN_CHANNEL, + channel_ie, sizeof(channel_ie)); + if (err < 0) + return err; + + /* listen channel subel length format: */ + /* 3(country) + 1(op. class) + 1(chan num) */ + listen_channel = (s32)channel_ie[3 + 1]; + + if (listen_channel == SOCIAL_CHAN_1 || + listen_channel == SOCIAL_CHAN_2 || + listen_channel == SOCIAL_CHAN_3) { + inff_dbg(INFO, "Found my Listen Channel %d\n", listen_channel); + return listen_channel; + } + + return -EPERM; +} + +/** + * inff_p2p_scan_prep() - prepare scan based on request. + * + * @wiphy: wiphy device. + * @request: scan request from cfg80211. + * @vif: vif on which scan request is to be executed. + * + * Prepare the scan appropriately for type of scan requested. Overrides the + * escan .run() callback for peer-to-peer scanning. 
+ */ +int inff_p2p_scan_prep(struct wiphy *wiphy, + struct cfg80211_scan_request *request, + struct inff_cfg80211_vif *vif) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_p2p_info *p2p = &cfg->p2p; + int err = 0; + struct inff_chan ch; + + if (inff_p2p_scan_is_p2p_request(request)) { + /* find my listen channel */ + err = inff_p2p_find_listen_channel(request->ie, + request->ie_len); + if (err < 0) + return err; + + ch.band = INFF_CHAN_BAND_2G; + ch.bw = INFF_CHAN_BW_20; + ch.sb = INFF_CHAN_SB_NONE; + ch.chnum = err; + p2p->cfg->d11inf.encchspec(&ch); + p2p->afx_hdl.my_listen_chan = ch.chspec; + + clear_bit(INFF_P2P_STATUS_GO_NEG_PHASE, &p2p->status); + inff_dbg(INFO, "P2P: GO_NEG_PHASE status cleared\n"); + + err = inff_p2p_enable_discovery(p2p); + if (err) + return err; + + vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + + /* override .run_escan() callback. */ + cfg->escan_info.run = inff_p2p_run_escan; + } + err = inff_vif_set_mgmt_ie(vif, INFF_VNDR_IE_PRBREQ_FLAG, + request->ie, request->ie_len); + return err; +} + +/** + * inff_p2p_discover_listen() - set firmware to discover listen state. + * + * @p2p: p2p device. + * @chspec: chspec for discover listen. + * @duration: time in ms to stay on channel. 
+ *
+ * Return: 0 on success (or when a previous listen is still pending, see
+ * WAR note below); -EPERM when the discovery vif does not exist.
+ */
+static s32
+inff_p2p_discover_listen(struct inff_p2p_info *p2p, u16 chspec, u32 duration)
+{
+ struct inff_pub *drvr = p2p->cfg->pub;
+ struct inff_cfg80211_vif *vif;
+ s32 err = 0;
+
+ /* Listen state is entered on the P2P discovery bsscfg. */
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+ if (!vif) {
+ iphy_err(drvr, "Discovery is not set, so we have nothing to do\n");
+ err = -EPERM;
+ goto exit;
+ }
+
+ if (test_bit(INFF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status)) {
+ iphy_err(drvr, "Previous LISTEN is not completed yet\n");
+ /* WAR: prevent cookie mismatch in wpa_supplicant return OK */
+ goto exit;
+ }
+
+ err = inff_p2p_set_discover_state(vif->ifp, WL_P2P_DISC_ST_LISTEN,
+ chspec, (u16)duration);
+ if (!err) {
+ /* Only advance the cookie once firmware accepted the state. */
+ set_bit(INFF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status);
+ p2p->remain_on_channel_cookie++;
+ }
+exit:
+ return err;
+}
+
+/**
+ * inff_p2p_remain_on_channel() - put device on channel and stay there.
+ *
+ * @wiphy: wiphy device.
+ * @wdev: wireless device.
+ * @channel: channel to stay on.
+ * @duration: time in ms to remain on channel.
+ * @cookie: cookie.
+ *
+ * Return: 0 on success; otherwise the error from enabling discovery or
+ * entering listen state. cfg80211_ready_on_channel() is only signalled
+ * on the success path.
+ */
+int inff_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct ieee80211_channel *channel,
+ unsigned int duration, u64 *cookie)
+{
+ struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+ struct inff_p2p_info *p2p = &cfg->p2p;
+ s32 err;
+
+ err = inff_p2p_enable_discovery(p2p);
+ if (err)
+ goto exit;
+ err = inff_p2p_discover_listen(p2p, channel_to_chanspec(&cfg->d11inf, channel),
+ duration);
+ if (err)
+ goto exit;
+
+ /* NOTE(review): field name is misspelled ("remin"); it matches the
+ * struct definition used elsewhere in this patch, so it is kept as-is.
+ */
+ p2p->remin_on_channel_wdev = wdev;
+
+ memcpy(&p2p->remain_on_channel, channel, sizeof(*channel));
+ *cookie = p2p->remain_on_channel_cookie;
+ cfg80211_ready_on_channel(wdev, *cookie, channel, duration, GFP_KERNEL);
+
+exit:
+ return err;
+}
+
+/**
+ * inff_p2p_notify_listen_complete() - p2p listen has completed.
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, to make it usable for fweh event dispatcher.
+ * @data: payload of message. Not used.
+ *
+ * Return: always 0 (fweh event-handler signature).
+ */
+int inff_p2p_notify_listen_complete(struct inff_if *ifp,
+ const struct inff_event_msg *e,
+ void *data)
+{
+ struct inff_cfg80211_info *cfg = ifp->drvr->config;
+ struct inff_p2p_info *p2p = &cfg->p2p;
+ struct wireless_dev *wdev = p2p->remin_on_channel_wdev;
+
+ inff_dbg(TRACE, "Enter\n");
+ if (test_and_clear_bit(INFF_P2P_STATUS_DISCOVER_LISTEN,
+ &p2p->status)) {
+ if (test_and_clear_bit(INFF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ &p2p->status)) {
+ clear_bit(INFF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+ &p2p->status);
+ inff_dbg(INFO, "Listen DONE, wake up wait_next_af\n");
+ complete(&p2p->wait_next_af);
+ }
+
+ /* Fall back to the event vif's wdev when no remain-on-channel
+ * wdev was recorded (e.g. listen started internally).
+ */
+ wdev = p2p->remin_on_channel_wdev ?
+ p2p->remin_on_channel_wdev :
+ &ifp->vif->wdev;
+
+ cfg80211_remain_on_channel_expired(wdev,
+ p2p->remain_on_channel_cookie,
+ &p2p->remain_on_channel,
+ GFP_KERNEL);
+ p2p->remin_on_channel_wdev = NULL;
+ }
+ return 0;
+}
+
+/**
+ * inff_p2p_cancel_remain_on_channel() - cancel p2p listen state.
+ *
+ * @ifp: interface control.
+ *
+ * Moves discovery back to scan state and synthesizes a listen-complete
+ * notification so cfg80211 sees the remain-on-channel expire.
+ */
+void inff_p2p_cancel_remain_on_channel(struct inff_if *ifp)
+{
+ if (!ifp)
+ return;
+ inff_p2p_set_discover_state(ifp, WL_P2P_DISC_ST_SCAN, 0, 0);
+ inff_p2p_notify_listen_complete(ifp, NULL, NULL);
+}
+
+/**
+ * inff_p2p_act_frm_search() - search function for action frame.
+ *
+ * @p2p: p2p device.
+ * @chspec: chspec on which action frame is to be trasmitted.
+ *
+ * search function to reach at common channel to send action frame.
When + * channel is 0 then all social channels will be used to send af + */ +static s32 inff_p2p_act_frm_search(struct inff_p2p_info *p2p, u16 chspec) +{ + s32 err; + u32 channel_cnt; + u16 *default_chan_list; + u32 i; + struct inff_chan ch; + + inff_dbg(TRACE, "Enter\n"); + + if (chspec) + channel_cnt = AF_PEER_SEARCH_CNT; + else + channel_cnt = SOCIAL_CHAN_CNT; + default_chan_list = kcalloc(channel_cnt, sizeof(*default_chan_list), + GFP_KERNEL); + if (!default_chan_list) { + err = -ENOMEM; + goto exit; + } + + if (chspec) { + for (i = 0; i < channel_cnt; i++) + default_chan_list[i] = chspec; + } else { + ch.band = INFF_CHAN_BAND_2G; + ch.bw = INFF_CHAN_BW_20; + ch.chnum = SOCIAL_CHAN_1; + p2p->cfg->d11inf.encchspec(&ch); + default_chan_list[0] = ch.chspec; + ch.chnum = SOCIAL_CHAN_2; + p2p->cfg->d11inf.encchspec(&ch); + default_chan_list[1] = ch.chspec; + ch.chnum = SOCIAL_CHAN_3; + p2p->cfg->d11inf.encchspec(&ch); + default_chan_list[2] = ch.chspec; + } + err = inff_p2p_escan(p2p, channel_cnt, default_chan_list, + WL_P2P_DISC_ST_SEARCH, P2PAPI_BSSCFG_DEVICE); + kfree(default_chan_list); +exit: + return err; +} + +/** + * inff_p2p_afx_handler() - afx worker thread. + * + * @work: + * + */ +static void inff_p2p_afx_handler(struct work_struct *work) +{ + struct afx_hdl *afx_hdl = container_of(work, struct afx_hdl, afx_work); + struct inff_p2p_info *p2p = container_of(afx_hdl, + struct inff_p2p_info, + afx_hdl); + struct inff_pub *drvr = p2p->cfg->pub; + s32 err; + + if (!afx_hdl->is_active) + return; + + if (afx_hdl->is_listen && afx_hdl->my_listen_chan) + /* 100ms ~ 300ms */ + err = inff_p2p_discover_listen(p2p, afx_hdl->my_listen_chan, + 100 * get_random_u32_inclusive(1, 3)); + else + err = inff_p2p_act_frm_search(p2p, afx_hdl->peer_listen_chan); + + if (err) { + iphy_err(drvr, "ERROR occurred! 
value is (%d)\n", err); + if (test_bit(INFF_P2P_STATUS_FINDING_COMMON_CHANNEL, + &p2p->status)) + complete(&afx_hdl->act_frm_scan); + } +} + +/** + * inff_p2p_af_searching_channel() - search channel. + * + * @p2p: p2p device info struct. + * + */ +static u16 inff_p2p_af_searching_channel(struct inff_p2p_info *p2p) +{ + struct afx_hdl *afx_hdl = &p2p->afx_hdl; + struct inff_cfg80211_vif *pri_vif; + s32 retry; + + inff_dbg(TRACE, "Enter\n"); + + pri_vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif; + + reinit_completion(&afx_hdl->act_frm_scan); + set_bit(INFF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status); + afx_hdl->is_active = true; + afx_hdl->peer_chan = P2P_INVALID_CHANSPEC; + + /* Loop to wait until we find a peer's channel or the + * pending action frame tx is cancelled. + */ + retry = 0; + while ((retry < P2P_CHANNEL_SYNC_RETRY) && + (afx_hdl->peer_chan == P2P_INVALID_CHANSPEC)) { + afx_hdl->is_listen = false; + inff_dbg(TRACE, "Scheduling action frame for sending.. (%d)\n", + retry); + /* search peer on peer's listen channel */ + schedule_work(&afx_hdl->afx_work); + wait_for_completion_timeout(&afx_hdl->act_frm_scan, + P2P_AF_FRM_SCAN_MAX_WAIT); + if (afx_hdl->peer_chan != P2P_INVALID_CHANSPEC || + (!test_bit(INFF_P2P_STATUS_FINDING_COMMON_CHANNEL, + &p2p->status))) + break; + + if (afx_hdl->my_listen_chan) { + inff_dbg(TRACE, "Scheduling listen peer, chanspec=0x%04x\n", + afx_hdl->my_listen_chan); + /* listen on my listen channel */ + afx_hdl->is_listen = true; + schedule_work(&afx_hdl->afx_work); + wait_for_completion_timeout(&afx_hdl->act_frm_scan, + P2P_AF_FRM_SCAN_MAX_WAIT); + } + if (afx_hdl->peer_chan != P2P_INVALID_CHANSPEC || + (!test_bit(INFF_P2P_STATUS_FINDING_COMMON_CHANNEL, + &p2p->status))) + break; + retry++; + + /* if sta is connected or connecting, sleep for a while before + * retry af tx or finding a peer + */ + if (test_bit(INFF_VIF_STATUS_CONNECTED, &pri_vif->sme_state) || + test_bit(INFF_VIF_STATUS_CONNECTING, &pri_vif->sme_state)) + 
msleep(P2P_DEFAULT_SLEEP_TIME_VSDB); + } + + inff_dbg(TRACE, "Completed search/listen peer_chan=0x%4x\n", + afx_hdl->peer_chan); + afx_hdl->is_active = false; + + clear_bit(INFF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status); + + return afx_hdl->peer_chan; +} + +/** + * inff_p2p_scan_finding_common_channel() - was escan used for finding channel + * + * @cfg: common configuration struct. + * @bi: bss info struct, result from scan. + * + */ +bool inff_p2p_scan_finding_common_channel(struct inff_cfg80211_info *cfg, + struct inff_bss_info_le *bi) + +{ + struct inff_p2p_info *p2p = &cfg->p2p; + struct afx_hdl *afx_hdl = &p2p->afx_hdl; + u8 *ie; + s32 err; + u8 p2p_dev_addr[ETH_ALEN]; + + if (!test_bit(INFF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status)) + return false; + + if (!bi) { + inff_dbg(TRACE, "ACTION FRAME SCAN Done\n"); + if (afx_hdl->peer_chan == P2P_INVALID_CHANSPEC) + complete(&afx_hdl->act_frm_scan); + return true; + } + + ie = ((u8 *)bi) + le16_to_cpu(bi->ie_offset); + memset(p2p_dev_addr, 0, sizeof(p2p_dev_addr)); + err = cfg80211_get_p2p_attr(ie, le32_to_cpu(bi->ie_length), + IEEE80211_P2P_ATTR_DEVICE_INFO, + p2p_dev_addr, sizeof(p2p_dev_addr)); + if (err < 0) + err = cfg80211_get_p2p_attr(ie, le32_to_cpu(bi->ie_length), + IEEE80211_P2P_ATTR_DEVICE_ID, + p2p_dev_addr, sizeof(p2p_dev_addr)); + if (err >= 0 && + (ether_addr_equal(p2p_dev_addr, afx_hdl->tx_dst_addr))) { + afx_hdl->peer_chan = le16_to_cpu(bi->chanspec); + inff_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, chanspec : 0x%04x\n", + afx_hdl->tx_dst_addr, afx_hdl->peer_chan); + complete(&afx_hdl->act_frm_scan); + } + return true; +} + +/** + * inff_p2p_abort_action_frame() - abort action frame. + * + * @cfg: common configuration struct. 
+ *
+ * Return: 0 on success; otherwise the error from the "actframe_abort"
+ * iovar set.
+ */
+static s32 inff_p2p_abort_action_frame(struct inff_cfg80211_info *cfg)
+{
+ struct inff_p2p_info *p2p = &cfg->p2p;
+ struct inff_cfg80211_vif *vif;
+ s32 err;
+ s32 int_val = 1;
+
+ inff_dbg(TRACE, "Enter\n");
+
+ /* Prefer the discovery vif; fall back to the primary one. */
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
+
+ if (!vif)
+ vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif;
+
+ err = inff_fil_bsscfg_data_set(vif->ifp, "actframe_abort", &int_val,
+ sizeof(s32));
+ if (err)
+ inff_err(" aborting action frame has failed (%d)\n", err);
+
+ return err;
+}
+
+/**
+ * inff_p2p_stop_wait_next_action_frame() - finish scan if af tx complete.
+ *
+ * @cfg: common configuration struct.
+ *
+ */
+static void
+inff_p2p_stop_wait_next_action_frame(struct inff_cfg80211_info *cfg)
+{
+ struct inff_p2p_info *p2p = &cfg->p2p;
+ struct inff_if *ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+ s32 err;
+
+ if (test_bit(INFF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) &&
+ (test_bit(INFF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) ||
+ test_bit(INFF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status))) {
+ inff_dbg(TRACE, "*** Wake UP ** abort actframe iovar\n");
+ /* if channel is not zero, "actframe" uses off channel scan.
+ * So abort scan for off channel completion.
+ */
+ if (p2p->af_sent_channel) {
+ /* abort actframe using actframe_abort or abort scan */
+ err = inff_p2p_abort_action_frame(cfg);
+ if (err)
+ inff_notify_escan_complete(cfg, ifp, true,
+ true);
+ }
+ } else if (test_bit(INFF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+ &p2p->status)) {
+ inff_dbg(TRACE, "*** Wake UP ** abort listen for next af frame\n");
+ /* So abort scan to cancel listen */
+ inff_notify_escan_complete(cfg, ifp, true, true);
+ }
+}
+
+/**
+ * inff_p2p_gon_req_collision() - Check if go negotiation collision
+ *
+ * @p2p: p2p device info struct.
+ * @mac: MAC address.
+ *
+ * return true if received action frame is to be dropped.
+ */
+static bool
+inff_p2p_gon_req_collision(struct inff_p2p_info *p2p, u8 *mac)
+{
+ struct inff_cfg80211_info *cfg = p2p->cfg;
+ struct inff_if *ifp;
+
+ inff_dbg(TRACE, "Enter\n");
+
+ /* No collision unless we are waiting for a response to our own
+ * GO negotiation request.
+ */
+ if (!test_bit(INFF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) ||
+ !p2p->gon_req_action)
+ return false;
+
+ inff_dbg(TRACE, "GO Negotiation Request COLLISION !!!\n");
+ /* if sa(peer) addr is less than da(my) addr, then this device
+ * process peer's gon request and block to send gon req.
+ * if not (sa addr > da addr),
+ * this device will process gon request and drop gon req of peer.
+ */
+ ifp = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->ifp;
+ if (memcmp(mac, ifp->mac_addr, ETH_ALEN) < 0) {
+ inff_dbg(INFO, "Block transmit gon req !!!\n");
+ p2p->block_gon_req_tx = true;
+ /* if we are finding a common channel for sending af,
+ * do not scan more to block to send current gon req
+ */
+ if (test_and_clear_bit(INFF_P2P_STATUS_FINDING_COMMON_CHANNEL,
+ &p2p->status))
+ complete(&p2p->afx_hdl.act_frm_scan);
+ if (test_and_clear_bit(INFF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+ &p2p->status))
+ inff_p2p_stop_wait_next_action_frame(cfg);
+ return false;
+ }
+
+ /* drop gon request of peer to process gon request by this device. */
+ inff_dbg(INFO, "Drop received gon req !!!\n");
+
+ return true;
+}
+
+/**
+ * inff_p2p_notify_action_frame_rx() - received action frame.
+ *
+ * @ifp: interface control.
+ * @e: event message. Not used, to make it usable for fweh event dispatcher.
+ * @data: payload of message, containing action frame data.
+ * + */ +int inff_p2p_notify_action_frame_rx(struct inff_if *ifp, + const struct inff_event_msg *e, + void *data) +{ + struct inff_pub *drvr = ifp->drvr; + struct inff_cfg80211_info *cfg = drvr->config; + struct inff_p2p_info *p2p = &cfg->p2p; + struct afx_hdl *afx_hdl = &p2p->afx_hdl; + struct wireless_dev *wdev; + u32 mgmt_frame_len = e->datalen - sizeof(struct inff_rx_mgmt_data); + struct inff_rx_mgmt_data *rxframe = (struct inff_rx_mgmt_data *)data; + u8 *frame = (u8 *)(rxframe + 1); + struct inff_p2p_pub_act_frame *act_frm; + struct inff_p2psd_gas_pub_act_frame *sd_act_frm; + struct inff_chan ch; + struct ieee80211_mgmt *mgmt_frame; + s32 freq; + u16 mgmt_type; + u8 action; + + if (e->datalen < sizeof(*rxframe)) { + inff_dbg(SCAN, "Event data too small. Ignore\n"); + return 0; + } + + ch.chspec = be16_to_cpu(rxframe->chanspec); + cfg->d11inf.decchspec(&ch); + /* Check if wpa_supplicant has registered for this frame */ + inff_dbg(INFO, "ifp->vif->mgmt_rx_reg %04x\n", ifp->vif->mgmt_rx_reg); + mgmt_type = (IEEE80211_STYPE_ACTION & IEEE80211_FCTL_STYPE) >> 4; + if ((ifp->vif->mgmt_rx_reg & BIT(mgmt_type)) == 0) + return 0; + + inff_p2p_print_actframe(false, frame, mgmt_frame_len); + + action = P2P_PAF_SUBTYPE_INVALID; + if (inff_p2p_is_pub_action(frame, mgmt_frame_len)) { + act_frm = (struct inff_p2p_pub_act_frame *)frame; + action = act_frm->subtype; + if (action == P2P_PAF_GON_REQ && + (inff_p2p_gon_req_collision(p2p, (u8 *)e->addr))) { + if (test_bit(INFF_P2P_STATUS_FINDING_COMMON_CHANNEL, + &p2p->status) && + (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) { + afx_hdl->peer_chan = be16_to_cpu(rxframe->chanspec); + inff_dbg(INFO, "GON request: Peer found, chanspec=0x%04x\n", + afx_hdl->peer_chan); + complete(&afx_hdl->act_frm_scan); + } + return 0; + } + /* After complete GO Negotiation, roll back to mpc mode */ + if (action == P2P_PAF_GON_CONF || + action == P2P_PAF_PROVDIS_RSP) + inff_set_mpc(ifp, 1); + if (action == P2P_PAF_GON_CONF) { + inff_dbg(TRACE, 
"P2P: GO_NEG_PHASE status cleared\n"); + clear_bit(INFF_P2P_STATUS_GO_NEG_PHASE, &p2p->status); + } + } else if (inff_p2p_is_gas_action(frame, mgmt_frame_len)) { + sd_act_frm = (struct inff_p2psd_gas_pub_act_frame *)frame; + action = sd_act_frm->action; + } + + if (test_bit(INFF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) && + p2p->next_af_subtype == action) { + inff_dbg(TRACE, "We got a right next frame! (%d)\n", action); + clear_bit(INFF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, + &p2p->status); + /* Stop waiting for next AF. */ + inff_p2p_stop_wait_next_action_frame(cfg); + } + + mgmt_frame = kzalloc(offsetof(struct ieee80211_mgmt, u) + + mgmt_frame_len, GFP_KERNEL); + if (!mgmt_frame) + return -ENOMEM; + + memcpy(mgmt_frame->da, ifp->mac_addr, ETH_ALEN); + inff_fil_cmd_data_get(ifp, INFF_C_GET_BSSID, mgmt_frame->bssid, + ETH_ALEN); + memcpy(mgmt_frame->sa, e->addr, ETH_ALEN); + mgmt_frame->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION); + memcpy(mgmt_frame->u.body, frame, mgmt_frame_len); + mgmt_frame_len += offsetof(struct ieee80211_mgmt, u.body); + + freq = ieee80211_channel_to_frequency(ch.control_ch_num, + inff_d11_chan_band_to_nl80211(ch.band)); + + wdev = &ifp->vif->wdev; + cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0); + + kfree(mgmt_frame); + return 0; +} + +/** + * inff_p2p_notify_action_tx_complete() - transmit action frame complete + * + * @ifp: interfac control. + * @e: event message. Not used, to make it usable for fweh event dispatcher. + * @data: not used. + * + */ +int inff_p2p_notify_action_tx_complete(struct inff_if *ifp, + const struct inff_event_msg *e, + void *data) +{ + struct inff_cfg80211_info *cfg = ifp->drvr->config; + struct inff_p2p_info *p2p = &cfg->p2p; + + inff_dbg(INFO, "Enter: event %s, status=%d\n", + e->event_code == INFF_E_ACTION_FRAME_OFF_CHAN_COMPLETE ? 
+ "ACTION_FRAME_OFF_CHAN_COMPLETE" : "ACTION_FRAME_COMPLETE", + e->status); + + if (!test_bit(INFF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status)) + return 0; + + if (e->event_code == INFF_E_ACTION_FRAME_COMPLETE) { + if (e->status == INFF_E_STATUS_SUCCESS) { + set_bit(INFF_P2P_STATUS_ACTION_TX_COMPLETED, + &p2p->status); + if (!p2p->wait_for_offchan_complete) + complete(&p2p->send_af_done); + } else { + set_bit(INFF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status); + /* If there is no ack, we don't need to wait for + * WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE event + */ + inff_p2p_stop_wait_next_action_frame(cfg); + } + + } else { + complete(&p2p->send_af_done); + } + return 0; +} + +/** + * inff_p2p_tx_action_frame() - send action frame over fil. + * + * @p2p: p2p info struct for vif. + * @vif: vif to send. + * @af_params: action frame data/info. + * @band: nl80211 band info. + * + * Send an action frame immediately without doing channel synchronization. + * + * This function waits for a completion event before returning. + * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action + * frame is transmitted. 
+ */ +static s32 inff_p2p_tx_action_frame(struct inff_p2p_info *p2p, + struct inff_cfg80211_vif *vif, + struct inff_fil_af_params_le *af_params, + u8 band) +{ + struct inff_pub *drvr = p2p->cfg->pub; + s32 err = 0; + struct inff_fil_af_params_v2_le *af_params_v2; + + inff_dbg(TRACE, "Enter\n"); + + reinit_completion(&p2p->send_af_done); + clear_bit(INFF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status); + clear_bit(INFF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status); + + if (drvr->wlc_ver.wlc_ver_major == INFF_AF_PARAM_V2_FW_MAJOR && + drvr->wlc_ver.wlc_ver_minor >= INFF_AF_PARAM_V2_FW_MINOR) { + af_params_v2 = kzalloc(sizeof(*af_params_v2), GFP_KERNEL); + if (!af_params_v2) { + err = -ENOMEM; + goto exit; + } + + /* set actframe iovar with af_params_v2 */ + af_params_v2->band = nl80211_band_to_fwil(band); + af_params_v2->channel = af_params->channel; + af_params_v2->dwell_time = af_params->dwell_time; + memcpy(af_params_v2->bssid, af_params->bssid, ETH_ALEN); + memcpy(&af_params_v2->action_frame, &af_params->action_frame, + sizeof(af_params_v2->action_frame)); + + err = inff_fil_bsscfg_data_set(vif->ifp, "actframe", af_params_v2, + sizeof(*af_params_v2)); + kfree(af_params_v2); + } else { + /* set actframe iovar with af_params */ + err = inff_fil_bsscfg_data_set(vif->ifp, "actframe", af_params, + sizeof(*af_params)); + } + + if (err) { + iphy_err(drvr, " sending action frame has failed\n"); + goto exit; + } + + p2p->af_sent_channel = le32_to_cpu(af_params->channel); + p2p->af_tx_sent_jiffies = jiffies; + + if (test_bit(INFF_P2P_STATUS_DISCOVER_LISTEN, &p2p->status) && + p2p->af_sent_channel == + ieee80211_frequency_to_channel(p2p->remain_on_channel.center_freq)) + p2p->wait_for_offchan_complete = false; + else + p2p->wait_for_offchan_complete = true; + + inff_dbg(TRACE, "Waiting for %s tx completion event\n", + (p2p->wait_for_offchan_complete) ? 
+ "off-channel" : "on-channel"); + + wait_for_completion_timeout(&p2p->send_af_done, P2P_AF_MAX_WAIT_TIME); + + if (test_bit(INFF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status)) { + inff_dbg(TRACE, "TX action frame operation is success\n"); + } else { + err = -EIO; + inff_dbg(TRACE, "TX action frame operation has failed\n"); + } + /* clear status bit for action tx */ + clear_bit(INFF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status); + clear_bit(INFF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status); + +exit: + return err; +} + +/** + * inff_p2p_pub_af_tx() - public action frame tx routine. + * + * @cfg: driver private data for cfg80211 interface. + * @af_params: action frame data/info. + * @config_af_params: configuration data for action frame. + * + * routine which transmits ation frame public type. + */ +static s32 inff_p2p_pub_af_tx(struct inff_cfg80211_info *cfg, + struct inff_fil_af_params_le *af_params, + struct inff_config_af_params *config_af_params) +{ + struct inff_p2p_info *p2p = &cfg->p2p; + struct inff_pub *drvr = cfg->pub; + struct inff_fil_action_frame_le *action_frame; + struct inff_p2p_pub_act_frame *act_frm; + s32 err = 0; + u16 ie_len; + + action_frame = &af_params->action_frame; + act_frm = (struct inff_p2p_pub_act_frame *)(action_frame->data); + + config_af_params->extra_listen = true; + + switch (act_frm->subtype) { + case P2P_PAF_GON_REQ: + inff_dbg(TRACE, "P2P: GO_NEG_PHASE status set\n"); + set_bit(INFF_P2P_STATUS_GO_NEG_PHASE, &p2p->status); + config_af_params->mpc_onoff = 0; + config_af_params->search_channel = true; + p2p->next_af_subtype = act_frm->subtype + 1; + p2p->gon_req_action = true; + /* increase dwell time to wait for RESP frame */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME); + break; + case P2P_PAF_GON_RSP: + p2p->next_af_subtype = act_frm->subtype + 1; + /* increase dwell time to wait for CONF frame */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME); + break; + case P2P_PAF_GON_CONF: + /* If we reached till 
GO Neg confirmation reset the filter */ + inff_dbg(TRACE, "P2P: GO_NEG_PHASE status cleared\n"); + clear_bit(INFF_P2P_STATUS_GO_NEG_PHASE, &p2p->status); + /* turn on mpc again if go nego is done */ + config_af_params->mpc_onoff = 1; + /* minimize dwell time */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME); + config_af_params->extra_listen = false; + break; + case P2P_PAF_INVITE_REQ: + config_af_params->search_channel = true; + p2p->next_af_subtype = act_frm->subtype + 1; + /* increase dwell time */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME); + break; + case P2P_PAF_INVITE_RSP: + /* minimize dwell time */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME); + config_af_params->extra_listen = false; + break; + case P2P_PAF_DEVDIS_REQ: + config_af_params->search_channel = true; + p2p->next_af_subtype = act_frm->subtype + 1; + /* maximize dwell time to wait for RESP frame */ + af_params->dwell_time = cpu_to_le32(P2P_AF_LONG_DWELL_TIME); + break; + case P2P_PAF_DEVDIS_RSP: + /* minimize dwell time */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MIN_DWELL_TIME); + config_af_params->extra_listen = false; + break; + case P2P_PAF_PROVDIS_REQ: + ie_len = le16_to_cpu(action_frame->len) - + offsetof(struct inff_p2p_pub_act_frame, elts); + if (cfg80211_get_p2p_attr(&act_frm->elts[0], ie_len, + IEEE80211_P2P_ATTR_GROUP_ID, + NULL, 0) < 0) + config_af_params->search_channel = true; + config_af_params->mpc_onoff = 0; + p2p->next_af_subtype = act_frm->subtype + 1; + /* increase dwell time to wait for RESP frame */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME); + break; + case P2P_PAF_PROVDIS_RSP: + /* wpa_supplicant send go nego req right after prov disc */ + p2p->next_af_subtype = P2P_PAF_GON_REQ; + /* increase dwell time to MED level */ + af_params->dwell_time = cpu_to_le32(P2P_AF_MED_DWELL_TIME); + config_af_params->extra_listen = false; + break; + default: + iphy_err(drvr, "Unknown p2p pub act frame subtype: %d\n", + 
act_frm->subtype); + err = -EINVAL; + } + return err; +} + +static bool inff_p2p_check_dwell_overflow(u32 requested_dwell, + unsigned long dwell_jiffies) +{ + if ((requested_dwell & CUSTOM_RETRY_MASK) && + (jiffies_to_msecs(jiffies - dwell_jiffies) > + (requested_dwell & ~CUSTOM_RETRY_MASK))) { + inff_err("Action frame TX retry time over dwell time!\n"); + return true; + } + return false; +} + +/** + * inff_p2p_send_action_frame() - send action frame . + * + * @cfg: driver private data for cfg80211 interface. + * @ndev: net device to transmit on. + * @af_params: configuration data for action frame. + * @vif: virtual interface to send + */ +bool inff_p2p_send_action_frame(struct inff_cfg80211_info *cfg, + struct net_device *ndev, + struct inff_fil_af_params_le *af_params, + struct inff_cfg80211_vif *vif, + struct ieee80211_channel *peer_listen_chan) +{ + struct inff_p2p_info *p2p = &cfg->p2p; + struct inff_if *ifp = netdev_priv(ndev); + struct inff_fil_action_frame_le *action_frame; + struct inff_config_af_params config_af_params; + struct afx_hdl *afx_hdl = &p2p->afx_hdl; + struct inff_pub *drvr = cfg->pub; + struct inff_chan ch; + u16 action_frame_len; + bool ack = false; + u8 category; + u8 action; + s32 tx_retry; + s32 extra_listen_time; + uint delta_ms; + unsigned long dwell_jiffies = 0; + bool dwell_overflow = false; + u32 requested_dwell = le32_to_cpu(af_params->dwell_time); + + action_frame = &af_params->action_frame; + action_frame_len = le16_to_cpu(action_frame->len); + + inff_p2p_print_actframe(true, action_frame->data, action_frame_len); + + /* Add the default dwell time. 
Dwell time to stay off-channel */ + /* to wait for a response action frame after transmitting an */ + /* GO Negotiation action frame */ + af_params->dwell_time = cpu_to_le32(P2P_AF_DWELL_TIME); + + category = action_frame->data[DOT11_ACTION_CAT_OFF]; + action = action_frame->data[DOT11_ACTION_ACT_OFF]; + + /* initialize variables */ + p2p->next_af_subtype = P2P_PAF_SUBTYPE_INVALID; + p2p->gon_req_action = false; + + /* config parameters */ + config_af_params.mpc_onoff = -1; + config_af_params.search_channel = false; + config_af_params.extra_listen = false; + + if (inff_p2p_is_pub_action(action_frame->data, action_frame_len)) { + /* p2p public action frame process */ + if (inff_p2p_pub_af_tx(cfg, af_params, &config_af_params)) { + /* Just send unknown subtype frame with */ + /* default parameters. */ + iphy_err(drvr, "P2P Public action frame, unknown subtype.\n"); + } + } else if (inff_p2p_is_gas_action(action_frame->data, + action_frame_len)) { + /* service discovery process */ + if (action == P2PSD_ACTION_ID_GAS_IREQ || + action == P2PSD_ACTION_ID_GAS_CREQ) { + /* configure service discovery query frame */ + config_af_params.search_channel = true; + + /* save next af suptype to cancel */ + /* remaining dwell time */ + p2p->next_af_subtype = action + 1; + + af_params->dwell_time = + cpu_to_le32(P2P_AF_MED_DWELL_TIME); + } else if (action == P2PSD_ACTION_ID_GAS_IRESP || + action == P2PSD_ACTION_ID_GAS_CRESP) { + /* configure service discovery response frame */ + af_params->dwell_time = + cpu_to_le32(P2P_AF_MIN_DWELL_TIME); + } else { + iphy_err(drvr, "Unknown action type: %d\n", action); + goto exit; + } + } else if (inff_p2p_is_p2p_action(action_frame->data, + action_frame_len) || + inff_p2p_is_dpp_pub_action(action_frame->data, + action_frame_len)) { + /* do not configure anything. 
it will be */ + /* sent with a default configuration */ + } else { + iphy_err(drvr, "Unknown Frame: category 0x%x, action 0x%x\n", + category, action); + return false; + } + + /* if connecting on primary iface, sleep for a while before sending + * af tx for VSDB + */ + if (test_bit(INFF_VIF_STATUS_CONNECTING, + &p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->sme_state)) + msleep(50); + + /* if scan is ongoing, abort current scan. */ + if (test_bit(INFF_SCAN_STATUS_BUSY, &cfg->scan_status)) + inff_abort_scanning(cfg); + + memcpy(afx_hdl->tx_dst_addr, action_frame->da, ETH_ALEN); + + /* To make sure to send successfully action frame, turn off mpc */ + if (config_af_params.mpc_onoff == 0) + inff_set_mpc(ifp, 0); + + /* set status and destination address before sending af */ + if (p2p->next_af_subtype != P2P_PAF_SUBTYPE_INVALID) { + /* set status to cancel the remained dwell time in rx process */ + set_bit(INFF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status); + } + + p2p->af_sent_channel = 0; + set_bit(INFF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status); + /* validate channel and p2p ies */ + if (config_af_params.search_channel && + IS_P2P_SOCIAL_CHANNEL(le32_to_cpu(af_params->channel)) && + p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif && + p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif->saved_ie.probe_req_ie_len) { + afx_hdl = &p2p->afx_hdl; + afx_hdl->peer_listen_chan = channel_to_chanspec(&cfg->d11inf, peer_listen_chan); + + if (inff_p2p_af_searching_channel(p2p) == + P2P_INVALID_CHANSPEC) { + iphy_err(drvr, "Couldn't find peer's channel.\n"); + goto exit; + } + + /* Abort scan even for VSDB scenarios. Scan gets aborted in + * firmware but after the check of piggyback algorithm. To take + * care of current piggback algo, lets abort the scan here + * itself. 
+	 */
+	inff_notify_escan_complete(cfg, ifp, true, true);
+
+	/* update channel */
+	ch.chspec = afx_hdl->peer_chan;
+	cfg->d11inf.decchspec(&ch);
+	af_params->channel = cpu_to_le32(ch.control_ch_num);
+	}
+	dwell_jiffies = jiffies;
+	dwell_overflow = inff_p2p_check_dwell_overflow(requested_dwell,
+						       dwell_jiffies);
+
+	tx_retry = 0;
+	while (!p2p->block_gon_req_tx &&
+	       (!ack) && (tx_retry < P2P_AF_TX_MAX_RETRY) &&
+	       !dwell_overflow) {
+		if (af_params->channel)
+			msleep(P2P_AF_RETRY_DELAY_TIME);
+
+		ack = !inff_p2p_tx_action_frame(p2p, vif, af_params, peer_listen_chan->band);
+		tx_retry++;
+		dwell_overflow = inff_p2p_check_dwell_overflow(requested_dwell,
+							       dwell_jiffies);
+	}
+	if (!ack) {
+		iphy_err(drvr, "Failed to send Action Frame(retry %d)\n",
+			 tx_retry);
+		clear_bit(INFF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
+	}
+
+exit:
+	clear_bit(INFF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status);
+
+	/* WAR: sometimes dongle does not keep the dwell time of 'actframe'.
+	 * if we couldn't get the next action response frame and dongle does
+	 * not keep the dwell time, go to listen state again to get next action
+	 * response frame.
+	 */
+	ch.chspec = afx_hdl->my_listen_chan;
+	cfg->d11inf.decchspec(&ch);
+	if (ack && config_af_params.extra_listen && !p2p->block_gon_req_tx &&
+	    test_bit(INFF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status) &&
+	    p2p->af_sent_channel == ch.control_ch_num) {
+		delta_ms = jiffies_to_msecs(jiffies - p2p->af_tx_sent_jiffies);
+		/* extra listen = whatever is left of the requested dwell time */
+		if (le32_to_cpu(af_params->dwell_time) > delta_ms)
+			extra_listen_time = le32_to_cpu(af_params->dwell_time) -
+					    delta_ms;
+		else
+			extra_listen_time = 0;
+		if (extra_listen_time > 50) {
+			set_bit(INFF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+				&p2p->status);
+			inff_dbg(INFO, "Wait more time! actual af time:%d, calculated extra listen:%d\n",
+				 le32_to_cpu(af_params->dwell_time),
+				 extra_listen_time);
+			extra_listen_time += 100;
+			if (!inff_p2p_discover_listen(p2p,
+						      afx_hdl->my_listen_chan,
+						      extra_listen_time)) {
+				unsigned long duration;
+
+				extra_listen_time += 100;
+				duration = msecs_to_jiffies(extra_listen_time);
+				wait_for_completion_timeout(&p2p->wait_next_af,
+							    duration);
+			}
+			clear_bit(INFF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+				  &p2p->status);
+		}
+	}
+
+	if (p2p->block_gon_req_tx) {
+		/* if ack is true, supplicant will wait more time(100ms).
+		 * so we will return it as a success to get more time .
+		 */
+		p2p->block_gon_req_tx = false;
+		ack = true;
+	}
+
+	clear_bit(INFF_P2P_STATUS_WAITING_NEXT_ACT_FRAME, &p2p->status);
+	/* if all done, turn mpc on again */
+	if (config_af_params.mpc_onoff == 1)
+		inff_set_mpc(ifp, 1);
+
+	return ack;
+}
+
+/**
+ * inff_p2p_notify_rx_mgmt_p2p_probereq() - Event handler for p2p probe req.
+ *
+ * @ifp: interface pointer for which event was received.
+ * @e: event message.
+ * @data: payload of event message (probe request).
+ *
+ * Return: always 0; frames not of interest are silently ignored.
+ */
+s32 inff_p2p_notify_rx_mgmt_p2p_probereq(struct inff_if *ifp,
+					 const struct inff_event_msg *e,
+					 void *data)
+{
+	struct inff_cfg80211_info *cfg = ifp->drvr->config;
+	struct inff_p2p_info *p2p = &cfg->p2p;
+	struct afx_hdl *afx_hdl = &p2p->afx_hdl;
+	struct inff_cfg80211_vif *vif = ifp->vif;
+	struct inff_rx_mgmt_data *rxframe = (struct inff_rx_mgmt_data *)data;
+	struct inff_chan ch;
+	u8 *mgmt_frame;
+	u32 mgmt_frame_len;
+	s32 freq;
+	u16 mgmt_type;
+
+	inff_dbg(INFO, "Enter: event %d reason %d\n", e->event_code,
+		 e->reason);
+
+	if (e->datalen < sizeof(*rxframe)) {
+		inff_dbg(SCAN, "Event data too small. Ignore\n");
+		return 0;
+	}
+
+	ch.chspec = be16_to_cpu(rxframe->chanspec);
+	cfg->d11inf.decchspec(&ch);
+
+	/* while searching a common channel, a probe req from the peer we are
+	 * looking for reveals its current channel; wake up the waiter.
+	 */
+	if (test_bit(INFF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) &&
+	    (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) {
+		afx_hdl->peer_chan = be16_to_cpu(rxframe->chanspec);
+		inff_dbg(INFO, "PROBE REQUEST: Peer found, chanspec=0x%04x\n",
+			 afx_hdl->peer_chan);
+		complete(&afx_hdl->act_frm_scan);
+	}
+
+	/* Firmware sends us two probe responses for each idx one. At the */
+	/* moment anything but bsscfgidx 0 is passed up to supplicant */
+	if (e->bsscfgidx == 0)
+		return 0;
+
+	/* Filter any P2P probe reqs arriving during the GO-NEG Phase */
+	if (test_bit(INFF_P2P_STATUS_GO_NEG_PHASE, &p2p->status)) {
+		inff_dbg(INFO, "Filtering P2P probe_req in GO-NEG phase\n");
+		return 0;
+	}
+
+	/* Check if wpa_supplicant has registered for this frame */
+	inff_dbg(INFO, "vif->mgmt_rx_reg %04x\n", vif->mgmt_rx_reg);
+	mgmt_type = (IEEE80211_STYPE_PROBE_REQ & IEEE80211_FCTL_STYPE) >> 4;
+	if ((vif->mgmt_rx_reg & BIT(mgmt_type)) == 0)
+		return 0;
+
+	/* event payload: rx_mgmt metadata followed by the raw frame */
+	mgmt_frame = (u8 *)(rxframe + 1);
+	mgmt_frame_len = e->datalen - sizeof(*rxframe);
+	freq = ieee80211_channel_to_frequency(ch.control_ch_num,
+					      inff_d11_chan_band_to_nl80211(ch.band));
+
+	cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0);
+
+	inff_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n",
+		 mgmt_frame_len, e->datalen, ch.chspec, freq);
+
+	return 0;
+}
+
+/**
+ * inff_p2p_get_current_chanspec() - Get current operation channel.
+ *
+ * @p2p: P2P specific data.
+ * @chanspec: chanspec to be returned.
+ */ +static void inff_p2p_get_current_chanspec(struct inff_p2p_info *p2p, + u16 *chanspec) +{ + struct inff_if *ifp; + u8 mac_addr[ETH_ALEN]; + struct inff_chan ch; + struct inff_bss_info_le *bi; + u8 *buf; + + ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; + + if (inff_fil_cmd_data_get(ifp, INFF_C_GET_BSSID, mac_addr, + ETH_ALEN) == 0) { + buf = kzalloc(WL_BSS_INFO_MAX, GFP_KERNEL); + if (buf) { + *(__le32 *)buf = cpu_to_le32(WL_BSS_INFO_MAX); + if (inff_fil_cmd_data_get(ifp, INFF_C_GET_BSS_INFO, + buf, WL_BSS_INFO_MAX) == 0) { + bi = (struct inff_bss_info_le *)(buf + 4); + *chanspec = le16_to_cpu(bi->chanspec); + kfree(buf); + return; + } + kfree(buf); + } + } + /* Use default channel for P2P */ + ch.band = INFF_CHAN_BAND_2G; + ch.chnum = INFF_P2P_TEMP_CHAN; + ch.bw = INFF_CHAN_BW_20; + p2p->cfg->d11inf.encchspec(&ch); + *chanspec = ch.chspec; +} + +/** + * inff_p2p_ifchange - Change a P2P Role. + * @cfg: driver private data for cfg80211 interface. + * @if_type: interface type. + * Returns 0 if success. + */ +int inff_p2p_ifchange(struct inff_cfg80211_info *cfg, + enum inff_fil_p2p_if_types if_type) +{ + struct inff_p2p_info *p2p = &cfg->p2p; + struct inff_pub *drvr = cfg->pub; + struct inff_cfg80211_vif *vif; + struct inff_fil_p2p_if_le if_request; + s32 err; + u16 chanspec; + + inff_dbg(TRACE, "Enter\n"); + + vif = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif; + if (!vif) { + iphy_err(drvr, "vif for P2PAPI_BSSCFG_PRIMARY does not exist\n"); + return -EPERM; + } + inff_notify_escan_complete(cfg, vif->ifp, true, true); + vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif; + if (!vif) { + iphy_err(drvr, "vif for P2PAPI_BSSCFG_CONNECTION does not exist\n"); + return -EPERM; + } + inff_set_mpc(vif->ifp, 0); + + /* In concurrency case, STA may be already associated in a particular */ + /* channel. so retrieve the current channel of primary interface and */ + /* then start the virtual interface on that. 
*/ + inff_p2p_get_current_chanspec(p2p, &chanspec); + + if_request.type = cpu_to_le16((u16)if_type); + if_request.chspec = cpu_to_le16(chanspec); + memcpy(if_request.addr, p2p->conn_int_addr, sizeof(if_request.addr)); + + inff_cfg80211_arm_vif_event(cfg, vif); + err = inff_fil_iovar_data_set(vif->ifp, "p2p_ifupd", &if_request, + sizeof(if_request)); + if (err) { + iphy_err(drvr, "p2p_ifupd FAILED, err=%d\n", err); + inff_cfg80211_arm_vif_event(cfg, NULL); + return err; + } + err = inff_cfg80211_wait_vif_event(cfg, INFF_E_IF_CHANGE, + INFF_VIF_EVENT_TIMEOUT); + inff_cfg80211_arm_vif_event(cfg, NULL); + if (!err) { + iphy_err(drvr, "No INFF_E_IF_CHANGE event received\n"); + return -EIO; + } + + err = inff_fil_cmd_int_set(vif->ifp, INFF_C_SET_SCB_TIMEOUT, + INFF_SCB_TIMEOUT_VALUE); + + return err; +} + +static int inff_p2p_request_p2p_if(struct inff_p2p_info *p2p, + struct inff_if *ifp, u8 ea[ETH_ALEN], + enum inff_fil_p2p_if_types iftype) +{ + struct inff_fil_p2p_if_le if_request; + int err; + u16 chanspec; + + /* we need a default channel */ + inff_p2p_get_current_chanspec(p2p, &chanspec); + + /* fill the firmware request */ + memcpy(if_request.addr, ea, ETH_ALEN); + if_request.type = cpu_to_le16((u16)iftype); + if_request.chspec = cpu_to_le16(chanspec); + + err = inff_fil_iovar_data_set(ifp, "p2p_ifadd", &if_request, + sizeof(if_request)); + + return err; +} + +static int inff_p2p_disable_p2p_if(struct inff_cfg80211_vif *vif) +{ + struct inff_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev); + struct net_device *pri_ndev = cfg_to_ndev(cfg); + struct inff_if *ifp = netdev_priv(pri_ndev); + const u8 *addr = vif->wdev.netdev->dev_addr; + + return inff_fil_iovar_data_set(ifp, "p2p_ifdis", addr, ETH_ALEN); +} + +static int inff_p2p_release_p2p_if(struct inff_cfg80211_vif *vif) +{ + struct inff_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev); + struct net_device *pri_ndev = cfg_to_ndev(cfg); + struct inff_if *ifp = netdev_priv(pri_ndev); + const u8 *addr = 
vif->wdev.netdev->dev_addr; + + return inff_fil_iovar_data_set(ifp, "p2p_ifdel", addr, ETH_ALEN); +} + +/** + * inff_p2p_create_p2pdev() - create a P2P_DEVICE virtual interface. + * + * @p2p: P2P specific data. + * @wiphy: wiphy device of new interface. + * @addr: mac address for this new interface. + */ +static struct wireless_dev *inff_p2p_create_p2pdev(struct inff_p2p_info *p2p, + struct wiphy *wiphy, + u8 *addr) +{ + struct inff_pub *drvr = p2p->cfg->pub; + struct inff_cfg80211_vif *p2p_vif; + struct inff_if *p2p_ifp; + struct inff_if *pri_ifp; + int err; + u32 bsscfgidx; + + if (p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif) + return ERR_PTR(-ENOSPC); + + p2p_vif = inff_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE); + if (IS_ERR(p2p_vif)) { + iphy_err(drvr, "could not create discovery vif\n"); + return (struct wireless_dev *)p2p_vif; + } + + pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; + + /* firmware requires unique mac address for p2pdev interface */ + if (addr && ether_addr_equal(addr, pri_ifp->mac_addr)) { + iphy_err(drvr, "discovery vif must be different from primary interface\n"); + err = -EINVAL; + goto fail; + } + + inff_p2p_generate_bss_mac(p2p, addr); + inff_p2p_set_firmware(pri_ifp, p2p->dev_addr); + + inff_cfg80211_arm_vif_event(p2p->cfg, p2p_vif); + inff_fweh_p2pdev_setup(pri_ifp, true); + + /* Initialize P2P Discovery in the firmware */ + err = inff_fil_iovar_int_set(pri_ifp, "p2p_disc", 1); + if (err < 0) { + iphy_err(drvr, "set p2p_disc error\n"); + inff_fweh_p2pdev_setup(pri_ifp, false); + inff_cfg80211_arm_vif_event(p2p->cfg, NULL); + goto fail; + } + + /* wait for firmware event */ + err = inff_cfg80211_wait_vif_event(p2p->cfg, INFF_E_IF_ADD, + INFF_VIF_EVENT_TIMEOUT); + inff_cfg80211_arm_vif_event(p2p->cfg, NULL); + inff_fweh_p2pdev_setup(pri_ifp, false); + if (!err) { + iphy_err(drvr, "timeout occurred\n"); + err = -EIO; + goto fail; + } + + /* discovery interface created */ + p2p_ifp = p2p_vif->ifp; + 
p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif = p2p_vif; + memcpy(p2p_ifp->mac_addr, p2p->dev_addr, ETH_ALEN); + memcpy(&p2p_vif->wdev.address, p2p->dev_addr, sizeof(p2p->dev_addr)); + + /* verify bsscfg index for P2P discovery */ + err = inff_fil_iovar_int_get(pri_ifp, "p2p_dev", &bsscfgidx); + if (err < 0) { + iphy_err(drvr, "retrieving discover bsscfg index failed\n"); + goto fail; + } + + WARN_ON(p2p_ifp->bsscfgidx != bsscfgidx); + + init_completion(&p2p->send_af_done); + INIT_WORK(&p2p->afx_hdl.afx_work, inff_p2p_afx_handler); + init_completion(&p2p->afx_hdl.act_frm_scan); + init_completion(&p2p->wait_next_af); + + return &p2p_vif->wdev; + +fail: + inff_free_vif(p2p_vif); + return ERR_PTR(err); +} + +static int inff_p2p_get_conn_idx(struct inff_cfg80211_info *cfg) +{ + int i; + struct inff_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + + if (!ifp) + return -ENODEV; + + for (i = P2PAPI_BSSCFG_CONNECTION; i < P2PAPI_BSSCFG_MAX; i++) { + if (!cfg->p2p.bss_idx[i].vif) { + if (i == P2PAPI_BSSCFG_CONNECTION2 && + !(inff_feat_is_enabled(ifp, INFF_FEAT_RSDB))) { + inff_err("Multi p2p not supported"); + return -EIO; + } + return i; + } + } + return -EIO; +} + +/** + * inff_p2p_add_vif() - create a new P2P virtual interface. + * + * @wiphy: wiphy device of new interface. + * @name: name of the new interface. + * @name_assign_type: origin of the interface name + * @type: nl80211 interface type. + * @params: contains mac address for P2P device. 
+ */ +struct wireless_dev *inff_p2p_add_vif(struct wiphy *wiphy, const char *name, + unsigned char name_assign_type, + enum nl80211_iftype type, + struct vif_params *params) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_if *ifp = netdev_priv(cfg_to_ndev(cfg)); + struct inff_pub *drvr = cfg->pub; + struct inff_cfg80211_vif *vif; + enum inff_fil_p2p_if_types iftype; + int err = 0; + int connidx; + u8 *p2p_intf_addr; + + if (inff_cfg80211_vif_event_armed(cfg)) + return ERR_PTR(-EBUSY); + + inff_dbg(INFO, "adding vif \"%s\" (type=%d)\n", name, type); + + switch (type) { + case NL80211_IFTYPE_P2P_CLIENT: + iftype = INFF_FIL_P2P_IF_CLIENT; + break; + case NL80211_IFTYPE_P2P_GO: + iftype = INFF_FIL_P2P_IF_GO; + break; + case NL80211_IFTYPE_P2P_DEVICE: + return inff_p2p_create_p2pdev(&cfg->p2p, wiphy, + params->macaddr); + default: + return ERR_PTR(-EOPNOTSUPP); + } + + vif = inff_alloc_vif(cfg, type); + if (IS_ERR(vif)) + return (struct wireless_dev *)vif; + inff_cfg80211_arm_vif_event(cfg, vif); + + connidx = inff_p2p_get_conn_idx(cfg); + + if (connidx == P2PAPI_BSSCFG_CONNECTION) + p2p_intf_addr = cfg->p2p.conn_int_addr; + else if (connidx == P2PAPI_BSSCFG_CONNECTION2) + p2p_intf_addr = cfg->p2p.conn2_int_addr; + else + err = -EINVAL; + + if (!err) + err = inff_p2p_request_p2p_if(&cfg->p2p, ifp, + p2p_intf_addr, iftype); + + if (err) { + inff_err("request p2p interface failed\n"); + inff_cfg80211_arm_vif_event(cfg, NULL); + goto fail; + } + + /* wait for firmware event */ + err = inff_cfg80211_wait_vif_event(cfg, INFF_E_IF_ADD, + INFF_VIF_EVENT_TIMEOUT); + inff_cfg80211_arm_vif_event(cfg, NULL); + if (!err) { + iphy_err(drvr, "timeout occurred\n"); + err = -EIO; + goto fail; + } + + /* interface created in firmware */ + ifp = vif->ifp; + if (!ifp) { + iphy_err(drvr, "no if pointer provided\n"); + err = -ENOENT; + goto fail; + } + + strscpy(ifp->ndev->name, name, sizeof(ifp->ndev->name)); + ifp->ndev->name_assign_type = name_assign_type; + err 
= inff_net_attach(ifp, true); + if (err) { + iphy_err(drvr, "Registering netdevice failed\n"); + free_netdev(ifp->ndev); + goto fail; + } + + cfg->p2p.bss_idx[connidx].vif = vif; + /* Disable firmware roaming for P2P interface */ + inff_fil_iovar_int_set(ifp, "roam_off", 1); + if (iftype == INFF_FIL_P2P_IF_GO) { + /* set station timeout for p2p */ + inff_fil_cmd_int_set(ifp, INFF_C_SET_SCB_TIMEOUT, + INFF_SCB_TIMEOUT_VALUE); + } + return &ifp->vif->wdev; + +fail: + inff_free_vif(vif); + return ERR_PTR(err); +} + +/** + * inff_p2p_del_vif() - delete a P2P virtual interface. + * + * @wiphy: wiphy device of interface. + * @wdev: wireless device of interface. + */ +int inff_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) +{ + struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct inff_p2p_info *p2p = &cfg->p2p; + struct inff_cfg80211_vif *vif; + enum nl80211_iftype iftype; + bool wait_for_disable = false; + int err; + + inff_dbg(TRACE, "delete P2P vif\n"); + vif = container_of(wdev, struct inff_cfg80211_vif, wdev); + + iftype = vif->wdev.iftype; + inff_cfg80211_arm_vif_event(cfg, vif); + switch (iftype) { + case NL80211_IFTYPE_P2P_CLIENT: + if (test_bit(INFF_VIF_STATUS_DISCONNECTING, &vif->sme_state)) + wait_for_disable = true; + break; + + case NL80211_IFTYPE_P2P_GO: + if (!inff_p2p_disable_p2p_if(vif)) + wait_for_disable = true; + break; + + case NL80211_IFTYPE_P2P_DEVICE: + if (!p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif) + return 0; + inff_p2p_cancel_remain_on_channel(vif->ifp); + inff_p2p_deinit_discovery(p2p); + break; + + default: + return -EOPNOTSUPP; + } + + clear_bit(INFF_P2P_STATUS_GO_NEG_PHASE, &p2p->status); + inff_dbg(INFO, "P2P: GO_NEG_PHASE status cleared\n"); + + if (wait_for_disable) + wait_for_completion_timeout(&cfg->vif_disabled, + INFF_P2P_DISABLE_TIMEOUT); + + err = 0; + if (iftype != NL80211_IFTYPE_P2P_DEVICE) { + inff_vif_clear_mgmt_ies(vif); + err = inff_p2p_release_p2p_if(vif); + } + if (!err) { + /* wait for firmware event */ 
+		err = inff_cfg80211_wait_vif_event(cfg, INFF_E_IF_DEL,
+						   INFF_VIF_EVENT_TIMEOUT);
+		if (!err)
+			err = -EIO;
+		else
+			err = 0;
+	}
+	inff_remove_interface(vif->ifp, true);
+
+	inff_cfg80211_arm_vif_event(cfg, NULL);
+	/* drop the connection bss slot the deleted vif occupied */
+	if (iftype != NL80211_IFTYPE_P2P_DEVICE) {
+		if (vif == p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif)
+			p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
+		if (vif == p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION2].vif)
+			p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION2].vif = NULL;
+	}
+
+	return err;
+}
+
+/**
+ * inff_p2p_ifp_removed() - tear down the P2P device wdev after its interface
+ * was removed.
+ *
+ * @ifp: interface pointer of the removed P2P device interface.
+ * @locked: true when the caller already holds rtnl and wiphy locks; when
+ *	false the locks are taken here around the wdev unregistration.
+ */
+void inff_p2p_ifp_removed(struct inff_if *ifp, bool locked)
+{
+	struct inff_cfg80211_info *cfg;
+	struct inff_cfg80211_vif *vif;
+
+	inff_dbg(INFO, "P2P: device interface removed\n");
+	vif = ifp->vif;
+	cfg = wdev_to_cfg(&vif->wdev);
+	cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
+	if (!locked) {
+		rtnl_lock();
+		wiphy_lock(cfg->wiphy);
+		cfg80211_unregister_wdev(&vif->wdev);
+		wiphy_unlock(cfg->wiphy);
+		rtnl_unlock();
+	} else {
+		cfg80211_unregister_wdev(&vif->wdev);
+	}
+	inff_free_vif(vif);
+}
+
+/**
+ * inff_p2p_start_device() - start the P2P device (cfg80211 start_p2p_device
+ * callback): enable P2P discovery and mark the vif ready.
+ *
+ * @wiphy: wiphy device.
+ * @wdev: wireless device of the P2P device interface.
+ *
+ * Return: 0 on success, or the error from enabling discovery.
+ */
+int inff_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct inff_p2p_info *p2p = &cfg->p2p;
+	struct inff_cfg80211_vif *vif;
+	int err;
+
+	vif = container_of(wdev, struct inff_cfg80211_vif, wdev);
+	mutex_lock(&cfg->usr_sync);
+	err = inff_p2p_enable_discovery(p2p);
+	if (!err)
+		set_bit(INFF_VIF_STATUS_READY, &vif->sme_state);
+	mutex_unlock(&cfg->usr_sync);
+	return err;
+}
+
+/**
+ * inff_p2p_stop_device() - stop the P2P device (cfg80211 stop_p2p_device
+ * callback): abort scanning and clear the vif ready state.
+ *
+ * @wiphy: wiphy device.
+ * @wdev: wireless device of the P2P device interface.
+ */
+void inff_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev)
+{
+	struct inff_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+	struct inff_p2p_info *p2p = &cfg->p2p;
+	struct inff_cfg80211_vif *vif;
+
+	vif = container_of(wdev, struct inff_cfg80211_vif, wdev);
+	/* This call can be the result of the unregister_wdev call. In that
+	 * case we don't want to do anything anymore. Just return. The config
+	 * vif will have been cleared at this point.
+ */ + if (p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif == vif) { + mutex_lock(&cfg->usr_sync); + /* Set the discovery state to SCAN */ + (void)inff_p2p_set_discover_state(vif->ifp, + WL_P2P_DISC_ST_SCAN, 0, 0); + inff_abort_scanning(cfg); + clear_bit(INFF_VIF_STATUS_READY, &vif->sme_state); + mutex_unlock(&cfg->usr_sync); + } +} + +/** + * inff_p2p_attach() - attach for P2P. + * + * @cfg: driver private data for cfg80211 interface. + * @p2pdev_forced: create p2p device interface at attach. + */ +s32 inff_p2p_attach(struct inff_cfg80211_info *cfg, bool p2pdev_forced) +{ + struct inff_pub *drvr = cfg->pub; + struct inff_p2p_info *p2p; + struct inff_if *pri_ifp; + s32 err = 0; + void *err_ptr; + + p2p = &cfg->p2p; + p2p->cfg = cfg; + + pri_ifp = inff_get_ifp(cfg->pub, 0); + p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif; + init_completion(&p2p->send_af_done); + + if (p2pdev_forced) { + err_ptr = inff_p2p_create_p2pdev(p2p, NULL, NULL); + if (IS_ERR(err_ptr)) { + iphy_err(drvr, "P2P device creation failed.\n"); + err = PTR_ERR(err_ptr); + } + } else { + p2p->p2pdev_dynamically = true; + } + return err; +} + +/** + * inff_p2p_detach() - detach P2P. + * + * @p2p: P2P specific data. + */ +void inff_p2p_detach(struct inff_p2p_info *p2p) +{ + struct inff_cfg80211_vif *vif; + + vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; + if (vif) { + inff_p2p_cancel_remain_on_channel(vif->ifp); + inff_p2p_deinit_discovery(p2p); + inff_remove_interface(vif->ifp, false); + } + /* just set it all to zero */ + memset(p2p, 0, sizeof(*p2p)); +} diff --git a/drivers/net/wireless/infineon/inffmac/p2p.h b/drivers/net/wireless/infineon/inffmac/p2p.h new file mode 100644 index 000000000000..950d3e0419b3 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/p2p.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2012 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_P2P_H +#define INFF_P2P_H + +#include + +struct inff_cfg80211_info; + +/** + * enum p2p_bss_type - different type of BSS configurations. + * + * @P2PAPI_BSSCFG_PRIMARY: maps to driver's primary bsscfg. + * @P2PAPI_BSSCFG_DEVICE: maps to driver's P2P device discovery bsscfg. + * @P2PAPI_BSSCFG_CONNECTION: maps to driver's 1st P2P connection bsscfg. + * @P2PAPI_BSSCFG_CONNECTION2: maps to driver's 2nd P2P connection bsscfg. + * @P2PAPI_BSSCFG_MAX: used for range checking. + */ +enum p2p_bss_type { + P2PAPI_BSSCFG_PRIMARY, /* maps to driver's primary bsscfg */ + P2PAPI_BSSCFG_DEVICE, /* maps to driver's P2P device discovery bsscfg */ + P2PAPI_BSSCFG_CONNECTION, /* driver's 1st P2P connection bsscfg */ + P2PAPI_BSSCFG_CONNECTION2, /* driver's 2nd P2P connection bsscfg */ + P2PAPI_BSSCFG_MAX +}; + +/** + * struct p2p_bss - peer-to-peer bss related information. + * + * @vif: virtual interface of this P2P bss. + * @private_data: TBD + */ +struct p2p_bss { + struct inff_cfg80211_vif *vif; + void *private_data; +}; + +/** + * enum inff_p2p_status - P2P specific dongle status. + * + * @INFF_P2P_STATUS_IF_ADD: peer-to-peer vif add sent to dongle. + * @INFF_P2P_STATUS_IF_DEL: NOT-USED? + * @INFF_P2P_STATUS_IF_DELETING: peer-to-peer vif delete sent to dongle. + * @INFF_P2P_STATUS_IF_CHANGING: peer-to-peer vif change sent to dongle. + * @INFF_P2P_STATUS_IF_CHANGED: peer-to-peer vif change completed on dongle. + * @INFF_P2P_STATUS_ACTION_TX_COMPLETED: action frame tx completed. + * @INFF_P2P_STATUS_ACTION_TX_NOACK: action frame tx not acked. + * @INFF_P2P_STATUS_GO_NEG_PHASE: P2P GO negotiation ongoing. + * @INFF_P2P_STATUS_DISCOVER_LISTEN: P2P listen, remaining on channel. + * @INFF_P2P_STATUS_SENDING_ACT_FRAME: In the process of sending action frame. + * @INFF_P2P_STATUS_WAITING_NEXT_AF_LISTEN: extra listen time for af tx. + * @INFF_P2P_STATUS_WAITING_NEXT_ACT_FRAME: waiting for action frame response. 
+ * @INFF_P2P_STATUS_FINDING_COMMON_CHANNEL: search channel for AF active.
+ */
+enum inff_p2p_status {
+	INFF_P2P_STATUS_ENABLED,
+	INFF_P2P_STATUS_IF_ADD,
+	INFF_P2P_STATUS_IF_DEL,
+	INFF_P2P_STATUS_IF_DELETING,
+	INFF_P2P_STATUS_IF_CHANGING,
+	INFF_P2P_STATUS_IF_CHANGED,
+	INFF_P2P_STATUS_ACTION_TX_COMPLETED,
+	INFF_P2P_STATUS_ACTION_TX_NOACK,
+	INFF_P2P_STATUS_GO_NEG_PHASE,
+	INFF_P2P_STATUS_DISCOVER_LISTEN,
+	INFF_P2P_STATUS_SENDING_ACT_FRAME,
+	INFF_P2P_STATUS_WAITING_NEXT_AF_LISTEN,
+	INFF_P2P_STATUS_WAITING_NEXT_ACT_FRAME,
+	INFF_P2P_STATUS_FINDING_COMMON_CHANNEL
+};
+
+/**
+ * struct afx_hdl - action frame off channel storage.
+ *
+ * @afx_work: worker thread for searching channel
+ * @act_frm_scan: thread synchronizing struct.
+ * @is_active: channel searching active.
+ * @peer_chan: current channel.
+ * @is_listen: sets mode for afx worker.
+ * @my_listen_chan: this peer's listen channel.
+ * @peer_listen_chan: remote peer's listen channel.
+ * @tx_dst_addr: mac address where tx af should be sent to.
+ */
+struct afx_hdl {
+	struct work_struct afx_work;
+	struct completion act_frm_scan;
+	bool is_active;
+	u16 peer_chan;
+	bool is_listen;
+	u16 my_listen_chan;
+	u16 peer_listen_chan;
+	u8 tx_dst_addr[ETH_ALEN];
+};
+
+/**
+ * struct inff_p2p_info - p2p specific driver information.
+ *
+ * @cfg: driver private data for cfg80211 interface.
+ * @status: status of P2P (see enum inff_p2p_status).
+ * @dev_addr: P2P device address.
+ * @conn_int_addr: P2P interface address of 1st connection.
+ * @conn2_int_addr: P2P interface address of 2nd connection.
+ * @bss_idx: information for P2P bss types.
+ * @listen_timer: timer for @WL_P2P_DISC_ST_LISTEN discover state.
+ * @listen_channel: channel for @WL_P2P_DISC_ST_LISTEN discover state.
+ * @remain_on_channel: contains copy of struct used by cfg80211.
+ * @remain_on_channel_cookie: cookie counter for remain on channel cmd
+ * @next_af_subtype: expected action frame subtype.
+ * @send_af_done: indication that action frame tx is complete.
+ * @afx_hdl: action frame search handler info.
+ * @af_sent_channel: channel action frame is sent.
+ * @af_tx_sent_jiffies: jiffies time when af tx was transmitted.
+ * @wait_next_af: thread synchronizing struct.
+ * @gon_req_action: about to send go negotiation requests frame.
+ * @block_gon_req_tx: drop tx go negotiation requests frame.
+ * @p2pdev_dynamically: is p2p device if created by module param or supplicant.
+ * @wait_for_offchan_complete: wait for off-channel tx completion event.
+ * @remin_on_channel_wdev: wdev used for the remain-on-channel request
+ *	(NOTE(review): field name looks like a typo of "remain").
+ */
+struct inff_p2p_info {
+	struct inff_cfg80211_info *cfg;
+	unsigned long status;
+	u8 dev_addr[ETH_ALEN];
+	u8 conn_int_addr[ETH_ALEN];
+	u8 conn2_int_addr[ETH_ALEN];
+	struct p2p_bss bss_idx[P2PAPI_BSSCFG_MAX];
+	struct timer_list listen_timer;
+	u8 listen_channel;
+	struct ieee80211_channel remain_on_channel;
+	u32 remain_on_channel_cookie;
+	u8 next_af_subtype;
+	struct completion send_af_done;
+	struct afx_hdl afx_hdl;
+	u32 af_sent_channel;
+	unsigned long af_tx_sent_jiffies;
+	struct completion wait_next_af;
+	bool gon_req_action;
+	bool block_gon_req_tx;
+	bool p2pdev_dynamically;
+	bool wait_for_offchan_complete;
+	struct wireless_dev *remin_on_channel_wdev;
+};
+
+s32 inff_p2p_attach(struct inff_cfg80211_info *cfg, bool p2pdev_forced);
+void inff_p2p_detach(struct inff_p2p_info *p2p);
+struct wireless_dev *inff_p2p_add_vif(struct wiphy *wiphy, const char *name,
+				      unsigned char name_assign_type,
+				      enum nl80211_iftype type,
+				      struct vif_params *params);
+int inff_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev);
+int inff_p2p_ifchange(struct inff_cfg80211_info *cfg,
+		      enum inff_fil_p2p_if_types if_type);
+void inff_p2p_ifp_removed(struct inff_if *ifp, bool rtnl_locked);
+int inff_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+void inff_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev);
+int inff_p2p_scan_prep(struct wiphy *wiphy,
+		       struct cfg80211_scan_request *request,
+		       struct inff_cfg80211_vif *vif);
+int inff_p2p_remain_on_channel(struct wiphy *wiphy, struct 
wireless_dev *wdev, + struct ieee80211_channel *channel, + unsigned int duration, u64 *cookie); +int inff_p2p_notify_listen_complete(struct inff_if *ifp, + const struct inff_event_msg *e, + void *data); +void inff_p2p_cancel_remain_on_channel(struct inff_if *ifp); +int inff_p2p_notify_action_frame_rx(struct inff_if *ifp, + const struct inff_event_msg *e, + void *data); +int inff_p2p_notify_action_tx_complete(struct inff_if *ifp, + const struct inff_event_msg *e, + void *data); +bool inff_p2p_send_action_frame(struct inff_cfg80211_info *cfg, + struct net_device *ndev, + struct inff_fil_af_params_le *af_params, + struct inff_cfg80211_vif *vif, + struct ieee80211_channel *peer_listen_chan); +bool inff_p2p_scan_finding_common_channel(struct inff_cfg80211_info *cfg, + struct inff_bss_info_le *bi); +s32 inff_p2p_notify_rx_mgmt_p2p_probereq(struct inff_if *ifp, + const struct inff_event_msg *e, + void *data); +#endif /* INFF_P2P_H */ -- 2.25.1 Infineon Device specific definitions. Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/hw_ids.h | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/hw_ids.h diff --git a/drivers/net/wireless/infineon/inffmac/hw_ids.h b/drivers/net/wireless/infineon/inffmac/hw_ids.h new file mode 100644 index 000000000000..25271fcf6c37 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/hw_ids.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_HW_IDS_H +#define INFF_HW_IDS_H + +#include +#include + +#define INF_PCIE_VENDOR_ID_CYPRESS 0x12be + +/* Chipcommon Core Chip IDs */ +#define INF_CC_5557X_CHIP_ID 0xd908 +#define INF_CC_5551X_CHIP_ID 0xD8CC +#define INF_CC_5591X_CHIP_ID 0xDA5C +#define INF_CC_43022_CHIP_ID 43022 + +/* PCIE Device IDs */ +#define INF_PCIE_5557X_DEVICE_ID 0xbd31 + +#endif /* INFF_HW_IDS_H */ -- 2.25.1 Structure and MACRO definitions related to the chipcommon region inside the Infineon Device. Signed-off-by: Gokul Sivakumar --- .../wireless/infineon/inffmac/chipcommon.h | 566 ++++++++++++++++++ 1 file changed, 566 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/chipcommon.h diff --git a/drivers/net/wireless/infineon/inffmac/chipcommon.h b/drivers/net/wireless/infineon/inffmac/chipcommon.h new file mode 100644 index 000000000000..4e27e1c1d36c --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/chipcommon.h @@ -0,0 +1,566 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_CHIPCOMMON_H +#define INFF_CHIPCOMMON_H + +#include "defs.h" /* for PAD macro */ + +#define CHIPCREGOFFS(field) offsetof(struct chipcregs, field) + +struct chipcregs { + u32 chipid; /* 0x0 */ + u32 capabilities; + u32 corecontrol; /* corerev >= 1 */ + u32 bist; + + /* OTP */ + u32 otpstatus; /* 0x10, corerev >= 10 */ + u32 otpcontrol; + u32 otpprog; + u32 otplayout; /* corerev >= 23 */ + + /* Interrupt control */ + u32 intstatus; /* 0x20 */ + u32 intmask; + + /* Chip specific regs */ + u32 chipcontrol; /* 0x28, rev >= 11 */ + u32 chipstatus; /* 0x2c, rev >= 11 */ + + /* Jtag Master */ + u32 jtagcmd; /* 0x30, rev >= 10 */ + u32 jtagir; + u32 jtagdr; + u32 jtagctrl; + + /* serial flash interface registers */ + u32 flashcontrol; /* 0x40 */ + u32 flashaddress; + u32 flashdata; + u32 PAD[1]; + + /* Silicon backplane configuration broadcast control */ + u32 broadcastaddress; /* 0x50 */ + u32 broadcastdata; + + /* gpio - cleared only by power-on-reset */ + u32 gpiopullup; /* 0x58, corerev >= 20 */ + u32 gpiopulldown; /* 0x5c, corerev >= 20 */ + u32 gpioin; /* 0x60 */ + u32 gpioout; /* 0x64 */ + u32 gpioouten; /* 0x68 */ + u32 gpiocontrol; /* 0x6C */ + u32 gpiointpolarity; /* 0x70 */ + u32 gpiointmask; /* 0x74 */ + + /* GPIO events corerev >= 11 */ + u32 gpioevent; + u32 gpioeventintmask; + + /* Watchdog timer */ + u32 watchdog; /* 0x80 */ + + /* GPIO events corerev >= 11 */ + u32 gpioeventintpolarity; + + /* GPIO based LED powersave registers corerev >= 16 */ + u32 gpiotimerval; /* 0x88 */ + u32 gpiotimeroutmask; + + /* clock control */ + u32 clockcontrol_n; /* 0x90 */ + u32 clockcontrol_sb; /* aka m0 */ + u32 clockcontrol_pci; /* aka m1 */ + u32 clockcontrol_m2; /* mii/uart/mipsref */ + u32 clockcontrol_m3; /* cpu */ + u32 clkdiv; /* corerev >= 3 */ + u32 gpiodebugsel; /* corerev >= 28 */ + u32 capabilities_ext; /* 0xac */ + + /* pll delay registers (corerev >= 4) */ + u32 pll_on_delay; /* 0xb0 */ + u32 fref_sel_delay; + u32 slow_clk_ctl; /* 5 < 
corerev < 10 */ + u32 PAD; + + /* Instaclock registers (corerev >= 10) */ + u32 system_clk_ctl; /* 0xc0 */ + u32 clkstatestretch; + u32 PAD[2]; + + /* Indirect backplane access (corerev >= 22) */ + u32 bp_addrlow; /* 0xd0 */ + u32 bp_addrhigh; + u32 bp_data; + u32 PAD; + u32 bp_indaccess; + u32 PAD[3]; + + /* More clock dividers (corerev >= 32) */ + u32 clkdiv2; + u32 PAD[2]; + + /* In AI chips, pointer to erom */ + u32 eromptr; /* 0xfc */ + + /* ExtBus control registers (corerev >= 3) */ + u32 pcmcia_config; /* 0x100 */ + u32 pcmcia_memwait; + u32 pcmcia_attrwait; + u32 pcmcia_iowait; + u32 ide_config; + u32 ide_memwait; + u32 ide_attrwait; + u32 ide_iowait; + u32 prog_config; + u32 prog_waitcount; + u32 flash_config; + u32 flash_waitcount; + u32 SECI_config; /* 0x130 SECI configuration */ + u32 PAD[3]; + + /* Enhanced Coexistence Interface (ECI) registers (corerev >= 21) */ + u32 eci_output; /* 0x140 */ + u32 eci_control; + u32 eci_inputlo; + u32 eci_inputmi; + u32 eci_inputhi; + u32 eci_inputintpolaritylo; + u32 eci_inputintpolaritymi; + u32 eci_inputintpolarityhi; + u32 eci_intmasklo; + u32 eci_intmaskmi; + u32 eci_intmaskhi; + u32 eci_eventlo; + u32 eci_eventmi; + u32 eci_eventhi; + u32 eci_eventmasklo; + u32 eci_eventmaskmi; + u32 eci_eventmaskhi; + u32 PAD[3]; + + /* SROM interface (corerev >= 32) */ + u32 sromcontrol; /* 0x190 */ + u32 sromaddress; + u32 sromdata; + u32 PAD[17]; + + /* Clock control and hardware workarounds (corerev >= 20) */ + u32 clk_ctl_st; /* 0x1e0 */ + u32 hw_war; + u32 PAD[70]; + + /* UARTs */ + u8 uart0data; /* 0x300 */ + u8 uart0imr; + u8 uart0fcr; + u8 uart0lcr; + u8 uart0mcr; + u8 uart0lsr; + u8 uart0msr; + u8 uart0scratch; + u8 PAD[248]; /* corerev >= 1 */ + + u8 uart1data; /* 0x400 */ + u8 uart1imr; + u8 uart1fcr; + u8 uart1lcr; + u8 uart1mcr; + u8 uart1lsr; + u8 uart1msr; + u8 uart1scratch; + u32 PAD[62]; + + /* save/restore, corerev >= 48 */ + u32 sr_capability; /* 0x500 */ + u32 sr_control0; /* 0x504 */ + u32 sr_control1; /* 
0x508 */ + u32 gpio_control; /* 0x50C */ + u32 PAD[60]; + + /* PMU registers (corerev >= 20) */ + u32 pmucontrol; /* 0x600 */ + u32 pmucapabilities; + u32 pmustatus; + u32 res_state; + u32 res_pending; + u32 pmutimer; + u32 min_res_mask; + u32 max_res_mask; + u32 res_table_sel; + u32 res_dep_mask; + u32 res_updn_timer; + u32 res_timer; + u32 clkstretch; + u32 pmuwatchdog; + u32 gpiosel; /* 0x638, rev >= 1 */ + u32 gpioenable; /* 0x63c, rev >= 1 */ + u32 res_req_timer_sel; + u32 res_req_timer; + u32 res_req_mask; + u32 pmucapabilities_ext; /* 0x64c, pmurev >=15 */ + u32 chipcontrol_addr; /* 0x650 */ + u32 chipcontrol_data; /* 0x654 */ + u32 regcontrol_addr; + u32 regcontrol_data; + u32 pllcontrol_addr; + u32 pllcontrol_data; + u32 pmustrapopt; /* 0x668, corerev >= 28 */ + u32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */ + u32 retention_ctl; /* 0x670, pmurev >= 15 */ + u32 PAD[3]; + u32 retention_grpidx; /* 0x680 */ + u32 retention_grpctl; /* 0x684 */ + u32 mac_res_req_timer; /* 0x688 */ + u32 mac_res_req_mask; /* 0x68c */ + u32 PAD[18]; + u32 pmucontrol_ext; /* 0x6d8 */ + u32 slowclkperiod; /* 0x6dc */ + u32 PAD[8]; + u32 pmuintmask0; /* 0x700 */ + u32 pmuintmask1; /* 0x704 */ + u32 PAD[14]; + u32 pmuintstatus; /* 0x740 */ + u32 extwakeupstatus; /* 0x744 */ + u32 watchdog_res_mask; /* 0x748 */ + u32 swscratch; /* 0x750 */ + u32 PAD[3]; + u32 extwakemask[2]; /* 0x760-0x764 */ + u32 PAD[2]; + u32 extwakereqmask[2]; /* 0x770-0x774 */ + u32 PAD[2]; + u32 pmuintctrl0; /* 0x780 */ + u32 pmuintctrl1; /* 0x784 */ + u32 PAD[2]; + u32 extwakectrl[2]; /* 0x790 */ +}; + +#define CHIPGCIREGOFFS(field) offsetof(struct chipgciregs, field) + +struct chipgciregs { + u32 gci_corecaps0; /* 0x000 */ + u32 gci_corecaps1; /* 0x004 */ + u32 gci_corecaps2; /* 0x008 */ + u32 gci_corectrl; /* 0x00c */ + u32 gci_corestat; /* 0x010 */ + u32 gci_intstat; /* 0x014 */ + u32 gci_intmask; /* 0x018 */ + u32 gci_wakemask; /* 0x01c */ + u32 gci_levelintstat; /* 0x020 */ + u32 gci_eventintstat; /* 0x024 */ 
+ u32 gci_wakelevelintstat; /* 0x028 */ + u32 gci_wakeeventintstat; /* 0x02c */ + u32 semaphoreintstatus; /* 0x030 */ + u32 semaphoreintmask; /* 0x034 */ + u32 semaphorerequest; /* 0x038 */ + u32 semaphorereserve; /* 0x03c */ + u32 gci_indirect_addr; /* 0x040 */ + u32 gci_gpioctl; /* 0x044 */ + u32 gci_gpiostatus; /* 0x048 */ + u32 gci_gpiomask; /* 0x04c */ + u32 eventsummary; /* 0x050 */ + u32 gci_miscctl; /* 0x054 */ + u32 gci_gpiointmask; /* 0x058 */ + u32 gci_gpiowakemask; /* 0x05c */ + u32 gci_input[32]; /* 0x060 */ + u32 gci_event[32]; /* 0x0e0 */ + u32 gci_output[4]; /* 0x160 */ + u32 gci_control_0; /* 0x170 */ + u32 gci_control_1; /* 0x174 */ + u32 gci_intpolreg; /* 0x178 */ + u32 gci_levelintmask; /* 0x17c */ + u32 gci_eventintmask; /* 0x180 */ + u32 wakelevelintmask; /* 0x184 */ + u32 wakeeventintmask; /* 0x188 */ + u32 hwmask; /* 0x18c */ + u32 PAD; + u32 gci_inbandeventintmask; /* 0x194 */ + u32 PAD; + u32 gci_inbandeventstatus; /* 0x19c */ + u32 gci_seciauxtx; /* 0x1a0 */ + u32 gci_seciauxrx; /* 0x1a4 */ + u32 gci_secitx_datatag; /* 0x1a8 */ + u32 gci_secirx_datatag; /* 0x1ac */ + u32 gci_secitx_datamask; /* 0x1b0 */ + u32 gci_seciusef0tx_reg; /* 0x1b4 */ + u32 gci_secif0tx_offset; /* 0x1b8 */ + u32 gci_secif0rx_offset; /* 0x1bc */ + u32 gci_secif1tx_offset; /* 0x1c0 */ + u32 gci_rxfifo_common_ctrl; /* 0x1c4 */ + u32 gci_rxfifoctrl; /* 0x1c8 */ + u32 gci_hw_sema_status; /* 0x1cc */ + u32 gci_seciuartescval; /* 0x1d0 */ + u32 gic_seciuartautobaudctr; /* 0x1d4 */ + u32 gci_secififolevel; /* 0x1d8 */ + u32 gci_seciuartdata; /* 0x1dc */ + u32 gci_secibauddiv; /* 0x1e0 */ + u32 gci_secifcr; /* 0x1e4 */ + u32 gci_secilcr; /* 0x1e8 */ + u32 gci_secimcr; /* 0x1ec */ + u32 gci_secilsr; /* 0x1f0 */ + u32 gci_secimsr; /* 0x1f4 */ + u32 gci_baudadj; /* 0x1f8 */ + u32 gci_inbandintmask; /* 0x1fc */ + u32 gci_chipctrl; /* 0x200 */ + u32 gci_chipsts; /* 0x204 */ + u32 gci_gpioout; /* 0x208 */ + u32 gci_gpioout_read; /* 0x20C */ + u32 gci_mpwaketx; /* 0x210 */ + u32 
gci_mpwakedetect; /* 0x214 */ + u32 gci_seciin_ctrl; /* 0x218 */ + u32 gci_seciout_ctrl; /* 0x21C */ + u32 gci_seciin_auxfifo_en; /* 0x220 */ + u32 gci_seciout_txen_txbr; /* 0x224 */ + u32 gci_seciin_rxbrstatus; /* 0x228 */ + u32 gci_seciin_rxerrstatus; /* 0x22C */ + u32 gci_seciin_fcstatus; /* 0x230 */ + u32 gci_seciout_txstatus; /* 0x234 */ + u32 gci_seciout_txbrstatus; /* 0x238 */ + u32 wlan_mem_info; /* 0x23C */ + u32 wlan_bankxinfo; /* 0x240 */ + u32 bt_smem_select; /* 0x244 */ + u32 bt_smem_stby; /* 0x248 */ + u32 bt_smem_status; /* 0x24C */ + u32 wlan_bankxactivepda; /* 0x250 */ + u32 wlan_bankxsleeppda; /* 0x254 */ + u32 wlan_bankxkill; /* 0x258 */ + u32 PAD[41]; + u32 gci_chipid; /* 0x300 */ + u32 PAD[3]; + u32 otpstatus; /* 0x310 */ + u32 otpcontrol; /* 0x314 */ + u32 otpprog; /* 0x318 */ + u32 otplayout; /* 0x31c */ + u32 otplayoutextension; /* 0x320 */ + u32 otpcontrol1; /* 0x324 */ + u32 otpprogdata; /* 0x328 */ + u32 PAD[52]; + u32 otp_ecc_status; /* 0x3FC */ + u32 PAD[512]; + u32 lhl_core_capab_adr; /* 0xC00 */ + u32 lhl_main_ctl_adr; /* 0xC04 */ + u32 lhl_pmu_ctl_adr; /* 0xC08 */ + u32 lhl_extlpo_ctl_adr; /* 0xC0C */ + u32 lpo_ctl_adr; /* 0xC10 */ + u32 lhl_lpo2_ctl_adr; /* 0xC14 */ + u32 lhl_osc32k_ctl_adr; /* 0xC18 */ + u32 lhl_clk_status_adr; /* 0xC1C */ + u32 lhl_clk_det_ctl_adr; /* 0xC20 */ + u32 lhl_clk_sel_adr; /* 0xC24 */ + u32 hidoff_cnt_adr[2]; /* 0xC28-0xC2C */ + u32 lhl_autoclk_ctl_adr; /* 0xC30 */ + u32 PAD; + u32 lhl_hibtim_adr; /* 0xC38 */ + u32 lhl_wl_ilp_val_adr; /* 0xC3C */ + u32 lhl_wl_armtim0_intrp_adr; /* 0xC40 */ + u32 lhl_wl_armtim0_st_adr; /* 0xC44 */ + u32 lhl_wl_armtim0_adr; /* 0xC48 */ + u32 PAD[9]; + u32 lhl_wl_mactim0_intrp_adr; /* 0xC70 */ + u32 lhl_wl_mactim0_st_adr; /* 0xC74 */ + u32 lhl_wl_mactim_int0_adr; /* 0xC78 */ + u32 lhl_wl_mactim_frac0_adr; /* 0xC7C */ + u32 lhl_wl_mactim1_intrp_adr; /* 0xC80 */ + u32 lhl_wl_mactim1_st_adr; /* 0xC84 */ + u32 lhl_wl_mactim_int1_adr; /* 0xC88 */ + u32 lhl_wl_mactim_frac1_adr; 
/* 0xC8C */ + u32 PAD[8]; + u32 gpio_int_en_port_adr[4]; /* 0xCB0-0xCBC */ + u32 gpio_int_st_port_adr[4]; /* 0xCC0-0xCCC */ + u32 gpio_ctrl_iocfg_p_adr[64]; /* 0xCD0-0xDCC */ + u32 gpio_gctrl_iocfg_p0_p39_adr; /* 0xDD0 */ + u32 gpio_gdsctrl_iocfg_p0_p25_p30_p39_adr; /* 0xDD4 */ + u32 gpio_gdsctrl_iocfg_p26_p29_adr; /* 0xDD8 */ + u32 PAD[8]; + u32 lhl_gpio_din0_adr; /* 0xDFC */ + u32 lhl_gpio_din1_adr; /* 0xE00 */ + u32 lhl_wkup_status_adr; /* 0xE04 */ + u32 lhl_ctl_adr; /* 0xE08 */ + u32 lhl_adc_ctl_adr; /* 0xE0C */ + u32 lhl_qdxyz_in_dly_adr; /* 0xE10 */ + u32 lhl_optctl_adr; /* 0xE14 */ + u32 lhl_optct2_adr; /* 0xE18 */ + u32 lhl_scanp_cntr_init_val_adr; /* 0xE1C */ + u32 lhl_opt_togg_val_adr[6]; /* 0xE20-0xE34 */ + u32 lhl_optx_smp_val_adr; /* 0xE38 */ + u32 lhl_opty_smp_val_adr; /* 0xE3C */ + u32 lhl_optz_smp_val_adr; /* 0xE40 */ + u32 lhl_hidoff_keepstate_adr[3]; /* 0xE44-0xE4C */ + u32 lhl_bt_slmboot_ctl0_adr[4]; /* 0xE50-0xE5C */ + u32 lhl_wl_fw_ctl; /* 0xE60 */ + u32 lhl_wl_hw_ctl_adr[2]; /* 0xE64-0xE68 */ + u32 lhl_bt_hw_ctl_adr; /* 0xE6C */ + u32 lhl_top_pwrseq_en_adr; /* 0xE70 */ + u32 lhl_top_pwrdn_ctl_adr; /* 0xE74 */ + u32 lhl_top_pwrup_ctl_adr; /* 0xE78 */ + u32 lhl_top_pwrseq_ctl_adr; /* 0xE7C */ + u32 lhl_top_pwrdn2_ctl_adr; /* 0xE80 */ + u32 lhl_top_pwrup2_ctl_adr; /* 0xE84 */ + u32 wpt_regon_intrp_cfg_adr; /* 0xE88 */ + u32 bt_regon_intrp_cfg_adr; /* 0xE8C */ + u32 wl_regon_intrp_cfg_adr; /* 0xE90 */ + u32 regon_intrp_st_adr; /* 0xE94 */ + u32 regon_intrp_en_adr; /* 0xE98 */ + +}; + +/* chipid */ +#define CID_ID_MASK 0x0000ffff /* Chip Id mask */ +#define CID_REV_MASK 0x000f0000 /* Chip Revision mask */ +#define CID_REV_SHIFT 16 /* Chip Revision shift */ +#define CID_PKG_MASK 0x00f00000 /* Package Option mask */ +#define CID_PKG_SHIFT 20 /* Package Option shift */ +#define CID_CC_MASK 0x0f000000 /* CoreCount (corerev >= 4) */ +#define CID_CC_SHIFT 24 +#define CID_TYPE_MASK 0xf0000000 /* Chip Type */ +#define CID_TYPE_SHIFT 28 + +/* capabilities 
*/ +#define CC_CAP_UARTS_MASK 0x00000003 /* Number of UARTs */ +#define CC_CAP_MIPSEB 0x00000004 /* MIPS is in big-endian mode */ +#define CC_CAP_UCLKSEL 0x00000018 /* UARTs clock select */ +/* UARTs are driven by internal divided clock */ +#define CC_CAP_UINTCLK 0x00000008 +#define CC_CAP_UARTGPIO 0x00000020 /* UARTs own GPIOs 15:12 */ +#define CC_CAP_EXTBUS_MASK 0x000000c0 /* External bus mask */ +#define CC_CAP_EXTBUS_NONE 0x00000000 /* No ExtBus present */ +#define CC_CAP_EXTBUS_FULL 0x00000040 /* ExtBus: PCMCIA, IDE & Prog */ +#define CC_CAP_EXTBUS_PROG 0x00000080 /* ExtBus: ProgIf only */ +#define CC_CAP_FLASH_MASK 0x00000700 /* Type of flash */ +#define CC_CAP_PLL_MASK 0x00038000 /* Type of PLL */ +#define CC_CAP_PWR_CTL 0x00040000 /* Power control */ +#define CC_CAP_OTPSIZE 0x00380000 /* OTP Size (0 = none) */ +#define CC_CAP_OTPSIZE_SHIFT 19 /* OTP Size shift */ +#define CC_CAP_OTPSIZE_BASE 5 /* OTP Size base */ +#define CC_CAP_JTAGP 0x00400000 /* JTAG Master Present */ +#define CC_CAP_ROM 0x00800000 /* Internal boot rom active */ +#define CC_CAP_BKPLN64 0x08000000 /* 64-bit backplane */ +#define CC_CAP_PMU 0x10000000 /* PMU Present, rev >= 20 */ +#define CC_CAP_SROM 0x40000000 /* Srom Present, rev >= 32 */ +/* Nand flash present, rev >= 35 */ +#define CC_CAP_NFLASH 0x80000000 + +#define CC_CAP2_SECI 0x00000001 /* SECI Present, rev >= 36 */ +/* GSIO (spi/i2c) present, rev >= 37 */ +#define CC_CAP2_GSIO 0x00000002 + +/* sr_control0, rev >= 48 */ +#define CC_SR_CTL0_ENABLE_MASK BIT(0) +#define CC_SR_CTL0_ENABLE_SHIFT 0 +#define CC_SR_CTL0_EN_SR_ENG_CLK_SHIFT 1 /* sr_clk to sr_memory enable */ +#define CC_SR_CTL0_RSRC_TRIGGER_SHIFT 2 /* Rising edge resource trigger 0 to + * sr_engine + */ +#define CC_SR_CTL0_MIN_DIV_SHIFT 6 /* Min division value for fast clk + * in sr_engine + */ +#define CC_SR_CTL0_EN_SBC_STBY_SHIFT 16 +#define CC_SR_CTL0_EN_SR_ALP_CLK_MASK_SHIFT 18 +#define CC_SR_CTL0_EN_SR_HT_CLK_SHIFT 19 +#define CC_SR_CTL0_ALLOW_PIC_SHIFT 20 /* Allow pic 
to separate power + * domains + */ +#define CC_SR_CTL0_MAX_SR_LQ_CLK_CNT_SHIFT 25 +#define CC_SR_CTL0_EN_MEM_DISABLE_FOR_SLEEP 30 + +/* pmucapabilities */ +#define PCAP_REV_MASK 0x000000ff +#define PCAP_RC_MASK 0x00001f00 +#define PCAP_RC_SHIFT 8 +#define PCAP_TC_MASK 0x0001e000 +#define PCAP_TC_SHIFT 13 +#define PCAP_PC_MASK 0x001e0000 +#define PCAP_PC_SHIFT 17 +#define PCAP_VC_MASK 0x01e00000 +#define PCAP_VC_SHIFT 21 +#define PCAP_CC_MASK 0x1e000000 +#define PCAP_CC_SHIFT 25 +#define PCAP5_PC_MASK 0x003e0000 /* PMU corerev >= 5 */ +#define PCAP5_PC_SHIFT 17 +#define PCAP5_VC_MASK 0x07c00000 +#define PCAP5_VC_SHIFT 22 +#define PCAP5_CC_MASK 0xf8000000 +#define PCAP5_CC_SHIFT 27 +/* pmucapabilites_ext PMU rev >= 15 */ +#define PCAPEXT_SR_SUPPORTED_MASK BIT(1) +/* retention_ctl PMU rev >= 15 */ +#define PMU_RCTL_MACPHY_DISABLE_MASK BIT(26) +#define PMU_RCTL_LOGIC_DISABLE_MASK BIT(27) + +/* + * Maximum delay for the PMU state transition in us. + * This is an upper bound intended for spinwaits etc. 
+ */ +#define PMU_MAX_TRANSITION_DLY 15000 + +#define DEFAULT_43022_MIN_RES_MASK 0x0f8bfe77 + +/* Chip Common registers */ +/* PMU registers (rev >= 20) */ +#define INF_CC_PMU_CTL 0x0600 /* PMU control */ +#define INF_CC_PMU_CTL_ILP_DIV 0xFFFF0000 /* ILP div mask */ +#define INF_CC_PMU_CTL_ILP_DIV_SHIFT 16 +#define INF_CC_PMU_CTL_RES 0x00006000 /* reset control mask */ +#define INF_CC_PMU_CTL_RES_SHIFT 13 +#define INF_CC_PMU_CTL_RES_RELOAD 0x2 /* reload POR values */ +#define INF_CC_PMU_CTL_PLL_UPD 0x00000400 +#define INF_CC_PMU_CTL_NOILPONW 0x00000200 /* No ILP on wait */ +#define INF_CC_PMU_CTL_HTREQEN 0x00000100 /* HT req enable */ +#define INF_CC_PMU_CTL_ALPREQEN 0x00000080 /* ALP req enable */ +#define INF_CC_PMU_CTL_XTALFREQ 0x0000007C /* Crystal freq */ +#define INF_CC_PMU_CTL_XTALFREQ_SHIFT 2 +#define INF_CC_PMU_CTL_ILPDIVEN 0x00000002 /* ILP div enable */ +#define INF_CC_PMU_CTL_LPOSEL 0x00000001 /* LPO sel */ + +#define INF_CC_CAP_EXT 0x00AC /* Capabilities */ +#define INF_CC_CAP_EXT_SECI_PRESENT 0x00000001 +#define INF_CC_CAP_EXT_GSIO_PRESENT 0x00000002 +#define INF_CC_CAP_EXT_GCI_PRESENT 0x00000004 +#define INF_CC_CAP_EXT_SECI_PUART_PRESENT 0x00000008 /* UART present */ +#define INF_CC_CAP_EXT_SRENG_REG_PRESENT 0x00000010 +#define INF_CC_CAP_EXT_ASCU_PRESENT 0x00000020 +#define INF_CC_CAP_EXT_AOB_PRESENT 0x00000040 + +#define INF_CC_SROM_CONTROL 0x0190 +#define INF_CC_SROM_CONTROL_START 0x80000000 +#define INF_CC_SROM_CONTROL_BUSY 0x80000000 +#define INF_CC_SROM_CONTROL_OPCODE 0x60000000 +#define INF_CC_SROM_CONTROL_OP_READ 0x00000000 +#define INF_CC_SROM_CONTROL_OP_WRITE 0x20000000 +#define INF_CC_SROM_CONTROL_OP_WRDIS 0x40000000 +#define INF_CC_SROM_CONTROL_OP_WREN 0x60000000 +#define INF_CC_SROM_CONTROL_OTPSEL 0x00000010 +#define INF_CC_SROM_CONTROL_OTP_PRESENT 0x00000020 +#define INF_CC_SROM_CONTROL_LOCK 0x00000008 +#define INF_CC_SROM_CONTROL_SIZE_MASK 0x00000006 +#define INF_CC_SROM_CONTROL_SIZE_1K 0x00000000 +#define INF_CC_SROM_CONTROL_SIZE_4K 
0x00000002 +#define INF_CC_SROM_CONTROL_SIZE_16K 0x00000004 +#define INF_CC_SROM_CONTROL_SIZE_SHIFT 1 +#define INF_CC_SROM_CONTROL_PRESENT 0x00000001 + +/* Core-ID values. */ +#define INF_CORE_CHIPCOMMON 0x800 +#define INF_CORE_INTERNAL_MEM 0x80E +#define INF_CORE_80211 0x812 +#define INF_CORE_PMU 0x827 +#define INF_CORE_SDIO_DEV 0x829 +#define INF_CORE_ARM_CM3 0x82A +#define INF_CORE_PCIE2 0x83C /* PCI Express Gen2 */ +#define INF_CORE_ARM_CR4 0x83E +#define INF_CORE_GCI 0x840 +#define INF_CORE_SR 0x841 +#define INF_CORE_SYS_MEM 0x849 +#define INF_CORE_DEFAULT 0xFFF +#define INF_CORE_CHIPCOMMON 0x800 + +#define INF_MAX_NR_CORES 16 +#define INF_CORE_SIZE 0x1000 + +#endif /* INFF_CHIPCOMMON_H */ -- 2.25.1 Common definitions of the wifi chanspec specific structures and MACROs. Signed-off-by: Gokul Sivakumar --- .../net/wireless/infineon/inffmac/chanspec.c | 260 +++++++++++++ .../net/wireless/infineon/inffmac/chanspec.h | 357 ++++++++++++++++++ 2 files changed, 617 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/chanspec.c create mode 100644 drivers/net/wireless/infineon/inffmac/chanspec.h diff --git a/drivers/net/wireless/infineon/inffmac/chanspec.c b/drivers/net/wireless/infineon/inffmac/chanspec.c new file mode 100644 index 000000000000..7bf5a09c2530 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/chanspec.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2013 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#include + +#include "utils.h" +#include "chanspec.h" + +static u16 d11n_sb(enum inff_chan_sb sb) +{ + switch (sb) { + case INFF_CHAN_SB_NONE: + return INFF_CHSPEC_D11N_SB_N; + case INFF_CHAN_SB_L: + return INFF_CHSPEC_D11N_SB_L; + case INFF_CHAN_SB_U: + return INFF_CHSPEC_D11N_SB_U; + default: + WARN_ON(1); + } + return 0; +} + +static u16 d11n_bw(enum inff_chan_bw bw) +{ + switch (bw) { + case INFF_CHAN_BW_20: + return INFF_CHSPEC_D11N_BW_20; + case INFF_CHAN_BW_40: + return INFF_CHSPEC_D11N_BW_40; + default: + WARN_ON(1); + } + return 0; +} + +static void inff_d11n_encchspec(struct inff_chan *ch) +{ + if (ch->bw == INFF_CHAN_BW_20) + ch->sb = INFF_CHAN_SB_NONE; + + ch->chspec = 0; + inff_maskset16(&ch->chspec, INFF_CHSPEC_CH_MASK, + INFF_CHSPEC_CH_SHIFT, ch->chnum); + inff_maskset16(&ch->chspec, INFF_CHSPEC_D11N_SB_MASK, + 0, d11n_sb(ch->sb)); + inff_maskset16(&ch->chspec, INFF_CHSPEC_D11N_BW_MASK, + 0, d11n_bw(ch->bw)); + + if (ch->chnum <= CH_MAX_2G_CHANNEL) + ch->chspec |= INFF_CHSPEC_D11N_BND_2G; + else + ch->chspec |= INFF_CHSPEC_D11N_BND_5G; +} + +static u16 d11ac_bw(enum inff_chan_bw bw) +{ + switch (bw) { + case INFF_CHAN_BW_20: + return INFF_CHSPEC_D11AC_BW_20; + case INFF_CHAN_BW_40: + return INFF_CHSPEC_D11AC_BW_40; + case INFF_CHAN_BW_80: + return INFF_CHSPEC_D11AC_BW_80; + case INFF_CHAN_BW_160: + return INFF_CHSPEC_D11AC_BW_160; + default: + WARN_ON(1); + } + return 0; +} + +static void inff_d11ac_encchspec(struct inff_chan *ch) +{ + if (ch->bw == INFF_CHAN_BW_20 || ch->sb == INFF_CHAN_SB_NONE) + ch->sb = INFF_CHAN_SB_L; + + inff_maskset16(&ch->chspec, INFF_CHSPEC_CH_MASK, + INFF_CHSPEC_CH_SHIFT, ch->chnum); + inff_maskset16(&ch->chspec, INFF_CHSPEC_D11AC_SB_MASK, + INFF_CHSPEC_D11AC_SB_SHIFT, ch->sb); + inff_maskset16(&ch->chspec, INFF_CHSPEC_D11AC_BW_MASK, + 0, d11ac_bw(ch->bw)); + + ch->chspec &= ~INFF_CHSPEC_D11AC_BND_MASK; + switch (ch->band) { + case INFF_CHAN_BAND_6G: + ch->chspec |= INFF_CHSPEC_D11AC_BND_6G; + break; + case 
INFF_CHAN_BAND_5G: + ch->chspec |= INFF_CHSPEC_D11AC_BND_5G; + break; + case INFF_CHAN_BAND_2G: + ch->chspec |= INFF_CHSPEC_D11AC_BND_2G; + break; + default: + WARN_ONCE(1, "Invalid band 0x%04x\n", ch->band); + break; + } +} + +static void inff_d11n_decchspec(struct inff_chan *ch) +{ + u16 val; + + ch->chnum = (u8)(ch->chspec & INFF_CHSPEC_CH_MASK); + ch->control_ch_num = ch->chnum; + + switch (ch->chspec & INFF_CHSPEC_D11N_BW_MASK) { + case INFF_CHSPEC_D11N_BW_20: + ch->bw = INFF_CHAN_BW_20; + ch->sb = INFF_CHAN_SB_NONE; + break; + case INFF_CHSPEC_D11N_BW_40: + ch->bw = INFF_CHAN_BW_40; + val = ch->chspec & INFF_CHSPEC_D11N_SB_MASK; + if (val == INFF_CHSPEC_D11N_SB_L) { + ch->sb = INFF_CHAN_SB_L; + ch->control_ch_num -= CH_10MHZ_APART; + } else { + ch->sb = INFF_CHAN_SB_U; + ch->control_ch_num += CH_10MHZ_APART; + } + break; + default: + WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec); + break; + } + + switch (ch->chspec & INFF_CHSPEC_D11N_BND_MASK) { + case INFF_CHSPEC_D11N_BND_5G: + ch->band = INFF_CHAN_BAND_5G; + break; + case INFF_CHSPEC_D11N_BND_2G: + ch->band = INFF_CHAN_BAND_2G; + break; + default: + WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec); + break; + } +} + +static void inff_d11ac_decchspec(struct inff_chan *ch) +{ + u16 val; + + ch->chnum = (u8)(ch->chspec & INFF_CHSPEC_CH_MASK); + ch->control_ch_num = ch->chnum; + + switch (ch->chspec & INFF_CHSPEC_D11AC_BW_MASK) { + case INFF_CHSPEC_D11AC_BW_20: + ch->bw = INFF_CHAN_BW_20; + ch->sb = INFF_CHAN_SB_NONE; + break; + case INFF_CHSPEC_D11AC_BW_40: + ch->bw = INFF_CHAN_BW_40; + val = ch->chspec & INFF_CHSPEC_D11AC_SB_MASK; + if (val == INFF_CHSPEC_D11AC_SB_L) { + ch->sb = INFF_CHAN_SB_L; + ch->control_ch_num -= CH_10MHZ_APART; + } else if (val == INFF_CHSPEC_D11AC_SB_U) { + ch->sb = INFF_CHAN_SB_U; + ch->control_ch_num += CH_10MHZ_APART; + } else { + WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec); + } + break; + case INFF_CHSPEC_D11AC_BW_80: + ch->bw = INFF_CHAN_BW_80; + ch->sb = 
inff_maskget16(ch->chspec, INFF_CHSPEC_D11AC_SB_MASK, + INFF_CHSPEC_D11AC_SB_SHIFT); + switch (ch->sb) { + case INFF_CHAN_SB_LL: + ch->control_ch_num -= CH_30MHZ_APART; + break; + case INFF_CHAN_SB_LU: + ch->control_ch_num -= CH_10MHZ_APART; + break; + case INFF_CHAN_SB_UL: + ch->control_ch_num += CH_10MHZ_APART; + break; + case INFF_CHAN_SB_UU: + ch->control_ch_num += CH_30MHZ_APART; + break; + default: + WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec); + break; + } + break; + case INFF_CHSPEC_D11AC_BW_160: + ch->bw = INFF_CHAN_BW_160; + ch->sb = inff_maskget16(ch->chspec, INFF_CHSPEC_D11AC_SB_MASK, + INFF_CHSPEC_D11AC_SB_SHIFT); + switch (ch->sb) { + case INFF_CHAN_SB_LLL: + ch->control_ch_num -= CH_70MHZ_APART; + break; + case INFF_CHAN_SB_LLU: + ch->control_ch_num -= CH_50MHZ_APART; + break; + case INFF_CHAN_SB_LUL: + ch->control_ch_num -= CH_30MHZ_APART; + break; + case INFF_CHAN_SB_LUU: + ch->control_ch_num -= CH_10MHZ_APART; + break; + case INFF_CHAN_SB_ULL: + ch->control_ch_num += CH_10MHZ_APART; + break; + case INFF_CHAN_SB_ULU: + ch->control_ch_num += CH_30MHZ_APART; + break; + case INFF_CHAN_SB_UUL: + ch->control_ch_num += CH_50MHZ_APART; + break; + case INFF_CHAN_SB_UUU: + ch->control_ch_num += CH_70MHZ_APART; + break; + default: + WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec); + break; + } + break; + case INFF_CHSPEC_D11AC_BW_8080: + default: + WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec); + break; + } + + switch (ch->chspec & INFF_CHSPEC_D11AC_BND_MASK) { + case INFF_CHSPEC_D11AC_BND_6G: + ch->band = INFF_CHAN_BAND_6G; + break; + case INFF_CHSPEC_D11AC_BND_5G: + ch->band = INFF_CHAN_BAND_5G; + break; + case INFF_CHSPEC_D11AC_BND_2G: + ch->band = INFF_CHAN_BAND_2G; + break; + default: + WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec); + break; + } +} + +void inff_d11_attach(struct inff_d11inf *d11inf) +{ + if (d11inf->io_type == INFF_D11N_IOTYPE) { + d11inf->encchspec = inff_d11n_encchspec; + d11inf->decchspec = 
inff_d11n_decchspec; + } else { + d11inf->encchspec = inff_d11ac_encchspec; + d11inf->decchspec = inff_d11ac_decchspec; + } +} diff --git a/drivers/net/wireless/infineon/inffmac/chanspec.h b/drivers/net/wireless/infineon/inffmac/chanspec.h new file mode 100644 index 000000000000..3d62f21cc9ed --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/chanspec.h @@ -0,0 +1,357 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. + */ + +#ifndef INFF_WIFI_H +#define INFF_WIFI_H + +#include /* for ETH_ALEN */ +#include /* for WLAN_PMKID_LEN */ + +/* + * A chanspec (u16) holds the channel number, band, bandwidth and control + * sideband + */ + +/* channel defines */ +#define CH_UPPER_SB 0x01 +#define CH_LOWER_SB 0x02 +#define CH_EWA_VALID 0x04 +#define CH_70MHZ_APART 14 +#define CH_50MHZ_APART 10 +#define CH_30MHZ_APART 6 +#define CH_20MHZ_APART 4 +#define CH_10MHZ_APART 2 +#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */ +#define CH_MIN_2G_CHANNEL 1 +#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */ +#define CH_MIN_5G_CHANNEL 34 + +/* bandstate array indices */ +#define BAND_2G_INDEX 0 /* wlc->bandstate[x] index */ +#define BAND_5G_INDEX 1 /* wlc->bandstate[x] index */ + +/* + * max # supported channels. The max channel no is 216, this is that + 1 + * rounded up to a multiple of NBBY (8). 
DO NOT MAKE it > 255: channels are + * u8's all over + */ +#define MAXCHANNEL 224 + +#define WL_CHANSPEC_CHAN_MASK 0x00ff +#define WL_CHANSPEC_CHAN_SHIFT 0 + +#define WL_CHANSPEC_CTL_SB_MASK 0x0300 +#define WL_CHANSPEC_CTL_SB_SHIFT 8 +#define WL_CHANSPEC_CTL_SB_LOWER 0x0100 +#define WL_CHANSPEC_CTL_SB_UPPER 0x0200 +#define WL_CHANSPEC_CTL_SB_NONE 0x0300 + +#define WL_CHANSPEC_BW_MASK 0x0C00 +#define WL_CHANSPEC_BW_SHIFT 10 +#define WL_CHANSPEC_BW_10 0x0400 +#define WL_CHANSPEC_BW_20 0x0800 +#define WL_CHANSPEC_BW_40 0x0C00 +#define WL_CHANSPEC_BW_80 0x2000 + +#define WL_CHANSPEC_BAND_MASK 0xf000 +#define WL_CHANSPEC_BAND_SHIFT 12 +#define WL_CHANSPEC_BAND_5G 0x1000 +#define WL_CHANSPEC_BAND_2G 0x2000 +#define INVCHANSPEC 255 + +#define WL_CHAN_VALID_HW BIT(0) /* valid with current HW */ +#define WL_CHAN_VALID_SW BIT(1) /* valid with country sett. */ +#define WL_CHAN_BAND_5G BIT(2) /* 5GHz-band channel */ +#define WL_CHAN_RADAR BIT(3) /* radar sensitive channel */ +#define WL_CHAN_INACTIVE BIT(4) /* inactive due to radar */ +#define WL_CHAN_PASSIVE BIT(5) /* channel in passive mode */ +#define WL_CHAN_RESTRICTED BIT(6) /* restricted use channel */ + +/* values for band specific 40MHz capabilities */ +#define WLC_N_BW_20ALL 0 +#define WLC_N_BW_40ALL 1 +#define WLC_N_BW_20IN2G_40IN5G 2 + +#define WLC_BW_20MHZ_BIT BIT(0) +#define WLC_BW_40MHZ_BIT BIT(1) +#define WLC_BW_80MHZ_BIT BIT(2) +#define WLC_BW_160MHZ_BIT BIT(3) + +/* Bandwidth capabilities */ +#define WLC_BW_CAP_20MHZ (WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_40MHZ (WLC_BW_40MHZ_BIT | WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_80MHZ (WLC_BW_80MHZ_BIT | WLC_BW_40MHZ_BIT | \ + WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_160MHZ (WLC_BW_160MHZ_BIT | WLC_BW_80MHZ_BIT | \ + WLC_BW_40MHZ_BIT | WLC_BW_20MHZ_BIT) +#define WLC_BW_CAP_UNRESTRICTED 0xFF + +/* band types */ +#define WLC_BAND_AUTO 0 /* auto-select */ +#define WLC_BAND_5G 1 /* 5 Ghz */ +#define WLC_BAND_2G 2 /* 2.4 Ghz */ +#define WLC_BAND_6G 3 /* 6 Ghz */ +#define 
WLC_BAND_ALL 4 /* all bands */ + +#define CHSPEC_CHANNEL(chspec) ((u8)((chspec) & WL_CHANSPEC_CHAN_MASK)) +#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK) + +#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK) +#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK) + +#define CHSPEC_IS10(chspec) \ + (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10) + +#define CHSPEC_IS20(chspec) \ + (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) + +#define CHSPEC_IS40(chspec) \ + (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40) + +#define CHSPEC_IS80(chspec) \ + (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80) + +#define CHSPEC_IS5G(chspec) \ + (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G) + +#define CHSPEC_IS2G(chspec) \ + (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G) + +#define CHSPEC_SB_NONE(chspec) \ + (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_NONE) + +#define CHSPEC_SB_UPPER(chspec) \ + (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) + +#define CHSPEC_SB_LOWER(chspec) \ + (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) + +#define CHSPEC_CTL_CHAN(chspec) {\ + typeof(i) _chspec = (chspec); \ + ((CHSPEC_SB_LOWER(_chspec)) ? \ + (lower_20_sb((_chspec & WL_CHANSPEC_CHAN_MASK))) : \ + (upper_20_sb((_chspec & WL_CHANSPEC_CHAN_MASK)))); \ + } + +/* band types */ +#define INF_BAND_AUTO 0 /* auto-select */ +#define INF_BAND_5G 1 /* 5 Ghz */ +#define INF_BAND_2G 2 /* 2.4 Ghz */ +#define INF_BAND_ALL 3 /* all bands */ + +#define CHSPEC2BAND(chspec) (CHSPEC_IS5G(chspec) ? INF_BAND_5G : INF_BAND_2G) + +#define CHANSPEC_STR_LEN 8 + +static inline int lower_20_sb(int channel) +{ + return channel > CH_10MHZ_APART ? (channel - CH_10MHZ_APART) : 0; +} + +static inline int upper_20_sb(int channel) +{ + return (channel < (MAXCHANNEL - CH_10MHZ_APART)) ? 
+ channel + CH_10MHZ_APART : 0; +} + +static inline int chspec_bandunit(u16 chspec) +{ + return CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX; +} + +static inline u16 ch20mhz_chspec(int channel) +{ + u16 rc = channel <= CH_MAX_2G_CHANNEL ? + WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G; + + return (u16)((u16)channel | WL_CHANSPEC_BW_20 | + WL_CHANSPEC_CTL_SB_NONE | rc); +} + +static inline int next_20mhz_chan(int channel) +{ + return channel < (MAXCHANNEL - CH_20MHZ_APART) ? + channel + CH_20MHZ_APART : 0; +} + +/* defined rate in 500kbps */ +#define INF_MAXRATE 108 /* in 500kbps units */ +#define INF_RATE_1M 2 /* in 500kbps units */ +#define INF_RATE_2M 4 /* in 500kbps units */ +#define INF_RATE_5M5 11 /* in 500kbps units */ +#define INF_RATE_11M 22 /* in 500kbps units */ +#define INF_RATE_6M 12 /* in 500kbps units */ +#define INF_RATE_9M 18 /* in 500kbps units */ +#define INF_RATE_12M 24 /* in 500kbps units */ +#define INF_RATE_18M 36 /* in 500kbps units */ +#define INF_RATE_24M 48 /* in 500kbps units */ +#define INF_RATE_36M 72 /* in 500kbps units */ +#define INF_RATE_48M 96 /* in 500kbps units */ +#define INF_RATE_54M 108 /* in 500kbps units */ + +#define INF_2G_25MHZ_OFFSET 5 /* 2.4GHz band channel offset */ + +#define MCSSET_LEN 16 + +static inline bool ac_bitmap_tst(u8 bitmap, int prec) +{ + return (bitmap & (1 << (prec))) != 0; +} + +/* d11 io type */ +#define INFF_D11N_IOTYPE 1 +#define INFF_D11AC_IOTYPE 2 + +/* A chanspec (channel specification) holds the channel number, band, + * bandwidth and control sideband + */ + +/* chanspec binary format */ + +#define INFF_CHSPEC_INVALID 255 +/* bit 0~7 channel number + * for 80+80 channels: bit 0~3 low channel id, bit 4~7 high channel id + */ +#define INFF_CHSPEC_CH_MASK 0x00ff +#define INFF_CHSPEC_CH_SHIFT 0 +#define INFF_CHSPEC_CHL_MASK 0x000f +#define INFF_CHSPEC_CHL_SHIFT 0 +#define INFF_CHSPEC_CHH_MASK 0x00f0 +#define INFF_CHSPEC_CHH_SHIFT 4 + +/* bit 8~16 for dot 11n IO types + * bit 8~9 sideband + * bit 
10~11 bandwidth + * bit 12~13 spectral band + * bit 14~15 not used + */ +#define INFF_CHSPEC_D11N_SB_MASK 0x0300 +#define INFF_CHSPEC_D11N_SB_SHIFT 8 +#define INFF_CHSPEC_D11N_SB_L 0x0100 /* control lower */ +#define INFF_CHSPEC_D11N_SB_U 0x0200 /* control upper */ +#define INFF_CHSPEC_D11N_SB_N 0x0300 /* none */ +#define INFF_CHSPEC_D11N_BW_MASK 0x0c00 +#define INFF_CHSPEC_D11N_BW_SHIFT 10 +#define INFF_CHSPEC_D11N_BW_10 0x0400 +#define INFF_CHSPEC_D11N_BW_20 0x0800 +#define INFF_CHSPEC_D11N_BW_40 0x0c00 +#define INFF_CHSPEC_D11N_BND_MASK 0x3000 +#define INFF_CHSPEC_D11N_BND_SHIFT 12 +#define INFF_CHSPEC_D11N_BND_5G 0x1000 +#define INFF_CHSPEC_D11N_BND_2G 0x2000 + +/* bit 8~16 for dot 11ac IO types + * bit 8~10 sideband + * bit 11~13 bandwidth + * bit 14~15 spectral band + */ +#define INFF_CHSPEC_D11AC_SB_MASK 0x0700 +#define INFF_CHSPEC_D11AC_SB_SHIFT 8 +#define INFF_CHSPEC_D11AC_SB_LLL 0x0000 +#define INFF_CHSPEC_D11AC_SB_LLU 0x0100 +#define INFF_CHSPEC_D11AC_SB_LUL 0x0200 +#define INFF_CHSPEC_D11AC_SB_LUU 0x0300 +#define INFF_CHSPEC_D11AC_SB_ULL 0x0400 +#define INFF_CHSPEC_D11AC_SB_ULU 0x0500 +#define INFF_CHSPEC_D11AC_SB_UUL 0x0600 +#define INFF_CHSPEC_D11AC_SB_UUU 0x0700 +#define INFF_CHSPEC_D11AC_SB_LL INFF_CHSPEC_D11AC_SB_LLL +#define INFF_CHSPEC_D11AC_SB_LU INFF_CHSPEC_D11AC_SB_LLU +#define INFF_CHSPEC_D11AC_SB_UL INFF_CHSPEC_D11AC_SB_LUL +#define INFF_CHSPEC_D11AC_SB_UU INFF_CHSPEC_D11AC_SB_LUU +#define INFF_CHSPEC_D11AC_SB_L INFF_CHSPEC_D11AC_SB_LLL +#define INFF_CHSPEC_D11AC_SB_U INFF_CHSPEC_D11AC_SB_LLU +#define INFF_CHSPEC_D11AC_BW_MASK 0x3800 +#define INFF_CHSPEC_D11AC_BW_SHIFT 11 +#define INFF_CHSPEC_D11AC_BW_5 0x0000 +#define INFF_CHSPEC_D11AC_BW_10 0x0800 +#define INFF_CHSPEC_D11AC_BW_20 0x1000 +#define INFF_CHSPEC_D11AC_BW_40 0x1800 +#define INFF_CHSPEC_D11AC_BW_80 0x2000 +#define INFF_CHSPEC_D11AC_BW_160 0x2800 +#define INFF_CHSPEC_D11AC_BW_8080 0x3000 +#define INFF_CHSPEC_D11AC_BND_MASK 0xc000 +#define INFF_CHSPEC_D11AC_BND_SHIFT 14 +#define 
INFF_CHSPEC_D11AC_BND_2G 0x0000 +#define INFF_CHSPEC_D11AC_BND_3G 0x4000 +#define INFF_CHSPEC_D11AC_BND_6G 0x8000 +#define INFF_CHSPEC_D11AC_BND_5G 0xc000 +#define INFF_CHSPEC_IS5G(chspec) \ + (((chspec) & INFF_CHSPEC_D11AC_BND_MASK) == INFF_CHSPEC_D11AC_BND_5G) +#define INFF_CHSPEC_IS6G(chspec) \ + (((chspec) & INFF_CHSPEC_D11AC_BND_MASK) == INFF_CHSPEC_D11AC_BND_6G) +#define INFF_CHAN_BAND_2G 1 +#define INFF_CHAN_BAND_5G 2 +#define INFF_CHAN_BAND_6G 3 + +enum inff_chan_bw { + INFF_CHAN_BW_20, + INFF_CHAN_BW_40, + INFF_CHAN_BW_80, + INFF_CHAN_BW_80P80, + INFF_CHAN_BW_160, +}; + +enum inff_chan_sb { + INFF_CHAN_SB_NONE = -1, + INFF_CHAN_SB_LLL, + INFF_CHAN_SB_LLU, + INFF_CHAN_SB_LUL, + INFF_CHAN_SB_LUU, + INFF_CHAN_SB_ULL, + INFF_CHAN_SB_ULU, + INFF_CHAN_SB_UUL, + INFF_CHAN_SB_UUU, + INFF_CHAN_SB_L = INFF_CHAN_SB_LLL, + INFF_CHAN_SB_U = INFF_CHAN_SB_LLU, + INFF_CHAN_SB_LL = INFF_CHAN_SB_LLL, + INFF_CHAN_SB_LU = INFF_CHAN_SB_LLU, + INFF_CHAN_SB_UL = INFF_CHAN_SB_LUL, + INFF_CHAN_SB_UU = INFF_CHAN_SB_LUU, +}; + +/** + * struct inff_chan - stores channel formats + * + * This structure can be used with functions translating chanspec into generic + * channel info and the other way. 
 + * + * @chspec: firmware specific format + * @chnum: center channel number + * @control_ch_num: control channel number + * @band: frequency band + * @bw: channel width + * @sb: control sideband (location of control channel against the center one) + */ +struct inff_chan { + u16 chspec; + u8 chnum; + u8 control_ch_num; + u8 band; + enum inff_chan_bw bw; + enum inff_chan_sb sb; +}; + +/** + * struct inff_d11inf - provides functions translating channel format + * + * @io_type: determines version of channel format used by firmware + * @encchspec: encodes channel info into a chanspec, requires center channel + * number, ignores control one + * @decchspec: decodes chanspec into generic info + */ +struct inff_d11inf { + u8 io_type; + + void (*encchspec)(struct inff_chan *ch); + void (*decchspec)(struct inff_chan *ch); +}; + +void inff_d11_attach(struct inff_d11inf *d11inf); + +#endif /* INFF_WIFI_H */ -- 2.25.1 Implementation of common utility functions and macro definitions which can be used by other driver source files. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/utils.c | 323 ++++++++++++++++++ drivers/net/wireless/infineon/inffmac/utils.h | 254 ++++++++++++++ 2 files changed, 577 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/utils.c create mode 100644 drivers/net/wireless/infineon/inffmac/utils.h diff --git a/drivers/net/wireless/infineon/inffmac/utils.c b/drivers/net/wireless/infineon/inffmac/utils.c new file mode 100644 index 000000000000..c27864f207bd --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/utils.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: ISC + +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved.
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include + +#include "utils.h" + +struct sk_buff *inff_pkt_buf_get_skb(uint len) +{ + struct sk_buff *skb; + + skb = dev_alloc_skb(len); + if (skb) { + skb_put(skb, len); + skb->priority = 0; + } + + return skb; +} + +/* Free the driver packet. Free the tag if present */ +void inff_pkt_buf_free_skb(struct sk_buff *skb) +{ + if (!skb) + return; + + WARN_ON(skb->next); + dev_kfree_skb_any(skb); +} + +/* + * osl multiple-precedence packet queue + * hi_prec is always >= the number of the highest non-empty precedence + */ +struct sk_buff *inff_pktq_penq(struct pktq *pq, int prec, + struct sk_buff *p) +{ + struct sk_buff_head *q; + + if (pktq_full(pq) || pktq_pfull(pq, prec)) + return NULL; + + q = &pq->q[prec].skblist; + skb_queue_tail(q, p); + pq->len++; + + if (pq->hi_prec < prec) + pq->hi_prec = (u8)prec; + + return p; +} + +struct sk_buff *inff_pktq_penq_head(struct pktq *pq, int prec, + struct sk_buff *p) +{ + struct sk_buff_head *q; + + if (pktq_full(pq) || pktq_pfull(pq, prec)) + return NULL; + + q = &pq->q[prec].skblist; + skb_queue_head(q, p); + pq->len++; + + if (pq->hi_prec < prec) + pq->hi_prec = (u8)prec; + + return p; +} + +struct sk_buff *inff_pktq_pdeq(struct pktq *pq, int prec) +{ + struct sk_buff_head *q; + struct sk_buff *p; + + q = &pq->q[prec].skblist; + p = skb_dequeue(q); + if (!p) + return NULL; + + pq->len--; + return p; +} + +/* + * precedence based dequeue with match function. Passing a NULL pointer + * for the match function parameter is considered to be a wildcard so + * any packet on the queue is returned. In that case it is no different + * from inff_pktq_pdeq() above. 
+ */ +struct sk_buff *inff_pktq_pdeq_match(struct pktq *pq, int prec, + bool (*match_fn)(struct sk_buff *skb, + void *arg), void *arg) +{ + struct sk_buff_head *q; + struct sk_buff *p, *next; + + q = &pq->q[prec].skblist; + skb_queue_walk_safe(q, p, next) { + if (!match_fn || match_fn(p, arg)) { + skb_unlink(p, q); + pq->len--; + return p; + } + } + return NULL; +} + +struct sk_buff *inff_pktq_pdeq_tail(struct pktq *pq, int prec) +{ + struct sk_buff_head *q; + struct sk_buff *p; + + q = &pq->q[prec].skblist; + p = skb_dequeue_tail(q); + if (!p) + return NULL; + + pq->len--; + return p; +} + +void +inff_pktq_pflush(struct pktq *pq, int prec, bool dir, + bool (*fn)(struct sk_buff *, void *), void *arg) +{ + struct sk_buff_head *q; + struct sk_buff *p, *next; + + q = &pq->q[prec].skblist; + skb_queue_walk_safe(q, p, next) { + if (!fn || (*fn) (p, arg)) { + skb_unlink(p, q); + inff_pkt_buf_free_skb(p); + pq->len--; + } + } +} + +void inff_pktq_flush(struct pktq *pq, bool dir, + bool (*fn)(struct sk_buff *, void *), void *arg) +{ + int prec; + + for (prec = 0; prec < pq->num_prec; prec++) + inff_pktq_pflush(pq, prec, dir, fn, arg); +} + +void inff_pktq_init(struct pktq *pq, int num_prec, int max_len) +{ + int prec; + + /* pq is variable size; only zero out what's requested */ + memset(pq, 0, + offsetof(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec)); + + pq->num_prec = (u16)num_prec; + + pq->max = (u16)max_len; + + for (prec = 0; prec < num_prec; prec++) { + pq->q[prec].max = pq->max; + skb_queue_head_init(&pq->q[prec].skblist); + } +} + +struct sk_buff *inff_pktq_peek_tail(struct pktq *pq, int *prec_out) +{ + int prec; + + if (pktq_empty(pq)) + return NULL; + + for (prec = 0; prec < pq->hi_prec; prec++) + if (!skb_queue_empty(&pq->q[prec].skblist)) + break; + + if (prec_out) + *prec_out = prec; + + return skb_peek_tail(&pq->q[prec].skblist); +} + +/* Return sum of lengths of a specific set of precedences */ +int inff_pktq_mlen(struct pktq *pq, uint prec_bmp) 
+{ + int prec, len; + + len = 0; + + for (prec = 0; prec <= pq->hi_prec; prec++) + if (prec_bmp & (1 << prec)) + len += pq->q[prec].skblist.qlen; + + return len; +} + +/* Priority dequeue from a specific set of precedences */ +struct sk_buff *inff_pktq_mdeq(struct pktq *pq, uint prec_bmp, + int *prec_out) +{ + struct sk_buff_head *q; + struct sk_buff *p; + int prec; + + if (pktq_empty(pq)) + return NULL; + + while ((prec = pq->hi_prec) > 0 && + skb_queue_empty(&pq->q[prec].skblist)) + pq->hi_prec--; + + while ((prec_bmp & (1 << prec)) == 0 || + skb_queue_empty(&pq->q[prec].skblist)) + if (prec-- == 0) + return NULL; + + q = &pq->q[prec].skblist; + p = skb_dequeue(q); + if (!p) + return NULL; + + pq->len--; + + if (prec_out) + *prec_out = prec; + + return p; +} + +/* Produce a human-readable string for boardrev */ +char *inff_boardrev_str(u32 brev, char *buf) +{ + char c; + + if (brev < 0x100) { + snprintf(buf, INFF_BOARDREV_LEN, "%d.%d", + (brev & 0xf0) >> 4, brev & 0xf); + } else { + c = (brev & 0xf000) == 0x1000 ? 
'P' : 'A'; + snprintf(buf, INFF_BOARDREV_LEN, "%c%03x", c, brev & 0xfff); + } + return buf; +} + +char *inff_dotrev_str(u32 dotrev, char *buf) +{ + u8 dotval[4]; + + if (!dotrev) { + snprintf(buf, INFF_DOTREV_LEN, "unknown"); + return buf; + } + dotval[0] = (dotrev >> 24) & 0xFF; + dotval[1] = (dotrev >> 16) & 0xFF; + dotval[2] = (dotrev >> 8) & 0xFF; + dotval[3] = dotrev & 0xFF; + + if (dotval[3]) + snprintf(buf, INFF_DOTREV_LEN, "%d.%d.%d.%d", dotval[0], + dotval[1], dotval[2], dotval[3]); + else if (dotval[2]) + snprintf(buf, INFF_DOTREV_LEN, "%d.%d.%d", dotval[0], + dotval[1], dotval[2]); + else + snprintf(buf, INFF_DOTREV_LEN, "%d.%d", dotval[0], + dotval[1]); + + return buf; +} + +struct sk_buff *__inff_pkt_buf_get_skb(uint len, gfp_t gfp_mask) +{ + struct sk_buff *skb; + + skb = __netdev_alloc_skb(NULL, len, gfp_mask); + if (skb) { + skb_put(skb, len); + skb->priority = 0; + } + return skb; +} + +#if defined(DEBUG) +/* pretty hex print a pkt buffer chain */ +void inff_prpkt(const char *msg, struct sk_buff *p0) +{ + struct sk_buff *p; + + if (msg && (msg[0] != '\0')) + pr_debug("%s:\n", msg); + + for (p = p0; p; p = p->next) + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, p->data, p->len); +} + +void inff_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + pr_debug("%pV", &vaf); + + va_end(args); + + print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, data, size); +} + +#endif /* defined(DEBUG) */ diff --git a/drivers/net/wireless/infineon/inffmac/utils.h b/drivers/net/wireless/infineon/inffmac/utils.h new file mode 100644 index 000000000000..e8cf19d8e0c3 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/utils.h @@ -0,0 +1,254 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_UTILS_H +#define INFF_UTILS_H + +#include + +/* + * Spin at most 'us' microseconds while 'exp' is true. + * Caller should explicitly test 'exp' when this completes + * and take appropriate error action if 'exp' is still true. + */ +#define SPINWAIT(exp, us) { \ + uint countdown = (us) + 9; \ + while ((exp) && (countdown >= 10)) {\ + usleep_range(10, 20); \ + countdown -= 10; \ + } \ +} + +/* Spin at most 'ms' milliseconds with polling interval 'interval' milliseconds + * while 'exp' is true. Caller should explicitly test 'exp' when this completes + * and take appropriate error action if 'exp' is still true. + */ +#define SPINWAIT_MS(exp, ms, interval) { \ + typeof(interval) interval_ = (interval); \ + uint countdown = (ms) + (interval_ - 1U); \ + while ((exp) && (countdown >= interval_)) { \ + msleep(interval_); \ + countdown -= interval_; \ + } \ +} + +/* osl multi-precedence packet queue */ +#define PKTQ_LEN_DEFAULT 128 /* Max 128 packets */ +#define PKTQ_MAX_PREC 16 /* Maximum precedence levels */ + +/* the largest reasonable packet buffer driver uses for ethernet MTU in bytes */ +#define PKTBUFSZ 2048 + +#ifndef setbit +#ifndef NBBY /* the BSD family defines NBBY */ +#define NBBY 8 /* 8 bits per byte */ +#endif /* #ifndef NBBY */ +#define setbit(a, i) { \ + typeof(i) _i = (i); \ + (((u8 *)a)[(_i) / NBBY] |= 1 << ((_i) % NBBY)); \ + } +#define clrbit(a, i) { \ + typeof(i) _i = (i); \ + (((u8 *)a)[(_i) / NBBY] &= ~(1 << ((_i) % NBBY))); \ + } +#define isset(a, i) { \ + typeof(i) _i = (i); \ + (((const u8 *)a)[(_i) / NBBY] & (1 << ((_i) % NBBY))); \ + } +#define isclr(a, i) { \ + typeof(i) _i = (i); \ + ((((const u8 *)a)[(_i) / NBBY] & (1 << ((_i) % NBBY))) == 0); \ + } +#endif /* setbit */ + +#define NBITS(type) (sizeof(type) * 8) +#define NBITVAL(nbits) (1 << (nbits)) +#define MAXBITVAL(nbits) ((1 << (nbits)) - 1) +#define NBITMASK(nbits) MAXBITVAL(nbits) +#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8) + +/* crc defines */ +#define 
CRC16_INIT_VALUE 0xffff /* Initial CRC16 checksum value */ +#define CRC16_GOOD_VALUE 0xf0b8 /* Good final CRC16 checksum value */ + +/* 18-bytes of Ethernet address buffer length */ +#define ETHER_ADDR_STR_LEN 18 + +struct pktq_prec { + struct sk_buff_head skblist; + u16 max; /* maximum number of queued packets */ +}; + +/* multi-priority pkt queue */ +struct pktq { + u16 num_prec; /* number of precedences in use */ + u16 hi_prec; /* rapid dequeue hint (>= highest non-empty prec) */ + u16 max; /* total max packets */ + u16 len; /* total number of packets */ + /* + * q array must be last since # of elements can be either + * PKTQ_MAX_PREC or 1 + */ + struct pktq_prec q[PKTQ_MAX_PREC]; +}; + +/* operations on a specific precedence in packet queue */ + +static inline int pktq_plen(struct pktq *pq, int prec) +{ + return pq->q[prec].skblist.qlen; +} + +static inline int pktq_pavail(struct pktq *pq, int prec) +{ + return pq->q[prec].max - pq->q[prec].skblist.qlen; +} + +static inline bool pktq_pfull(struct pktq *pq, int prec) +{ + return pq->q[prec].skblist.qlen >= pq->q[prec].max; +} + +static inline bool pktq_pempty(struct pktq *pq, int prec) +{ + return skb_queue_empty(&pq->q[prec].skblist); +} + +static inline struct sk_buff *pktq_ppeek(struct pktq *pq, int prec) +{ + return skb_peek(&pq->q[prec].skblist); +} + +static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec) +{ + return skb_peek_tail(&pq->q[prec].skblist); +} + +struct sk_buff *inff_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p); +struct sk_buff *inff_pktq_penq_head(struct pktq *pq, int prec, + struct sk_buff *p); +struct sk_buff *inff_pktq_pdeq(struct pktq *pq, int prec); +struct sk_buff *inff_pktq_pdeq_tail(struct pktq *pq, int prec); +struct sk_buff *inff_pktq_pdeq_match(struct pktq *pq, int prec, + bool (*match_fn)(struct sk_buff *p, + void *arg), + void *arg); + +/* packet primitives */ +struct sk_buff *inff_pkt_buf_get_skb(uint len); +void inff_pkt_buf_free_skb(struct sk_buff 
*skb); +struct sk_buff *__inff_pkt_buf_get_skb(uint len, gfp_t gfp_mask); + +/* Empty the queue at particular precedence level */ +/* callback function fn(pkt, arg) returns true if pkt belongs to if */ +void inff_pktq_pflush(struct pktq *pq, int prec, bool dir, + bool (*fn)(struct sk_buff *, void *), void *arg); + +/* operations on a set of precedences in packet queue */ + +int inff_pktq_mlen(struct pktq *pq, uint prec_bmp); +struct sk_buff *inff_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out); + +/* operations on packet queue as a whole */ + +static inline int pktq_len(struct pktq *pq) +{ + return (int)pq->len; +} + +static inline int pktq_max(struct pktq *pq) +{ + return (int)pq->max; +} + +static inline int pktq_avail(struct pktq *pq) +{ + return (int)(pq->max - pq->len); +} + +static inline bool pktq_full(struct pktq *pq) +{ + return pq->len >= pq->max; +} + +static inline bool pktq_empty(struct pktq *pq) +{ + return pq->len == 0; +} + +void inff_pktq_init(struct pktq *pq, int num_prec, int max_len); +/* prec_out may be NULL if caller is not interested in return value */ +struct sk_buff *inff_pktq_peek_tail(struct pktq *pq, int *prec_out); +void inff_pktq_flush(struct pktq *pq, bool dir, + bool (*fn)(struct sk_buff *, void *), void *arg); + +/* externs */ +/* ip address */ +struct ipv4_addr; + +/* + * bitfield macros using masking and shift + * + * remark: the mask parameter should be a shifted mask. 
+ */ +static inline void inff_maskset32(u32 *var, u32 mask, u8 shift, u32 value) +{ + value = (value << shift) & mask; + *var = (*var & ~mask) | value; +} + +static inline u32 inff_maskget32(u32 var, u32 mask, u8 shift) +{ + return (var & mask) >> shift; +} + +static inline void inff_maskset16(u16 *var, u16 mask, u8 shift, u16 value) +{ + value = (value << shift) & mask; + *var = (*var & ~mask) | value; +} + +static inline u16 inff_maskget16(u16 var, u16 mask, u8 shift) +{ + return (var & mask) >> shift; +} + +static inline int inff_work_sched_cpu(int cpu) +{ + if (cpu >= 0 && cpu < nr_cpu_ids) + return cpu; + + return WORK_CPU_UNBOUND; +} + +/* externs */ +/* format/print */ +#ifdef DEBUG +void inff_prpkt(const char *msg, struct sk_buff *p0); +#else +#define inff_prpkt(a, b) +#endif /* DEBUG */ + +#ifdef DEBUG +__printf(3, 4) +void inff_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...); +#else +__printf(3, 4) +static inline +void inff_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...) +{ +} +#endif + +#define INFF_BOARDREV_LEN 8 +#define INFF_DOTREV_LEN 16 + +char *inff_boardrev_str(u32 brev, char *buf); +char *inff_dotrev_str(u32 dotrev, char *buf); + +#endif /* INFF_UTILS_H */ -- 2.25.1 file to define some generic definitions Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/defs.h | 21 ++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/defs.h diff --git a/drivers/net/wireless/infineon/inffmac/defs.h b/drivers/net/wireless/infineon/inffmac/defs.h new file mode 100644 index 000000000000..a9550e578b01 --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/defs.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2010 Broadcom Corporation + * + * Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG. + * All rights reserved. 
+ */ + +#ifndef INFF_DEFS_H +#define INFF_DEFS_H + +#include + +/* cpp contortions to concatenate w/arg prescan */ +#ifndef PAD +#define _PADLINE(line) pad ## line +#define _XSTR(line) _PADLINE(line) +#define PAD _XSTR(__LINE__) +#endif + +#endif /* INFF_DEFS_H */ -- 2.25.1 Introduce a new Kconfig and Makefile the inffmac driver sub-directory. Signed-off-by: Gokul Sivakumar --- drivers/net/wireless/infineon/inffmac/Kconfig | 61 +++++++++++++++++ .../net/wireless/infineon/inffmac/Makefile | 65 +++++++++++++++++++ 2 files changed, 126 insertions(+) create mode 100644 drivers/net/wireless/infineon/inffmac/Kconfig create mode 100644 drivers/net/wireless/infineon/inffmac/Makefile diff --git a/drivers/net/wireless/infineon/inffmac/Kconfig b/drivers/net/wireless/infineon/inffmac/Kconfig new file mode 100644 index 000000000000..40b70ab2decb --- /dev/null +++ b/drivers/net/wireless/infineon/inffmac/Kconfig @@ -0,0 +1,61 @@ +# SPDX-License-Identifier: ISC + +config INFFMAC + tristate "Infineon FullMAC WLAN driver" + depends on CFG80211 + help + This module adds support for wireless adapters based on Infineon + FullMAC chipsets. It has to work with at least one of the bus + interface support. If you choose to build a module, it'll be called + inffmac.ko. + +config INFFMAC_PROTO_BCDC + bool + +config INFFMAC_PROTO_MSGBUF + bool + +config INFFMAC_SDIO + bool "SDIO bus interface support for FullMAC driver" + depends on (MMC = y || MMC = INFFMAC) + depends on INFFMAC + select INFFMAC_PROTO_BCDC + select FW_LOADER + default y + help + This option enables the SDIO bus interface support for Infineon + IEEE802.11 embedded FullMAC WLAN driver. Say Y if you want to + use the driver for a SDIO wireless card. 

config INFFMAC_PCIE
	bool "PCIE bus interface support for FullMAC driver"
	depends on INFFMAC
	depends on PCI
	select INFFMAC_PROTO_MSGBUF
	select FW_LOADER
	default y
	help
	  This option enables the PCIE bus interface support for Infineon
	  IEEE802.11 embedded FullMAC WLAN driver. Say Y if you want to
	  use the driver for a PCIE wireless card.

config INFFMAC_PCIE_BARWIN_SZ
	bool "Custom PCIE BAR window size support for FullMAC driver"
	depends on INFFMAC
	depends on INFFMAC_PCIE
	depends on PCI
	default n
	help
	  If you say Y here, the FMAC driver will use a custom PCIE BAR
	  window size. Say Y to allow developers to use a custom PCIE
	  BAR window size when the host PCIE IP can support less than a
	  4MB BAR window.

config INFFMAC_BT_SHARED_SDIO
	bool "FMAC shares SDIO bus to Bluetooth"
	depends on INFFMAC
	depends on INFFMAC_SDIO
	default n
	help
	  Selecting this enables sharing the SDIO bus interface between
	  the Infineon BT and WiFi host drivers.
diff --git a/drivers/net/wireless/infineon/inffmac/Makefile b/drivers/net/wireless/infineon/inffmac/Makefile
new file mode 100644
index 000000000000..15ca17bf6cc2
--- /dev/null
+++ b/drivers/net/wireless/infineon/inffmac/Makefile
@@ -0,0 +1,65 @@
# SPDX-License-Identifier: ISC
#
# Copyright (c) 2025, Infineon Technologies AG, or an affiliate of Infineon Technologies AG.
# All rights reserved.
#
# Makefile fragment for Infineon 802.11 Networking Device Driver
#

ccflags-y += -I $(src)

obj-$(CONFIG_INFFMAC) += inffmac.o
inffmac-objs += \
		ie.o \
		scan.o \
		interface.o \
		security.o \
		cfg80211.o \
		chip.o \
		fwil.o \
		fweh.o \
		p2p.o \
		proto.o \
		common.o \
		core.o \
		firmware.o \
		feature.o \
		btcoex.o \
		vendor.o \
		pno.o \
		xtlv.o \
		vendor_inf.o \
		he.o \
		eht.o \
		twt.o \
		offload.o \
		pmsr.o \
		ftm.o \
		wlan_sense.o \
		chip_5557x.o \
		chip_5551x.o \
		chip_43022.o \
		chip_5591x.o \
		chanspec.o \
		utils.o
inffmac-$(CONFIG_INFFMAC_PROTO_BCDC) += \
		bcdc.o \
		fwsignal.o
inffmac-$(CONFIG_INFFMAC_PROTO_MSGBUF) += \
		commonring.o \
		flowring.o \
		msgbuf.o
inffmac-$(CONFIG_INFFMAC_SDIO) += \
		sdio.o \
		infsdh.o \
		dfu.o
inffmac-$(CONFIG_INFFMAC_PCIE) += \
		pcie.o \
		xdp.o
# use $(...) like the other conditionals; was inconsistently ${...}
inffmac-$(CONFIG_INFFMAC_BT_SHARED_SDIO) += \
		bt_shared_sdio.o
inffmac-$(CONFIG_INF_DEBUG) += \
		debug.o \
		logger.o
inffmac-$(CONFIG_INF_TRACING) += \
		tracepoint.o
-- 
2.25.1